From 64371504bd0bfeea4bba2b1fb3aa064034baadb1 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Feb 2019 17:42:24 +0000 Subject: initial revision --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 298 ++++++++++++++++++++++++ 1 file changed, 298 insertions(+) create mode 100644 Wrappers/Python/ccpi/optimisation/Algorithms.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py new file mode 100644 index 0000000..325ed77 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy +import time +from ccpi.optimisation.funcs import ZeroFun + +class Algorithm(object): + '''Base class for iterative algorithms + + provides the minimal infrastructure. + Algorithms are iterables so can be easily run in a for loop. They will + stop as soon as the stop cryterion is met. + + The user is required to implement the set_up, __init__, update and + should_stop methods + ''' + iteration = 0 + stop_cryterion = 'max_iter' + __max_iteration = 0 + __loss = [] + memopt = False + timing = [] + def __init__(self, *args, **kwargs): + pass + def set_up(self, *args, **kwargs): + raise NotImplementedError() + def update(self): + raise NotImplementedError() + + def should_stop(self): + '''stopping cryterion''' + raise NotImplementedError() + + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + if self.should_stop(): + raise StopIteration() + else: + time0 = time.time() + self.update() + self.timing.append( time.time() - time0 ) + self.iteration += 1 + def get_output(self): + '''Returns the solution found''' + return self.x + def get_current_loss(self): + '''Returns the current value of the loss function''' + return self.__loss[-1] + @property + def loss(self): + return self.__loss + @property + def max_iteration(self): + return self.__max_iteration + @max_iteration.setter + def max_iteration(self, value): + assert isinstance(value, int) + self.__max_iteration = value + +class GradientDescent(Algorithm): + '''Implementation of a simple Gradient Descent algorithm + ''' + x = None + rate = 0 + objective_function = None + regulariser = None + def __init__(self, **kwargs): + '''initialisation can be done at creation time if all + proper variables are passed or later with set_up''' + args = ['x_init', 'objective_function', 'rate'] + for k,v in kwargs.items(): + if k in args: + args.pop(args.index(k)) + if len(args) == 0: + return self.set_up(x_init=kwargs['x_init'], + objective_function=kwargs['objective_function'], + rate=kwargs['rate']) + + def should_stop(self): + '''stopping cryterion, currently only based on number of iterations''' + return 
self.iteration >= self.max_iteration + + def set_up(self, x_init, objective_function, rate): + '''initialisation of the algorithm''' + self.x = x_init.copy() + if self.memopt: + self.x_update = x_init.copy() + self.objective_function = objective_function + self.rate = rate + self.loss.append(objective_function(x_init)) + + def update(self): + '''Single iteration''' + if self.memopt: + self.objective_function.gradient(self.x, out=self.x_update) + self.x_update *= -self.rate + self.x += self.x_update + else: + self.x += -self.rate * self.objective_function.grad(self.x) + + self.loss.append(self.objective_function(self.x)) + + + +class FISTA(Algorithm): + '''Fast Iterative Shrinkage-Thresholding Algorithm + + Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding + algorithm for linear inverse problems. + SIAM journal on imaging sciences,2(1), pp.183-202. + + Parameters: + x_init: initial guess + f: data fidelity + g: regularizer + h: + opt: additional algorithm + ''' + f = None + g = None + invL = None + t_old = 1 + def __init__(self, **kwargs): + '''initialisation can be done at creation time if all + proper variables are passed or later with set_up''' + args = ['x_init', 'f', 'g', 'opt'] + for k,v in kwargs.items(): + if k in args: + args.pop(args.index(k)) + if len(args) == 0: + return self.set_up(x_init=kwargs['x_init'], + f=kwargs['f'], + g=kwargs['g'], + opt=kwargs['opt']) + + def set_up(self, x_init, f=None, g=None, opt=None): + + # default inputs + if f is None: + self.f = ZeroFun() + else: + self.f = f + if g is None: + g = ZeroFun() + else: + self.g = g + + # algorithmic parameters + if opt is None: + opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} + + self.max_iteration = opt['iter'] if 'iter' in opt.keys() else 1000 + self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + self.memopt = memopt + + # initialization + if memopt: + self.y = x_init.clone() + self.x_old = x_init.clone() + self.x = x_init.clone() + self.u = x_init.clone() + else: + self.x_old = x_init.copy() + self.y = x_init.copy() + + #timing = numpy.zeros(max_iter) + #criter = numpy.zeros(max_iter) + + + self.invL = 1/f.L + + self.t_old = 1 + + def should_stop(self): + '''stopping cryterion, currently only based on number of iterations''' + return self.iteration >= self.max_iteration + + def update(self): + # algorithm loop + #for it in range(0, max_iter): + + if self.memopt: + # u = y - invL*f.grad(y) + # store the result in x_old + self.f.gradient(self.y, out=self.u) + self.u.__imul__( -self.invL ) + self.u.__iadd__( self.y ) + # x = g.prox(u,invL) + self.g.proximal(self.u, self.invL, out=self.x) + + self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) + + # y = x + (t_old-1)/t*(x-x_old) + self.x.subtract(self.x_old, out=self.y) + self.y.__imul__ ((self.t_old-1)/self.t) + self.y.__iadd__( self.x ) + + self.x_old.fill(self.x) + self.t_old = self.t + + + else: + u = self.y - self.invL*self.f.grad(self.y) + + self.x = self.g.prox(u,self.invL) + + self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) + + self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) + + self.x_old = self.x.copy() + self.t_old = self.t + + + self.loss.append( self.f(self.x) + self.g(self.x) ) + + +class FBPD(Algorithm) + '''FBPD Algorithm + + Parameters: + x_init: initial guess + f: constraint + g: data fidelity + h: regularizer + opt: additional algorithm + ''' + constraint = None + data_fidelity = None + regulariser = None + def __init__(self, **kwargs): + 
pass + def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ + regulariser=None, opt=None): + + # default inputs + if constraint is None: + self.constraint = ZeroFun() + else: + self.constraint = constraint + if data_fidelity is None: + data_fidelity = ZeroFun() + else: + self.data_fidelity = data_fidelity + if regulariser is None: + self.regulariser = ZeroFun() + else: + self.regulariser = regulariser + + # algorithmic parameters + + + # step-sizes + self.tau = 2 / (self.data_fidelity.L + 2) + self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L + + self.inv_sigma = 1/self.sigma + + # initialization + self.x = x_init + self.y = operator.direct(self.x) + + + def update(self): + + # primal forward-backward step + x_old = self.x + self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) + self.x = constraint.prox(self.x, self.tau); + + # dual forward-backward step + self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); + self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); + + # time and criterion + self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) + -- cgit v1.2.3 From 00626d27f25aa19986a711703187a88bad2d2c43 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 13 Feb 2019 15:46:06 +0000 Subject: Removed class members of Algorithm class added update_objective --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 60 ++++++++++++++----------- 1 file changed, 33 insertions(+), 27 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py index 325ed77..de7f0f8 100644 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -22,22 +22,21 @@ from ccpi.optimisation.funcs import ZeroFun class Algorithm(object): '''Base class for iterative algorithms - - provides the minimal infrastructure. - Algorithms are iterables so can be easily run in a for loop. They will - stop as soon as the stop cryterion is met. - - The user is required to implement the set_up, __init__, update and - should_stop methods - ''' - iteration = 0 - stop_cryterion = 'max_iter' - __max_iteration = 0 - __loss = [] - memopt = False - timing = [] - def __init__(self, *args, **kwargs): - pass + + provides the minimal infrastructure. + Algorithms are iterables so can be easily run in a for loop. They will + stop as soon as the stop cryterion is met. 
+ The user is required to implement the set_up, __init__, update and + should_stop and update_objective methods + ''' + + def __init__(self): + self.iteration = 0 + self.stop_cryterion = 'max_iter' + self.__max_iteration = 0 + self.__loss = [] + self.memopt = False + self.timing = [] def set_up(self, *args, **kwargs): raise NotImplementedError() def update(self): @@ -59,6 +58,7 @@ class Algorithm(object): time0 = time.time() self.update() self.timing.append( time.time() - time0 ) + self.update_objective() self.iteration += 1 def get_output(self): '''Returns the solution found''' @@ -66,6 +66,8 @@ class Algorithm(object): def get_current_loss(self): '''Returns the current value of the loss function''' return self.__loss[-1] + def update_objective(self): + raise NotImplementedError() @property def loss(self): return self.__loss @@ -80,13 +82,15 @@ class Algorithm(object): class GradientDescent(Algorithm): '''Implementation of a simple Gradient Descent algorithm ''' - x = None - rate = 0 - objective_function = None - regulariser = None + def __init__(self, **kwargs): '''initialisation can be done at creation time if all proper variables are passed or later with set_up''' + super(GradientDescent, self).__init__() + self.x = None + self.rate = 0 + self.objective_function = None + self.regulariser = None args = ['x_init', 'objective_function', 'rate'] for k,v in kwargs.items(): if k in args: @@ -117,7 +121,8 @@ class GradientDescent(Algorithm): self.x += self.x_update else: self.x += -self.rate * self.objective_function.grad(self.x) - + + def update_objective(self): self.loss.append(self.objective_function(self.x)) @@ -136,13 +141,15 @@ class FISTA(Algorithm): h: opt: additional algorithm ''' - f = None - g = None - invL = None - t_old = 1 + def __init__(self, **kwargs): '''initialisation can be done at creation time if all proper variables are passed or later with set_up''' + super(FISTA, self).__init__() + self.f = None + self.g = None + self.invL = None + self.t_old = 1 args = ['x_init', 'f', 'g', 'opt'] for k,v in kwargs.items(): if k in args: @@ -232,10 +239,9 @@ class FISTA(Algorithm): self.x_old = self.x.copy() self.t_old = self.t - + def update_objective(self): self.loss.append( self.f(self.x) + self.g(self.x) ) - class FBPD(Algorithm) '''FBPD Algorithm -- cgit v1.2.3 From e8886db58d4b2ce5fea42114bc736b2d1bfaf9a5 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Feb 2019 21:37:35 +0000 Subject: initial version. 
Fix inline __idiv__ --- .../optimisation/operators/CompositeOperator.py | 661 +++++++++++++++++++++ 1 file changed, 661 insertions(+) create mode 100755 Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py new file mode 100755 index 0000000..06c0ca8 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py @@ -0,0 +1,661 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 14 12:36:40 2019 + +@author: ofn77899 +""" +#from ccpi.optimisation.ops import Operator +import numpy +from numbers import Number +class Operator(object): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return False + def direct(self,x, out=None): + raise NotImplementedError + def size(self): + # To be defined for specific class + raise NotImplementedError + def norm(self): + raise NotImplementedError + def allocate_direct(self): + '''Allocates memory on the Y space''' + raise NotImplementedError + def allocate_adjoint(self): + '''Allocates memory on the X space''' + raise NotImplementedError + def range_dim(self): + raise NotImplementedError + def domain_dim(self): + raise NotImplementedError + +class LinearOperator(Operator): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return True + def adjoint(self,x, out=None): + raise NotImplementedError + +class CompositeDataContainer(object): + '''Class to hold a composite operator''' + def __init__(self, *args): + self.containers = args + self.index = 0 + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + try: + out = self[self.index] + except IndexError as ie: + raise StopIteration() + self.index+=1 + return out + + def is_compatible(self, other): + '''basic check if the size of the 2 objects fit''' + if isinstance(other, Number): + return True + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + # TODO look elements should be numbers + for ot in other: + if not isinstance(ot, Number): + raise ValueError('List/ numpy array can only contain numbers') + return len(self.containers) == len(other) + return len(self.containers) == len(other.containers) + def __getitem__(self, index): + return self.containers[index] + + def add(self, other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def subtract(self, other, out=None , *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def multiply(self, other , out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return 
type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def divide(self, other , out=None ,*args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def power(self, other , out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def maximum(self,other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + ## unary operations + def abs(self, out=None, *args, **kwargs): + return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) + def sign(self, out=None, *args, **kwargs): + return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) + def sqrt(self, out=None, *args, **kwargs): + return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) + + ## reductions + def sum(self, out=None, *args, **kwargs): + return [ el.sum(*args, **kwargs) for el in self.containers] + + def copy(self): + '''alias of clone''' + return self.clone() + def clone(self): + return type(self)(*[el.copy() for el in self.containers]) + + def __add__(self, other): + return self.add( other ) + # __radd__ + + def __sub__(self, other): + return self.subtract( other ) + # __rsub__ + + def __mul__(self, other): + return self.multiply(other) + # __rmul__ + + def __div__(self, other): + return self.divide(other) + # __rdiv__ + def __truediv__(self, other): + return self.divide(other) + + def __pow__(self, other): + return self.power(other) + # reverse operand + def __radd__(self, other): + return self + other + # __radd__ + + def __rsub__(self, other): + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + return self * other + # __rmul__ + + def __rdiv__(self, other): + print ("call __rdiv__") + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + return self.__rdiv__(other) + + def __rpow__(self, other): + return other.power(self) + + def __iadd__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el += ot + elif isinstance(other, Number): + for el in self.containers: + el += other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + 
for el,ot in zip(self.containers, other): + el += ot + return self + # __radd__ + + def __isub__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el -= ot + elif isinstance(other, Number): + for el in self.containers: + el -= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el -= ot + return self + # __rsub__ + + def __imul__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el *= ot + elif isinstance(other, Number): + for el in self.containers: + el *= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el *= ot + return self + # __rmul__ + + def __idiv__(self, other): + if isinstance (other, CompositeDataContainer): + for i,el,ot in enumerate(zip(self.containers, other.containers)): + print ('__idiv__', i, el.as_array()[0][0][0], ot.as_array()[0][0][0]) + el /= ot + print ("fatto", el.as_array()[0][0][0]) + elif isinstance(other, Number): + for el in self.containers: + print ("prima", el) + print ('__idiv__', el.as_array()[0][0][0], other) + el /= other + print ("fatto", el.as_array()[0][0][0]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + print ('__idiv__', el.as_array()[0][0][0], ot[0][0][0]) + el /= ot + print ("fatto", el.as_array()[0][0][0]) + return self + # __rdiv__ + def __itruediv__(self, other): + return self.__idiv__(other) + +import time +from ccpi.optimisation.funcs import ZeroFun + +class Algorithm(object): + '''Base class for iterative algorithms + + provides the minimal infrastructure. + Algorithms are iterables so can be easily run in a for loop. They will + stop as soon as the stop cryterion is met. 
+ The user is required to implement the set_up, __init__, update and + should_stop and update_objective methods + ''' + + def __init__(self): + self.iteration = 0 + self.stop_cryterion = 'max_iter' + self.__max_iteration = 0 + self.__loss = [] + self.memopt = False + self.timing = [] + def set_up(self, *args, **kwargs): + raise NotImplementedError() + def update(self): + raise NotImplementedError() + + def should_stop(self): + '''stopping cryterion''' + raise NotImplementedError() + + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + if self.should_stop(): + raise StopIteration() + else: + time0 = time.time() + self.update() + self.timing.append( time.time() - time0 ) + self.update_objective() + self.iteration += 1 + def get_output(self): + '''Returns the solution found''' + return self.x + def get_current_loss(self): + '''Returns the current value of the loss function''' + return self.__loss[-1] + def update_objective(self): + raise NotImplementedError() + @property + def loss(self): + return self.__loss + @property + def max_iteration(self): + return self.__max_iteration + @max_iteration.setter + def max_iteration(self, value): + assert isinstance(value, int) + self.__max_iteration = value + +class GradientDescent(Algorithm): + '''Implementation of a simple Gradient Descent algorithm + ''' + + def __init__(self, **kwargs): + '''initialisation can be done at creation time if all + proper variables are passed or later with set_up''' + super(GradientDescent, self).__init__() + self.x = None + self.rate = 0 + self.objective_function = None + self.regulariser = None + args = ['x_init', 'objective_function', 'rate'] + for k,v in kwargs.items(): + if k in args: + args.pop(args.index(k)) + if len(args) == 0: + return self.set_up(x_init=kwargs['x_init'], + objective_function=kwargs['objective_function'], + rate=kwargs['rate']) + + def should_stop(self): + '''stopping cryterion, currently only based on number of iterations''' + return self.iteration >= self.max_iteration + + def set_up(self, x_init, objective_function, rate): + '''initialisation of the algorithm''' + self.x = x_init.copy() + if self.memopt: + self.x_update = x_init.copy() + self.objective_function = objective_function + self.rate = rate + self.loss.append(objective_function(x_init)) + + def update(self): + '''Single iteration''' + if self.memopt: + self.objective_function.gradient(self.x, out=self.x_update) + self.x_update *= -self.rate + self.x += self.x_update + else: + self.x += -self.rate * self.objective_function.grad(self.x) + + def update_objective(self): + self.loss.append(self.objective_function(self.x)) + + +class CompositeOperator(Operator): + '''Class to hold a composite operator''' + def __init__(self, *args): + self.operators = args + + def norm(self): + return [op.norm() for op in self.operators] + + def direct(self, x, out=None): + return CompositeDataContainer(*[op.direct(X) for op,X in zip(self.operators, x)]) + + def adjoint(self, x, out=None): + return CompositeDataContainer(*[op.adjoint(X) for op,X in zip(self.operators, x)]) + + +if __name__ == '__main__': + #from ccpi.optimisation.Algorithms import GradientDescent + from ccpi.plugins.ops import CCPiProjectorSimple + from ccpi.optimisation.ops import TomoIdentity, PowerMethodNonsquare + from ccpi.optimisation.funcs import Norm2sq, Norm1 + from ccpi.framework import ImageGeometry, ImageData, AcquisitionGeometry + import matplotlib.pyplot as plt + + ig0 = 
ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = CompositeDataContainer(data0,data1) + cp1 = CompositeDataContainer(data2,data3) +# + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] + print (a[0][0].shape) + #cp2 = CompositeDataContainer(*a) + cp2 = cp0.add(cp1) + assert (cp2[0].as_array()[0][0][0] == 2.) + assert (cp2[1].as_array()[0][0][0] == 4.) + + cp2 = cp0 + cp1 + assert (cp2[0].as_array()[0][0][0] == 2.) + assert (cp2[1].as_array()[0][0][0] == 4.) + cp2 = cp0 + 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 + [1 ,2] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 3., decimal = 5) + cp2 += cp1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + + cp2 += 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +7., decimal = 5) + + cp2 += [-2,-1] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 6., decimal = 5) + + + cp2 = cp0.subtract(cp1) + assert (cp2[0].as_array()[0][0][0] == -2.) + assert (cp2[1].as_array()[0][0][0] == -2.) + cp2 = cp0 - cp1 + assert (cp2[0].as_array()[0][0][0] == -2.) + assert (cp2[1].as_array()[0][0][0] == -2.) + + cp2 = cp0 - 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0, decimal = 5) + cp2 = cp0 - [1 ,2] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -1., decimal = 5) + + cp2 -= cp1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -4., decimal = 5) + + cp2 -= 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -5., decimal = 5) + + cp2 -= [-2,-1] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -4., decimal = 5) + + + cp2 = cp0.multiply(cp1) + assert (cp2[0].as_array()[0][0][0] == 0.) + assert (cp2[1].as_array()[0][0][0] == 3.) + cp2 = cp0 * cp1 + assert (cp2[0].as_array()[0][0][0] == 0.) + assert (cp2[1].as_array()[0][0][0] == 3.) + + cp2 = cp0 * 2 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2, decimal = 5) + cp2 = cp0 * [3 ,2] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2., decimal = 5) + + cp2 *= cp1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= [-2,-1] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -6., decimal = 5) + + + cp2 = cp0.divide(cp1) + assert (cp2[0].as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1./3., decimal=4) + cp2 = cp0/cp1 + assert (cp2[0].as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1./3., decimal=4) + + cp2 = cp0 / 2 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / [3 ,2] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 += 1 + print ("cp2" , cp2[0].as_array()[0][0][0],cp2[1].as_array()[0][0][0]) + print ("cp1" , cp1[0].as_array()[0][0][0],cp1[1].as_array()[0][0][0]) + #cp2 /= cp1 + # TODO fix inplace division + cp2 /= 0.5 + print (cp2[0].as_array()[0][0][0],cp2[1].as_array()[0][0][0]) + + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1./0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , .2, decimal = 5) + + cp2 /= 1 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5/3., decimal = 5) + + cp2 /= [-2,-1] + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -0.5/3., decimal = 5) + + + cp2 = cp0.power(cp1) + assert (cp2[0].as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + cp2 = cp0**cp1 + assert (cp2[0].as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0 ** 2 + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 1., decimal = 5) + + cp2 = cp0.maximum(cp1) + assert (cp2[0].as_array()[0][0][0] == cp1[0].as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], cp2[1].as_array()[0][0][0], decimal=4) + + + cp2 = cp0.abs() + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0.subtract(cp1) + s = cp2.sign() + numpy.testing.assert_almost_equal(s[0].as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s[1].as_array()[0][0][0], -1., decimal=4) + + cp2 = cp0.add(cp1) + s = cp2.sqrt() + numpy.testing.assert_almost_equal(s[0].as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s[1].as_array()[0][0][0], numpy.sqrt(4), decimal=4) + + s = cp0.sum() + numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + s0 = 1 + s1 = 1 + for i in cp0[0].shape: + s0 *= i + for i in cp0[1].shape: + s1 *= i + + numpy.testing.assert_almost_equal(s[1], cp0[0].as_array()[0][0][0]*s0 +cp0[1].as_array()[0][0][0]*s1, decimal=4) + + # Set up phantom size N x N x vert by creating ImageGeometry, initialising the + # ImageData object with this geometry and empty array and finally put some + # data into its array, and display one slice as image. 
+ + # Image parameters + N = 128 + vert = 4 + + # Set up image geometry + ig = ImageGeometry(voxel_num_x=N, + voxel_num_y=N, + voxel_num_z=vert) + + # Set up empty image data + Phantom = ImageData(geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + + # Populate image data by looping over and filling slices + i = 0 + while i < vert: + if vert > 1: + x = Phantom.subset(vertical=i).array + else: + x = Phantom.array + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.98 + if vert > 1 : + Phantom.fill(x, vertical=i) + i += 1 + + # Display slice of phantom + if vert > 1: + plt.imshow(Phantom.subset(vertical=0).as_array()) + else: + plt.imshow(Phantom.as_array()) + plt.show() + + + # Set up AcquisitionGeometry object to hold the parameters of the measurement + # setup geometry: # Number of angles, the actual angles from 0 to + # pi for parallel beam, set the width of a detector + # pixel relative to an object pixe and the number of detector pixels. + angles_num = 20 + det_w = 1.0 + det_num = N + + angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ + 180/numpy.pi + + # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, + # horz detector pixel size, vert detector pixel count, + # vert detector pixel size. + ag = AcquisitionGeometry('parallel', + '3D', + angles, + N, + det_w, + vert, + det_w) + + # Set up Operator object combining the ImageGeometry and AcquisitionGeometry + # wrapping calls to CCPi projector. + A = CCPiProjectorSimple(ig, ag) + + # Forward and backprojection are available as methods direct and adjoint. Here + # generate test data b and do simple backprojection to obtain z. Display all + # data slices as images, and a single backprojected slice. + b = A.direct(Phantom) + z = A.adjoint(b) + + for i in range(b.get_dimension_size('vertical')): + plt.imshow(b.subset(vertical=i).array) + plt.show() + + plt.imshow(z.subset(vertical=0).array) + plt.title('Backprojected data') + plt.show() + + # Using the test data b, different reconstruction methods can now be set up as + # demonstrated in the rest of this file. In general all methods need an initial + # guess and some algorithm options to be set. Note that 100 iterations for + # some of the methods is a very low number and 1000 or 10000 iterations may be + # needed if one wants to obtain a converged solution. 
+ x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) + x_init1 = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) + X_init = CompositeDataContainer(x_init, x_init1) + B = CompositeDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + + # setup a tomo identity + I = TomoIdentity(geometry=ig) + + # composite operator + K = CompositeOperator(A, I) + + out = K.direct(X_init) + +# f = Norm2sq(K,B) +# f.L = 0.001 +# +# gd = GradientDescent() +# gd.set_up(X_init, f, 0.001 ) +# gd.max_iteration = 2 +# +# for _ in gd: +# pass + + + \ No newline at end of file -- cgit v1.2.3 From 03c03ea81068b62c95882c769230bf8c4c63337b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Sat, 16 Feb 2019 22:06:03 +0000 Subject: First implementation of CompositeOperator/DataContainer --- Wrappers/Python/ccpi/framework.py | 3 ++ .../optimisation/operators/CompositeOperator.py | 39 ++++++++-------------- 2 files changed, 17 insertions(+), 25 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 9938fb7..6af7a97 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -558,6 +558,8 @@ class DataContainer(object): # __isub__ def __idiv__(self, other): + return self.__itruediv__(other) + def __itruediv__(self, other): if isinstance(other, (int, float)) : numpy.divide(self.array, other, out=self.array) elif issubclass(type(other), DataContainer): @@ -721,6 +723,7 @@ class DataContainer(object): def sum(self, out=None, *args, **kwargs): return self.as_array().sum(*args, **kwargs) + class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' def __init__(self, diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py index 06c0ca8..6a14262 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py @@ -219,26 +219,19 @@ class CompositeDataContainer(object): for el,ot in zip(self.containers, other): el *= ot return self - # __rmul__ + # __imul__ def __idiv__(self, other): if isinstance (other, CompositeDataContainer): - for i,el,ot in enumerate(zip(self.containers, other.containers)): - print ('__idiv__', i, el.as_array()[0][0][0], ot.as_array()[0][0][0]) + for el,ot in zip(self.containers, other.containers): el /= ot - print ("fatto", el.as_array()[0][0][0]) elif isinstance(other, Number): for el in self.containers: - print ("prima", el) - print ('__idiv__', el.as_array()[0][0][0], other) el /= other - print ("fatto", el.as_array()[0][0][0]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): assert self.is_compatible(other) for el,ot in zip(self.containers, other): - print ('__idiv__', el.as_array()[0][0][0], ot[0][0][0]) el /= ot - print ("fatto", el.as_array()[0][0][0]) return self # __rdiv__ def __itruediv__(self, other): @@ -486,24 +479,20 @@ if __name__ == '__main__': numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) cp2 += 1 - print ("cp2" , cp2[0].as_array()[0][0][0],cp2[1].as_array()[0][0][0]) - print ("cp1" , cp1[0].as_array()[0][0][0],cp1[1].as_array()[0][0][0]) - #cp2 /= cp1 + cp2 /= cp1 # TODO fix inplace division - cp2 /= 0.5 - print (cp2[0].as_array()[0][0][0],cp2[1].as_array()[0][0][0]) - - 
numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1./0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , .2, decimal = 5) + + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 1.5/3., decimal = 5) cp2 /= 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5/3., decimal = 5) + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -0.5/3., decimal = 5) - + numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -0.5, decimal = 5) + #### cp2 = cp0.power(cp1) assert (cp2[0].as_array()[0][0][0] == 0.) @@ -656,6 +645,6 @@ if __name__ == '__main__': # # for _ in gd: # pass - - - \ No newline at end of file +# +# +# \ No newline at end of file -- cgit v1.2.3 From 6de950b093a7b3602d615e7eb3786d9469ced930 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Sun, 17 Feb 2019 00:26:43 +0000 Subject: removed __getitem__ added get_item added shape --- .../optimisation/operators/CompositeOperator.py | 267 +++++++++++++-------- 1 file changed, 170 insertions(+), 97 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py index 6a14262..ad307b7 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py @@ -7,6 +7,7 @@ Created on Thu Feb 14 12:36:40 2019 #from ccpi.optimisation.ops import Operator import numpy from numbers import Number +import functools class Operator(object): '''Operator that maps from a space X -> Y''' def is_linear(self): @@ -40,9 +41,25 @@ class LinearOperator(Operator): class CompositeDataContainer(object): '''Class to hold a composite operator''' - def __init__(self, *args): + def __init__(self, *args, shape=None): + '''containers must be passed row by row''' self.containers = args self.index = 0 + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) +# for i in range(shape[0]): +# b.append([]) +# for j in range(shape[1]): +# b[-1].append(args[i*shape[1]+j]) +# indices.append(i*shape[1]+j) +# self.containers = b + def __iter__(self): return self def next(self): @@ -67,7 +84,13 @@ class CompositeDataContainer(object): raise ValueError('List/ numpy array can only contain numbers') return len(self.containers) == len(other) return len(self.containers) == len(other.containers) - def __getitem__(self, index): + def get_item(self, row, col=0): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col return self.containers[index] def add(self, other, out=None, *args, **kwargs): @@ -128,7 +151,7 @@ class 
CompositeDataContainer(object): ## reductions def sum(self, out=None, *args, **kwargs): - return [ el.sum(*args, **kwargs) for el in self.containers] + return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) def copy(self): '''alias of clone''' @@ -236,6 +259,9 @@ class CompositeDataContainer(object): # __rdiv__ def __itruediv__(self, other): return self.__idiv__(other) + def norm(self): + y = numpy.asarray([el.norm() for el in self.containers]) + return numpy.reshape(y, self.shape) import time from ccpi.optimisation.funcs import ZeroFun @@ -348,19 +374,65 @@ class GradientDescent(Algorithm): class CompositeOperator(Operator): '''Class to hold a composite operator''' - def __init__(self, *args): + def __init__(self, *args, shape=None): self.operators = args + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) + def get_item(self, row, col): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + index = row*self.shape[1]+col + return self.operators[index] + def norm(self): - return [op.norm() for op in self.operators] + norm = [op.norm() for op in self.operators] + b = [] + for i in range(self.shape[0]): + b.append([]) + for j in range(self.shape[1]): + b[-1].append(norm[i*self.shape[1]+j]) + return numpy.asarray(b) def direct(self, x, out=None): - return CompositeDataContainer(*[op.direct(X) for op,X in zip(self.operators, x)]) + shape = self.get_output_shape(x.shape) + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).direct(x.get_item(col)) + else: + prod += self.get_item(row,col).direct(x.get_item(col)) + res.append(prod) + print ("len res" , len(res)) + return CompositeDataContainer(*res, shape=shape) def adjoint(self, x, out=None): - return CompositeDataContainer(*[op.adjoint(X) for op,X in zip(self.operators, x)]) - - + shape = self.get_output_shape(x.shape) + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).adjoint(x.get_item(col)) + else: + prod += self.get_item(row,col).adjoint(x.get_item(col)) + res.append(prod) + return CompositeDataContainer(*res, shape=shape) + + def get_output_shape(self, xshape): + print ("operator shape {} data shape {}".format(self.shape, xshape)) + if self.shape[1] != xshape[0]: + raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) + print ((self.shape[0], xshape[-1])) + return (self.shape[0], xshape[-1]) if __name__ == '__main__': #from ccpi.optimisation.Algorithms import GradientDescent from ccpi.plugins.ops import CCPiProjectorSimple @@ -385,155 +457,155 @@ if __name__ == '__main__': print (a[0][0].shape) #cp2 = CompositeDataContainer(*a) cp2 = cp0.add(cp1) - assert (cp2[0].as_array()[0][0][0] == 2.) - assert (cp2[1].as_array()[0][0][0] == 4.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) cp2 = cp0 + cp1 - assert (cp2[0].as_array()[0][0][0] == 2.) - assert (cp2[1].as_array()[0][0][0] == 4.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) 
cp2 = cp0 + 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) cp2 = cp0 + [1 ,2] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 3., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) cp2 += cp1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , +3. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) cp2 += 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , +4. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +7., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) cp2 += [-2,-1] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 2. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) cp2 = cp0.subtract(cp1) - assert (cp2[0].as_array()[0][0][0] == -2.) - assert (cp2[1].as_array()[0][0][0] == -2.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) cp2 = cp0 - cp1 - assert (cp2[0].as_array()[0][0][0] == -2.) - assert (cp2[1].as_array()[0][0][0] == -2.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) cp2 = cp0 - 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) cp2 = cp0 - [1 ,2] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -1., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) cp2 -= cp1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -3. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -4., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) cp2 -= 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -4. 
, decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -5., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) cp2 -= [-2,-1] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -2. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -4., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) cp2 = cp0.multiply(cp1) - assert (cp2[0].as_array()[0][0][0] == 0.) - assert (cp2[1].as_array()[0][0][0] == 3.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) cp2 = cp0 * cp1 - assert (cp2[0].as_array()[0][0][0] == 0.) - assert (cp2[1].as_array()[0][0][0] == 3.) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) cp2 = cp0 * 2 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) cp2 = cp0 * [3 ,2] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) cp2 *= cp1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0 , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) cp2 *= 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) cp2 *= [-2,-1] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) cp2 = cp0.divide(cp1) - assert (cp2[0].as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1./3., decimal=4) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) cp2 = cp0/cp1 - assert (cp2[0].as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1./3., decimal=4) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
+ numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) cp2 = cp0 / 2 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) cp2 = cp0 / [3 ,2] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) cp2 += 1 cp2 /= cp1 # TODO fix inplace division - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 1./2 , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 1.5/3., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) cp2 /= 1 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , -0.5/2. , decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , -0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) #### cp2 = cp0.power(cp1) - assert (cp2[0].as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) cp2 = cp0**cp1 - assert (cp2[0].as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
+ numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) cp2 = cp0 ** 2 - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0] , 0., decimal=5) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0] , 1., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) cp2 = cp0.maximum(cp1) - assert (cp2[0].as_array()[0][0][0] == cp1[0].as_array()[0][0][0]) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], cp2[1].as_array()[0][0][0], decimal=4) + assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) cp2 = cp0.abs() - numpy.testing.assert_almost_equal(cp2[0].as_array()[0][0][0], 0., decimal=4) - numpy.testing.assert_almost_equal(cp2[1].as_array()[0][0][0], 1., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) cp2 = cp0.subtract(cp1) s = cp2.sign() - numpy.testing.assert_almost_equal(s[0].as_array()[0][0][0], -1., decimal=4) - numpy.testing.assert_almost_equal(s[1].as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) cp2 = cp0.add(cp1) s = cp2.sqrt() - numpy.testing.assert_almost_equal(s[0].as_array()[0][0][0], numpy.sqrt(2), decimal=4) - numpy.testing.assert_almost_equal(s[1].as_array()[0][0][0], numpy.sqrt(4), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) s = cp0.sum() numpy.testing.assert_almost_equal(s[0], 0, decimal=4) s0 = 1 s1 = 1 - for i in cp0[0].shape: + for i in cp0.get_item(0,0).shape: s0 *= i - for i in cp0[1].shape: + for i in cp0.get_item(1,0).shape: s1 *= i - numpy.testing.assert_almost_equal(s[1], cp0[0].as_array()[0][0][0]*s0 +cp0[1].as_array()[0][0][0]*s1, decimal=4) + numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) # Set up phantom size N x N x vert by creating ImageGeometry, initialising the # ImageData object with this geometry and empty array and finally put some @@ -622,9 +694,7 @@ if __name__ == '__main__': # needed if one wants to obtain a converged solution. 
x_init = ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']) - x_init1 = ImageData(geometry=ig, - dimension_labels=['horizontal_x','horizontal_y','vertical']) - X_init = CompositeDataContainer(x_init, x_init1) + X_init = CompositeDataContainer(x_init) B = CompositeDataContainer(b, ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) @@ -636,15 +706,18 @@ if __name__ == '__main__': out = K.direct(X_init) -# f = Norm2sq(K,B) -# f.L = 0.001 -# -# gd = GradientDescent() -# gd.set_up(X_init, f, 0.001 ) -# gd.max_iteration = 2 -# -# for _ in gd: -# pass -# -# -# \ No newline at end of file + f = Norm2sq(K,B) + f.L = 0.001 + + gd = GradientDescent() + gd.set_up(X_init, f, 0.001 ) + gd.max_iteration = 2 + + out.__isub__(B) + out2 = K.adjoint(out) + + #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + + for _ in gd: + print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + \ No newline at end of file -- cgit v1.2.3 From 5f82583109cd218e08c2a9e1cca21adca73ffe6d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 15:19:11 +0000 Subject: added CGLS --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 50 +++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py index de7f0f8..bf7f1c3 100644 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -302,3 +302,53 @@ class FBPD(Algorithm) # time and criterion self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) +class CGLS(Algorithm): + + '''Conjugate Gradient Least Squares algorithm + + Parameters: + x_init: initial guess + operator: operator for forward/backward projections + data: data to operate on + ''' + def __init__(self, **kwargs): + super(CGLS, self).__init__() + self.x = kwargs.get('x_init', None) + self.operator = kwargs.get('operator', None) + self.data = kwargs.get('data', None) + if self.x is not None and self.operator is not None and \ + self.data is not None: + print ("Calling from creator") + return self.set_up(x_init =kwargs['x_init'], + operator=kwargs['operator'], + data =kwargs['data']) + + def set_up(self, x_init, operator , data ): + + self.r = data.copy() + self.x = x_init.copy() + + self.operator = operator + self.d = operator.adjoint(self.r) + + self.normr2 = self.d.norm() + + def should_stop(self): + '''stopping cryterion, currently only based on number of iterations''' + return self.iteration >= self.max_iteration + + def update(self): + + Ad = self.operator.direct(self.d) + alpha = self.normr2/Ad.norm() + self.x += alpha * self.d + self.r -= alpha * Ad + s = self.operator.adjoint(self.r) + + normr2_new = s.norm() + beta = normr2_new/self.normr2 + self.normr2 = normr2_new + self.d = s + beta*self.d + + def update_objective(self): + self.loss.append(self.r.norm()) -- cgit v1.2.3 From b9d3b0722c03cded15973417514d4639e390311e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 15:19:48 +0000 Subject: working unit test, initial tomography test --- .../optimisation/operators/CompositeOperator.py | 184 ++++++--------------- 1 file changed, 54 insertions(+), 130 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py index ad307b7..be2d525 
100755 --- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py @@ -8,8 +8,12 @@ Created on Thu Feb 14 12:36:40 2019 import numpy from numbers import Number import functools +from ccpi.framework import AcquisitionData, ImageData + class Operator(object): '''Operator that maps from a space X -> Y''' + def __init__(self, **kwargs): + self.scalar = 1 def is_linear(self): '''Returns if the operator is linear''' return False @@ -30,6 +34,10 @@ class Operator(object): raise NotImplementedError def domain_dim(self): raise NotImplementedError + def __rmul__(self, other): + assert isinstance(other, Number) + self.scalar = other + return self class LinearOperator(Operator): '''Operator that maps from a space X -> Y''' @@ -38,6 +46,8 @@ class LinearOperator(Operator): return True def adjoint(self,x, out=None): raise NotImplementedError + +# this should go in the framework class CompositeDataContainer(object): '''Class to hold a composite operator''' @@ -260,118 +270,9 @@ class CompositeDataContainer(object): def __itruediv__(self, other): return self.__idiv__(other) def norm(self): - y = numpy.asarray([el.norm() for el in self.containers]) - return numpy.reshape(y, self.shape) - -import time -from ccpi.optimisation.funcs import ZeroFun - -class Algorithm(object): - '''Base class for iterative algorithms - - provides the minimal infrastructure. - Algorithms are iterables so can be easily run in a for loop. They will - stop as soon as the stop cryterion is met. - The user is required to implement the set_up, __init__, update and - should_stop and update_objective methods - ''' - - def __init__(self): - self.iteration = 0 - self.stop_cryterion = 'max_iter' - self.__max_iteration = 0 - self.__loss = [] - self.memopt = False - self.timing = [] - def set_up(self, *args, **kwargs): - raise NotImplementedError() - def update(self): - raise NotImplementedError() - - def should_stop(self): - '''stopping cryterion''' - raise NotImplementedError() - - def __iter__(self): - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - if self.should_stop(): - raise StopIteration() - else: - time0 = time.time() - self.update() - self.timing.append( time.time() - time0 ) - self.update_objective() - self.iteration += 1 - def get_output(self): - '''Returns the solution found''' - return self.x - def get_current_loss(self): - '''Returns the current value of the loss function''' - return self.__loss[-1] - def update_objective(self): - raise NotImplementedError() - @property - def loss(self): - return self.__loss - @property - def max_iteration(self): - return self.__max_iteration - @max_iteration.setter - def max_iteration(self, value): - assert isinstance(value, int) - self.__max_iteration = value - -class GradientDescent(Algorithm): - '''Implementation of a simple Gradient Descent algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(GradientDescent, self).__init__() - self.x = None - self.rate = 0 - self.objective_function = None - self.regulariser = None - args = ['x_init', 'objective_function', 'rate'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(x_init=kwargs['x_init'], - objective_function=kwargs['objective_function'], - rate=kwargs['rate']) - - def should_stop(self): - '''stopping cryterion, 
currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def set_up(self, x_init, objective_function, rate): - '''initialisation of the algorithm''' - self.x = x_init.copy() - if self.memopt: - self.x_update = x_init.copy() - self.objective_function = objective_function - self.rate = rate - self.loss.append(objective_function(x_init)) - - def update(self): - '''Single iteration''' - if self.memopt: - self.objective_function.gradient(self.x, out=self.x_update) - self.x_update *= -self.rate - self.x += self.x_update - else: - self.x += -self.rate * self.objective_function.grad(self.x) - - def update_objective(self): - self.loss.append(self.objective_function(self.x)) - - + y = numpy.asarray([el.norm().sum() for el in self.containers]) + return y.sum() + class CompositeOperator(Operator): '''Class to hold a composite operator''' def __init__(self, *args, shape=None): @@ -416,10 +317,10 @@ class CompositeOperator(Operator): return CompositeDataContainer(*res, shape=shape) def adjoint(self, x, out=None): - shape = self.get_output_shape(x.shape) + shape = self.get_output_shape(x.shape, adjoint=True) res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): + for row in range(self.shape[1]): + for col in range(self.shape[0]): if col == 0: prod = self.get_item(row,col).adjoint(x.get_item(col)) else: @@ -427,18 +328,25 @@ class CompositeOperator(Operator): res.append(prod) return CompositeDataContainer(*res, shape=shape) - def get_output_shape(self, xshape): + def get_output_shape(self, xshape, adjoint=False): print ("operator shape {} data shape {}".format(self.shape, xshape)) - if self.shape[1] != xshape[0]: + sshape = self.shape[1] + oshape = self.shape[0] + if adjoint: + sshape = self.shape[0] + oshape = self.shape[1] + if sshape != xshape[0]: raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - print ((self.shape[0], xshape[-1])) - return (self.shape[0], xshape[-1]) + print ((oshape, xshape[-1])) + return (oshape, xshape[-1]) if __name__ == '__main__': #from ccpi.optimisation.Algorithms import GradientDescent from ccpi.plugins.ops import CCPiProjectorSimple - from ccpi.optimisation.ops import TomoIdentity, PowerMethodNonsquare + from ccpi.optimisation.ops import PowerMethodNonsquare + from ccpi.optimisation.ops import TomoIdentity from ccpi.optimisation.funcs import Norm2sq, Norm1 - from ccpi.framework import ImageGeometry, ImageData, AcquisitionGeometry + from ccpi.framework import ImageGeometry, AcquisitionGeometry + from ccpi.optimisation.Algorithms import CGLS import matplotlib.pyplot as plt ig0 = ImageGeometry(2,3,4) @@ -699,25 +607,41 @@ if __name__ == '__main__': ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) # setup a tomo identity - I = TomoIdentity(geometry=ig) + I = 0.3 * TomoIdentity(geometry=ig) # composite operator - K = CompositeOperator(A, I) + K = CompositeOperator(A, I, shape=(2,1)) out = K.direct(X_init) f = Norm2sq(K,B) - f.L = 0.001 + f.L = 0.1 + + cg = CGLS() + cg.set_up(X_init, K, B ) + cg.max_iteration = 1 - gd = GradientDescent() - gd.set_up(X_init, f, 0.001 ) - gd.max_iteration = 2 + cgs = CGLS() + cgs.set_up(x_init, A, b ) + cgs.max_iteration = 2 out.__isub__(B) out2 = K.adjoint(out) #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - for _ in gd: - print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) - \ No newline at end of file + for _ in cg: + print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) + + 
fig = plt.figure() + plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS') + plt.show() + + for _ in cgs: + print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) + + fig = plt.figure() + plt.imshow(cgs.get_output().subset(vertical=0).as_array()) + plt.title('Simple CGLS') + plt.show() \ No newline at end of file -- cgit v1.2.3 From 74891953a24416b9680dee13354d57b42cd8f63c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 15:23:29 +0000 Subject: added reverse multiplication of operator with number --- Wrappers/Python/ccpi/optimisation/ops.py | 37 +++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 450b084..3845621 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -24,26 +24,49 @@ from ccpi.framework import AcquisitionData from ccpi.framework import ImageData from ccpi.framework import ImageGeometry from ccpi.framework import AcquisitionGeometry - +from numbers import Number # Maybe operators need to know what types they take as inputs/outputs # to not just use generic DataContainer class Operator(object): + '''Operator that maps from a space X -> Y''' + def __init__(self, **kwargs): + self.scalar = 1 + def is_linear(self): + '''Returns if the operator is linear''' + return False def direct(self,x, out=None): - return x - def adjoint(self,x, out=None): - return x + raise NotImplementedError def size(self): # To be defined for specific class raise NotImplementedError - def get_max_sing_val(self): + def norm(self): raise NotImplementedError def allocate_direct(self): + '''Allocates memory on the Y space''' raise NotImplementedError def allocate_adjoint(self): + '''Allocates memory on the X space''' + raise NotImplementedError + def range_dim(self): raise NotImplementedError + def domain_dim(self): + raise NotImplementedError + def __rmul__(self, other): + '''reverse multiplication of Operator with number sets the variable scalar in the Operator''' + assert isinstance(other, Number) + self.scalar = other + return self +class LinearOperator(Operator): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return True + def adjoint(self,x, out=None): + raise NotImplementedError + class Identity(Operator): def __init__(self): self.s1 = 1.0 @@ -75,12 +98,16 @@ class TomoIdentity(Operator): super(TomoIdentity, self).__init__() def direct(self,x,out=None): + if self.scalar != 1: + x *= self.scalar if out is None: return x.copy() else: out.fill(x) def adjoint(self,x, out=None): + if self.scalar != 1: + x *= self.scalar if out is None: return x.copy() else: -- cgit v1.2.3 From 8a92d3602a07c6f73689fd550b8e4080857cb7f1 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 16:24:59 +0000 Subject: added operators directory --- Wrappers/Python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index b584344..00eeed0 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -28,7 +28,7 @@ cil_version='0.11.3' setup( name="ccpi-framework", version=cil_version, - packages=['ccpi' , 'ccpi.io', 'ccpi.optimisation'], + packages=['ccpi' , 'ccpi.io', 'ccpi.optimisation', 'ccpi.optimisation.operators'], # Project uses reStructuredText, so 
ensure that the docutils get # installed or upgraded on the target machine -- cgit v1.2.3 From fd0487910f8a17107bdf5a6b2cb68e5edfb7f295 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 16:26:12 +0000 Subject: fixed typo --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py index bf7f1c3..448a7b1 100644 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -242,7 +242,7 @@ class FISTA(Algorithm): def update_objective(self): self.loss.append( self.f(self.x) + self.g(self.x) ) -class FBPD(Algorithm) +class FBPD(Algorithm): '''FBPD Algorithm Parameters: @@ -293,7 +293,7 @@ class FBPD(Algorithm) # primal forward-backward step x_old = self.x self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) - self.x = constraint.prox(self.x, self.tau); + self.x = self.constraint.prox(self.x, self.tau); # dual forward-backward step self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); -- cgit v1.2.3 From 965d78b4a3dd43c89eedae732f1caa1d569a0066 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 16:26:49 +0000 Subject: added unittest for CompositeDataContainer --- Wrappers/Python/conda-recipe/run_test.py | 176 +++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index 5bf6538..57afd57 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -962,6 +962,182 @@ class TestNexusReader(unittest.TestCase): data = nr.get_acquisition_data().subset(['vertical','horizontal']) self.assertTrue(sl.shape , (10,data.shape[1])) + +class TestCompositeDataContainer(unittest.TestCase): + + def test_one(self): + from ccpi.optimisation.ops import PowerMethodNonsquare + from ccpi.optimisation.ops import TomoIdentity + from ccpi.optimisation.funcs import Norm2sq, Norm1 + from ccpi.framework import ImageGeometry, AcquisitionGeometry + from ccpi.optimisation.operators.CompositeOperator import CompositeDataContainer, CompositeDataContainer + import matplotlib.pyplot as plt + + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = CompositeDataContainer(data0,data1) + cp1 = CompositeDataContainer(data2,data3) + # + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] + print (a[0][0].shape) + #cp2 = CompositeDataContainer(*a) + cp2 = cp0.add(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + + cp2 = cp0 + cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + cp2 = cp0 + 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 + [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) + cp2 += cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 += 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) + + cp2 += [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) + + + cp2 = cp0.subtract(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + cp2 = cp0 - cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + + cp2 = cp0 - 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) + cp2 = cp0 - [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) + + cp2 -= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + cp2 -= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) + + cp2 -= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + + cp2 = cp0.multiply(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + cp2 = cp0 * cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + + cp2 = cp0 * 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = cp0 * [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 *= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) + + + cp2 = cp0.divide(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + cp2 = cp0/cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
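The pattern these assertions check is the broadcasting convention of the composite container: a scalar operand is applied to every block, while a list supplies one value per block (cp0 holds blocks with values 0 and 1, so cp0 + [1, 2] yields 1 and 3, and cp0 / 2 yields 0 and 0.5). A rough standalone sketch of that convention using plain NumPy arrays in place of ImageData (function and variable names here are illustrative only):

    import numpy
    from numbers import Number

    def blockwise(op, blocks, other):
        # scalar -> apply to every block; list -> pair entries block-by-block
        if isinstance(other, Number):
            return [op(b, other) for b in blocks]
        return [op(b, o) for b, o in zip(blocks, other)]

    blocks = [numpy.zeros((2, 2)), numpy.ones((3, 3))]     # block values 0 and 1
    added = blockwise(lambda a, b: a + b, blocks, [1, 2])
    assert added[0][0, 0] == 1. and added[1][0, 0] == 3.
    halved = blockwise(lambda a, b: a / b, blocks, 2)
    assert halved[0][0, 0] == 0. and halved[1][0, 0] == 0.5
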
+ numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + + cp2 = cp0 / 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 += 1 + cp2 /= cp1 + # TODO fix inplace division + + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) + + cp2 /= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 /= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) + #### + + cp2 = cp0.power(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + cp2 = cp0**cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0 ** 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) + + cp2 = cp0.maximum(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) + + + cp2 = cp0.abs() + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0.subtract(cp1) + s = cp2.sign() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) + + cp2 = cp0.add(cp1) + s = cp2.sqrt() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) + + s = cp0.sum() + numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + s0 = 1 + s1 = 1 + for i in cp0.get_item(0,0).shape: + s0 *= i + for i in cp0.get_item(1,0).shape: + s1 *= i + + numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) if __name__ == '__main__': -- cgit v1.2.3 From 8e80fd44796073ebf716110c220a03b63028968e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Feb 2019 16:28:23 +0000 Subject: fix TomoIdentity with scalar --- Wrappers/Python/ccpi/optimisation/ops.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 3845621..a0e1713 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -98,20 +98,20 @@ class TomoIdentity(Operator): 
super(TomoIdentity, self).__init__() def direct(self,x,out=None): - if self.scalar != 1: - x *= self.scalar + if out is None: + if self.scalar != 1: + return x * self.scalar return x.copy() else: + if self.scalar != 1: + out.fill(x * self.scalar) + return out.fill(x) + return def adjoint(self,x, out=None): - if self.scalar != 1: - x *= self.scalar - if out is None: - return x.copy() - else: - out.fill(x) + return self.direct(x, out) def size(self): return NotImplemented -- cgit v1.2.3 From 10c52f5eda45b412ca8859a04950df62745acbe8 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 20 Feb 2019 15:02:42 +0000 Subject: check numerical types from numpy --- Wrappers/Python/ccpi/framework.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 6af7a97..d1ad26b 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -472,7 +472,10 @@ class DataContainer(object): else: raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, other.shape)) - elif isinstance(other, (int, float, complex)): + elif isinstance(other, (int, float, complex,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): return type(self)(self.as_array() * other, deep_copy=True, dimension_labels=self.dimension_labels, @@ -633,6 +636,10 @@ class DataContainer(object): if out is None: if isinstance(x2, (int, float, complex)): out = pwop(self.as_array() , x2 , *args, **kwargs ) + elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + out = pwop(self.as_array() , x2 , *args, **kwargs ) elif issubclass(type(x2) , DataContainer): out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) return type(self)(out, -- cgit v1.2.3 From 5317bf21b45433313907c8f4d6331230c2c8349f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 20 Feb 2019 15:05:07 +0000 Subject: add default stop criterion and run method --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py index 448a7b1..9115e6e 100644 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -43,8 +43,8 @@ class Algorithm(object): raise NotImplementedError() def should_stop(self): - '''stopping cryterion''' - raise NotImplementedError() + '''default stopping cryterion: number of iterations''' + return self.iteration >= self.max_iteration def __iter__(self): return self @@ -58,6 +58,7 @@ class Algorithm(object): time0 = time.time() self.update() self.timing.append( time.time() - time0 ) + # TODO update every N iterations self.update_objective() self.iteration += 1 def get_output(self): @@ -66,12 +67,17 @@ class Algorithm(object): def get_current_loss(self): '''Returns the current value of the loss function''' return self.__loss[-1] + def get_current_objective(self): + return self.get_current_loss() def update_objective(self): raise NotImplementedError() @property def loss(self): return self.__loss @property + def objective(self): + return self.__loss + @property def max_iteration(self): return self.__max_iteration @max_iteration.setter @@ -198,11 +204,7 @@ class 
FISTA(Algorithm): self.invL = 1/f.L self.t_old = 1 - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - + def update(self): # algorithm loop #for it in range(0, max_iter): -- cgit v1.2.3 From 7e0ed0c5fef0382d6b6903d8132fd06a2c4d2967 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 20 Feb 2019 15:05:36 +0000 Subject: add run method --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py index 9115e6e..0a5cac6 100644 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ b/Wrappers/Python/ccpi/optimisation/Algorithms.py @@ -84,6 +84,12 @@ class Algorithm(object): def max_iteration(self, value): assert isinstance(value, int) self.__max_iteration = value + def run(self, iterations, callback=None): + '''run n iterations and update the user with the callback if specified''' + self.max_iteration += iterations + for _ in self: + if callback is not None: + callback(self.iteration, self.get_current_loss()) class GradientDescent(Algorithm): '''Implementation of a simple Gradient Descent algorithm -- cgit v1.2.3 From dc81b230647184acb735e0003a8f49aaf6f37a97 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 20 Feb 2019 15:07:24 +0000 Subject: first working implementation of CGLS with CompositeOperator/DataContainer notice problem with _rmul_ and _mul_ methods precedence with numpy. --- .../optimisation/operators/CompositeOperator.py | 1464 +++++++++++--------- 1 file changed, 818 insertions(+), 646 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py index be2d525..77abb8c 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py @@ -1,647 +1,819 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" -#from ccpi.optimisation.ops import Operator -import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData - -class Operator(object): - '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 - def is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError - def norm(self): - raise NotImplementedError - def allocate_direct(self): - '''Allocates memory on the Y space''' - raise NotImplementedError - def allocate_adjoint(self): - '''Allocates memory on the X space''' - raise NotImplementedError - def range_dim(self): - raise NotImplementedError - def domain_dim(self): - raise NotImplementedError - def __rmul__(self, other): - assert isinstance(other, Number) - self.scalar = other - return self - -class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - raise NotImplementedError - -# this should go in the framework - -class CompositeDataContainer(object): - '''Class to hold a composite operator''' - def __init__(self, *args, shape=None): - '''containers must be passed row by row''' - 
self.containers = args - self.index = 0 - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) -# for i in range(shape[0]): -# b.append([]) -# for j in range(shape[1]): -# b[-1].append(args[i*shape[1]+j]) -# indices.append(i*shape[1]+j) -# self.containers = b - - def __iter__(self): - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 2 objects fit''' - if isinstance(other, Number): - return True - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - # TODO look elements should be numbers - for ot in other: - if not isinstance(ot, Number): - raise ValueError('List/ numpy array can only contain numbers') - return len(self.containers) == len(other) - return len(self.containers) == len(other.containers) - def get_item(self, row, col=0): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.containers[index] - - def add(self, other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def subtract(self, other, out=None , *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def multiply(self, other , out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def divide(self, other , out=None ,*args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def power(self, other , out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) - elif 
isinstance(other, list): - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def maximum(self,other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - ## unary operations - def abs(self, out=None, *args, **kwargs): - return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) - def sign(self, out=None, *args, **kwargs): - return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) - def sqrt(self, out=None, *args, **kwargs): - return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) - - ## reductions - def sum(self, out=None, *args, **kwargs): - return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) - - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers]) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def __div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - return self * other - # __rmul__ - - def __rdiv__(self, other): - print ("call __rdiv__") - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - return other.power(self) - - def __iadd__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el += ot - return self - # __radd__ - - def __isub__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el -= ot - return self - # __rsub__ - - def __imul__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in 
zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - return self.__idiv__(other) - def norm(self): - y = numpy.asarray([el.norm().sum() for el in self.containers]) - return y.sum() - -class CompositeOperator(Operator): - '''Class to hold a composite operator''' - def __init__(self, *args, shape=None): - self.operators = args - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) - def get_item(self, row, col): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.operators[index] - - def norm(self): - norm = [op.norm() for op in self.operators] - b = [] - for i in range(self.shape[0]): - b.append([]) - for j in range(self.shape[1]): - b[-1].append(norm[i*self.shape[1]+j]) - return numpy.asarray(b) - - def direct(self, x, out=None): - shape = self.get_output_shape(x.shape) - res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): - if col == 0: - prod = self.get_item(row,col).direct(x.get_item(col)) - else: - prod += self.get_item(row,col).direct(x.get_item(col)) - res.append(prod) - print ("len res" , len(res)) - return CompositeDataContainer(*res, shape=shape) - - def adjoint(self, x, out=None): - shape = self.get_output_shape(x.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row,col).adjoint(x.get_item(col)) - else: - prod += self.get_item(row,col).adjoint(x.get_item(col)) - res.append(prod) - return CompositeDataContainer(*res, shape=shape) - - def get_output_shape(self, xshape, adjoint=False): - print ("operator shape {} data shape {}".format(self.shape, xshape)) - sshape = self.shape[1] - oshape = self.shape[0] - if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - print ((oshape, xshape[-1])) - return (oshape, xshape[-1]) -if __name__ == '__main__': - #from ccpi.optimisation.Algorithms import GradientDescent - from ccpi.plugins.ops import CCPiProjectorSimple - from ccpi.optimisation.ops import PowerMethodNonsquare - from ccpi.optimisation.ops import TomoIdentity - from ccpi.optimisation.funcs import Norm2sq, Norm1 - from ccpi.framework import ImageGeometry, AcquisitionGeometry - from ccpi.optimisation.Algorithms import CGLS - import matplotlib.pyplot as plt - - ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(12,42,55,32) - - data0 = ImageData(geometry=ig0) - data1 = ImageData(geometry=ig1) + 1 - - data2 = ImageData(geometry=ig0) + 2 - data3 = ImageData(geometry=ig1) + 3 - - cp0 = CompositeDataContainer(data0,data1) - cp1 = CompositeDataContainer(data2,data3) -# - a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] - print (a[0][0].shape) - #cp2 = CompositeDataContainer(*a) - cp2 = cp0.add(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) 
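Both the removed and the re-added CompositeOperator follow the usual block-matrix convention: direct accumulates y_i = sum_j K_ij(x_j) over the columns, and adjoint accumulates z_j = sum_i K_ij^T(y_i) over the rows, which is what lets a (2,1) operator like [[A], [alpha*I]] map a single image to a (data, image) pair and back. A minimal dense-matrix sketch of that convention (pure NumPy, not the library classes):

    import numpy

    A = numpy.array([[1., 2.], [3., 4.]])
    I = numpy.eye(2)
    blocks = [[A], [0.3 * I]]                # a (2,1) block operator [[A], [alpha*I]]

    def direct(blocks, x):
        # y_i = sum_j K_ij @ x_j
        return [sum(K @ xj for K, xj in zip(row, x)) for row in blocks]

    def adjoint(blocks, y):
        # z_j = sum_i K_ij.T @ y_i
        ncols = len(blocks[0])
        return [sum(blocks[i][j].T @ y[i] for i in range(len(blocks)))
                for j in range(ncols)]

    x = [numpy.array([1., 1.])]
    y = direct(blocks, x)                    # [A @ x0, 0.3 * x0]
    z = adjoint(blocks, y)
    assert numpy.allclose(z[0], A.T @ y[0] + 0.3 * y[1])
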
- assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) - - cp2 = cp0 + cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) - cp2 = cp0 + 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = cp0 + [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) - cp2 += cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 += 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) - - cp2 += [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) - - - cp2 = cp0.subtract(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - cp2 = cp0 - cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - - cp2 = cp0 - 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) - cp2 = cp0 - [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) - - cp2 -= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - cp2 -= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) - - cp2 -= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - - cp2 = cp0.multiply(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - cp2 = cp0 * cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - - cp2 = cp0 * 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) - cp2 = cp0 * [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - - cp2 *= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) - - - cp2 = cp0.divide(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - cp2 = cp0/cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - - cp2 = cp0 / 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp2 = cp0 / [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - - cp2 += 1 - cp2 /= cp1 - # TODO fix inplace division - - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) - - cp2 /= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - - cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) - #### - - cp2 = cp0.power(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - cp2 = cp0**cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
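The CGLS class added in this series (first using d.norm(), later the squared inner product (d*d).sum()) only needs the operator's direct and adjoint, so the same recursion works for a plain matrix, a projector, or a composite operator. A compact dense sketch of the CGLS recursion, solving min_x ||Ax - b||^2 with NumPy (illustrative only, not the library implementation):

    import numpy

    def cgls(A, b, x0, iterations):
        # Conjugate Gradient Least Squares on the normal equations A^T A x = A^T b
        x = x0.copy()
        r = b - A @ x
        d = A.T @ r
        normr2 = d @ d
        for _ in range(iterations):
            Ad = A @ d
            alpha = normr2 / (Ad @ Ad)
            x = x + alpha * d
            r = r - alpha * Ad
            s = A.T @ r
            normr2_new = s @ s
            d = s + (normr2_new / normr2) * d
            normr2 = normr2_new
        return x

    A = numpy.array([[2., 0.], [1., 3.], [0., 1.]])
    b = numpy.array([2., 7., 2.])
    x = cgls(A, b, numpy.zeros(2), iterations=2)
    assert numpy.allclose(A.T @ (A @ x), A.T @ b)    # normal equations satisfied
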
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0 ** 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) - - cp2 = cp0.maximum(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) - - - cp2 = cp0.abs() - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0.subtract(cp1) - s = cp2.sign() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) - - cp2 = cp0.add(cp1) - s = cp2.sqrt() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) - - s = cp0.sum() - numpy.testing.assert_almost_equal(s[0], 0, decimal=4) - s0 = 1 - s1 = 1 - for i in cp0.get_item(0,0).shape: - s0 *= i - for i in cp0.get_item(1,0).shape: - s1 *= i - - numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) - - # Set up phantom size N x N x vert by creating ImageGeometry, initialising the - # ImageData object with this geometry and empty array and finally put some - # data into its array, and display one slice as image. - - # Image parameters - N = 128 - vert = 4 - - # Set up image geometry - ig = ImageGeometry(voxel_num_x=N, - voxel_num_y=N, - voxel_num_z=vert) - - # Set up empty image data - Phantom = ImageData(geometry=ig, - dimension_labels=['horizontal_x', - 'horizontal_y', - 'vertical']) - - # Populate image data by looping over and filling slices - i = 0 - while i < vert: - if vert > 1: - x = Phantom.subset(vertical=i).array - else: - x = Phantom.array - x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 - x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.98 - if vert > 1 : - Phantom.fill(x, vertical=i) - i += 1 - - # Display slice of phantom - if vert > 1: - plt.imshow(Phantom.subset(vertical=0).as_array()) - else: - plt.imshow(Phantom.as_array()) - plt.show() - - - # Set up AcquisitionGeometry object to hold the parameters of the measurement - # setup geometry: # Number of angles, the actual angles from 0 to - # pi for parallel beam, set the width of a detector - # pixel relative to an object pixe and the number of detector pixels. - angles_num = 20 - det_w = 1.0 - det_num = N - - angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ - 180/numpy.pi - - # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, - # horz detector pixel size, vert detector pixel count, - # vert detector pixel size. - ag = AcquisitionGeometry('parallel', - '3D', - angles, - N, - det_w, - vert, - det_w) - - # Set up Operator object combining the ImageGeometry and AcquisitionGeometry - # wrapping calls to CCPi projector. - A = CCPiProjectorSimple(ig, ag) - - # Forward and backprojection are available as methods direct and adjoint. Here - # generate test data b and do simple backprojection to obtain z. 
Display all - # data slices as images, and a single backprojected slice. - b = A.direct(Phantom) - z = A.adjoint(b) - - for i in range(b.get_dimension_size('vertical')): - plt.imshow(b.subset(vertical=i).array) - plt.show() - - plt.imshow(z.subset(vertical=0).array) - plt.title('Backprojected data') - plt.show() - - # Using the test data b, different reconstruction methods can now be set up as - # demonstrated in the rest of this file. In general all methods need an initial - # guess and some algorithm options to be set. Note that 100 iterations for - # some of the methods is a very low number and 1000 or 10000 iterations may be - # needed if one wants to obtain a converged solution. - x_init = ImageData(geometry=ig, - dimension_labels=['horizontal_x','horizontal_y','vertical']) - X_init = CompositeDataContainer(x_init) - B = CompositeDataContainer(b, - ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) - - # setup a tomo identity - I = 0.3 * TomoIdentity(geometry=ig) - - # composite operator - K = CompositeOperator(A, I, shape=(2,1)) - - out = K.direct(X_init) - - f = Norm2sq(K,B) - f.L = 0.1 - - cg = CGLS() - cg.set_up(X_init, K, B ) - cg.max_iteration = 1 - - cgs = CGLS() - cgs.set_up(x_init, A, b ) - cgs.max_iteration = 2 - - out.__isub__(B) - out2 = K.adjoint(out) - - #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - for _ in cg: - print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) - - fig = plt.figure() - plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) - plt.title('Composite CGLS') - plt.show() - - for _ in cgs: - print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) - - fig = plt.figure() - plt.imshow(cgs.get_output().subset(vertical=0).as_array()) - plt.title('Simple CGLS') +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 14 12:36:40 2019 + +@author: ofn77899 +""" +#from ccpi.optimisation.ops import Operator +import numpy +from numbers import Number +import functools +from ccpi.framework import AcquisitionData, ImageData + +class Operator(object): + '''Operator that maps from a space X -> Y''' + def __init__(self, **kwargs): + self.scalar = 1 + def is_linear(self): + '''Returns if the operator is linear''' + return False + def direct(self,x, out=None): + raise NotImplementedError + def size(self): + # To be defined for specific class + raise NotImplementedError + def norm(self): + raise NotImplementedError + def allocate_direct(self): + '''Allocates memory on the Y space''' + raise NotImplementedError + def allocate_adjoint(self): + '''Allocates memory on the X space''' + raise NotImplementedError + def range_dim(self): + raise NotImplementedError + def domain_dim(self): + raise NotImplementedError + def __rmul__(self, other): + assert isinstance(other, Number) + self.scalar = other + return self + +class LinearOperator(Operator): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return True + def adjoint(self,x, out=None): + raise NotImplementedError + +# this should go in the framework + +class CompositeDataContainer(object): + '''Class to hold a composite operator''' + __array_priority__ = 1 + def __init__(self, *args, shape=None): + '''containers must be passed row by row''' + self.containers = args + self.index = 0 + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size 
do not match: expected {} got {}' + .format(n_elements,len(args))) +# for i in range(shape[0]): +# b.append([]) +# for j in range(shape[1]): +# b[-1].append(args[i*shape[1]+j]) +# indices.append(i*shape[1]+j) +# self.containers = b + + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + try: + out = self[self.index] + except IndexError as ie: + raise StopIteration() + self.index+=1 + return out + + def is_compatible(self, other): + '''basic check if the size of the 2 objects fit''' + if isinstance(other, Number): + return True + elif isinstance(other, list): + # TODO look elements should be numbers + for ot in other: + if not isinstance(ot, (Number,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + raise ValueError('List/ numpy array can only contain numbers {}'\ + .format(type(ot))) + return len(self.containers) == len(other) + elif isinstance(other, numpy.ndarray): + return self.shape == other.shape + return len(self.containers) == len(other.containers) + def get_item(self, row, col=0): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col + return self.containers[index] + + def add(self, other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def subtract(self, other, out=None , *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def multiply(self, other , out=None, *args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + elif isinstance(other, numpy.ndarray): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def divide(self, other , out=None ,*args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def 
power(self, other , out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def maximum(self,other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + ## unary operations + def abs(self, out=None, *args, **kwargs): + return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) + def sign(self, out=None, *args, **kwargs): + return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) + def sqrt(self, out=None, *args, **kwargs): + return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) + def conjugate(self, out=None): + return type(self)(*[el.conjugate() for el in self.containers]) + + ## reductions + def sum(self, out=None, *args, **kwargs): + return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) + def norm(self): + y = numpy.asarray([el**2 for el in self.containers]) + return y.sum() + def copy(self): + '''alias of clone''' + return self.clone() + def clone(self): + return type(self)(*[el.copy() for el in self.containers]) + + def __add__(self, other): + return self.add( other ) + # __radd__ + + def __sub__(self, other): + return self.subtract( other ) + # __rsub__ + + def __mul__(self, other): + return self.multiply(other) + # __rmul__ + + def __div__(self, other): + return self.divide(other) + # __rdiv__ + def __truediv__(self, other): + return self.divide(other) + + def __pow__(self, other): + return self.power(other) + # reverse operand + def __radd__(self, other): + return self + other + # __radd__ + + def __rsub__(self, other): + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + '''Reverse multiplication + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self * other + # __rmul__ + + def __rdiv__(self, other): + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + return self.__rdiv__(other) + + def __rpow__(self, other): + return other.power(self) + + def __iadd__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el += ot + elif isinstance(other, Number): + for el in self.containers: + el += other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + self.is_compatible(other) + for el,ot in zip(self.containers, other): + el += ot + return self + # __radd__ + + def __isub__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el -= ot + elif isinstance(other, Number): + for el in 
self.containers: + el -= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el -= ot + return self + # __rsub__ + + def __imul__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el *= ot + elif isinstance(other, Number): + for el in self.containers: + el *= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el *= ot + return self + # __imul__ + + def __idiv__(self, other): + if isinstance (other, CompositeDataContainer): + for el,ot in zip(self.containers, other.containers): + el /= ot + elif isinstance(other, Number): + for el in self.containers: + el /= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el /= ot + return self + # __rdiv__ + def __itruediv__(self, other): + return self.__idiv__(other) + + + +class CompositeOperator(Operator): + '''Class to hold a composite operator''' + def __init__(self, *args, shape=None): + self.operators = args + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) + def get_item(self, row, col): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col + return self.operators[index] + + def norm(self): + norm = [op.norm() for op in self.operators] + b = [] + for i in range(self.shape[0]): + b.append([]) + for j in range(self.shape[1]): + b[-1].append(norm[i*self.shape[1]+j]) + return numpy.asarray(b) + + def direct(self, x, out=None): + shape = self.get_output_shape(x.shape) + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).direct(x.get_item(col)) + else: + prod += self.get_item(row,col).direct(x.get_item(col)) + res.append(prod) + return CompositeDataContainer(*res, shape=shape) + + def adjoint(self, x, out=None): + shape = self.get_output_shape(x.shape, adjoint=True) + res = [] + for row in range(self.shape[1]): + for col in range(self.shape[0]): + if col == 0: + prod = self.get_item(row,col).adjoint(x.get_item(col)) + else: + prod += self.get_item(row,col).adjoint(x.get_item(col)) + res.append(prod) + return CompositeDataContainer(*res, shape=shape) + + def get_output_shape(self, xshape, adjoint=False): + sshape = self.shape[1] + oshape = self.shape[0] + if adjoint: + sshape = self.shape[0] + oshape = self.shape[1] + if sshape != xshape[0]: + raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) + return (oshape, xshape[-1]) + +''' + def direct(self, x, out=None): + + out = [None]*self.dimension[0] + for i in range(self.dimension[0]): + z1 = ImageData(np.zeros(self.compMat[i][0].range_dim())) + for j in range(self.dimension[1]): + z1 += self.compMat[i][j].direct(x[j]) + out[i] = z1 + + return out + + + def adjoint(self, x, out=None): + + out = [None]*self.dimension[1] + for i in range(self.dimension[1]): + z2 = ImageData(np.zeros(self.compMat[0][i].domain_dim())) + for j in 
range(self.dimension[0]): + z2 += self.compMat[j][i].adjoint(x[j]) + out[i] = z2 +''' +from ccpi.optimisation.Algorithms import Algorithm +from collections.abc import Iterable +class CGLS(Algorithm): + + '''Conjugate Gradient Least Squares algorithm + + Parameters: + x_init: initial guess + operator: operator for forward/backward projections + data: data to operate on + ''' + def __init__(self, **kwargs): + super(CGLS, self).__init__() + self.x = kwargs.get('x_init', None) + self.operator = kwargs.get('operator', None) + self.data = kwargs.get('data', None) + if self.x is not None and self.operator is not None and \ + self.data is not None: + print ("Calling from creator") + return self.set_up(x_init =kwargs['x_init'], + operator=kwargs['operator'], + data =kwargs['data']) + + def set_up(self, x_init, operator , data ): + + self.r = data.copy() + self.x = x_init.copy() + + self.operator = operator + self.d = operator.adjoint(self.r) + + + self.normr2 = (self.d * self.d).sum() + if isinstance(self.normr2, Iterable): + self.normr2 = sum(self.normr2) + #self.normr2 = numpy.sqrt(self.normr2) + print ("set_up" , self.normr2) + + def should_stop(self): + '''stopping cryterion, currently only based on number of iterations''' + return self.iteration >= self.max_iteration + + def update(self): + + Ad = self.operator.direct(self.d) + norm = (Ad*Ad).sum() + if isinstance(norm, Iterable): + norm = sum(norm) + #norm = numpy.sqrt(norm) + print (norm) + alpha = self.normr2/norm + self.x += (self.d * alpha) + self.r -= (Ad * alpha) + s = self.operator.adjoint(self.r) + + normr2_new = (s*s).sum() + if isinstance(normr2_new, Iterable): + normr2_new = sum(normr2_new) + #normr2_new = numpy.sqrt(normr2_new) + print (normr2_new) + + beta = normr2_new/self.normr2 + self.normr2 = normr2_new + self.d = s + beta*self.d + + def update_objective(self): + self.loss.append((self.r*self.r).sum()) + + def run(self, iterations, callback=None): + self.max_iteration += iterations + for _ in self: + if callback is not None: + callback(self.iteration, self.get_current_loss()) + + +if __name__ == '__main__': + #from ccpi.optimisation.Algorithms import GradientDescent + from ccpi.plugins.ops import CCPiProjectorSimple + from ccpi.optimisation.ops import PowerMethodNonsquare + from ccpi.optimisation.ops import TomoIdentity + from ccpi.optimisation.funcs import Norm2sq, Norm1 + from ccpi.framework import ImageGeometry, AcquisitionGeometry + from ccpi.optimisation.Algorithms import GradientDescent + #from ccpi.optimisation.Algorithms import CGLS + import matplotlib.pyplot as plt + + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = CompositeDataContainer(data0,data1) + cp1 = CompositeDataContainer(data2,data3) +# + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] + print (a[0][0].shape) + #cp2 = CompositeDataContainer(*a) + cp2 = cp0.add(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + + cp2 = cp0 + cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + cp2 = cp0 + 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 + [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) + cp2 += cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 += 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) + + cp2 += [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) + + + cp2 = cp0.subtract(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + cp2 = cp0 - cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + + cp2 = cp0 - 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) + cp2 = cp0 - [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) + + cp2 -= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + cp2 -= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) + + cp2 -= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + + cp2 = cp0.multiply(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + cp2 = cp0 * cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + + cp2 = cp0 * 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = 2 * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = cp0 * [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 * numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 = [3,2] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = numpy.asarray([3,2]) * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = [3,2,3] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 *= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) + + + cp2 = cp0.divide(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + cp2 = cp0/cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + + cp2 = cp0 / 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp3 = numpy.asarray([3 ,2]) / (cp0+1) + numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. , decimal=5) + numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) + + cp2 += 1 + cp2 /= cp1 + # TODO fix inplace division + + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) + + cp2 /= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 /= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) + #### + + cp2 = cp0.power(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + cp2 = cp0**cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
+ numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0 ** 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) + + cp2 = cp0.maximum(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) + + + cp2 = cp0.abs() + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0.subtract(cp1) + s = cp2.sign() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) + + cp2 = cp0.add(cp1) + s = cp2.sqrt() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) + + s = cp0.sum() + numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + s0 = 1 + s1 = 1 + for i in cp0.get_item(0,0).shape: + s0 *= i + for i in cp0.get_item(1,0).shape: + s1 *= i + + numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) + + # Set up phantom size N x N x vert by creating ImageGeometry, initialising the + # ImageData object with this geometry and empty array and finally put some + # data into its array, and display one slice as image. + + # Image parameters + N = 128 + vert = 4 + + # Set up image geometry + ig = ImageGeometry(voxel_num_x=N, + voxel_num_y=N, + voxel_num_z=vert) + + # Set up empty image data + Phantom = ImageData(geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += 0.05 + # Populate image data by looping over and filling slices + i = 0 + while i < vert: + if vert > 1: + x = Phantom.subset(vertical=i).array + else: + x = Phantom.array + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 + if vert > 1 : + Phantom.fill(x, vertical=i) + i += 1 + + + perc = 0.02 + # Set up empty image data + noise = ImageData(numpy.random.normal(loc = 0.04 , + scale = perc , + size = Phantom.shape), geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += noise + + # Set up AcquisitionGeometry object to hold the parameters of the measurement + # setup geometry: # Number of angles, the actual angles from 0 to + # pi for parallel beam, set the width of a detector + # pixel relative to an object pixe and the number of detector pixels. + angles_num = 20 + det_w = 1.0 + det_num = N + + angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ + 180/numpy.pi + + # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, + # horz detector pixel size, vert detector pixel count, + # vert detector pixel size. + ag = AcquisitionGeometry('parallel', + '3D', + angles, + N, + det_w, + vert, + det_w) + + # Set up Operator object combining the ImageGeometry and AcquisitionGeometry + # wrapping calls to CCPi projector. + A = CCPiProjectorSimple(ig, ag) + + # Forward and backprojection are available as methods direct and adjoint. 
Here + # generate test data b and some noise + + b = A.direct(Phantom) + + + #z = A.adjoint(b) + + + # Using the test data b, different reconstruction methods can now be set up as + # demonstrated in the rest of this file. In general all methods need an initial + # guess and some algorithm options to be set. Note that 100 iterations for + # some of the methods is a very low number and 1000 or 10000 iterations may be + # needed if one wants to obtain a converged solution. + x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) + X_init = CompositeDataContainer(x_init) + B = CompositeDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + + # setup a tomo identity + Ibig = 1e5 * TomoIdentity(geometry=ig) + Ismall = 1e-5 * TomoIdentity(geometry=ig) + + # composite operator + Kbig = CompositeOperator(A, Ibig, shape=(2,1)) + Ksmall = CompositeOperator(A, Ismall, shape=(2,1)) + + #out = K.direct(X_init) + + f = Norm2sq(Kbig,B) + f.L = 0.00003 + + fsmall = Norm2sq(Ksmall,B) + f.L = 0.00003 + + simplef = Norm2sq(A, b) + simplef.L = 0.00003 + + gd = GradientDescent( x_init=x_init, objective_function=simplef, + rate=simplef.L) + gd.max_iteration = 10 + + cg = CGLS() + cg.set_up(X_init, Kbig, B ) + cg.max_iteration = 1 + + cgsmall = CGLS() + cgsmall.set_up(X_init, Ksmall, B ) + cgsmall.max_iteration = 1 + + + cgs = CGLS() + cgs.set_up(x_init, A, b ) + cgs.max_iteration = 6 +# + #out.__isub__(B) + #out2 = K.adjoint(out) + + #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + + for _ in gd: + print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + + cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + + cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + + cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +# for _ in cg: +# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) +# +# fig = plt.figure() +# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# plt.title('Composite CGLS') +# plt.show() +# +# for _ in cgs: +# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) +# + fig = plt.figure() + plt.subplot(1,5,1) + plt.imshow(Phantom.subset(vertical=0).as_array()) + plt.title('Simulated Phantom') + plt.subplot(1,5,2) + plt.imshow(gd.get_output().subset(vertical=0).as_array()) + plt.title('Simple Gradient Descent') + plt.subplot(1,5,3) + plt.imshow(cgs.get_output().subset(vertical=0).as_array()) + plt.title('Simple CGLS') + plt.subplot(1,5,4) + plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nbig lambda') + plt.subplot(1,5,5) + plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nsmall lambda') plt.show() \ No newline at end of file -- cgit v1.2.3 From de7319ec8bb22f1d80c83db20d89ce18fd6961a1 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 28 Feb 2019 19:11:46 +0000 Subject: removed line endings --- .../Python/ccpi/optimisation/algorithms/FBPD.py | 172 ++++++++++----------- 1 file changed, 86 insertions(+), 86 deletions(-) mode change 100755 => 100644 Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py old 
mode 100755 new mode 100644 index 322e9eb..798fb61 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py @@ -1,86 +1,86 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on Thu Feb 21 11:09:03 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.funcs import ZeroFun - -class FBPD(Algorithm): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - constraint = None - data_fidelity = None - regulariser = None - def __init__(self, **kwargs): - pass - def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - - # default inputs - if constraint is None: - self.constraint = ZeroFun() - else: - self.constraint = constraint - if data_fidelity is None: - data_fidelity = ZeroFun() - else: - self.data_fidelity = data_fidelity - if regulariser is None: - self.regulariser = ZeroFun() - else: - self.regulariser = regulariser - - # algorithmic parameters - - - # step-sizes - self.tau = 2 / (self.data_fidelity.L + 2) - self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L - - self.inv_sigma = 1/self.sigma - - # initialization - self.x = x_init - self.y = operator.direct(self.x) - - - def update(self): - - # primal forward-backward step - x_old = self.x - self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) - self.x = self.constraint.prox(self.x, self.tau); - - # dual forward-backward step - self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); - self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); - - # time and criterion - self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Created on Thu Feb 21 11:09:03 2019 + +@author: ofn77899 +""" + +from ccpi.optimisation.algorithms import Algorithm +from ccpi.optimisation.funcs import ZeroFun + +class FBPD(Algorithm): + '''FBPD Algorithm + + Parameters: + x_init: initial guess + f: constraint + g: data fidelity + h: regularizer + opt: additional algorithm + ''' + constraint = None + data_fidelity = None + regulariser = None + def __init__(self, **kwargs): + pass + def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ + regulariser=None, opt=None): + + # default inputs + if constraint is None: + self.constraint = ZeroFun() + else: + self.constraint = constraint + if data_fidelity is None: + data_fidelity = ZeroFun() + else: + self.data_fidelity = data_fidelity + if regulariser is None: + self.regulariser = ZeroFun() + else: + self.regulariser = regulariser + + # algorithmic parameters + + + # step-sizes + self.tau = 2 / (self.data_fidelity.L + 2) + self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L + + self.inv_sigma = 1/self.sigma + + # initialization + self.x = x_init + self.y = operator.direct(self.x) + + + def update(self): + + # primal forward-backward step + x_old = self.x + self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) + self.x = self.constraint.prox(self.x, self.tau); + + # dual forward-backward step + self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); + self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); + + # time and criterion + self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) -- cgit v1.2.3 From 77b9c17e7bef84295902df5ea6eeb9f7290138c3 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 28 Feb 2019 19:13:18 +0000 Subject: removed dos line ending --- .../ccpi/optimisation/algorithms/__init__.py | 56 +++++++++++----------- 1 file changed, 28 insertions(+), 28 deletions(-) mode change 100755 => 100644 Wrappers/Python/ccpi/optimisation/algorithms/__init__.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py old mode 100755 new mode 100644 index 52fe6d7..4a3830f --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -1,29 +1,29 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Created on Thu Feb 21 11:03:13 2019 - -@author: ofn77899 -""" - -from .Algorithm import Algorithm -from .CGLS import CGLS -from .GradientDescent import GradientDescent -from .FISTA import FISTA +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Created on Thu Feb 21 11:03:13 2019 + +@author: ofn77899 +""" + +from .Algorithm import Algorithm +from .CGLS import CGLS +from .GradientDescent import GradientDescent +from .FISTA import FISTA from .FBPD import FBPD \ No newline at end of file -- cgit v1.2.3 From df8a3a56c74773ffe1387d6a9f574f0f4938442b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 1 Mar 2019 17:55:10 +0000 Subject: delete Algorithms.py --- Wrappers/Python/ccpi/optimisation/Algorithms.py | 362 ------------------------ 1 file changed, 362 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/Algorithms.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/Algorithms.py b/Wrappers/Python/ccpi/optimisation/Algorithms.py deleted file mode 100644 index 0a5cac6..0000000 --- a/Wrappers/Python/ccpi/optimisation/Algorithms.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import numpy -import time -from ccpi.optimisation.funcs import ZeroFun - -class Algorithm(object): - '''Base class for iterative algorithms - - provides the minimal infrastructure. - Algorithms are iterables so can be easily run in a for loop. They will - stop as soon as the stop cryterion is met. 
- The user is required to implement the set_up, __init__, update and - should_stop and update_objective methods - ''' - - def __init__(self): - self.iteration = 0 - self.stop_cryterion = 'max_iter' - self.__max_iteration = 0 - self.__loss = [] - self.memopt = False - self.timing = [] - def set_up(self, *args, **kwargs): - raise NotImplementedError() - def update(self): - raise NotImplementedError() - - def should_stop(self): - '''default stopping cryterion: number of iterations''' - return self.iteration >= self.max_iteration - - def __iter__(self): - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - if self.should_stop(): - raise StopIteration() - else: - time0 = time.time() - self.update() - self.timing.append( time.time() - time0 ) - # TODO update every N iterations - self.update_objective() - self.iteration += 1 - def get_output(self): - '''Returns the solution found''' - return self.x - def get_current_loss(self): - '''Returns the current value of the loss function''' - return self.__loss[-1] - def get_current_objective(self): - return self.get_current_loss() - def update_objective(self): - raise NotImplementedError() - @property - def loss(self): - return self.__loss - @property - def objective(self): - return self.__loss - @property - def max_iteration(self): - return self.__max_iteration - @max_iteration.setter - def max_iteration(self, value): - assert isinstance(value, int) - self.__max_iteration = value - def run(self, iterations, callback=None): - '''run n iterations and update the user with the callback if specified''' - self.max_iteration += iterations - for _ in self: - if callback is not None: - callback(self.iteration, self.get_current_loss()) - -class GradientDescent(Algorithm): - '''Implementation of a simple Gradient Descent algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(GradientDescent, self).__init__() - self.x = None - self.rate = 0 - self.objective_function = None - self.regulariser = None - args = ['x_init', 'objective_function', 'rate'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(x_init=kwargs['x_init'], - objective_function=kwargs['objective_function'], - rate=kwargs['rate']) - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def set_up(self, x_init, objective_function, rate): - '''initialisation of the algorithm''' - self.x = x_init.copy() - if self.memopt: - self.x_update = x_init.copy() - self.objective_function = objective_function - self.rate = rate - self.loss.append(objective_function(x_init)) - - def update(self): - '''Single iteration''' - if self.memopt: - self.objective_function.gradient(self.x, out=self.x_update) - self.x_update *= -self.rate - self.x += self.x_update - else: - self.x += -self.rate * self.objective_function.grad(self.x) - - def update_objective(self): - self.loss.append(self.objective_function(self.x)) - - - -class FISTA(Algorithm): - '''Fast Iterative Shrinkage-Thresholding Algorithm - - Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding - algorithm for linear inverse problems. - SIAM journal on imaging sciences,2(1), pp.183-202. 
- - Parameters: - x_init: initial guess - f: data fidelity - g: regularizer - h: - opt: additional algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(FISTA, self).__init__() - self.f = None - self.g = None - self.invL = None - self.t_old = 1 - args = ['x_init', 'f', 'g', 'opt'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(x_init=kwargs['x_init'], - f=kwargs['f'], - g=kwargs['g'], - opt=kwargs['opt']) - - def set_up(self, x_init, f=None, g=None, opt=None): - - # default inputs - if f is None: - self.f = ZeroFun() - else: - self.f = f - if g is None: - g = ZeroFun() - else: - self.g = g - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} - - self.max_iteration = opt['iter'] if 'iter' in opt.keys() else 1000 - self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - self.memopt = memopt - - # initialization - if memopt: - self.y = x_init.clone() - self.x_old = x_init.clone() - self.x = x_init.clone() - self.u = x_init.clone() - else: - self.x_old = x_init.copy() - self.y = x_init.copy() - - #timing = numpy.zeros(max_iter) - #criter = numpy.zeros(max_iter) - - - self.invL = 1/f.L - - self.t_old = 1 - - def update(self): - # algorithm loop - #for it in range(0, max_iter): - - if self.memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - self.f.gradient(self.y, out=self.u) - self.u.__imul__( -self.invL ) - self.u.__iadd__( self.y ) - # x = g.prox(u,invL) - self.g.proximal(self.u, self.invL, out=self.x) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - self.x.subtract(self.x_old, out=self.y) - self.y.__imul__ ((self.t_old-1)/self.t) - self.y.__iadd__( self.x ) - - self.x_old.fill(self.x) - self.t_old = self.t - - - else: - u = self.y - self.invL*self.f.grad(self.y) - - self.x = self.g.prox(u,self.invL) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) - - self.x_old = self.x.copy() - self.t_old = self.t - - def update_objective(self): - self.loss.append( self.f(self.x) + self.g(self.x) ) - -class FBPD(Algorithm): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - constraint = None - data_fidelity = None - regulariser = None - def __init__(self, **kwargs): - pass - def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - - # default inputs - if constraint is None: - self.constraint = ZeroFun() - else: - self.constraint = constraint - if data_fidelity is None: - data_fidelity = ZeroFun() - else: - self.data_fidelity = data_fidelity - if regulariser is None: - self.regulariser = ZeroFun() - else: - self.regulariser = regulariser - - # algorithmic parameters - - - # step-sizes - self.tau = 2 / (self.data_fidelity.L + 2) - self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L - - self.inv_sigma = 1/self.sigma - - # initialization - self.x = x_init - self.y = operator.direct(self.x) - - - def update(self): - - # primal forward-backward step - x_old = self.x - self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) - self.x = self.constraint.prox(self.x, self.tau); - - # dual 
forward-backward step - self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); - self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); - - # time and criterion - self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) - -class CGLS(Algorithm): - - '''Conjugate Gradient Least Squares algorithm - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - ''' - def __init__(self, **kwargs): - super(CGLS, self).__init__() - self.x = kwargs.get('x_init', None) - self.operator = kwargs.get('operator', None) - self.data = kwargs.get('data', None) - if self.x is not None and self.operator is not None and \ - self.data is not None: - print ("Calling from creator") - return self.set_up(x_init =kwargs['x_init'], - operator=kwargs['operator'], - data =kwargs['data']) - - def set_up(self, x_init, operator , data ): - - self.r = data.copy() - self.x = x_init.copy() - - self.operator = operator - self.d = operator.adjoint(self.r) - - self.normr2 = self.d.norm() - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def update(self): - - Ad = self.operator.direct(self.d) - alpha = self.normr2/Ad.norm() - self.x += alpha * self.d - self.r -= alpha * Ad - s = self.operator.adjoint(self.r) - - normr2_new = s.norm() - beta = normr2_new/self.normr2 - self.normr2 = normr2_new - self.d = s + beta*self.d - - def update_objective(self): - self.loss.append(self.r.norm()) -- cgit v1.2.3 From fb50231c6e790f47eb04b785465a41b9fb17f055 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 1 Mar 2019 17:58:53 +0000 Subject: renamed to Block... 
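
This patch renames CompositeDataContainer to BlockDataContainer and CompositeOperator to BlockOperator, moving the code to operators/BlockOperator.py. The __main__ demo in the new file exercises the intended pattern: stacking the projector A on top of a scaled identity, and the data b on top of a zero image, so that plain CGLS on the block system amounts to a regularised least-squares reconstruction (roughly min ||Ax - b||^2 plus a penalty on x whose strength is set by the identity's scale). The sketch below distils that usage; the import path for the block classes, the value of alpha and the variable names are illustrative assumptions rather than part of the committed code, and running it needs the CCPi projector plugin used by the demo.

import numpy
from ccpi.framework import ImageGeometry, ImageData, AcquisitionGeometry
from ccpi.optimisation.ops import TomoIdentity
from ccpi.optimisation.algorithms import CGLS
from ccpi.plugins.ops import CCPiProjectorSimple
# assumed import location for the classes added by this patch
from ccpi.optimisation.operators.BlockOperator import BlockOperator, BlockDataContainer

# geometry and simulated data, mirroring the demo at the bottom of BlockOperator.py
N, vert = 128, 4
ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N, voxel_num_z=vert)
angles = numpy.linspace(0, numpy.pi, 20, endpoint=False, dtype=numpy.float32) * 180 / numpy.pi
ag = AcquisitionGeometry('parallel', '3D', angles, N, 1.0, vert, 1.0)
A = CCPiProjectorSimple(ig, ag)
labels = ['horizontal_x', 'horizontal_y', 'vertical']
phantom = ImageData(geometry=ig, dimension_labels=labels) + 0.05
b = A.direct(phantom)

# block system K = [A; alpha*I], B = [b; 0]: CGLS on (K, B) reconstructs x while
# penalising its magnitude, with alpha controlling the penalty strength
alpha = 1e-5
K = BlockOperator(A, alpha * TomoIdentity(geometry=ig), shape=(2, 1))
B = BlockDataContainer(b, ImageData(geometry=ig, dimension_labels=labels))
x0 = BlockDataContainer(ImageData(geometry=ig, dimension_labels=labels))

cgls = CGLS()
cgls.set_up(x0, K, B)
cgls.run(10, lambda it, val: print("iteration {} objective {}".format(it, val)))
recon = cgls.get_output().get_item(0, 0)   # first block holds the reconstructed image
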
--- .../ccpi/optimisation/operators/BlockOperator.py | 744 +++++++++++++++++++ .../optimisation/operators/CompositeOperator.py | 819 --------------------- Wrappers/Python/ccpi/optimisation/ops.py | 6 - Wrappers/Python/setup.py | 5 +- 4 files changed, 745 insertions(+), 829 deletions(-) create mode 100755 Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py delete mode 100755 Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py new file mode 100755 index 0000000..b8285b0 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -0,0 +1,744 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 14 12:36:40 2019 + +@author: ofn77899 +""" +#from ccpi.optimisation.ops import Operator +import numpy +from numbers import Number +import functools +from ccpi.framework import AcquisitionData, ImageData + +class Operator(object): + '''Operator that maps from a space X -> Y''' + def __init__(self, **kwargs): + self.scalar = 1 + def is_linear(self): + '''Returns if the operator is linear''' + return False + def direct(self,x, out=None): + raise NotImplementedError + def size(self): + # To be defined for specific class + raise NotImplementedError + def norm(self): + raise NotImplementedError + def range_dim(self): + raise NotImplementedError + def domain_dim(self): + raise NotImplementedError + def __rmul__(self, other): + assert isinstance(other, Number) + self.scalar = other + return self + +class LinearOperator(Operator): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return True + def adjoint(self,x, out=None): + raise NotImplementedError + +# this should go in the framework + +class BlockDataContainer(object): + '''Class to hold a composite operator''' + __array_priority__ = 1 + def __init__(self, *args, shape=None): + '''containers must be passed row by row''' + self.containers = args + self.index = 0 + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) +# for i in range(shape[0]): +# b.append([]) +# for j in range(shape[1]): +# b[-1].append(args[i*shape[1]+j]) +# indices.append(i*shape[1]+j) +# self.containers = b + + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + try: + out = self[self.index] + except IndexError as ie: + raise StopIteration() + self.index+=1 + return out + + def is_compatible(self, other): + '''basic check if the size of the 2 objects fit''' + if isinstance(other, Number): + return True + elif isinstance(other, list): + # TODO look elements should be numbers + for ot in other: + if not isinstance(ot, (Number,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + raise ValueError('List/ numpy array can only contain numbers {}'\ + .format(type(ot))) + return len(self.containers) == len(other) + elif isinstance(other, numpy.ndarray): + return self.shape == other.shape + return len(self.containers) == len(other.containers) + def get_item(self, row, col=0): + if row > self.shape[0]: + raise ValueError('Requested row {} > max 
{}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col + return self.containers[index] + + def add(self, other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def subtract(self, other, out=None , *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def multiply(self, other , out=None, *args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + elif isinstance(other, numpy.ndarray): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def divide(self, other , out=None ,*args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def power(self, other , out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def maximum(self,other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + ## unary operations + def abs(self, out=None, *args, **kwargs): + return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) + def sign(self, out=None, *args, **kwargs): + return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) + def sqrt(self, 
out=None, *args, **kwargs): + return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) + def conjugate(self, out=None): + return type(self)(*[el.conjugate() for el in self.containers]) + + ## reductions + def sum(self, out=None, *args, **kwargs): + return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) + def squared_norm(self): + y = numpy.asarray([el.squared_norm() for el in self.containers]) + return y.sum() + def norm(self): + y = numpy.asarray([el.norm() for el in self.containers]) + return y.sum() + def copy(self): + '''alias of clone''' + return self.clone() + def clone(self): + return type(self)(*[el.copy() for el in self.containers]) + + def __add__(self, other): + return self.add( other ) + # __radd__ + + def __sub__(self, other): + return self.subtract( other ) + # __rsub__ + + def __mul__(self, other): + return self.multiply(other) + # __rmul__ + + def __div__(self, other): + return self.divide(other) + # __rdiv__ + def __truediv__(self, other): + return self.divide(other) + + def __pow__(self, other): + return self.power(other) + # reverse operand + def __radd__(self, other): + return self + other + # __radd__ + + def __rsub__(self, other): + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + '''Reverse multiplication + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self * other + # __rmul__ + + def __rdiv__(self, other): + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + return self.__rdiv__(other) + + def __rpow__(self, other): + return other.power(self) + + def __iadd__(self, other): + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el += ot + elif isinstance(other, Number): + for el in self.containers: + el += other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + self.is_compatible(other) + for el,ot in zip(self.containers, other): + el += ot + return self + # __radd__ + + def __isub__(self, other): + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el -= ot + elif isinstance(other, Number): + for el in self.containers: + el -= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el -= ot + return self + # __rsub__ + + def __imul__(self, other): + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el *= ot + elif isinstance(other, Number): + for el in self.containers: + el *= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el *= ot + return self + # __imul__ + + def __idiv__(self, other): + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el /= ot + elif isinstance(other, Number): + for el in self.containers: + el /= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el /= ot + return self + # __rdiv__ + def __itruediv__(self, other): + return self.__idiv__(other) + + + +class BlockOperator(Operator): + '''Class to hold a block operator''' + def 
__init__(self, *args, shape=None): + self.operators = args + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) + def get_item(self, row, col): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col + return self.operators[index] + + def norm(self): + norm = [op.norm() for op in self.operators] + b = [] + for i in range(self.shape[0]): + b.append([]) + for j in range(self.shape[1]): + b[-1].append(norm[i*self.shape[1]+j]) + return numpy.asarray(b) + + def direct(self, x, out=None): + shape = self.get_output_shape(x.shape) + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).direct(x.get_item(col)) + else: + prod += self.get_item(row,col).direct(x.get_item(col)) + res.append(prod) + return BlockDataContainer(*res, shape=shape) + + def adjoint(self, x, out=None): + shape = self.get_output_shape(x.shape, adjoint=True) + res = [] + for row in range(self.shape[1]): + for col in range(self.shape[0]): + if col == 0: + prod = self.get_item(row,col).adjoint(x.get_item(col)) + else: + prod += self.get_item(row,col).adjoint(x.get_item(col)) + res.append(prod) + return BlockDataContainer(*res, shape=shape) + + def get_output_shape(self, xshape, adjoint=False): + sshape = self.shape[1] + oshape = self.shape[0] + if adjoint: + sshape = self.shape[0] + oshape = self.shape[1] + if sshape != xshape[0]: + raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) + return (oshape, xshape[-1]) + +''' + def direct(self, x, out=None): + + out = [None]*self.dimension[0] + for i in range(self.dimension[0]): + z1 = ImageData(np.zeros(self.compMat[i][0].range_dim())) + for j in range(self.dimension[1]): + z1 += self.compMat[i][j].direct(x[j]) + out[i] = z1 + + return out + + + def adjoint(self, x, out=None): + + out = [None]*self.dimension[1] + for i in range(self.dimension[1]): + z2 = ImageData(np.zeros(self.compMat[0][i].domain_dim())) + for j in range(self.dimension[0]): + z2 += self.compMat[j][i].adjoint(x[j]) + out[i] = z2 +''' +from ccpi.optimisation.algorithms import CGLS + + +if __name__ == '__main__': + #from ccpi.optimisation.Algorithms import GradientDescent + from ccpi.plugins.ops import CCPiProjectorSimple + from ccpi.optimisation.ops import PowerMethodNonsquare + from ccpi.optimisation.ops import TomoIdentity + from ccpi.optimisation.funcs import Norm2sq, Norm1 + from ccpi.framework import ImageGeometry, AcquisitionGeometry + from ccpi.optimisation.Algorithms import GradientDescent + #from ccpi.optimisation.Algorithms import CGLS + import matplotlib.pyplot as plt + + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + cp1 = BlockDataContainer(data2,data3) +# + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] + print (a[0][0].shape) + #cp2 = BlockDataContainer(*a) + cp2 = cp0.add(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) 
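    # Note (illustrative comment, not part of the committed diff): data0 and data2
    # are constant 0- and 2-valued images on ig0, while data1 and data3 are constant
    # 1- and 3-valued images on ig1, so cp0 holds the blocks (0, 1) and cp1 the
    # blocks (2, 3). BlockDataContainer arithmetic is applied block by block, which
    # is why the element-wise sums checked above are 2. and 4.; the tests that follow
    # repeat the same checks for operator syntax and for scalar, list and numpy
    # array broadcasting across the blocks.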
+ + cp2 = cp0 + cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + cp2 = cp0 + 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 + [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) + cp2 += cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 += 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) + + cp2 += [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) + + + cp2 = cp0.subtract(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + cp2 = cp0 - cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + + cp2 = cp0 - 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) + cp2 = cp0 - [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) + + cp2 -= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + cp2 -= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) + + cp2 -= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + + cp2 = cp0.multiply(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + cp2 = cp0 * cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + + cp2 = cp0 * 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = 2 * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = cp0 * [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 * numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 = [3,2] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = numpy.asarray([3,2]) * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = [3,2,3] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 *= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) + + + cp2 = cp0.divide(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + cp2 = cp0/cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + + cp2 = cp0 / 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp3 = numpy.asarray([3 ,2]) / (cp0+1) + numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. , decimal=5) + numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) + + cp2 += 1 + cp2 /= cp1 + # TODO fix inplace division + + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) + + cp2 /= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 /= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) + #### + + cp2 = cp0.power(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + cp2 = cp0**cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
+ numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0 ** 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) + + cp2 = cp0.maximum(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) + + + cp2 = cp0.abs() + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0.subtract(cp1) + s = cp2.sign() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) + + cp2 = cp0.add(cp1) + s = cp2.sqrt() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) + + s = cp0.sum() + numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + s0 = 1 + s1 = 1 + for i in cp0.get_item(0,0).shape: + s0 *= i + for i in cp0.get_item(1,0).shape: + s1 *= i + + numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) + + # Set up phantom size N x N x vert by creating ImageGeometry, initialising the + # ImageData object with this geometry and empty array and finally put some + # data into its array, and display one slice as image. + + # Image parameters + N = 128 + vert = 4 + + # Set up image geometry + ig = ImageGeometry(voxel_num_x=N, + voxel_num_y=N, + voxel_num_z=vert) + + # Set up empty image data + Phantom = ImageData(geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += 0.05 + # Populate image data by looping over and filling slices + i = 0 + while i < vert: + if vert > 1: + x = Phantom.subset(vertical=i).array + else: + x = Phantom.array + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 + if vert > 1 : + Phantom.fill(x, vertical=i) + i += 1 + + + perc = 0.02 + # Set up empty image data + noise = ImageData(numpy.random.normal(loc = 0.04 , + scale = perc , + size = Phantom.shape), geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += noise + + # Set up AcquisitionGeometry object to hold the parameters of the measurement + # setup geometry: # Number of angles, the actual angles from 0 to + # pi for parallel beam, set the width of a detector + # pixel relative to an object pixe and the number of detector pixels. + angles_num = 20 + det_w = 1.0 + det_num = N + + angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ + 180/numpy.pi + + # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, + # horz detector pixel size, vert detector pixel count, + # vert detector pixel size. + ag = AcquisitionGeometry('parallel', + '3D', + angles, + N, + det_w, + vert, + det_w) + + # Set up Operator object combining the ImageGeometry and AcquisitionGeometry + # wrapping calls to CCPi projector. + A = CCPiProjectorSimple(ig, ag) + + # Forward and backprojection are available as methods direct and adjoint. 
Here + # generate test data b and some noise + + b = A.direct(Phantom) + + + #z = A.adjoint(b) + + + # Using the test data b, different reconstruction methods can now be set up as + # demonstrated in the rest of this file. In general all methods need an initial + # guess and some algorithm options to be set. Note that 100 iterations for + # some of the methods is a very low number and 1000 or 10000 iterations may be + # needed if one wants to obtain a converged solution. + x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) + X_init = BlockDataContainer(x_init) + B = BlockDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + + # setup a tomo identity + Ibig = 1e5 * TomoIdentity(geometry=ig) + Ismall = 1e-5 * TomoIdentity(geometry=ig) + + # composite operator + Kbig = BlockOperator(A, Ibig, shape=(2,1)) + Ksmall = BlockOperator(A, Ismall, shape=(2,1)) + + #out = K.direct(X_init) + + f = Norm2sq(Kbig,B) + f.L = 0.00003 + + fsmall = Norm2sq(Ksmall,B) + f.L = 0.00003 + + simplef = Norm2sq(A, b) + simplef.L = 0.00003 + + gd = GradientDescent( x_init=x_init, objective_function=simplef, + rate=simplef.L) + gd.max_iteration = 10 + + cg = CGLS() + cg.set_up(X_init, Kbig, B ) + cg.max_iteration = 1 + + cgsmall = CGLS() + cgsmall.set_up(X_init, Ksmall, B ) + cgsmall.max_iteration = 1 + + + cgs = CGLS() + cgs.set_up(x_init, A, b ) + cgs.max_iteration = 6 +# + #out.__isub__(B) + #out2 = K.adjoint(out) + + #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + + for _ in gd: + print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + + cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + + cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + + cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +# for _ in cg: +# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) +# +# fig = plt.figure() +# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# plt.title('Composite CGLS') +# plt.show() +# +# for _ in cgs: +# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) +# + fig = plt.figure() + plt.subplot(1,5,1) + plt.imshow(Phantom.subset(vertical=0).as_array()) + plt.title('Simulated Phantom') + plt.subplot(1,5,2) + plt.imshow(gd.get_output().subset(vertical=0).as_array()) + plt.title('Simple Gradient Descent') + plt.subplot(1,5,3) + plt.imshow(cgs.get_output().subset(vertical=0).as_array()) + plt.title('Simple CGLS') + plt.subplot(1,5,4) + plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nbig lambda') + plt.subplot(1,5,5) + plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nsmall lambda') + plt.show() \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py b/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py deleted file mode 100755 index 77abb8c..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/CompositeOperator.py +++ /dev/null @@ -1,819 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" -#from ccpi.optimisation.ops import Operator -import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData 
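# In the demo script above, stacking the projector with a scaled identity
# (Kbig = BlockOperator(A, Ibig)) and padding the data with a zero image in B
# turns plain least squares on the block system into Tikhonov regularisation.
# A minimal dense NumPy sketch of that equivalence (illustrative only; small
# made-up matrices stand in for the CCPi operators):
import numpy

rng = numpy.random.default_rng(0)
A = rng.standard_normal((20, 10))
b = rng.standard_normal(20)
alpha = 0.1
K = numpy.vstack([A, alpha * numpy.eye(10)])        # block operator [A; alpha*I]
B = numpy.concatenate([b, numpy.zeros(10)])         # stacked data  [b; 0]
x_block = numpy.linalg.lstsq(K, B, rcond=None)[0]   # least squares on the block system
x_tikh = numpy.linalg.solve(A.T @ A + alpha**2 * numpy.eye(10), A.T @ b)
assert numpy.allclose(x_block, x_tikh)              # same minimiser either way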
- -class Operator(object): - '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 - def is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError - def norm(self): - raise NotImplementedError - def allocate_direct(self): - '''Allocates memory on the Y space''' - raise NotImplementedError - def allocate_adjoint(self): - '''Allocates memory on the X space''' - raise NotImplementedError - def range_dim(self): - raise NotImplementedError - def domain_dim(self): - raise NotImplementedError - def __rmul__(self, other): - assert isinstance(other, Number) - self.scalar = other - return self - -class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - raise NotImplementedError - -# this should go in the framework - -class CompositeDataContainer(object): - '''Class to hold a composite operator''' - __array_priority__ = 1 - def __init__(self, *args, shape=None): - '''containers must be passed row by row''' - self.containers = args - self.index = 0 - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) -# for i in range(shape[0]): -# b.append([]) -# for j in range(shape[1]): -# b[-1].append(args[i*shape[1]+j]) -# indices.append(i*shape[1]+j) -# self.containers = b - - def __iter__(self): - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 2 objects fit''' - if isinstance(other, Number): - return True - elif isinstance(other, list): - # TODO look elements should be numbers - for ot in other: - if not isinstance(ot, (Number,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - raise ValueError('List/ numpy array can only contain numbers {}'\ - .format(type(ot))) - return len(self.containers) == len(other) - elif isinstance(other, numpy.ndarray): - return self.shape == other.shape - return len(self.containers) == len(other.containers) - def get_item(self, row, col=0): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.containers[index] - - def add(self, other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def subtract(self, other, out=None , *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - 
return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def multiply(self, other , out=None, *args, **kwargs): - self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def divide(self, other , out=None ,*args, **kwargs): - self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def power(self, other , out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def maximum(self,other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - ## unary operations - def abs(self, out=None, *args, **kwargs): - return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) - def sign(self, out=None, *args, **kwargs): - return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) - def sqrt(self, out=None, *args, **kwargs): - return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) - def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers]) - - ## reductions - def sum(self, out=None, *args, **kwargs): - return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) - def norm(self): - y = numpy.asarray([el**2 for el in self.containers]) - return y.sum() - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers]) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def 
__div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - '''Reverse multiplication - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self * other - # __rmul__ - - def __rdiv__(self, other): - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - return other.power(self) - - def __iadd__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - self.is_compatible(other) - for el,ot in zip(self.containers, other): - el += ot - return self - # __radd__ - - def __isub__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el -= ot - return self - # __rsub__ - - def __imul__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - if isinstance (other, CompositeDataContainer): - for el,ot in zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - return self.__idiv__(other) - - - -class CompositeOperator(Operator): - '''Class to hold a composite operator''' - def __init__(self, *args, shape=None): - self.operators = args - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) - def get_item(self, row, col): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.operators[index] - - def norm(self): - norm = [op.norm() for op in self.operators] - b = [] - for i in range(self.shape[0]): - b.append([]) - for j in range(self.shape[1]): - b[-1].append(norm[i*self.shape[1]+j]) - return numpy.asarray(b) - - def direct(self, x, out=None): - shape = 
self.get_output_shape(x.shape) - res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): - if col == 0: - prod = self.get_item(row,col).direct(x.get_item(col)) - else: - prod += self.get_item(row,col).direct(x.get_item(col)) - res.append(prod) - return CompositeDataContainer(*res, shape=shape) - - def adjoint(self, x, out=None): - shape = self.get_output_shape(x.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row,col).adjoint(x.get_item(col)) - else: - prod += self.get_item(row,col).adjoint(x.get_item(col)) - res.append(prod) - return CompositeDataContainer(*res, shape=shape) - - def get_output_shape(self, xshape, adjoint=False): - sshape = self.shape[1] - oshape = self.shape[0] - if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - return (oshape, xshape[-1]) - -''' - def direct(self, x, out=None): - - out = [None]*self.dimension[0] - for i in range(self.dimension[0]): - z1 = ImageData(np.zeros(self.compMat[i][0].range_dim())) - for j in range(self.dimension[1]): - z1 += self.compMat[i][j].direct(x[j]) - out[i] = z1 - - return out - - - def adjoint(self, x, out=None): - - out = [None]*self.dimension[1] - for i in range(self.dimension[1]): - z2 = ImageData(np.zeros(self.compMat[0][i].domain_dim())) - for j in range(self.dimension[0]): - z2 += self.compMat[j][i].adjoint(x[j]) - out[i] = z2 -''' -from ccpi.optimisation.Algorithms import Algorithm -from collections.abc import Iterable -class CGLS(Algorithm): - - '''Conjugate Gradient Least Squares algorithm - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - ''' - def __init__(self, **kwargs): - super(CGLS, self).__init__() - self.x = kwargs.get('x_init', None) - self.operator = kwargs.get('operator', None) - self.data = kwargs.get('data', None) - if self.x is not None and self.operator is not None and \ - self.data is not None: - print ("Calling from creator") - return self.set_up(x_init =kwargs['x_init'], - operator=kwargs['operator'], - data =kwargs['data']) - - def set_up(self, x_init, operator , data ): - - self.r = data.copy() - self.x = x_init.copy() - - self.operator = operator - self.d = operator.adjoint(self.r) - - - self.normr2 = (self.d * self.d).sum() - if isinstance(self.normr2, Iterable): - self.normr2 = sum(self.normr2) - #self.normr2 = numpy.sqrt(self.normr2) - print ("set_up" , self.normr2) - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def update(self): - - Ad = self.operator.direct(self.d) - norm = (Ad*Ad).sum() - if isinstance(norm, Iterable): - norm = sum(norm) - #norm = numpy.sqrt(norm) - print (norm) - alpha = self.normr2/norm - self.x += (self.d * alpha) - self.r -= (Ad * alpha) - s = self.operator.adjoint(self.r) - - normr2_new = (s*s).sum() - if isinstance(normr2_new, Iterable): - normr2_new = sum(normr2_new) - #normr2_new = numpy.sqrt(normr2_new) - print (normr2_new) - - beta = normr2_new/self.normr2 - self.normr2 = normr2_new - self.d = s + beta*self.d - - def update_objective(self): - self.loss.append((self.r*self.r).sum()) - - def run(self, iterations, callback=None): - self.max_iteration += iterations - for _ in self: - if callback is not None: - callback(self.iteration, self.get_current_loss()) - - -if __name__ == 
'__main__': - #from ccpi.optimisation.Algorithms import GradientDescent - from ccpi.plugins.ops import CCPiProjectorSimple - from ccpi.optimisation.ops import PowerMethodNonsquare - from ccpi.optimisation.ops import TomoIdentity - from ccpi.optimisation.funcs import Norm2sq, Norm1 - from ccpi.framework import ImageGeometry, AcquisitionGeometry - from ccpi.optimisation.Algorithms import GradientDescent - #from ccpi.optimisation.Algorithms import CGLS - import matplotlib.pyplot as plt - - ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(12,42,55,32) - - data0 = ImageData(geometry=ig0) - data1 = ImageData(geometry=ig1) + 1 - - data2 = ImageData(geometry=ig0) + 2 - data3 = ImageData(geometry=ig1) + 3 - - cp0 = CompositeDataContainer(data0,data1) - cp1 = CompositeDataContainer(data2,data3) -# - a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] - print (a[0][0].shape) - #cp2 = CompositeDataContainer(*a) - cp2 = cp0.add(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) - - cp2 = cp0 + cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) - cp2 = cp0 + 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = cp0 + [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) - cp2 += cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 += 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) - - cp2 += [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) - - - cp2 = cp0.subtract(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - cp2 = cp0 - cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - - cp2 = cp0 - 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) - cp2 = cp0 - [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) - - cp2 -= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - cp2 -= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) - - cp2 -= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. 
, decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - - cp2 = cp0.multiply(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - cp2 = cp0 * cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - - cp2 = cp0 * 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) - cp2 = 2 * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) - cp2 = cp0 * [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = cp0 * numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - - cp2 = [3,2] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = numpy.asarray([3,2]) * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = [3,2,3] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - - cp2 *= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) - - - cp2 = cp0.divide(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - cp2 = cp0/cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - - cp2 = cp0 / 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp2 = cp0 / [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp2 = cp0 / numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp3 = numpy.asarray([3 ,2]) / (cp0+1) - numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. 
, decimal=5) - numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) - - cp2 += 1 - cp2 /= cp1 - # TODO fix inplace division - - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) - - cp2 /= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - - cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) - #### - - cp2 = cp0.power(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - cp2 = cp0**cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0 ** 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) - - cp2 = cp0.maximum(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) - - - cp2 = cp0.abs() - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0.subtract(cp1) - s = cp2.sign() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) - - cp2 = cp0.add(cp1) - s = cp2.sqrt() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) - - s = cp0.sum() - numpy.testing.assert_almost_equal(s[0], 0, decimal=4) - s0 = 1 - s1 = 1 - for i in cp0.get_item(0,0).shape: - s0 *= i - for i in cp0.get_item(1,0).shape: - s1 *= i - - numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) - - # Set up phantom size N x N x vert by creating ImageGeometry, initialising the - # ImageData object with this geometry and empty array and finally put some - # data into its array, and display one slice as image. 
- - # Image parameters - N = 128 - vert = 4 - - # Set up image geometry - ig = ImageGeometry(voxel_num_x=N, - voxel_num_y=N, - voxel_num_z=vert) - - # Set up empty image data - Phantom = ImageData(geometry=ig, - dimension_labels=['horizontal_x', - 'horizontal_y', - 'vertical']) - Phantom += 0.05 - # Populate image data by looping over and filling slices - i = 0 - while i < vert: - if vert > 1: - x = Phantom.subset(vertical=i).array - else: - x = Phantom.array - x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 - x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 - if vert > 1 : - Phantom.fill(x, vertical=i) - i += 1 - - - perc = 0.02 - # Set up empty image data - noise = ImageData(numpy.random.normal(loc = 0.04 , - scale = perc , - size = Phantom.shape), geometry=ig, - dimension_labels=['horizontal_x', - 'horizontal_y', - 'vertical']) - Phantom += noise - - # Set up AcquisitionGeometry object to hold the parameters of the measurement - # setup geometry: # Number of angles, the actual angles from 0 to - # pi for parallel beam, set the width of a detector - # pixel relative to an object pixe and the number of detector pixels. - angles_num = 20 - det_w = 1.0 - det_num = N - - angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ - 180/numpy.pi - - # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, - # horz detector pixel size, vert detector pixel count, - # vert detector pixel size. - ag = AcquisitionGeometry('parallel', - '3D', - angles, - N, - det_w, - vert, - det_w) - - # Set up Operator object combining the ImageGeometry and AcquisitionGeometry - # wrapping calls to CCPi projector. - A = CCPiProjectorSimple(ig, ag) - - # Forward and backprojection are available as methods direct and adjoint. Here - # generate test data b and some noise - - b = A.direct(Phantom) - - - #z = A.adjoint(b) - - - # Using the test data b, different reconstruction methods can now be set up as - # demonstrated in the rest of this file. In general all methods need an initial - # guess and some algorithm options to be set. Note that 100 iterations for - # some of the methods is a very low number and 1000 or 10000 iterations may be - # needed if one wants to obtain a converged solution. 
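# The reconstructions set up below all iterate on the least-squares objective
# ||A x - b||^2 from an initial guess, so the iteration count and step size
# decide how close to the minimiser they get. A toy NumPy version of a fixed-step
# descent on that objective (a sketch with a small made-up matrix; the step is
# taken as 1/L with L the Lipschitz constant of the gradient):
import numpy

A = numpy.array([[2.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
b = numpy.array([1.0, 1.0, 1.0])
L = 2.0 * numpy.linalg.norm(A, 2) ** 2           # Lipschitz constant of 2*A.T@(A@x - b)
x = numpy.zeros(2)
for _ in range(100):
    x = x - (1.0 / L) * 2.0 * A.T @ (A @ x - b)  # x <- x - step * gradient
assert numpy.allclose(x, numpy.linalg.lstsq(A, b, rcond=None)[0])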
- x_init = ImageData(geometry=ig, - dimension_labels=['horizontal_x','horizontal_y','vertical']) - X_init = CompositeDataContainer(x_init) - B = CompositeDataContainer(b, - ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) - - # setup a tomo identity - Ibig = 1e5 * TomoIdentity(geometry=ig) - Ismall = 1e-5 * TomoIdentity(geometry=ig) - - # composite operator - Kbig = CompositeOperator(A, Ibig, shape=(2,1)) - Ksmall = CompositeOperator(A, Ismall, shape=(2,1)) - - #out = K.direct(X_init) - - f = Norm2sq(Kbig,B) - f.L = 0.00003 - - fsmall = Norm2sq(Ksmall,B) - f.L = 0.00003 - - simplef = Norm2sq(A, b) - simplef.L = 0.00003 - - gd = GradientDescent( x_init=x_init, objective_function=simplef, - rate=simplef.L) - gd.max_iteration = 10 - - cg = CGLS() - cg.set_up(X_init, Kbig, B ) - cg.max_iteration = 1 - - cgsmall = CGLS() - cgsmall.set_up(X_init, Ksmall, B ) - cgsmall.max_iteration = 1 - - - cgs = CGLS() - cgs.set_up(x_init, A, b ) - cgs.max_iteration = 6 -# - #out.__isub__(B) - #out2 = K.adjoint(out) - - #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - for _ in gd: - print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) - - cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - - cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - - cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) -# for _ in cg: -# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) -# -# fig = plt.figure() -# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) -# plt.title('Composite CGLS') -# plt.show() -# -# for _ in cgs: -# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) -# - fig = plt.figure() - plt.subplot(1,5,1) - plt.imshow(Phantom.subset(vertical=0).as_array()) - plt.title('Simulated Phantom') - plt.subplot(1,5,2) - plt.imshow(gd.get_output().subset(vertical=0).as_array()) - plt.title('Simple Gradient Descent') - plt.subplot(1,5,3) - plt.imshow(cgs.get_output().subset(vertical=0).as_array()) - plt.title('Simple CGLS') - plt.subplot(1,5,4) - plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) - plt.title('Composite CGLS\nbig lambda') - plt.subplot(1,5,5) - plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) - plt.title('Composite CGLS\nsmall lambda') - plt.show() \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 54cebcd..e9e7f44 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -48,15 +48,9 @@ class Operator(object): raise NotImplementedError def allocate_adjoint(self): '''Allocates memory on the X space''' -<<<<<<< HEAD raise NotImplementedError def range_dim(self): raise NotImplementedError -======= - raise NotImplementedError - def range_dim(self): - raise NotImplementedError ->>>>>>> master def domain_dim(self): raise NotImplementedError def __rmul__(self, other): diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index 8ebb857..7a55764 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -31,12 +31,9 @@ if cil_version == '': setup( name="ccpi-framework", version=cil_version, -<<<<<<< HEAD - packages=['ccpi' , 'ccpi.io', 'ccpi.optimisation', 'ccpi.optimisation.operators'], -======= packages=['ccpi' , 'ccpi.io', 
'ccpi.optimisation', + 'ccpi.optimisation.operators', 'ccpi.optimisation.algorithms'], ->>>>>>> master # Project uses reStructuredText, so ensure that the docutils get # installed or upgraded on the target machine -- cgit v1.2.3 From 100f86a2afb6e6da3c2ac0a2d7d9501198d62a67 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 5 Mar 2019 16:34:42 +0000 Subject: code refactoring and unittest for block container --- Wrappers/Python/ccpi/framework.py | 1257 -------------------- .../Python/ccpi/framework/BlockDataContainer.py | 294 +++++ Wrappers/Python/ccpi/framework/__init__.py | 24 + Wrappers/Python/ccpi/framework/framework.py | 1257 ++++++++++++++++++++ .../ccpi/optimisation/operators/BlockOperator.py | 278 +---- .../ccpi/optimisation/operators/LinearOperator.py | 19 + .../Python/ccpi/optimisation/operators/Operator.py | 25 + .../Python/ccpi/optimisation/operators/__init__.py | 10 + Wrappers/Python/setup.py | 3 +- Wrappers/Python/test/test_BlockDataContainer.py | 210 ++++ 10 files changed, 1843 insertions(+), 1534 deletions(-) delete mode 100644 Wrappers/Python/ccpi/framework.py create mode 100755 Wrappers/Python/ccpi/framework/BlockDataContainer.py create mode 100755 Wrappers/Python/ccpi/framework/__init__.py create mode 100755 Wrappers/Python/ccpi/framework/framework.py create mode 100755 Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py create mode 100755 Wrappers/Python/ccpi/optimisation/operators/Operator.py create mode 100755 Wrappers/Python/ccpi/optimisation/operators/__init__.py create mode 100755 Wrappers/Python/test/test_BlockDataContainer.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py deleted file mode 100644 index dab2dd9..0000000 --- a/Wrappers/Python/ccpi/framework.py +++ /dev/null @@ -1,1257 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
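# Judging from the file list of this commit, the framework classes move out of the
# single ccpi/framework.py module (deleted below) into a ccpi.framework package
# with framework.py, BlockDataContainer.py and an __init__.py. Assuming that
# __init__.py re-exports the public names, downstream imports would presumably
# look like the following (an assumption based on the new file names, not taken
# from the patch itself):
from ccpi.framework import ImageData, ImageGeometry, AcquisitionData   # re-exported names (assumption)
from ccpi.framework import BlockDataContainer                          # new block container (assumption)
from ccpi.optimisation.operators import BlockOperator, LinearOperator  # new operators package (assumption)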
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce - -def find_key(dic, val): - """return the key of dictionary dic given the value""" - return [k for k, v in dic.items() if v == val][0] - -def message(cls, msg, *args): - msg = "{0}: " + msg - for i in range(len(args)): - msg += " {%d}" %(i+1) - args = list(args) - args.insert(0, cls.__name__ ) - - return msg.format(*args ) - - -class ImageGeometry(object): - - def __init__(self, - voxel_num_x=0, - voxel_num_y=0, - voxel_num_z=0, - voxel_size_x=1, - voxel_size_y=1, - voxel_size_z=1, - center_x=0, - center_y=0, - center_z=0, - channels=1): - - self.voxel_num_x = voxel_num_x - self.voxel_num_y = voxel_num_y - self.voxel_num_z = voxel_num_z - self.voxel_size_x = voxel_size_x - self.voxel_size_y = voxel_size_y - self.voxel_size_z = voxel_size_z - self.center_x = center_x - self.center_y = center_y - self.center_z = center_z - self.channels = channels - - def get_min_x(self): - return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x - - def get_max_x(self): - return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x - - def get_min_y(self): - return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y - - def get_max_y(self): - return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y - - def get_min_z(self): - if not self.voxel_num_z == 0: - return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def get_max_z(self): - if not self.voxel_num_z == 0: - return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def clone(self): - '''returns a copy of ImageGeometry''' - return ImageGeometry( - self.voxel_num_x, - self.voxel_num_y, - self.voxel_num_z, - self.voxel_size_x, - self.voxel_size_y, - self.voxel_size_z, - self.center_x, - self.center_y, - self.center_z, - self.channels) - def __str__ (self): - repres = "" - repres += "Number of channels: {0}\n".format(self.channels) - repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z) - repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) - repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) - return repres - - -class AcquisitionGeometry(object): - - def __init__(self, - geom_type, - dimension, - angles, - pixel_num_h=0, - pixel_size_h=1, - pixel_num_v=0, - pixel_size_v=1, - dist_source_center=None, - dist_center_detector=None, - channels=1, - angle_unit='degree' - ): - """ - General inputs for standard type projection geometries - detectorDomain or detectorpixelSize: - If 2D - If scalar: Width of detector or single detector pixel - If 2-vec: Error - If 3D - If scalar: Width in both dimensions - If 2-vec: Vertical then horizontal size - grid - If 2D - If scalar: number of detectors - If 2-vec: error - If 3D - If scalar: Square grid that size - If 2-vec vertical then horizontal size - cone or parallel - 2D or 3D - parallel_parameters: ? 
- cone_parameters: - source_to_center_dist (if parallel: NaN) - center_to_detector_dist (if parallel: NaN) - standard or nonstandard (vec) geometry - angles - angles_format radians or degrees - """ - self.geom_type = geom_type # 'parallel' or 'cone' - self.dimension = dimension # 2D or 3D - self.angles = angles - - self.dist_source_center = dist_source_center - self.dist_center_detector = dist_center_detector - - self.pixel_num_h = pixel_num_h - self.pixel_size_h = pixel_size_h - self.pixel_num_v = pixel_num_v - self.pixel_size_v = pixel_size_v - - self.channels = channels - - def clone(self): - '''returns a copy of the AcquisitionGeometry''' - return AcquisitionGeometry(self.geom_type, - self.dimension, - self.angles, - self.pixel_num_h, - self.pixel_size_h, - self.pixel_num_v, - self.pixel_size_v, - self.dist_source_center, - self.dist_center_detector, - self.channels) - - def __str__ (self): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.dimension) - repres += "angles: {0}\n".format(self.angles) - repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v) - repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v) - repres += "geometry type: {0}\n".format(self.geom_type) - repres += "distance source-detector: {0}\n".format(self.dist_source_center) - repres += "distance center-detector: {0}\n".format(self.dist_source_center) - repres += "number of channels: {0}\n".format(self.channels) - return repres - - - -class DataContainer(object): - '''Generic class to hold data - - Data is currently held in a numpy arrays''' - - def __init__ (self, array, deep_copy=True, dimension_labels=None, - **kwargs): - '''Holds the data''' - - self.shape = numpy.shape(array) - self.number_of_dimensions = len (self.shape) - self.dimension_labels = {} - self.geometry = None # Only relevant for AcquisitionData and ImageData - - if dimension_labels is not None and \ - len (dimension_labels) == self.number_of_dimensions: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = dimension_labels[i] - else: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = 'dimension_{0:02}'.format(i) - - if type(array) == numpy.ndarray: - if deep_copy: - self.array = array.copy() - else: - self.array = array - else: - raise TypeError('Array must be NumpyArray, passed {0}'\ - .format(type(array))) - - # finally copy the geometry - if 'geometry' in kwargs.keys(): - self.geometry = kwargs['geometry'] - else: - # assume it is parallel beam - pass - - def get_dimension_size(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - acq_size = -1 - for k,v in self.dimension_labels.items(): - if v == dimension_label: - acq_size = self.shape[k] - return acq_size - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels)) - def get_dimension_axis(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - for k,v in self.dimension_labels.items(): - if v == dimension_label: - return k - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels.values())) - - - def as_array(self, dimensions=None): - '''Returns the DataContainer as Numpy Array - - Returns the pointer to the array if dimensions is not set. 
- If dimensions is set, it first creates a new DataContainer with the subset - and then it returns the pointer to the array''' - if dimensions is not None: - return self.subset(dimensions).as_array() - return self.array - - - def subset(self, dimensions=None, **kw): - '''Creates a DataContainer containing a subset of self according to the - labels in dimensions''' - if dimensions is None: - if kw == {}: - return self.array.copy() - else: - reduced_dims = [v for k,v in self.dimension_labels.items()] - for dim_l, dim_v in kw.items(): - for k,v in self.dimension_labels.items(): - if v == dim_l: - reduced_dims.pop(k) - return self.subset(dimensions=reduced_dims, **kw) - else: - # check that all the requested dimensions are in the array - # this is done by checking the dimension_labels - proceed = True - unknown_key = '' - # axis_order contains the order of the axis that the user wants - # in the output DataContainer - axis_order = [] - if type(dimensions) == list: - for dl in dimensions: - if dl not in self.dimension_labels.values(): - proceed = False - unknown_key = dl - break - else: - axis_order.append(find_key(self.dimension_labels, dl)) - if not proceed: - raise KeyError('Subset error: Unknown key specified {0}'.format(dl)) - - # slice away the unwanted data from the array - unwanted_dimensions = self.dimension_labels.copy() - left_dimensions = [] - for ax in sorted(axis_order): - this_dimension = unwanted_dimensions.pop(ax) - left_dimensions.append(this_dimension) - #print ("unwanted_dimensions {0}".format(unwanted_dimensions)) - #print ("left_dimensions {0}".format(left_dimensions)) - #new_shape = [self.shape[ax] for ax in axis_order] - #print ("new_shape {0}".format(new_shape)) - command = "self.array[" - for i in range(self.number_of_dimensions): - if self.dimension_labels[i] in unwanted_dimensions.values(): - value = 0 - for k,v in kw.items(): - if k == self.dimension_labels[i]: - value = v - - command = command + str(value) - else: - command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - command = command + ']' - - cleaned = eval(command) - # cleaned has collapsed dimensions in the same order of - # self.array, but we want it in the order stated in the - # "dimensions". - # create axes order for numpy.transpose - axes = [] - for key in dimensions: - #print ("key {0}".format( key)) - for i in range(len( left_dimensions )): - ld = left_dimensions[i] - #print ("ld {0}".format( ld)) - if ld == key: - axes.append(i) - #print ("axes {0}".format(axes)) - - cleaned = numpy.transpose(cleaned, axes).copy() - - return type(self)(cleaned , True, dimensions) - - def fill(self, array, **dimension): - '''fills the internal numpy array with the one provided''' - if dimension == {}: - if issubclass(type(array), DataContainer) or\ - issubclass(type(array), numpy.ndarray): - if array.shape != self.shape: - raise ValueError('Cannot fill with the provided array.' 
+ \ - 'Expecting {0} got {1}'.format( - self.shape,array.shape)) - if issubclass(type(array), DataContainer): - numpy.copyto(self.array, array.array) - else: - #self.array[:] = array - numpy.copyto(self.array, array) - else: - - command = 'self.array[' - i = 0 - for k,v in self.dimension_labels.items(): - for dim_label, dim_value in dimension.items(): - if dim_label == v: - command = command + str(dim_value) - else: - command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - i += 1 - command = command + "] = array[:]" - exec(command) - - - def check_dimensions(self, other): - return self.shape == other.shape - - ## algebra - def __add__(self, other , out=None, *args, **kwargs): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() + other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)( - self.as_array() + other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , - type(other))) - # __add__ - - def __sub__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() - other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() - other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , - type(other))) - # __sub__ - def __truediv__(self,other): - return self.__div__(other) - - def __div__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() / other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() / other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , - type(other))) - # __div__ - - def __pow__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() ** other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() ** other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , - type(other))) - # __pow__ - - def __mul__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() * other.as_array() - return type(self)(out, - deep_copy=True, - 
dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - return type(self)(self.as_array() * other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , - type(other))) - # __mul__ - - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - return self * other - # __rmul__ - - def __rdiv__(self, other): - print ("call __rdiv__") - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - if isinstance(other, (int, float)) : - fother = numpy.ones(numpy.shape(self.array)) * other - return type(self)(fother ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - return type(self)(other.as_array() ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Dimensions do not match') - # __rpow__ - - # in-place arithmetic operators: - # (+=, -=, *=, /= , //=, - # must return self - - - - def __iadd__(self, other): - if isinstance(other, (int, float)) : - numpy.add(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.add(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __iadd__ - - def __imul__(self, other): - if isinstance(other, (int, float)) : - arr = self.as_array() - numpy.multiply(arr, other, out=arr) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.multiply(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __imul__ - - def __isub__(self, other): - if isinstance(other, (int, float)) : - numpy.subtract(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.subtract(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __isub__ - - def __idiv__(self, other): - return self.__itruediv__(other) - def __itruediv__(self, other): - if isinstance(other, (int, float)) : - numpy.divide(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.divide(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __idiv__ - - def __str__ (self, representation=False): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions) - repres += "Shape: {0}\n".format(self.shape) - repres += "Axis labels: {0}\n".format(self.dimension_labels) - if representation: - repres += "Representation: \n{0}\n".format(self.array) - return repres - - def clone(self): - '''returns a copy of itself''' - - return type(self)(self.array, - dimension_labels=self.dimension_labels, - deep_copy=True, - geometry=self.geometry ) - - def 
get_data_axes_order(self,new_order=None): - '''returns the axes label of self as a list - - if new_order is None returns the labels of the axes as a sorted-by-key list - if new_order is a list of length number_of_dimensions, returns a list - with the indices of the axes in new_order with respect to those in - self.dimension_labels: i.e. - self.dimension_labels = {0:'horizontal',1:'vertical'} - new_order = ['vertical','horizontal'] - returns [1,0] - ''' - if new_order is None: - - axes_order = [i for i in range(len(self.shape))] - for k,v in self.dimension_labels.items(): - axes_order[k] = v - return axes_order - else: - if len(new_order) == self.number_of_dimensions: - axes_order = [i for i in range(self.number_of_dimensions)] - - for i in range(len(self.shape)): - found = False - for k,v in self.dimension_labels.items(): - if new_order[i] == v: - axes_order[i] = k - found = True - if not found: - raise ValueError('Axis label {0} not found.'.format(new_order[i])) - return axes_order - else: - raise ValueError('Expecting {0} axes, got {2}'\ - .format(len(self.shape),len(new_order))) - - - def copy(self): - '''alias of clone''' - return self.clone() - - ## binary operations - - def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): - - if out is None: - if isinstance(x2, (int, float, complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif issubclass(type(x2) , DataContainer): - out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - - elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): - if self.check_dimensions(out) and self.check_dimensions(x2): - pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs ) - #return type(self)(out.as_array(), - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - return out - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): - if self.check_dimensions(out): - - pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs ) - return out - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), x2 , out=out, *args, **kwargs) - #return type(self)(out, - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def add(self, other , out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) - - def subtract(self, other, out=None , *args, **kwargs): - return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) - - def multiply(self, other , out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) - - def divide(self, other , out=None ,*args, **kwargs): - return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) - - def power(self, other 
, out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) - - def maximum(self,x2, out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) - - ## unary operations - def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): - if out is None: - out = pwop(self.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(out), DataContainer): - if self.check_dimensions(out): - pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), out=out, *args, **kwargs) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def abs(self, out=None, *args, **kwargs): - return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) - - def sign(self, out=None, *args, **kwargs): - return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) - - def sqrt(self, out=None, *args, **kwargs): - return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) - - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - - ## reductions - def sum(self, out=None, *args, **kwargs): - return self.as_array().sum(*args, **kwargs) - def squared_norm(self): - '''return the squared euclidean norm of the DataContainer viewed as a vector''' - shape = self.shape - size = reduce(lambda x,y:x*y, shape, 1) - y = numpy.reshape(self.as_array(), (size, )) - return numpy.dot(y, y.conjugate()) - def norm(self): - '''return the euclidean norm of the DataContainer viewed as a vector''' - return numpy.sqrt(self.squared_norm()) - - - -class ImageData(DataContainer): - '''DataContainer for holding 2D or 3D DataContainer''' - def __init__(self, - array = None, - deep_copy=False, - dimension_labels=None, - **kwargs): - - self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz_x = geometry.voxel_num_x - horiz_y = geometry.voxel_num_y - vert = 1 if geometry.voxel_num_z is None\ - else geometry.voxel_num_z # this should be 1 for 2D - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, vert, horiz_y, horiz_x) - dim_labels = ['channel' ,'vertical' , 'horizontal_y' , - 'horizontal_x'] - else: - shape = (channels , horiz_y, horiz_x) - dim_labels = ['channel' , 'horizontal_y' , - 'horizontal_x'] - else: - if vert > 1: - shape = (vert, horiz_y, horiz_x) - dim_labels = ['vertical' , 'horizontal_y' , - 'horizontal_x'] - else: - shape = (horiz_y, horiz_x) - dim_labels = ['horizontal_y' , - 'horizontal_x'] - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == 'channel': - shape.append(channels) - elif dim == 'horizontal_y': - shape.append(horiz_y) - elif dim == 'vertical': - shape.append(vert) - elif dim == 'horizontal_x': - shape.append(horiz_x) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes'.format( - len(dimension_labels) - len(shape))) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(ImageData, 
self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - else: - raise ValueError('Please pass either a DataContainer, ' +\ - 'a numpy array or a geometry') - else: - if issubclass(type(array) , DataContainer): - # if the array is a DataContainer get the info from there - if not ( array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(ImageData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) , numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = ['channel' ,'vertical' , 'horizontal_y' , - 'horizontal_x'] - elif array.ndim == 3: - dimension_labels = ['vertical' , 'horizontal_y' , - 'horizontal_x'] - else: - dimension_labels = ['horizontal_y' , - 'horizontal_x'] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(ImageData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - # load metadata from kwargs if present - for key, value in kwargs.items(): - if (type(value) == list or type(value) == tuple) and \ - ( len (value) == 3 and len (value) == 2) : - if key == 'origin' : - self.origin = value - if key == 'spacing' : - self.spacing = value - - def subset(self, dimensions=None, **kw): - out = super(ImageData, self).subset(dimensions, **kw) - #out.geometry = self.recalculate_geometry(dimensions , **kw) - out.geometry = self.geometry - return out - - -class AcquisitionData(DataContainer): - '''DataContainer for holding 2D or 3D sinogram''' - def __init__(self, - array = None, - deep_copy=True, - dimension_labels=None, - **kwargs): - self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz = geometry.pixel_num_h - vert = geometry.pixel_num_v - angles = geometry.angles - num_of_angles = numpy.shape(angles)[0] - - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, num_of_angles , vert, horiz) - dim_labels = ['channel' , ' angle' , - 'vertical' , 'horizontal'] - else: - shape = (channels , num_of_angles, horiz) - dim_labels = ['channel' , 'angle' , - 'horizontal'] - else: - if vert > 1: - shape = (num_of_angles, vert, horiz) - dim_labels = ['angle' , 'vertical' , - 'horizontal'] - else: - shape = (num_of_angles, horiz) - dim_labels = ['angle' , - 'horizontal'] - - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == 'channel': - shape.append(channels) - elif dim == 'angle': - shape.append(num_of_angles) - elif dim == 'vertical': - shape.append(vert) - elif dim == 'horizontal': - shape.append(horiz) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes'.format( - len(dimension_labels) - len(shape))) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - else: - - if issubclass(type(array) ,DataContainer): - # if the array is a DataContainer get the info from there - if not ( 
array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) ,numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = ['channel' ,'angle' , 'vertical' , - 'horizontal'] - elif array.ndim == 3: - dimension_labels = ['angle' , 'vertical' , - 'horizontal'] - else: - dimension_labels = ['angle' , - 'horizontal'] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - -class DataProcessor(object): - '''Defines a generic DataContainer processor - - accepts DataContainer as inputs and - outputs DataContainer - additional attributes can be defined with __setattr__ - ''' - - def __init__(self, **attributes): - if not 'store_output' in attributes.keys(): - attributes['store_output'] = True - attributes['output'] = False - attributes['runTime'] = -1 - attributes['mTime'] = datetime.now() - attributes['input'] = None - for key, value in attributes.items(): - self.__dict__[key] = value - - - def __setattr__(self, name, value): - if name == 'input': - self.set_input(value) - elif name in self.__dict__.keys(): - self.__dict__[name] = value - self.__dict__['mTime'] = datetime.now() - else: - raise KeyError('Attribute {0} not found'.format(name)) - #pass - - def set_input(self, dataset): - if issubclass(type(dataset), DataContainer): - if self.check_input(dataset): - self.__dict__['input'] = dataset - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(dataset), DataContainer)) - - def check_input(self, dataset): - '''Checks parameters of the input DataContainer - - Should raise an Error if the DataContainer does not match expectation, e.g. - if the expected input DataContainer is 3D and the Processor expects 2D. 
- ''' - raise NotImplementedError('Implement basic checks for input DataContainer') - - def get_output(self): - for k,v in self.__dict__.items(): - if v is None: - raise ValueError('Key {0} is None'.format(k)) - shouldRun = False - if self.runTime == -1: - shouldRun = True - elif self.mTime > self.runTime: - shouldRun = True - - # CHECK this - if self.store_output and shouldRun: - self.runTime = datetime.now() - self.output = self.process() - return self.output - self.runTime = datetime.now() - return self.process() - - def set_input_processor(self, processor): - if issubclass(type(processor), DataProcessor): - self.__dict__['input'] = processor - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(processor), DataProcessor)) - - def get_input(self): - '''returns the input DataContainer - - It is useful in the case the user has provided a DataProcessor as - input - ''' - if issubclass(type(self.input), DataProcessor): - dsi = self.input.get_output() - else: - dsi = self.input - return dsi - - def process(self): - raise NotImplementedError('process must be implemented') - - - - -class DataProcessor23D(DataProcessor): - '''Regularizers DataProcessor - ''' - - def check_input(self, dataset): - '''Checks number of dimensions input DataContainer - - Expected input is 2D or 3D - ''' - if dataset.number_of_dimensions == 2 or \ - dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - -###### Example of DataProcessors - -class AX(DataProcessor): - '''Example DataProcessor - The AXPY routines perform a vector multiplication operation defined as - - y := a*x - where: - - a is a scalar - - x a DataContainer. - ''' - - def __init__(self): - kwargs = {'scalar':None, - 'input':None, - } - - #DataProcessor.__init__(self, **kwargs) - super(AX, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self): - - dsi = self.get_input() - a = self.scalar - - y = DataContainer( a * dsi.as_array() , True, - dimension_labels=dsi.dimension_labels ) - #self.setParameter(output_dataset=y) - return y - - - - -class PixelByPixelDataProcessor(DataProcessor): - '''Example DataProcessor - - This processor applies a python function to each pixel of the DataContainer - - f is a python function - - x a DataSet. 
- ''' - - def __init__(self): - kwargs = {'pyfunc':None, - 'input':None, - } - #DataProcessor.__init__(self, **kwargs) - super(PixelByPixelDataProcessor, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self): - - pyfunc = self.pyfunc - dsi = self.get_input() - - eval_func = numpy.frompyfunc(pyfunc,1,1) - - - y = DataContainer( eval_func( dsi.as_array() ) , True, - dimension_labels=dsi.dimension_labels ) - return y - - - - -if __name__ == '__main__': - shape = (2,3,4,5) - size = shape[0] - for i in range(1, len(shape)): - size = size * shape[i] - #print("a refcount " , sys.getrefcount(a)) - a = numpy.asarray([i for i in range( size )]) - print("a refcount " , sys.getrefcount(a)) - a = numpy.reshape(a, shape) - print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) - print("a refcount " , sys.getrefcount(a)) - print ("ds label {0}".format(ds.dimension_labels)) - subset = ['W' ,'X'] - b = ds.subset( subset ) - print("a refcount " , sys.getrefcount(a)) - print ("b label {0} shape {1}".format(b.dimension_labels, - numpy.shape(b.as_array()))) - c = ds.subset(['Z','W','X']) - print("a refcount " , sys.getrefcount(a)) - - # Create a ImageData sharing the array with c - volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels) - volume1 = ImageData(c, False) - - print ("volume0 {0} volume1 {1}".format(id(volume0.array), - id(volume1.array))) - - # Create a ImageData copying the array from c - volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels) - volume3 = ImageData(c) - - print ("volume2 {0} volume3 {1}".format(id(volume2.array), - id(volume3.array))) - - # single number DataSet - sn = DataContainer(numpy.asarray([1])) - - ax = AX() - ax.scalar = 2 - ax.set_input(c) - #ax.apply() - print ("ax in {0} out {1}".format(c.as_array().flatten(), - ax.get_output().as_array().flatten())) - axm = AX() - axm.scalar = 0.5 - axm.set_input(c) - #axm.apply() - print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array())) - - # create a PixelByPixelDataProcessor - - #define a python function which will take only one input (the pixel value) - pyfunc = lambda x: -x if x > 20 else x - clip = PixelByPixelDataProcessor() - clip.pyfunc = pyfunc - clip.set_input(c) - #clip.apply() - - print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array())) - - #dsp = DataProcessor() - #dsp.set_input(ds) - #dsp.input = a - # pipeline - - chain = AX() - chain.scalar = 0.5 - chain.set_input_processor(ax) - print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array())) - - # testing arithmetic operations - - print (b) - print ((b+1)) - print ((1+b)) - - print (b) - print ((b*2)) - - print (b) - print ((2*b)) - - print (b) - print ((b/2)) - - print (b) - print ((2/b)) - - print (b) - print ((b**2)) - - print (b) - print ((2**b)) - - print (type(volume3 + 2)) - - s = [i for i in range(3 * 4 * 4)] - s = numpy.reshape(numpy.asarray(s), (3,4,4)) - sino = AcquisitionData( s ) - - shape = (4,3,2) - a = [i for i in range(2*3*4)] - a = numpy.asarray(a) - a = numpy.reshape(a, shape) - print (numpy.shape(a)) - ds = DataContainer(a, True, ['X', 'Y','Z']) - # this means that I expect the X to be of length 2 , - # y of length 3 and z of length 4 - subset = ['Y' ,'Z'] - b0 = ds.subset( subset ) - print ("shape b 3,2? 
{0}".format(numpy.shape(b0.as_array()))) - # expectation on b is that it is - # 3x2 cut at z = 0 - - subset = ['X' ,'Y'] - b1 = ds.subset( subset , Z=1) - print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array()))) - - - - # create VolumeData from geometry - vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2) - vol = ImageData(geometry=vgeometry) - - sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20), - geom_type='parallel', pixel_num_v=3, - pixel_num_h=5 , channels=2) - sino = AcquisitionData(geometry=sgeometry) - sino2 = sino.clone() - diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py new file mode 100755 index 0000000..9a42a16 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 16:04:45 2019 + +@author: ofn77899 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +from numbers import Number +import functools +#from ccpi.framework import AcquisitionData, ImageData +#from ccpi.optimisation.operators import Operator, LinearOperator + +class BlockDataContainer(object): + '''Class to hold a composite operator''' + __array_priority__ = 1 + def __init__(self, *args, shape=None): + '''containers must be passed row by row''' + self.containers = args + self.index = 0 + if shape is None: + shape = (len(args),1) + self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements,len(args))) +# for i in range(shape[0]): +# b.append([]) +# for j in range(shape[1]): +# b[-1].append(args[i*shape[1]+j]) +# indices.append(i*shape[1]+j) +# self.containers = b + + def __iter__(self): + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + try: + out = self[self.index] + except IndexError as ie: + raise StopIteration() + self.index+=1 + return out + + def is_compatible(self, other): + '''basic check if the size of the 2 objects fit''' + if isinstance(other, Number): + return True + elif isinstance(other, list): + # TODO look elements should be numbers + for ot in other: + if not isinstance(ot, (Number,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + raise ValueError('List/ numpy array can only contain numbers {}'\ + .format(type(ot))) + return len(self.containers) == len(other) + elif isinstance(other, numpy.ndarray): + return self.shape == other.shape + return len(self.containers) == len(other.containers) + def get_item(self, row, col=0): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + if col > self.shape[1]: + raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) + + index = row*self.shape[1]+col + return self.containers[index] + + def add(self, other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return 
type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def subtract(self, other, out=None , *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def multiply(self, other , out=None, *args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + elif isinstance(other, numpy.ndarray): + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def divide(self, other , out=None ,*args, **kwargs): + self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def power(self, other , out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + def maximum(self,other, out=None, *args, **kwargs): + assert self.is_compatible(other) + if isinstance(other, Number): + return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + + ## unary operations + def abs(self, out=None, *args, **kwargs): + return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) + def sign(self, out=None, *args, **kwargs): + return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) + def sqrt(self, out=None, *args, **kwargs): + return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) + def conjugate(self, out=None): + return type(self)(*[el.conjugate() for el in self.containers]) + + ## reductions + def sum(self, out=None, *args, **kwargs): + return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) + def squared_norm(self): + y = numpy.asarray([el.squared_norm() for el in self.containers]) + return y.sum() + def norm(self): + y = numpy.asarray([el.norm() for el in self.containers]) + return y.sum() + def copy(self): + '''alias 
of clone''' + return self.clone() + def clone(self): + return type(self)(*[el.copy() for el in self.containers]) + + def __add__(self, other): + return self.add( other ) + # __radd__ + + def __sub__(self, other): + return self.subtract( other ) + # __rsub__ + + def __mul__(self, other): + return self.multiply(other) + # __rmul__ + + def __div__(self, other): + return self.divide(other) + # __rdiv__ + def __truediv__(self, other): + return self.divide(other) + + def __pow__(self, other): + return self.power(other) + # reverse operand + def __radd__(self, other): + '''Reverse addition + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self + other + # __radd__ + + def __rsub__(self, other): + '''Reverse subtraction + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + '''Reverse multiplication + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self * other + # __rmul__ + + def __rdiv__(self, other): + '''Reverse division + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + '''Reverse truedivision + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self.__rdiv__(other) + + def __rpow__(self, other): + '''Reverse power + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return other.power(self) + + def __iadd__(self, other): + '''Inline addition''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el += ot + elif isinstance(other, Number): + for el in self.containers: + el += other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + self.is_compatible(other) + for el,ot in zip(self.containers, other): + el += ot + return self + # __iadd__ + + def __isub__(self, other): + '''Inline subtraction''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el -= ot + elif isinstance(other, Number): + for el in self.containers: + el -= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el -= ot + return self + # __isub__ + + def __imul__(self, other): + '''Inline multiplication''' + if isinstance (other, BlockDataContainer): + for el,ot in 
zip(self.containers, other.containers): + el *= ot + elif isinstance(other, Number): + for el in self.containers: + el *= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el *= ot + return self + # __imul__ + + def __idiv__(self, other): + '''Inline division''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el /= ot + elif isinstance(other, Number): + for el in self.containers: + el /= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + assert self.is_compatible(other) + for el,ot in zip(self.containers, other): + el /= ot + return self + # __rdiv__ + def __itruediv__(self, other): + '''Inline truedivision''' + return self.__idiv__(other) + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py new file mode 100755 index 0000000..083f547 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 16:00:18 2019 + +@author: ofn77899 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +import sys +from datetime import timedelta, datetime +import warnings +from functools import reduce + +from .framework import DataContainer +from .framework import ImageData, AcquisitionData +from .framework import ImageGeometry, AcquisitionGeometry +from .framework import find_key, message +from .framework import DataProcessor +from .framework import AX, PixelByPixelDataProcessor +from .BlockDataContainer import BlockDataContainer diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py new file mode 100755 index 0000000..dab2dd9 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -0,0 +1,1257 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
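+
+# A minimal usage sketch of the classes defined below and of the companion
+# ccpi.framework.BlockDataContainer module (illustrative only: the geometry
+# sizes are example values taken from the demo in the __main__ block at the
+# end of this file, and the BlockDataContainer lines are an assumption about
+# typical use rather than part of that demo):
+#
+#   from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer
+#   vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2)
+#   vol = ImageData(geometry=vgeometry)          # zero-filled, shape (2, 3, 2)
+#   block = BlockDataContainer(vol, vol.copy())  # default shape (2, 1)
+#   print (block.squared_norm())                 # sum of element squared norms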
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +import sys +from datetime import timedelta, datetime +import warnings +from functools import reduce + +def find_key(dic, val): + """return the key of dictionary dic given the value""" + return [k for k, v in dic.items() if v == val][0] + +def message(cls, msg, *args): + msg = "{0}: " + msg + for i in range(len(args)): + msg += " {%d}" %(i+1) + args = list(args) + args.insert(0, cls.__name__ ) + + return msg.format(*args ) + + +class ImageGeometry(object): + + def __init__(self, + voxel_num_x=0, + voxel_num_y=0, + voxel_num_z=0, + voxel_size_x=1, + voxel_size_y=1, + voxel_size_z=1, + center_x=0, + center_y=0, + center_z=0, + channels=1): + + self.voxel_num_x = voxel_num_x + self.voxel_num_y = voxel_num_y + self.voxel_num_z = voxel_num_z + self.voxel_size_x = voxel_size_x + self.voxel_size_y = voxel_size_y + self.voxel_size_z = voxel_size_z + self.center_x = center_x + self.center_y = center_y + self.center_z = center_z + self.channels = channels + + def get_min_x(self): + return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x + + def get_max_x(self): + return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x + + def get_min_y(self): + return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y + + def get_max_y(self): + return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y + + def get_min_z(self): + if not self.voxel_num_z == 0: + return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z + else: + return 0 + + def get_max_z(self): + if not self.voxel_num_z == 0: + return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z + else: + return 0 + + def clone(self): + '''returns a copy of ImageGeometry''' + return ImageGeometry( + self.voxel_num_x, + self.voxel_num_y, + self.voxel_num_z, + self.voxel_size_x, + self.voxel_size_y, + self.voxel_size_z, + self.center_x, + self.center_y, + self.center_z, + self.channels) + def __str__ (self): + repres = "" + repres += "Number of channels: {0}\n".format(self.channels) + repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z) + repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) + repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) + return repres + + +class AcquisitionGeometry(object): + + def __init__(self, + geom_type, + dimension, + angles, + pixel_num_h=0, + pixel_size_h=1, + pixel_num_v=0, + pixel_size_v=1, + dist_source_center=None, + dist_center_detector=None, + channels=1, + angle_unit='degree' + ): + """ + General inputs for standard type projection geometries + detectorDomain or detectorpixelSize: + If 2D + If scalar: Width of detector or single detector pixel + If 2-vec: Error + If 3D + If scalar: Width in both dimensions + If 2-vec: Vertical then horizontal size + grid + If 2D + If scalar: number of detectors + If 2-vec: error + If 3D + If scalar: Square grid that size + If 2-vec vertical then horizontal size + cone or parallel + 2D or 3D + parallel_parameters: ? 
+ cone_parameters: + source_to_center_dist (if parallel: NaN) + center_to_detector_dist (if parallel: NaN) + standard or nonstandard (vec) geometry + angles + angles_format radians or degrees + """ + self.geom_type = geom_type # 'parallel' or 'cone' + self.dimension = dimension # 2D or 3D + self.angles = angles + + self.dist_source_center = dist_source_center + self.dist_center_detector = dist_center_detector + + self.pixel_num_h = pixel_num_h + self.pixel_size_h = pixel_size_h + self.pixel_num_v = pixel_num_v + self.pixel_size_v = pixel_size_v + + self.channels = channels + + def clone(self): + '''returns a copy of the AcquisitionGeometry''' + return AcquisitionGeometry(self.geom_type, + self.dimension, + self.angles, + self.pixel_num_h, + self.pixel_size_h, + self.pixel_num_v, + self.pixel_size_v, + self.dist_source_center, + self.dist_center_detector, + self.channels) + + def __str__ (self): + repres = "" + repres += "Number of dimensions: {0}\n".format(self.dimension) + repres += "angles: {0}\n".format(self.angles) + repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v) + repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v) + repres += "geometry type: {0}\n".format(self.geom_type) + repres += "distance source-detector: {0}\n".format(self.dist_source_center) + repres += "distance center-detector: {0}\n".format(self.dist_source_center) + repres += "number of channels: {0}\n".format(self.channels) + return repres + + + +class DataContainer(object): + '''Generic class to hold data + + Data is currently held in a numpy arrays''' + + def __init__ (self, array, deep_copy=True, dimension_labels=None, + **kwargs): + '''Holds the data''' + + self.shape = numpy.shape(array) + self.number_of_dimensions = len (self.shape) + self.dimension_labels = {} + self.geometry = None # Only relevant for AcquisitionData and ImageData + + if dimension_labels is not None and \ + len (dimension_labels) == self.number_of_dimensions: + for i in range(self.number_of_dimensions): + self.dimension_labels[i] = dimension_labels[i] + else: + for i in range(self.number_of_dimensions): + self.dimension_labels[i] = 'dimension_{0:02}'.format(i) + + if type(array) == numpy.ndarray: + if deep_copy: + self.array = array.copy() + else: + self.array = array + else: + raise TypeError('Array must be NumpyArray, passed {0}'\ + .format(type(array))) + + # finally copy the geometry + if 'geometry' in kwargs.keys(): + self.geometry = kwargs['geometry'] + else: + # assume it is parallel beam + pass + + def get_dimension_size(self, dimension_label): + if dimension_label in self.dimension_labels.values(): + acq_size = -1 + for k,v in self.dimension_labels.items(): + if v == dimension_label: + acq_size = self.shape[k] + return acq_size + else: + raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, + self.dimension_labels)) + def get_dimension_axis(self, dimension_label): + if dimension_label in self.dimension_labels.values(): + for k,v in self.dimension_labels.items(): + if v == dimension_label: + return k + else: + raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, + self.dimension_labels.values())) + + + def as_array(self, dimensions=None): + '''Returns the DataContainer as Numpy Array + + Returns the pointer to the array if dimensions is not set. 
+ If dimensions is set, it first creates a new DataContainer with the subset + and then it returns the pointer to the array''' + if dimensions is not None: + return self.subset(dimensions).as_array() + return self.array + + + def subset(self, dimensions=None, **kw): + '''Creates a DataContainer containing a subset of self according to the + labels in dimensions''' + if dimensions is None: + if kw == {}: + return self.array.copy() + else: + reduced_dims = [v for k,v in self.dimension_labels.items()] + for dim_l, dim_v in kw.items(): + for k,v in self.dimension_labels.items(): + if v == dim_l: + reduced_dims.pop(k) + return self.subset(dimensions=reduced_dims, **kw) + else: + # check that all the requested dimensions are in the array + # this is done by checking the dimension_labels + proceed = True + unknown_key = '' + # axis_order contains the order of the axis that the user wants + # in the output DataContainer + axis_order = [] + if type(dimensions) == list: + for dl in dimensions: + if dl not in self.dimension_labels.values(): + proceed = False + unknown_key = dl + break + else: + axis_order.append(find_key(self.dimension_labels, dl)) + if not proceed: + raise KeyError('Subset error: Unknown key specified {0}'.format(dl)) + + # slice away the unwanted data from the array + unwanted_dimensions = self.dimension_labels.copy() + left_dimensions = [] + for ax in sorted(axis_order): + this_dimension = unwanted_dimensions.pop(ax) + left_dimensions.append(this_dimension) + #print ("unwanted_dimensions {0}".format(unwanted_dimensions)) + #print ("left_dimensions {0}".format(left_dimensions)) + #new_shape = [self.shape[ax] for ax in axis_order] + #print ("new_shape {0}".format(new_shape)) + command = "self.array[" + for i in range(self.number_of_dimensions): + if self.dimension_labels[i] in unwanted_dimensions.values(): + value = 0 + for k,v in kw.items(): + if k == self.dimension_labels[i]: + value = v + + command = command + str(value) + else: + command = command + ":" + if i < self.number_of_dimensions -1: + command = command + ',' + command = command + ']' + + cleaned = eval(command) + # cleaned has collapsed dimensions in the same order of + # self.array, but we want it in the order stated in the + # "dimensions". + # create axes order for numpy.transpose + axes = [] + for key in dimensions: + #print ("key {0}".format( key)) + for i in range(len( left_dimensions )): + ld = left_dimensions[i] + #print ("ld {0}".format( ld)) + if ld == key: + axes.append(i) + #print ("axes {0}".format(axes)) + + cleaned = numpy.transpose(cleaned, axes).copy() + + return type(self)(cleaned , True, dimensions) + + def fill(self, array, **dimension): + '''fills the internal numpy array with the one provided''' + if dimension == {}: + if issubclass(type(array), DataContainer) or\ + issubclass(type(array), numpy.ndarray): + if array.shape != self.shape: + raise ValueError('Cannot fill with the provided array.' 
+ \ + 'Expecting {0} got {1}'.format( + self.shape,array.shape)) + if issubclass(type(array), DataContainer): + numpy.copyto(self.array, array.array) + else: + #self.array[:] = array + numpy.copyto(self.array, array) + else: + + command = 'self.array[' + i = 0 + for k,v in self.dimension_labels.items(): + for dim_label, dim_value in dimension.items(): + if dim_label == v: + command = command + str(dim_value) + else: + command = command + ":" + if i < self.number_of_dimensions -1: + command = command + ',' + i += 1 + command = command + "] = array[:]" + exec(command) + + + def check_dimensions(self, other): + return self.shape == other.shape + + ## algebra + def __add__(self, other , out=None, *args, **kwargs): + if issubclass(type(other), DataContainer): + if self.check_dimensions(other): + out = self.as_array() + other.as_array() + return type(self)(out, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, + other.shape)) + elif isinstance(other, (int, float, complex)): + return type(self)( + self.as_array() + other, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , + type(other))) + # __add__ + + def __sub__(self, other): + if issubclass(type(other), DataContainer): + if self.check_dimensions(other): + out = self.as_array() - other.as_array() + return type(self)(out, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, + other.shape)) + elif isinstance(other, (int, float, complex)): + return type(self)(self.as_array() - other, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , + type(other))) + # __sub__ + def __truediv__(self,other): + return self.__div__(other) + + def __div__(self, other): + if issubclass(type(other), DataContainer): + if self.check_dimensions(other): + out = self.as_array() / other.as_array() + return type(self)(out, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, + other.shape)) + elif isinstance(other, (int, float, complex)): + return type(self)(self.as_array() / other, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , + type(other))) + # __div__ + + def __pow__(self, other): + if issubclass(type(other), DataContainer): + if self.check_dimensions(other): + out = self.as_array() ** other.as_array() + return type(self)(out, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, + other.shape)) + elif isinstance(other, (int, float, complex)): + return type(self)(self.as_array() ** other, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , + type(other))) + # __pow__ + + def __mul__(self, other): + if issubclass(type(other), DataContainer): + if self.check_dimensions(other): + out = self.as_array() * other.as_array() + return type(self)(out, + deep_copy=True, + 
dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, + other.shape)) + elif isinstance(other, (int, float, complex,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + return type(self)(self.as_array() * other, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , + type(other))) + # __mul__ + + # reverse operand + def __radd__(self, other): + return self + other + # __radd__ + + def __rsub__(self, other): + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + return self * other + # __rmul__ + + def __rdiv__(self, other): + print ("call __rdiv__") + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + return self.__rdiv__(other) + + def __rpow__(self, other): + if isinstance(other, (int, float)) : + fother = numpy.ones(numpy.shape(self.array)) * other + return type(self)(fother ** self.array , + dimension_labels=self.dimension_labels, + geometry=self.geometry) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + return type(self)(other.as_array() ** self.array , + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError('Dimensions do not match') + # __rpow__ + + # in-place arithmetic operators: + # (+=, -=, *=, /= , //=, + # must return self + + + + def __iadd__(self, other): + if isinstance(other, (int, float)) : + numpy.add(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.add(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self + # __iadd__ + + def __imul__(self, other): + if isinstance(other, (int, float)) : + arr = self.as_array() + numpy.multiply(arr, other, out=arr) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.multiply(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self + # __imul__ + + def __isub__(self, other): + if isinstance(other, (int, float)) : + numpy.subtract(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.subtract(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self + # __isub__ + + def __idiv__(self, other): + return self.__itruediv__(other) + def __itruediv__(self, other): + if isinstance(other, (int, float)) : + numpy.divide(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.divide(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self + # __idiv__ + + def __str__ (self, representation=False): + repres = "" + repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions) + repres += "Shape: {0}\n".format(self.shape) + repres += "Axis labels: {0}\n".format(self.dimension_labels) + if representation: + repres += "Representation: \n{0}\n".format(self.array) + return repres + + def clone(self): + '''returns a copy of itself''' + + return type(self)(self.array, + dimension_labels=self.dimension_labels, + deep_copy=True, + geometry=self.geometry ) + + def 
get_data_axes_order(self,new_order=None): + '''returns the axes label of self as a list + + if new_order is None returns the labels of the axes as a sorted-by-key list + if new_order is a list of length number_of_dimensions, returns a list + with the indices of the axes in new_order with respect to those in + self.dimension_labels: i.e. + self.dimension_labels = {0:'horizontal',1:'vertical'} + new_order = ['vertical','horizontal'] + returns [1,0] + ''' + if new_order is None: + + axes_order = [i for i in range(len(self.shape))] + for k,v in self.dimension_labels.items(): + axes_order[k] = v + return axes_order + else: + if len(new_order) == self.number_of_dimensions: + axes_order = [i for i in range(self.number_of_dimensions)] + + for i in range(len(self.shape)): + found = False + for k,v in self.dimension_labels.items(): + if new_order[i] == v: + axes_order[i] = k + found = True + if not found: + raise ValueError('Axis label {0} not found.'.format(new_order[i])) + return axes_order + else: + raise ValueError('Expecting {0} axes, got {2}'\ + .format(len(self.shape),len(new_order))) + + + def copy(self): + '''alias of clone''' + return self.clone() + + ## binary operations + + def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): + + if out is None: + if isinstance(x2, (int, float, complex)): + out = pwop(self.as_array() , x2 , *args, **kwargs ) + elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + out = pwop(self.as_array() , x2 , *args, **kwargs ) + elif issubclass(type(x2) , DataContainer): + out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + + + elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): + if self.check_dimensions(out) and self.check_dimensions(x2): + pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs ) + #return type(self)(out.as_array(), + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + return out + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): + if self.check_dimensions(out): + + pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs ) + return out + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), x2 , out=out, *args, **kwargs) + #return type(self)(out, + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def add(self, other , out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) + + def subtract(self, other, out=None , *args, **kwargs): + return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) + + def multiply(self, other , out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) + + def divide(self, other , out=None ,*args, **kwargs): + return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) + + def power(self, other 
, out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) + + def maximum(self,x2, out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) + + ## unary operations + def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): + if out is None: + out = pwop(self.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + elif issubclass(type(out), DataContainer): + if self.check_dimensions(out): + pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), out=out, *args, **kwargs) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def abs(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) + + def sign(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) + + def sqrt(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) + + #def __abs__(self): + # operation = FM.OPERATION.ABS + # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) + # __abs__ + + ## reductions + def sum(self, out=None, *args, **kwargs): + return self.as_array().sum(*args, **kwargs) + def squared_norm(self): + '''return the squared euclidean norm of the DataContainer viewed as a vector''' + shape = self.shape + size = reduce(lambda x,y:x*y, shape, 1) + y = numpy.reshape(self.as_array(), (size, )) + return numpy.dot(y, y.conjugate()) + def norm(self): + '''return the euclidean norm of the DataContainer viewed as a vector''' + return numpy.sqrt(self.squared_norm()) + + + +class ImageData(DataContainer): + '''DataContainer for holding 2D or 3D DataContainer''' + def __init__(self, + array = None, + deep_copy=False, + dimension_labels=None, + **kwargs): + + self.geometry = None + if array is None: + if 'geometry' in kwargs.keys(): + geometry = kwargs['geometry'] + self.geometry = geometry + channels = geometry.channels + horiz_x = geometry.voxel_num_x + horiz_y = geometry.voxel_num_y + vert = 1 if geometry.voxel_num_z is None\ + else geometry.voxel_num_z # this should be 1 for 2D + if dimension_labels is None: + if channels > 1: + if vert > 1: + shape = (channels, vert, horiz_y, horiz_x) + dim_labels = ['channel' ,'vertical' , 'horizontal_y' , + 'horizontal_x'] + else: + shape = (channels , horiz_y, horiz_x) + dim_labels = ['channel' , 'horizontal_y' , + 'horizontal_x'] + else: + if vert > 1: + shape = (vert, horiz_y, horiz_x) + dim_labels = ['vertical' , 'horizontal_y' , + 'horizontal_x'] + else: + shape = (horiz_y, horiz_x) + dim_labels = ['horizontal_y' , + 'horizontal_x'] + dimension_labels = dim_labels + else: + shape = [] + for dim in dimension_labels: + if dim == 'channel': + shape.append(channels) + elif dim == 'horizontal_y': + shape.append(horiz_y) + elif dim == 'vertical': + shape.append(vert) + elif dim == 'horizontal_x': + shape.append(horiz_x) + if len(shape) != len(dimension_labels): + raise ValueError('Missing {0} axes'.format( + len(dimension_labels) - len(shape))) + shape = tuple(shape) + + array = numpy.zeros( shape , dtype=numpy.float32) + super(ImageData, 
self).__init__(array, deep_copy, + dimension_labels, **kwargs) + + else: + raise ValueError('Please pass either a DataContainer, ' +\ + 'a numpy array or a geometry') + else: + if issubclass(type(array) , DataContainer): + # if the array is a DataContainer get the info from there + if not ( array.number_of_dimensions == 2 or \ + array.number_of_dimensions == 3 or \ + array.number_of_dimensions == 4): + raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ + .format(array.number_of_dimensions)) + + #DataContainer.__init__(self, array.as_array(), deep_copy, + # array.dimension_labels, **kwargs) + super(ImageData, self).__init__(array.as_array(), deep_copy, + array.dimension_labels, **kwargs) + elif issubclass(type(array) , numpy.ndarray): + if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): + raise ValueError( + 'Number of dimensions are not 2 or 3 or 4 : {0}'\ + .format(array.ndim)) + + if dimension_labels is None: + if array.ndim == 4: + dimension_labels = ['channel' ,'vertical' , 'horizontal_y' , + 'horizontal_x'] + elif array.ndim == 3: + dimension_labels = ['vertical' , 'horizontal_y' , + 'horizontal_x'] + else: + dimension_labels = ['horizontal_y' , + 'horizontal_x'] + + #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) + super(ImageData, self).__init__(array, deep_copy, + dimension_labels, **kwargs) + + # load metadata from kwargs if present + for key, value in kwargs.items(): + if (type(value) == list or type(value) == tuple) and \ + ( len (value) == 3 and len (value) == 2) : + if key == 'origin' : + self.origin = value + if key == 'spacing' : + self.spacing = value + + def subset(self, dimensions=None, **kw): + out = super(ImageData, self).subset(dimensions, **kw) + #out.geometry = self.recalculate_geometry(dimensions , **kw) + out.geometry = self.geometry + return out + + +class AcquisitionData(DataContainer): + '''DataContainer for holding 2D or 3D sinogram''' + def __init__(self, + array = None, + deep_copy=True, + dimension_labels=None, + **kwargs): + self.geometry = None + if array is None: + if 'geometry' in kwargs.keys(): + geometry = kwargs['geometry'] + self.geometry = geometry + channels = geometry.channels + horiz = geometry.pixel_num_h + vert = geometry.pixel_num_v + angles = geometry.angles + num_of_angles = numpy.shape(angles)[0] + + if dimension_labels is None: + if channels > 1: + if vert > 1: + shape = (channels, num_of_angles , vert, horiz) + dim_labels = ['channel' , ' angle' , + 'vertical' , 'horizontal'] + else: + shape = (channels , num_of_angles, horiz) + dim_labels = ['channel' , 'angle' , + 'horizontal'] + else: + if vert > 1: + shape = (num_of_angles, vert, horiz) + dim_labels = ['angle' , 'vertical' , + 'horizontal'] + else: + shape = (num_of_angles, horiz) + dim_labels = ['angle' , + 'horizontal'] + + dimension_labels = dim_labels + else: + shape = [] + for dim in dimension_labels: + if dim == 'channel': + shape.append(channels) + elif dim == 'angle': + shape.append(num_of_angles) + elif dim == 'vertical': + shape.append(vert) + elif dim == 'horizontal': + shape.append(horiz) + if len(shape) != len(dimension_labels): + raise ValueError('Missing {0} axes'.format( + len(dimension_labels) - len(shape))) + shape = tuple(shape) + + array = numpy.zeros( shape , dtype=numpy.float32) + super(AcquisitionData, self).__init__(array, deep_copy, + dimension_labels, **kwargs) + else: + + if issubclass(type(array) ,DataContainer): + # if the array is a DataContainer get the info from there + if not ( 
array.number_of_dimensions == 2 or \ + array.number_of_dimensions == 3 or \ + array.number_of_dimensions == 4): + raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ + .format(array.number_of_dimensions)) + + #DataContainer.__init__(self, array.as_array(), deep_copy, + # array.dimension_labels, **kwargs) + super(AcquisitionData, self).__init__(array.as_array(), deep_copy, + array.dimension_labels, **kwargs) + elif issubclass(type(array) ,numpy.ndarray): + if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): + raise ValueError( + 'Number of dimensions are not 2 or 3 or 4 : {0}'\ + .format(array.ndim)) + + if dimension_labels is None: + if array.ndim == 4: + dimension_labels = ['channel' ,'angle' , 'vertical' , + 'horizontal'] + elif array.ndim == 3: + dimension_labels = ['angle' , 'vertical' , + 'horizontal'] + else: + dimension_labels = ['angle' , + 'horizontal'] + + #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) + super(AcquisitionData, self).__init__(array, deep_copy, + dimension_labels, **kwargs) + + +class DataProcessor(object): + '''Defines a generic DataContainer processor + + accepts DataContainer as inputs and + outputs DataContainer + additional attributes can be defined with __setattr__ + ''' + + def __init__(self, **attributes): + if not 'store_output' in attributes.keys(): + attributes['store_output'] = True + attributes['output'] = False + attributes['runTime'] = -1 + attributes['mTime'] = datetime.now() + attributes['input'] = None + for key, value in attributes.items(): + self.__dict__[key] = value + + + def __setattr__(self, name, value): + if name == 'input': + self.set_input(value) + elif name in self.__dict__.keys(): + self.__dict__[name] = value + self.__dict__['mTime'] = datetime.now() + else: + raise KeyError('Attribute {0} not found'.format(name)) + #pass + + def set_input(self, dataset): + if issubclass(type(dataset), DataContainer): + if self.check_input(dataset): + self.__dict__['input'] = dataset + else: + raise TypeError("Input type mismatch: got {0} expecting {1}"\ + .format(type(dataset), DataContainer)) + + def check_input(self, dataset): + '''Checks parameters of the input DataContainer + + Should raise an Error if the DataContainer does not match expectation, e.g. + if the expected input DataContainer is 3D and the Processor expects 2D. 
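+
+        A minimal sketch of an implementation (it mirrors DataProcessor23D
+        further down in this module, which accepts 2D or 3D input):
+
+            def check_input(self, dataset):
+                if dataset.number_of_dimensions in (2, 3):
+                    return True
+                raise ValueError("Expected input dimensions is 2 or 3, got {0}"
+                                 .format(dataset.number_of_dimensions))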
+ ''' + raise NotImplementedError('Implement basic checks for input DataContainer') + + def get_output(self): + for k,v in self.__dict__.items(): + if v is None: + raise ValueError('Key {0} is None'.format(k)) + shouldRun = False + if self.runTime == -1: + shouldRun = True + elif self.mTime > self.runTime: + shouldRun = True + + # CHECK this + if self.store_output and shouldRun: + self.runTime = datetime.now() + self.output = self.process() + return self.output + self.runTime = datetime.now() + return self.process() + + def set_input_processor(self, processor): + if issubclass(type(processor), DataProcessor): + self.__dict__['input'] = processor + else: + raise TypeError("Input type mismatch: got {0} expecting {1}"\ + .format(type(processor), DataProcessor)) + + def get_input(self): + '''returns the input DataContainer + + It is useful in the case the user has provided a DataProcessor as + input + ''' + if issubclass(type(self.input), DataProcessor): + dsi = self.input.get_output() + else: + dsi = self.input + return dsi + + def process(self): + raise NotImplementedError('process must be implemented') + + + + +class DataProcessor23D(DataProcessor): + '''Regularizers DataProcessor + ''' + + def check_input(self, dataset): + '''Checks number of dimensions input DataContainer + + Expected input is 2D or 3D + ''' + if dataset.number_of_dimensions == 2 or \ + dataset.number_of_dimensions == 3: + return True + else: + raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ + .format(dataset.number_of_dimensions)) + +###### Example of DataProcessors + +class AX(DataProcessor): + '''Example DataProcessor + The AXPY routines perform a vector multiplication operation defined as + + y := a*x + where: + + a is a scalar + + x a DataContainer. + ''' + + def __init__(self): + kwargs = {'scalar':None, + 'input':None, + } + + #DataProcessor.__init__(self, **kwargs) + super(AX, self).__init__(**kwargs) + + def check_input(self, dataset): + return True + + def process(self): + + dsi = self.get_input() + a = self.scalar + + y = DataContainer( a * dsi.as_array() , True, + dimension_labels=dsi.dimension_labels ) + #self.setParameter(output_dataset=y) + return y + + + + +class PixelByPixelDataProcessor(DataProcessor): + '''Example DataProcessor + + This processor applies a python function to each pixel of the DataContainer + + f is a python function + + x a DataSet. 
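
A concrete processor therefore only declares its attributes in __init__, accepts or rejects the input in check_input, and builds its result in process; the input plumbing and the modification-time bookkeeping come from the DataProcessor base class. As a minimal sketch alongside AX (Shift and its offset attribute are illustrative names, and the import path is assumed to match the framework module used above), a processor that adds a constant to every element could be written and driven like this:

    import numpy
    from ccpi.framework import DataContainer, DataProcessor   # assumed export path

    class Shift(DataProcessor):
        '''Illustrative processor: y = x + offset.'''
        def __init__(self):
            kwargs = {'offset': None, 'input': None}
            super(Shift, self).__init__(**kwargs)
        def check_input(self, dataset):
            # accept any DataContainer; a real processor would test dimensions here
            return True
        def process(self):
            dsi = self.get_input()
            return DataContainer(dsi.as_array() + self.offset, True,
                                 dimension_labels=dsi.dimension_labels)

    shift = Shift()
    shift.offset = 10
    shift.set_input(DataContainer(numpy.zeros((2, 3), dtype=numpy.float32)))
    print(shift.get_output().as_array())     # a 2x3 array of 10s
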
+ ''' + + def __init__(self): + kwargs = {'pyfunc':None, + 'input':None, + } + #DataProcessor.__init__(self, **kwargs) + super(PixelByPixelDataProcessor, self).__init__(**kwargs) + + def check_input(self, dataset): + return True + + def process(self): + + pyfunc = self.pyfunc + dsi = self.get_input() + + eval_func = numpy.frompyfunc(pyfunc,1,1) + + + y = DataContainer( eval_func( dsi.as_array() ) , True, + dimension_labels=dsi.dimension_labels ) + return y + + + + +if __name__ == '__main__': + shape = (2,3,4,5) + size = shape[0] + for i in range(1, len(shape)): + size = size * shape[i] + #print("a refcount " , sys.getrefcount(a)) + a = numpy.asarray([i for i in range( size )]) + print("a refcount " , sys.getrefcount(a)) + a = numpy.reshape(a, shape) + print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) + print("a refcount " , sys.getrefcount(a)) + print ("ds label {0}".format(ds.dimension_labels)) + subset = ['W' ,'X'] + b = ds.subset( subset ) + print("a refcount " , sys.getrefcount(a)) + print ("b label {0} shape {1}".format(b.dimension_labels, + numpy.shape(b.as_array()))) + c = ds.subset(['Z','W','X']) + print("a refcount " , sys.getrefcount(a)) + + # Create a ImageData sharing the array with c + volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels) + volume1 = ImageData(c, False) + + print ("volume0 {0} volume1 {1}".format(id(volume0.array), + id(volume1.array))) + + # Create a ImageData copying the array from c + volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels) + volume3 = ImageData(c) + + print ("volume2 {0} volume3 {1}".format(id(volume2.array), + id(volume3.array))) + + # single number DataSet + sn = DataContainer(numpy.asarray([1])) + + ax = AX() + ax.scalar = 2 + ax.set_input(c) + #ax.apply() + print ("ax in {0} out {1}".format(c.as_array().flatten(), + ax.get_output().as_array().flatten())) + axm = AX() + axm.scalar = 0.5 + axm.set_input(c) + #axm.apply() + print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array())) + + # create a PixelByPixelDataProcessor + + #define a python function which will take only one input (the pixel value) + pyfunc = lambda x: -x if x > 20 else x + clip = PixelByPixelDataProcessor() + clip.pyfunc = pyfunc + clip.set_input(c) + #clip.apply() + + print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array())) + + #dsp = DataProcessor() + #dsp.set_input(ds) + #dsp.input = a + # pipeline + + chain = AX() + chain.scalar = 0.5 + chain.set_input_processor(ax) + print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array())) + + # testing arithmetic operations + + print (b) + print ((b+1)) + print ((1+b)) + + print (b) + print ((b*2)) + + print (b) + print ((2*b)) + + print (b) + print ((b/2)) + + print (b) + print ((2/b)) + + print (b) + print ((b**2)) + + print (b) + print ((2**b)) + + print (type(volume3 + 2)) + + s = [i for i in range(3 * 4 * 4)] + s = numpy.reshape(numpy.asarray(s), (3,4,4)) + sino = AcquisitionData( s ) + + shape = (4,3,2) + a = [i for i in range(2*3*4)] + a = numpy.asarray(a) + a = numpy.reshape(a, shape) + print (numpy.shape(a)) + ds = DataContainer(a, True, ['X', 'Y','Z']) + # this means that I expect the X to be of length 2 , + # y of length 3 and z of length 4 + subset = ['Y' ,'Z'] + b0 = ds.subset( subset ) + print ("shape b 3,2? 
{0}".format(numpy.shape(b0.as_array()))) + # expectation on b is that it is + # 3x2 cut at z = 0 + + subset = ['X' ,'Y'] + b1 = ds.subset( subset , Z=1) + print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array()))) + + + + # create VolumeData from geometry + vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2) + vol = ImageData(geometry=vgeometry) + + sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20), + geom_type='parallel', pixel_num_v=3, + pixel_num_h=5 , channels=2) + sino = AcquisitionData(geometry=sgeometry) + sino2 = sino.clone() + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index b8285b0..145277f 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -8,283 +8,9 @@ Created on Thu Feb 14 12:36:40 2019 import numpy from numbers import Number import functools -from ccpi.framework import AcquisitionData, ImageData +from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer +from ccpi.optimisation.operators import Operator, LinearOperator -class Operator(object): - '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 - def is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError - def norm(self): - raise NotImplementedError - def range_dim(self): - raise NotImplementedError - def domain_dim(self): - raise NotImplementedError - def __rmul__(self, other): - assert isinstance(other, Number) - self.scalar = other - return self - -class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - raise NotImplementedError - -# this should go in the framework - -class BlockDataContainer(object): - '''Class to hold a composite operator''' - __array_priority__ = 1 - def __init__(self, *args, shape=None): - '''containers must be passed row by row''' - self.containers = args - self.index = 0 - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) -# for i in range(shape[0]): -# b.append([]) -# for j in range(shape[1]): -# b[-1].append(args[i*shape[1]+j]) -# indices.append(i*shape[1]+j) -# self.containers = b - - def __iter__(self): - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 2 objects fit''' - if isinstance(other, Number): - return True - elif isinstance(other, list): - # TODO look elements should be numbers - for ot in other: - if not isinstance(ot, (Number,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - raise ValueError('List/ numpy array can only contain numbers {}'\ - .format(type(ot))) - return len(self.containers) == len(other) - elif isinstance(other, numpy.ndarray): - 
return self.shape == other.shape - return len(self.containers) == len(other.containers) - def get_item(self, row, col=0): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.containers[index] - - def add(self, other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def subtract(self, other, out=None , *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def multiply(self, other , out=None, *args, **kwargs): - self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def divide(self, other , out=None ,*args, **kwargs): - self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def power(self, other , out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - def maximum(self,other, out=None, *args, **kwargs): - assert self.is_compatible(other) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - - ## unary operations - def abs(self, out=None, *args, **kwargs): - return type(self)(*[ 
el.abs(out, *args, **kwargs) for el in self.containers]) - def sign(self, out=None, *args, **kwargs): - return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) - def sqrt(self, out=None, *args, **kwargs): - return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) - def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers]) - - ## reductions - def sum(self, out=None, *args, **kwargs): - return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) - def squared_norm(self): - y = numpy.asarray([el.squared_norm() for el in self.containers]) - return y.sum() - def norm(self): - y = numpy.asarray([el.norm() for el in self.containers]) - return y.sum() - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers]) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def __div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - '''Reverse multiplication - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self * other - # __rmul__ - - def __rdiv__(self, other): - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - return other.power(self) - - def __iadd__(self, other): - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - self.is_compatible(other) - for el,ot in zip(self.containers, other): - el += ot - return self - # __radd__ - - def __isub__(self, other): - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el -= ot - return self - # __rsub__ - - def __imul__(self, other): - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) - for el,ot in 
zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - return self.__idiv__(other) - class BlockOperator(Operator): diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py new file mode 100755 index 0000000..d0e7804 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 15:57:52 2019 + +@author: ofn77899 +""" + +from ccpi.optimisation.operators import Operator + +class LinearOperator(Operator): + '''Operator that maps from a space X -> Y''' + def is_linear(self): + '''Returns if the operator is linear''' + return True + def adjoint(self,x, out=None): + '''returns the adjoint/inverse operation + + only available to linear operators''' + raise NotImplementedError diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py new file mode 100755 index 0000000..ea08b30 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 15:55:56 2019 + +@author: ofn77899 +""" + +class Operator(object): + '''Operator that maps from a space X -> Y''' + def __init__(self, **kwargs): + self.scalar = 1 + def is_linear(self): + '''Returns if the operator is linear''' + return False + def direct(self,x, out=None): + raise NotImplementedError + def size(self): + # To be defined for specific class + raise NotImplementedError + def norm(self): + raise NotImplementedError + def range_geometry(self): + raise NotImplementedError + def domain_geometry(self): + raise NotImplementedError diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py new file mode 100755 index 0000000..088f48c --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 15:56:27 2019 + +@author: ofn77899 +""" + +from .Operator import Operator +from .LinearOperator import LinearOperator +from .BlockOperator import BlockOperator diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index 7a55764..630e33e 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -31,7 +31,8 @@ if cil_version == '': setup( name="ccpi-framework", version=cil_version, - packages=['ccpi' , 'ccpi.io', 'ccpi.optimisation', + packages=['ccpi' , 'ccpi.io', + 'ccpi.framework', 'ccpi.optimisation', 'ccpi.optimisation.operators', 'ccpi.optimisation.algorithms'], diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py new file mode 100755 index 0000000..824abf6 --- /dev/null +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 16:08:23 2019 + +@author: ofn77899 +""" + +import unittest +import numpy +#from ccpi.plugins.ops import CCPiProjectorSimple +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.framework import ImageGeometry, AcquisitionGeometry +from ccpi.framework import ImageData, AcquisitionData +#from ccpi.optimisation.algorithms import GradientDescent +from ccpi.framework import BlockDataContainer +#from ccpi.optimisation.Algorithms import CGLS + +class 
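
These two small base classes define the whole operator contract: direct (plus adjoint for linear operators) together with the norm and geometry queries. As a minimal sketch of a concrete subclass (Weighting is an illustrative name, the out= path is omitted for brevity, and the example leans on the DataContainer arithmetic shown earlier in this series), a diagonal element-wise weighting operator could look like:

    import numpy
    from ccpi.framework import ImageGeometry, ImageData
    from ccpi.optimisation.operators import LinearOperator

    class Weighting(LinearOperator):
        '''Illustrative diagonal operator: multiplies element-wise by a fixed weight image.'''
        def __init__(self, weights):
            super(Weighting, self).__init__()
            self.weights = weights                 # a DataContainer matching the input
        def direct(self, x, out=None):
            return self.weights.multiply(x)
        def adjoint(self, x, out=None):
            return self.weights.multiply(x)        # a real diagonal operator is self-adjoint
        def norm(self):
            return numpy.abs(self.weights.as_array()).max()

    ig = ImageGeometry(voxel_num_x=4, voxel_num_y=4)
    W = Weighting(ImageData(geometry=ig) + 2)
    x = ImageData(geometry=ig) + 1
    print(W.is_linear(), W.norm(), W.direct(x).as_array().max())   # True 2.0 2.0
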
TestBlockDataContainer(unittest.TestCase): + def test_BlockDataContainer(self): + print ("test block data container") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + cp1 = BlockDataContainer(data2,data3) + # + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] + print (a[0][0].shape) + #cp2 = BlockDataContainer(*a) + cp2 = cp0.add(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + + cp2 = cp0 + cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + cp2 = cp0 + 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 + [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) + cp2 += cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 += 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) + + cp2 += [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) + + + cp2 = cp0.subtract(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + cp2 = cp0 - cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + + cp2 = cp0 - 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) + cp2 = cp0 - [1 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) + + cp2 -= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + cp2 -= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) + + cp2 -= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + + + cp2 = cp0.multiply(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + cp2 = cp0 * cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + + cp2 = cp0 * 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = 2 * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + cp2 = cp0 * [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = cp0 * numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 = [3,2] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = numpy.asarray([3,2]) * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + cp2 = [3,2,3] * cp0 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + + cp2 *= cp1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + + cp2 *= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) + + + cp2 = cp0.divide(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + cp2 = cp0/cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + + cp2 = cp0 / 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / [3 ,2] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp2 = cp0 / numpy.asarray([3 ,2]) + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + cp3 = numpy.asarray([3 ,2]) / (cp0+1) + numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. 
, decimal=5) + numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) + + cp2 += 1 + cp2 /= cp1 + # TODO fix inplace division + + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) + + cp2 /= 1 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + + cp2 /= [-2,-1] + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) + #### + + cp2 = cp0.power(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + cp2 = cp0**cp1 + assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0 ** 2 + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) + + cp2 = cp0.maximum(cp1) + assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) + + + cp2 = cp0.abs() + numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + + cp2 = cp0.subtract(cp1) + s = cp2.sign() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) + + cp2 = cp0.add(cp1) + s = cp2.sqrt() + numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) + + s = cp0.sum() + numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + s0 = 1 + s1 = 1 + for i in cp0.get_item(0,0).shape: + s0 *= i + for i in cp0.get_item(1,0).shape: + s1 *= i + + numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) + \ No newline at end of file -- cgit v1.2.3 From f4126e1f085a4bfab9cae5081af57edb40af0832 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 5 Mar 2019 16:47:22 +0000 Subject: python 2.7 fix --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 9a42a16..e077290 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -18,7 +18,7 @@ import functools class BlockDataContainer(object): '''Class to hold a composite operator''' __array_priority__ = 1 - def __init__(self, *args, shape=None): + def __init__(self, shape=None, *args): '''containers must be passed row by row''' self.containers = args self.index = 0 -- cgit v1.2.3 From 989f32cd52caa597781b7cf1312e5ace28576f79 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 6 Mar 
2019 11:33:51 +0000 Subject: fix named argument before *args --- .../Python/ccpi/framework/BlockDataContainer.py | 44 +++++++++++++--------- Wrappers/Python/ccpi/framework/framework.py | 39 ++++++++++++------- 2 files changed, 51 insertions(+), 32 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index e077290..5f24e5c 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -16,28 +16,27 @@ import functools #from ccpi.optimisation.operators import Operator, LinearOperator class BlockDataContainer(object): - '''Class to hold a composite operator''' + '''Class to hold DataContainers as blocks''' __array_priority__ = 1 - def __init__(self, shape=None, *args): + def __init__(self, *args, **kwargs): '''containers must be passed row by row''' self.containers = args self.index = 0 + shape = kwargs.get('shape', None) + print (shape) if shape is None: shape = (len(args),1) self.shape = shape + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) if len(args) != n_elements: raise ValueError( 'Dimension and size do not match: expected {} got {}' .format(n_elements,len(args))) -# for i in range(shape[0]): -# b.append([]) -# for j in range(shape[1]): -# b[-1].append(args[i*shape[1]+j]) -# indices.append(i*shape[1]+j) -# self.containers = b + def __iter__(self): + '''BlockDataContainer is Iterable''' return self def next(self): '''python2 backwards compatibility''' @@ -76,24 +75,27 @@ class BlockDataContainer(object): index = row*self.shape[1]+col return self.containers[index] - def add(self, other, out=None, *args, **kwargs): + def add(self, other, *args, **kwargs): assert self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - def subtract(self, other, out=None , *args, **kwargs): + def subtract(self, other, *args, **kwargs): assert self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - def multiply(self, other , out=None, *args, **kwargs): + def multiply(self, other, *args, **kwargs): self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list): @@ -102,24 +104,27 @@ class BlockDataContainer(object): return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - def divide(self, other , out=None ,*args, **kwargs): + def divide(self, other, *args, **kwargs): self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return 
type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - def power(self, other , out=None, *args, **kwargs): + def power(self, other, *args, **kwargs): assert self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) - def maximum(self,other, out=None, *args, **kwargs): + def maximum(self,other, *args, **kwargs): assert self.is_compatible(other) + out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) elif isinstance(other, list) or isinstance(other, numpy.ndarray): @@ -127,17 +132,20 @@ class BlockDataContainer(object): return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) ## unary operations - def abs(self, out=None, *args, **kwargs): + def abs(self, *args, **kwargs): + out = kwargs.get('out', None) return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) - def sign(self, out=None, *args, **kwargs): + def sign(self, *args, **kwargs): + out = kwargs.get('out', None) return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) - def sqrt(self, out=None, *args, **kwargs): + def sqrt(self, *args, **kwargs): + out = kwargs.get('out', None) return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) def conjugate(self, out=None): return type(self)(*[el.conjugate() for el in self.containers]) ## reductions - def sum(self, out=None, *args, **kwargs): + def sum(self, *args, **kwargs): return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) def squared_norm(self): y = numpy.asarray([el.squared_norm() for el in self.containers]) diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index dab2dd9..23f18e6 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -375,7 +375,8 @@ class DataContainer(object): return self.shape == other.shape ## algebra - def __add__(self, other , out=None, *args, **kwargs): + def __add__(self, other, *args, **kwargs): + out = kwargs.get('out', None) if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -632,8 +633,8 @@ class DataContainer(object): ## binary operations - def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): - + def pixel_wise_binary(self, pwop, x2, *args, **kwargs): + out = kwargs.get('out', None) if out is None: if isinstance(x2, (int, float, complex)): out = pwop(self.as_array() , x2 , *args, **kwargs ) @@ -676,26 +677,33 @@ class DataContainer(object): else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - def add(self, other , out=None, *args, **kwargs): + def add(self, other, *args, **kwargs): + out = kwargs.get('out', None) 
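
The pattern adopted here, accepting *args and pulling keyword options out of **kwargs, is what makes these signatures work on both Python 2.7 and 3.x: a keyword-only parameter written after *args, as in the original "def __init__(self, *args, shape=None)", is a syntax error under 2.7, while putting the default first, "def __init__(self, shape=None, *args)", silently binds the first container to shape. A small standalone sketch of the portable form, with purely illustrative names:

    class Blocks(object):                       # stand-in for BlockDataContainer, illustration only
        def __init__(self, *args, **kwargs):
            self.containers = args
            self.shape = kwargs.get('shape', (len(args), 1))

    b = Blocks(1, 2, 3, 4, shape=(2, 2))
    print(b.shape)                              # (2, 2)
    print(b.containers[1 * b.shape[1] + 0])     # 3, the row-by-row element at row 1, col 0
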
return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) - def subtract(self, other, out=None , *args, **kwargs): + def subtract(self, other, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) - def multiply(self, other , out=None, *args, **kwargs): + def multiply(self, other, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) - def divide(self, other , out=None ,*args, **kwargs): + def divide(self, other, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) - def power(self, other , out=None, *args, **kwargs): + def power(self, other, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) - def maximum(self,x2, out=None, *args, **kwargs): + def maximum(self, x2, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) ## unary operations - def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): + def pixel_wise_unary(self, pwop, *args, **kwargs): + out = kwargs.get('out', None) if out is None: out = pwop(self.as_array() , *args, **kwargs ) return type(self)(out, @@ -713,13 +721,16 @@ class DataContainer(object): else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - def abs(self, out=None, *args, **kwargs): + def abs(self, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) - def sign(self, out=None, *args, **kwargs): + def sign(self, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) - def sqrt(self, out=None, *args, **kwargs): + def sqrt(self, *args, **kwargs): + out = kwargs.get('out', None) return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) #def __abs__(self): @@ -728,7 +739,7 @@ class DataContainer(object): # __abs__ ## reductions - def sum(self, out=None, *args, **kwargs): + def sum(self, *args, **kwargs): return self.as_array().sum(*args, **kwargs) def squared_norm(self): '''return the squared euclidean norm of the DataContainer viewed as a vector''' -- cgit v1.2.3 From 6984e890c19b2b1ee547bbf079c27564cff56887 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 6 Mar 2019 13:55:20 +0000 Subject: removed default before positional --- Wrappers/Python/ccpi/framework/framework.py | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 23f18e6..d77db4a 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -678,28 +678,22 @@ class DataContainer(object): raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) def add(self, other, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.add, other, *args, **kwargs) def subtract(self, other, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs) def multiply(self, other, 
*args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs) def divide(self, other, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) def power(self, other, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) def maximum(self, x2, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) + return self.pixel_wise_binary(numpy.maximum, x2=x2, *args, **kwargs) ## unary operations def pixel_wise_unary(self, pwop, *args, **kwargs): @@ -722,16 +716,13 @@ class DataContainer(object): raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) def abs(self, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) + return self.pixel_wise_unary(numpy.abs, *args, **kwargs) def sign(self, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) + return self.pixel_wise_unary(numpy.sign, *args, **kwargs) def sqrt(self, *args, **kwargs): - out = kwargs.get('out', None) - return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) + return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs) #def __abs__(self): # operation = FM.OPERATION.ABS -- cgit v1.2.3 From d6d63f9f10f8eccee1a4cacf76d9d2de1ea93377 Mon Sep 17 00:00:00 2001 From: vagrant Date: Wed, 6 Mar 2019 11:16:25 -0500 Subject: conda recipe conditional for testing --- Wrappers/Python/conda-recipe/conda_build_config.yaml | 2 +- Wrappers/Python/conda-recipe/meta.yaml | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml index 96a211f..30c8e9d 100644 --- a/Wrappers/Python/conda-recipe/conda_build_config.yaml +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -4,5 +4,5 @@ python: - 3.6 numpy: # TODO investigage, as it doesn't currently build with cvxp, requires >1.14 - #- 1.12 + - 1.12 - 1.15 diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 8ded429..dd3238e 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -11,7 +11,7 @@ build: test: requires: - python-wget - - cvxpy # [not win] + - cvxpy # [ unix and py36 and np115 ] source_files: - ./test # [win] @@ -24,8 +24,9 @@ test: requirements: build: + - {{ pin_compatible('numpy', max_pin='x.x') }} - python - - numpy {{ numpy }} + - numpy - setuptools run: @@ -33,7 +34,7 @@ requirements: - python - numpy - scipy - - matplotlib + #- matplotlib - h5py about: -- cgit v1.2.3 From 604f5ce166838defe9d3df3b936830b5b96a1fe1 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 6 Mar 2019 11:18:10 -0500 Subject: fixed tests and py27 quirks --- .../Python/ccpi/framework/BlockDataContainer.py | 6 ++--- Wrappers/Python/ccpi/framework/framework.py | 18 ++++++++------ .../ccpi/optimisation/operators/BlockOperator.py | 28 ++++++++++++++++++---- Wrappers/Python/test/test_DataContainer.py | 4 ++-- 4 files 
changed, 38 insertions(+), 18 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 5f24e5c..1bfc98c 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -23,7 +23,6 @@ class BlockDataContainer(object): self.containers = args self.index = 0 shape = kwargs.get('shape', None) - print (shape) if shape is None: shape = (len(args),1) self.shape = shape @@ -151,8 +150,7 @@ class BlockDataContainer(object): y = numpy.asarray([el.squared_norm() for el in self.containers]) return y.sum() def norm(self): - y = numpy.asarray([el.norm() for el in self.containers]) - return y.sum() + return numpy.sqrt(self.squared_norm()) def copy(self): '''alias of clone''' return self.clone() @@ -299,4 +297,4 @@ class BlockDataContainer(object): def __itruediv__(self, other): '''Inline truedivision''' return self.__idiv__(other) - \ No newline at end of file + diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index d77db4a..09fa320 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -652,7 +652,8 @@ class DataContainer(object): elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): if self.check_dimensions(out) and self.check_dimensions(x2): - pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs ) + kwargs['out'] = out.as_array() + pwop(self.as_array(), x2.as_array(), *args, **kwargs ) #return type(self)(out.as_array(), # deep_copy=False, # dimension_labels=self.dimension_labels, @@ -662,14 +663,15 @@ class DataContainer(object): raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): if self.check_dimensions(out): - - pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs ) + kwargs['out']=out.as_array() + pwop(self.as_array(), x2, *args, **kwargs ) return out else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), numpy.ndarray): if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), x2 , out=out, *args, **kwargs) + kwargs['out'] = out + pwop(self.as_array(), x2, *args, **kwargs) #return type(self)(out, # deep_copy=False, # dimension_labels=self.dimension_labels, @@ -693,7 +695,7 @@ class DataContainer(object): return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) def maximum(self, x2, *args, **kwargs): - return self.pixel_wise_binary(numpy.maximum, x2=x2, *args, **kwargs) + return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs) ## unary operations def pixel_wise_unary(self, pwop, *args, **kwargs): @@ -706,12 +708,14 @@ class DataContainer(object): geometry=self.geometry) elif issubclass(type(out), DataContainer): if self.check_dimensions(out): - pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) + kwargs['out'] = out.as_array() + pwop(self.as_array(), *args, **kwargs ) else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), numpy.ndarray): if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), out=out, *args, **kwargs) + kwargs['out'] = out + pwop(self.as_array(), *args, **kwargs) else: raise ValueError 
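
The norm change above is a behavioural fix rather than a refactor: a BlockDataContainer is meant to act like one long stacked vector, so its norm is the square root of the sum of the squared element norms, not the sum of the element norms. A standalone numpy check of the two conventions:

    import numpy
    a = numpy.ones(4)                  # ||a|| = 2
    b = 3 * numpy.ones(4)              # ||b|| = 6
    old = numpy.linalg.norm(a) + numpy.linalg.norm(b)                    # 8.0, previous behaviour
    new = numpy.sqrt(numpy.linalg.norm(a)**2 + numpy.linalg.norm(b)**2)  # ~6.32, sqrt(squared_norm)
    stacked = numpy.linalg.norm(numpy.concatenate([a, b]))               # equals new
    print(old, new, stacked)
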
(message(type(self), "incompatible class:" , pwop.__name__, type(out))) diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 145277f..4bbc536 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -14,9 +14,27 @@ from ccpi.optimisation.operators import Operator, LinearOperator class BlockOperator(Operator): - '''Class to hold a block operator''' - def __init__(self, *args, shape=None): + '''Class to hold a block operator + + Class to hold a number of Operators in a block. + User may specify the shape of the block, by default is a row vector + ''' + def __init__(self, *args, **kwargs): + ''' + Class creator + + Note: + Do not include the `self` parameter in the ``Args`` section. + + Args: + vararg (Operator): Operators in the block. varargs are passed in a + row-by-row fashion. + shape (:obj:`tuple`, optional): If passed the Operators listed + in the vararg are laid out as described. Shape and number + of Operators must match. + ''' self.operators = args + shape = kwargs.get('shape', None) if shape is None: shape = (len(args),1) self.shape = shape @@ -61,9 +79,9 @@ class BlockOperator(Operator): for row in range(self.shape[1]): for col in range(self.shape[0]): if col == 0: - prod = self.get_item(row,col).adjoint(x.get_item(col)) + prod = self.get_item(col,row).adjoint(x.get_item(row)) else: - prod += self.get_item(row,col).adjoint(x.get_item(col)) + prod += self.get_item(col,row).adjoint(x.get_item(row)) res.append(prod) return BlockDataContainer(*res, shape=shape) @@ -467,4 +485,4 @@ if __name__ == '__main__': plt.subplot(1,5,5) plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nsmall lambda') - plt.show() \ No newline at end of file + plt.show() diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index 05f3fe8..3ce2dac 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -174,7 +174,7 @@ class TestDataContainer(unittest.TestCase): def binary_add(self): print("Test binary add") X, Y, Z = 512, 512, 512 - X, Y, Z = 256, 512, 512 + X, Y, Z = 1024, 512, 512 steps = [timer()] a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) @@ -496,4 +496,4 @@ class TestDataContainer(unittest.TestCase): if __name__ == '__main__': unittest.main() - \ No newline at end of file + -- cgit v1.2.3 From 9f656c1aee2f9da8baee692b2a5de1de74cc5b12 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 6 Mar 2019 16:38:57 +0000 Subject: added BlockLinearOperator --- .../ccpi/optimisation/operators/BlockOperator.py | 272 +++++---------------- 1 file changed, 59 insertions(+), 213 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 4bbc536..b2af8fc 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -27,11 +27,14 @@ class BlockOperator(Operator): Do not include the `self` parameter in the ``Args`` section. Args: - vararg (Operator): Operators in the block. varargs are passed in a - row-by-row fashion. - shape (:obj:`tuple`, optional): If passed the Operators listed - in the vararg are laid out as described. Shape and number - of Operators must match. 
+ vararg (Operator): Operators in the block. + shape (:obj:`tuple`, optional): If shape is passed the Operators in + vararg are considered input in a row-by-row fashion. + Shape and number of Operators must match. + + Example: + BlockOperator(op0,op1) results in a row block + BlockOperator(op0,op1,shape=(1,2)) results in a column block ''' self.operators = args shape = kwargs.get('shape', None) @@ -95,33 +98,63 @@ class BlockOperator(Operator): raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) return (oshape, xshape[-1]) -''' - def direct(self, x, out=None): - - out = [None]*self.dimension[0] - for i in range(self.dimension[0]): - z1 = ImageData(np.zeros(self.compMat[i][0].range_dim())) - for j in range(self.dimension[1]): - z1 += self.compMat[i][j].direct(x[j]) - out[i] = z1 - - return out - - def adjoint(self, x, out=None): + +class BlockLinearOperator(BlockOperator): + '''Class to hold a block operator + + Class to hold a number of Operators in a block. + User may specify the shape of the block, by default is a row vector + ''' + def __init__(self, *args, **kwargs): + ''' + Class creator + + Note: + Do not include the `self` parameter in the ``Args`` section. + + Args: + vararg (Operator): LinearOperators in the block. + shape (:obj:`tuple`, optional): If shape is passed the Operators in + vararg are considered input in a row-by-row fashion. + Shape and number of Operators must match. + + Example: + BlockLinearOperator(op0,op1) results in a row block + BlockLinearOperator(op0,op1,shape=(1,2)) results in a column block + ''' + for i,op in enumerate(args): + if not op.is_linear(): + raise ValueError('Operator {} must be LinearOperator'.format(i)) + super(BlockLinearOperator, self).__init__(*args, **kwargs) + + def adjoint(self, x, out=None): + '''Adjoint operation for the BlockOperator - out = [None]*self.dimension[1] - for i in range(self.dimension[1]): - z2 = ImageData(np.zeros(self.compMat[0][i].domain_dim())) - for j in range(self.dimension[0]): - z2 += self.compMat[j][i].adjoint(x[j]) - out[i] = z2 -''' -from ccpi.optimisation.algorithms import CGLS + only available on BlockLinearOperator + ''' + shape = self.get_output_shape(x.shape, adjoint=True) + res = [] + for row in range(self.shape[1]): + for col in range(self.shape[0]): + if col == 0: + prod = self.get_item(col,row).adjoint(x.get_item(row)) + else: + prod += self.get_item(col,row).adjoint(x.get_item(row)) + res.append(prod) + return BlockDataContainer(*res, shape=shape) + + + + + + if __name__ == '__main__': #from ccpi.optimisation.Algorithms import GradientDescent + from ccpi.optimisation.algorithms import CGLS + from ccpi.plugins.ops import CCPiProjectorSimple from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.optimisation.ops import TomoIdentity @@ -131,193 +164,6 @@ if __name__ == '__main__': #from ccpi.optimisation.Algorithms import CGLS import matplotlib.pyplot as plt - ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(12,42,55,32) - - data0 = ImageData(geometry=ig0) - data1 = ImageData(geometry=ig1) + 1 - - data2 = ImageData(geometry=ig0) + 2 - data3 = ImageData(geometry=ig1) + 3 - - cp0 = BlockDataContainer(data0,data1) - cp1 = BlockDataContainer(data2,data3) -# - a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] - print (a[0][0].shape) - #cp2 = BlockDataContainer(*a) - cp2 = cp0.add(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) 
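
With the long in-module demonstration removed below, a compact construction sketch may be useful. ScaleBy is an illustrative LinearOperator (any linear operator would do), BlockLinearOperator is imported straight from this module because the package __init__ shown earlier does not yet export it, and get_item on the operator block is assumed to follow the same row/column indexing as BlockDataContainer.get_item:

    from ccpi.optimisation.operators import LinearOperator
    from ccpi.optimisation.operators.BlockOperator import BlockOperator, BlockLinearOperator

    class ScaleBy(LinearOperator):
        '''Illustrative self-adjoint operator: y = c * x.'''
        def __init__(self, c):
            super(ScaleBy, self).__init__()
            self.c = c
        def direct(self, x, out=None):
            return self.c * x
        def adjoint(self, x, out=None):
            return self.c * x

    K = BlockOperator(ScaleBy(2), ScaleBy(3))
    print(K.shape)                          # (2, 1), the default (len(args), 1) layout
    print(K.get_item(1, 0).c)               # 3, assuming BlockDataContainer-style indexing

    # BlockLinearOperator additionally rejects non-linear entries and provides adjoint()
    L = BlockLinearOperator(ScaleBy(2), ScaleBy(3))
    print(L.get_item(0, 0).is_linear())     # True
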
- - cp2 = cp0 + cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) - cp2 = cp0 + 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = cp0 + [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) - cp2 += cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 += 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) - - cp2 += [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) - - - cp2 = cp0.subtract(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - cp2 = cp0 - cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) - - cp2 = cp0 - 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) - cp2 = cp0 - [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) - - cp2 -= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - cp2 -= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) - - cp2 -= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) - - - cp2 = cp0.multiply(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - cp2 = cp0 * cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) - - cp2 = cp0 * 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) - cp2 = 2 * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) - cp2 = cp0 * [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = cp0 * numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - - cp2 = [3,2] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = numpy.asarray([3,2]) * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - cp2 = [3,2,3] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) - - cp2 *= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) - - cp2 *= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) - - - cp2 = cp0.divide(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - cp2 = cp0/cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) - - cp2 = cp0 / 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp2 = cp0 / [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp2 = cp0 / numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - cp3 = numpy.asarray([3 ,2]) / (cp0+1) - numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. , decimal=5) - numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) - - cp2 += 1 - cp2 /= cp1 - # TODO fix inplace division - - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) - - cp2 /= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) - - cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) - #### - - cp2 = cp0.power(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - cp2 = cp0**cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) 
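(The element-wise semantics exercised by the BlockDataContainer tests above, as a minimal sketch; scalars broadcast to every block, lists supply one value per block, and another BlockDataContainer is combined block by block.)

from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer

ig = ImageGeometry(2, 3, 4)
cp0 = BlockDataContainer(ImageData(geometry=ig),        # block of zeros
                         ImageData(geometry=ig) + 1)    # block of ones

cp1 = cp0 + 1            # scalar broadcast          -> blocks of 1 and 2
cp2 = cp0 + [1, 2]       # one value per block       -> blocks of 1 and 3
cp3 = cp0 * cp0          # block-by-block product    -> blocks of 0 and 1
cp4 = (cp0 + 3).sqrt()   # unary ops apply per block -> blocks of sqrt(3) and 2
total = cp0.sum()        # after this series, sum() collapses to a single number:
                         # the sum over every element of every block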
- numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0 ** 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) - - cp2 = cp0.maximum(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) - - - cp2 = cp0.abs() - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) - - cp2 = cp0.subtract(cp1) - s = cp2.sign() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) - - cp2 = cp0.add(cp1) - s = cp2.sqrt() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) - - s = cp0.sum() - numpy.testing.assert_almost_equal(s[0], 0, decimal=4) - s0 = 1 - s1 = 1 - for i in cp0.get_item(0,0).shape: - s0 *= i - for i in cp0.get_item(1,0).shape: - s1 *= i - - numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) # Set up phantom size N x N x vert by creating ImageGeometry, initialising the # ImageData object with this geometry and empty array and finally put some -- cgit v1.2.3 From d9994f9e1576c82e92bfe2684e11d2d768e95046 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 6 Mar 2019 22:14:37 +0000 Subject: created ScaledOperator.py --- .../Python/ccpi/optimisation/operators/ScaledOperator.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py new file mode 100644 index 0000000..c29effc --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -0,0 +1,14 @@ +from ccpi.optimisation.operators import LinearOperator +from numbers import Number + +class ScaledOperator(LinearOperator): + def __init__(self, operator, scalar): + if not isinstance (scalar, Number): + raise TypeError('expected scalar: got {}'.format(type(scalar)) + self.scalar = scalar + self.operator = operator + def direct(self, x, out=None): + return self.scalar * self.operator.direct(x, out=out) + def adjoint(self, x, out=None): + if self.operator.is_linear(): + return self.scalar * self.operator.adjoint(x, out=out) -- cgit v1.2.3 From 1d34ba0bd8c3cdca127625296cc37a56be6aec93 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 7 Mar 2019 06:55:44 +0000 Subject: added all methods --- Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py index c29effc..56f58cd 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -12,3 +12,12 @@ class ScaledOperator(LinearOperator): 
def adjoint(self, x, out=None): if self.operator.is_linear(): return self.scalar * self.operator.adjoint(x, out=out) + def size(self): + return self.operator.size() + def norm(self): + return self.operator.norm() + def range_geometry(self): + return self.operator.range_geometry() + def domain_geometry(self): + return self.operator.domain_geometry() + -- cgit v1.2.3 From 365ea7153042687dbd1f10a7e61f74fc2eb29580 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 7 Mar 2019 08:18:55 +0000 Subject: add scaled operator and block scaled --- .../optimisation/operators/BlockScaledOperator.py | 26 ++++++++++++++++++++++ .../Python/ccpi/optimisation/operators/Operator.py | 4 ++++ .../ccpi/optimisation/operators/ScaledOperator.py | 21 ++++++++++++++++- .../Python/ccpi/optimisation/operators/__init__.py | 2 ++ 4 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py new file mode 100644 index 0000000..29dacb8 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 14 12:36:40 2019 + +@author: ofn77899 +""" +#from ccpi.optimisation.ops import Operator +import numpy +from numbers import Number +import functools +from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer +from ccpi.optimisation.operators import Operator, LinearOperator + + + +class BlockScaledOperator(BlockOperator): + def __init__(self, *args, **kwargs): + super(BlockScaledOperator, self).__init__(*args, **kwargs) + scalar = kwargs.get('scalar',1) + if isinstance (scalar, list) or isinstance(scalar, tuple) or \ + isinstance(scalar, numpy.ndarray): + if len(scalars) != len(self.operators): + raise ValueError('dimensions of scalars and operators do not match') + else: + scalar = [scalar for _ in self.operators] + self.operators = [v * op for op in self.operators] \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py index ea08b30..11b9b87 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py @@ -4,6 +4,7 @@ Created on Tue Mar 5 15:55:56 2019 @author: ofn77899 """ +from ccpi.framework,operators import ScaledOperator class Operator(object): '''Operator that maps from a space X -> Y''' @@ -23,3 +24,6 @@ class Operator(object): raise NotImplementedError def domain_geometry(self): raise NotImplementedError + def __rmul__(self, scalar): + return ScaledOperator(self, scalar) + diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py index 56f58cd..1181604 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -2,6 +2,25 @@ from ccpi.optimisation.operators import LinearOperator from numbers import Number class ScaledOperator(LinearOperator): + '''ScaledOperator + + A class to represent the scalar multiplication of an Operator with a scalar. + It holds an operator and a scalar. Basically it returns the multiplication + of the result of direct and adjoint of the operator with the scalar. + For the rest it behaves like the operator it holds. 
+ + Args: + operator (Operator): a Operator or LinearOperator + scalar (Number): a scalar multiplier + Example: + The scaled operator behaves like the following: + sop = ScaledOperator(operator, scalar) + sop.direct(x) = scalar * operator.direct(x) + sop.adjoint(x) = scalar * operator.adjoint(x) + sop.norm() = operator.norm() + sop.range_geometry() = operator.range_geometry() + sop.domain_geometry() = operator.domain_geometry() + ''' def __init__(self, operator, scalar): if not isinstance (scalar, Number): raise TypeError('expected scalar: got {}'.format(type(scalar)) @@ -20,4 +39,4 @@ class ScaledOperator(LinearOperator): return self.operator.range_geometry() def domain_geometry(self): return self.operator.domain_geometry() - + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index 088f48c..cc307e0 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -7,4 +7,6 @@ Created on Tue Mar 5 15:56:27 2019 from .Operator import Operator from .LinearOperator import LinearOperator +from .ScaledOperator import ScaledOperator from .BlockOperator import BlockOperator +from .BlockScaledOperator import BlockScaledOperator -- cgit v1.2.3 From 2ecf8bf3063f3a427fc4c96d5bdfcd65d7045488 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 7 Mar 2019 10:16:59 -0500 Subject: lots of changes and added tests --- .../Python/ccpi/framework/BlockDataContainer.py | 63 +++++++++-------- Wrappers/Python/ccpi/framework/__init__.py | 2 +- Wrappers/Python/ccpi/framework/framework.py | 4 +- .../ccpi/optimisation/algorithms/__init__.py | 1 + .../ccpi/optimisation/operators/BlockOperator.py | 38 +++++++---- .../optimisation/operators/BlockScaledOperator.py | 6 +- .../ccpi/optimisation/operators/LinearOperator.py | 5 +- .../Python/ccpi/optimisation/operators/Operator.py | 8 +-- .../ccpi/optimisation/operators/ScaledOperator.py | 16 ++--- Wrappers/Python/test/test_BlockDataContainer.py | 78 +++++++++++++++++++++- Wrappers/Python/test/test_BlockOperator.py | 78 ++++++++++++++++++++++ Wrappers/Python/test/test_Operator.py | 24 +++++++ 12 files changed, 257 insertions(+), 66 deletions(-) create mode 100644 Wrappers/Python/test/test_BlockOperator.py create mode 100644 Wrappers/Python/test/test_Operator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 1bfc98c..cbce844 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -26,7 +26,7 @@ class BlockDataContainer(object): if shape is None: shape = (len(args),1) self.shape = shape - + print (self.shape) n_elements = functools.reduce(lambda x,y: x*y, shape, 1) if len(args) != n_elements: raise ValueError( @@ -78,74 +78,79 @@ class BlockDataContainer(object): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return 
type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)( + *[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) def subtract(self, other, *args, **kwargs): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) def multiply(self, other, *args, **kwargs): self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) def divide(self, other, *args, **kwargs): self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) def power(self, other, *args, **kwargs): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in 
zip(self.containers,other)]) - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def maximum(self,other, *args, **kwargs): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)]) - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)]) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) ## unary operations def abs(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers], shape=self.shape) def sign(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers], shape=self.shape) def sqrt(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers]) + return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers], shape=self.shape) def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers]) + return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) ## reductions def sum(self, *args, **kwargs): - return numpy.asarray([ el.sum(*args, **kwargs) for el in self.containers]) + return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) def squared_norm(self): y = numpy.asarray([el.squared_norm() for el in self.containers]) return y.sum() @@ -155,7 +160,7 @@ class BlockDataContainer(object): '''alias of clone''' return self.clone() def clone(self): - return type(self)(*[el.copy() for el in self.containers]) + return type(self)(*[el.copy() for el in self.containers], shape=self.shape) def __add__(self, other): return self.add( other ) @@ -297,4 +302,8 @@ class BlockDataContainer(object): def __itruediv__(self, other): '''Inline truedivision''' return self.__idiv__(other) - + @property + def T(self): + '''return the transposed of self''' + shape = (self.shape[1], self.shape[0]) + return type(self)(*self.containers, shape=shape) diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py index 083f547..4683c21 100755 --- a/Wrappers/Python/ccpi/framework/__init__.py +++ b/Wrappers/Python/ccpi/framework/__init__.py @@ -20,5 +20,5 @@ from .framework import ImageData, AcquisitionData from .framework import ImageGeometry, AcquisitionGeometry from .framework import find_key, message from .framework import DataProcessor -from .framework import AX, 
PixelByPixelDataProcessor +from .framework import AX, PixelByPixelDataProcessor, CastDataContainer from .BlockDataContainer import BlockDataContainer diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 3159cc7..1413e21 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -734,7 +734,9 @@ class DataContainer(object): def sqrt(self, *args, **kwargs): return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs) - + + def conjugate(self, *args, **kwargs): + return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs) #def __abs__(self): # operation = FM.OPERATION.ABS # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py index 903bc30..7e500e8 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -27,3 +27,4 @@ from .CGLS import CGLS from .GradientDescent import GradientDescent from .FISTA import FISTA from .FBPD import FBPD + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index b2af8fc..a83dc8a 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -66,9 +66,11 @@ class BlockOperator(Operator): def direct(self, x, out=None): shape = self.get_output_shape(x.shape) + print ("direct output shape", shape) res = [] for row in range(self.shape[0]): for col in range(self.shape[1]): + print ("row {} col {}".format(row, col)) if col == 0: prod = self.get_item(row,col).direct(x.get_item(col)) else: @@ -76,19 +78,8 @@ class BlockOperator(Operator): res.append(prod) return BlockDataContainer(*res, shape=shape) - def adjoint(self, x, out=None): - shape = self.get_output_shape(x.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(col,row).adjoint(x.get_item(row)) - else: - prod += self.get_item(col,row).adjoint(x.get_item(row)) - res.append(prod) - return BlockDataContainer(*res, shape=shape) - def get_output_shape(self, xshape, adjoint=False): + print ("get_output_shape", self.shape, xshape) sshape = self.shape[1] oshape = self.shape[0] if adjoint: @@ -98,8 +89,27 @@ class BlockOperator(Operator): raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) return (oshape, xshape[-1]) - - + def __rmul__(self, scalar): + '''Defines the left multiplication with a scalar + + Args: scalar (number or iterable containing numbers): + + Returns: a block operator with Scaled Operators inside''' + if isinstance (scalar, list) or isinstance(scalar, tuple) or \ + isinstance(scalar, numpy.ndarray): + if len(scalar) != len(self.operators): + raise ValueError('dimensions of scalars and operators do not match') + scalars = scalar + else: + scalars = [scalar for _ in self.operators] + # create a list of ScaledOperator-s + ops = [ v * op for v,op in zip(scalars, self.operators)] + return BlockOperator(*ops, shape=self.shape) + def T(self): + '''Return the transposed of self''' + shape = (self.shape[1], self.shape[0]) + return type(self)(*self.operators, shape=shape) + class BlockLinearOperator(BlockOperator): '''Class to hold a block operator diff --git 
a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py index 29dacb8..a47bec2 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py @@ -4,12 +4,8 @@ Created on Thu Feb 14 12:36:40 2019 @author: ofn77899 """ -#from ccpi.optimisation.ops import Operator import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer -from ccpi.optimisation.operators import Operator, LinearOperator +from ccpi.optimisation.operators import BlockOperator diff --git a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py index d0e7804..e19304f 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/LinearOperator.py @@ -7,8 +7,11 @@ Created on Tue Mar 5 15:57:52 2019 from ccpi.optimisation.operators import Operator + class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' + '''A Linear Operator that maps from a space X <-> Y''' + def __init__(self): + super(LinearOperator, self).__init__() def is_linear(self): '''Returns if the operator is linear''' return True diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py index 11b9b87..95082f4 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py @@ -4,20 +4,15 @@ Created on Tue Mar 5 15:55:56 2019 @author: ofn77899 """ -from ccpi.framework,operators import ScaledOperator +from ccpi.optimisation.operators import ScaledOperator class Operator(object): '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 def is_linear(self): '''Returns if the operator is linear''' return False def direct(self,x, out=None): raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError def norm(self): raise NotImplementedError def range_geometry(self): @@ -26,4 +21,3 @@ class Operator(object): raise NotImplementedError def __rmul__(self, scalar): return ScaledOperator(self, scalar) - diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py index 1181604..adcc6d9 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -1,10 +1,10 @@ -from ccpi.optimisation.operators import LinearOperator from numbers import Number +import numpy -class ScaledOperator(LinearOperator): +class ScaledOperator(object): '''ScaledOperator - A class to represent the scalar multiplication of an Operator with a scalar. + A class to represent the scalar multiplication of an Operator with a scalar. It holds an operator and a scalar. Basically it returns the multiplication of the result of direct and adjoint of the operator with the scalar. For the rest it behaves like the operator it holds. 
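(A short sketch of the scaling behaviour documented above, modelled on test_Operator.py from this series; note that the hunk below also changes norm() so that it returns abs(scalar) * operator.norm().)

from ccpi.framework import ImageGeometry
from ccpi.optimisation.ops import TomoIdentity

ig = ImageGeometry(10, 20, 30)
op = TomoIdentity(ig)
img = ig.allocate() + 1

sop = 0.5 * op             # Operator.__rmul__ wraps op in a ScaledOperator
y = sop.direct(img)        # equal to 0.5 * op.direct(img)
# adjoint() is only available when the wrapped operator is linear.
# Scaling a BlockOperator scales every operator it holds, e.g.
#   K = 0.5 * BlockOperator(op, op)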
@@ -22,8 +22,9 @@ class ScaledOperator(LinearOperator): sop.domain_geometry() = operator.domain_geometry() ''' def __init__(self, operator, scalar): + super(ScaledOperator, self).__init__() if not isinstance (scalar, Number): - raise TypeError('expected scalar: got {}'.format(type(scalar)) + raise TypeError('expected scalar: got {}'.format(type(scalar))) self.scalar = scalar self.operator = operator def direct(self, x, out=None): @@ -31,12 +32,11 @@ class ScaledOperator(LinearOperator): def adjoint(self, x, out=None): if self.operator.is_linear(): return self.scalar * self.operator.adjoint(x, out=out) - def size(self): - return self.operator.size() + else: + raise TypeError('Operator is not linear') def norm(self): - return self.operator.norm() + return numpy.abs(self.scalar) * self.operator.norm() def range_geometry(self): return self.operator.range_geometry() def domain_geometry(self): return self.operator.domain_geometry() - \ No newline at end of file diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 824abf6..c14a101 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -16,8 +16,80 @@ from ccpi.framework import ImageData, AcquisitionData #from ccpi.optimisation.algorithms import GradientDescent from ccpi.framework import BlockDataContainer #from ccpi.optimisation.Algorithms import CGLS +import functools class TestBlockDataContainer(unittest.TestCase): + def test_BlockDataContainerShape(self): + print ("test block data container") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + cp1 = BlockDataContainer(data2,data3) + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp0.T.shape == transpose_shape) + def test_BlockDataContainerShapeArithmetic(self): + print ("test block data container") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(12,42,55,32) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + #cp1 = BlockDataContainer(data2,data3) + cp1 = cp0 + 1 + self.assertTrue(cp1.shape == cp0.shape) + cp1 = cp0.T + 1 + + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T - 1 + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = (cp0.T + 1)*2 + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = (cp0.T + 1)/2 + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.power(2.2) + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.maximum(3) + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.abs() + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.sign() + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.sqrt() + transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + + cp1 = cp0.T.conjugate() + 
transpose_shape = (cp0.shape[1], cp0.shape[0]) + self.assertTrue(cp1.shape == transpose_shape) + def test_BlockDataContainer(self): print ("test block data container") ig0 = ImageGeometry(2,3,4) @@ -198,7 +270,9 @@ class TestBlockDataContainer(unittest.TestCase): numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) s = cp0.sum() - numpy.testing.assert_almost_equal(s[0], 0, decimal=4) + size = functools.reduce(lambda x,y: x*y, data1.shape, 1) + print ("size" , size) + numpy.testing.assert_almost_equal(s, 0 + size, decimal=4) s0 = 1 s1 = 1 for i in cp0.get_item(0,0).shape: @@ -206,5 +280,5 @@ class TestBlockDataContainer(unittest.TestCase): for i in cp0.get_item(1,0).shape: s1 *= i - numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) + #numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) \ No newline at end of file diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py new file mode 100644 index 0000000..f3b057b --- /dev/null +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -0,0 +1,78 @@ +import unittest +from ccpi.optimisation.operators import BlockOperator +from ccpi.framework import BlockDataContainer +from ccpi.optimisation.ops import TomoIdentity +from ccpi.framework import ImageGeometry, ImageData +import numpy + +class TestBlockOperator(unittest.TestCase): + + def test_BlockOperator(self): + ig = [ ImageGeometry(10,20,30) , \ + ImageGeometry(11,21,31) , \ + ImageGeometry(12,22,32) ] + x = [ g.allocate() for g in ig ] + ops = [ TomoIdentity(g) for g in ig ] + + K = BlockOperator(*ops) + #X = BlockDataContainer(*x).T + 1 + X = BlockDataContainer(x[0]) + Y = K.direct(X) + #self.assertTrue(Y.shape == X.shape) + + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),X.get_item(0).as_array()) + numpy.testing.assert_array_equal(Y.get_item(1).as_array(),X.get_item(0).as_array()) + #numpy.testing.assert_array_equal(Y.get_item(2).as_array(),X.get_item(2).as_array()) + + + def test_ScaledBlockOperatorSingleScalar(self): + ig = [ ImageGeometry(10,20,30) , \ + ImageGeometry(11,21,31) , \ + ImageGeometry(12,22,32) ] + x = [ g.allocate() for g in ig ] + ops = [ TomoIdentity(g) for g in ig ] + + scalar = 0.5 + K = 0.5 * BlockOperator(*ops) + X = BlockDataContainer(*x) + 1 + print (X.shape) + X = BlockDataContainer(*x).T + 1 + print (X.shape, K.shape) + Y = K.direct(X) + self.assertTrue(Y.shape == X.shape) + + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),scalar * X.get_item(0).as_array()) + numpy.testing.assert_array_equal(Y.get_item(1).as_array(),scalar * X.get_item(1).as_array()) + numpy.testing.assert_array_equal(Y.get_item(2).as_array(),scalar * X.get_item(2).as_array()) + + def test_ScaledBlockOperatorScalarList(self): + ig = [ImageGeometry(10, 20, 30), + ImageGeometry(11, 21, 31), + ImageGeometry(12, 22, 32)] + x = [g.allocate() for g in ig] + ops = [TomoIdentity(g) for g in ig] + + scalar = [i*1.2 for i, el in enumerate(ig)] + + K = scalar * BlockOperator(*ops) + X = BlockDataContainer(*x).T + 1 + Y = K.direct(X) + self.assertTrue(Y.shape == X.shape) + + numpy.testing.assert_array_equal(Y.get_item(0).as_array(), + scalar[0] * X.get_item(0).as_array()) + numpy.testing.assert_array_equal(Y.get_item(1).as_array(), + scalar[1] * X.get_item(1).as_array()) + numpy.testing.assert_array_equal(Y.get_item(2).as_array(), + scalar[2] 
* X.get_item(2).as_array()) + + + def test_TomoIdentity(self): + ig = ImageGeometry(10,20,30) + img = ig.allocate() + self.assertTrue(img.shape == (30,20,10)) + self.assertEqual(img.sum(), 0) + Id = TomoIdentity(ig) + y = Id.direct(img) + numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py new file mode 100644 index 0000000..46e8c7c --- /dev/null +++ b/Wrappers/Python/test/test_Operator.py @@ -0,0 +1,24 @@ +import unittest +#from ccpi.optimisation.operators import Operator +from ccpi.optimisation.ops import TomoIdentity +from ccpi.framework import ImageGeometry, ImageData +import numpy + +class TestOperator(unittest.TestCase): + def test_ScaledOperator(self): + ig = ImageGeometry(10,20,30) + img = ig.allocate() + scalar = 0.5 + sid = scalar * TomoIdentity(ig) + numpy.testing.assert_array_equal(scalar * img.as_array(), sid.direct(img).as_array()) + + + def test_TomoIdentity(self): + ig = ImageGeometry(10,20,30) + img = ig.allocate() + self.assertTrue(img.shape == (30,20,10)) + self.assertEqual(img.sum(), 0) + Id = TomoIdentity(ig) + y = Id.direct(img) + numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + -- cgit v1.2.3 From 1c38b87270e0c08c085418d95e852bec7e3786ce Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 7 Mar 2019 22:47:25 +0000 Subject: first working block framework --- .../Python/ccpi/framework/BlockDataContainer.py | 40 +++++---- .../ccpi/optimisation/operators/BlockOperator.py | 15 ++-- .../optimisation/operators/BlockScaledOperator.py | 81 +++++++++++++---- Wrappers/Python/test/test_BlockDataContainer.py | 4 +- Wrappers/Python/test/test_BlockOperator.py | 100 ++++++++++++++------- 5 files changed, 164 insertions(+), 76 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index cbce844..8152bff 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -16,17 +16,24 @@ import functools #from ccpi.optimisation.operators import Operator, LinearOperator class BlockDataContainer(object): - '''Class to hold DataContainers as blocks''' + '''Class to hold DataContainers as column vector''' __array_priority__ = 1 def __init__(self, *args, **kwargs): - '''containers must be passed row by row''' + '''containers must be consistent in shape''' self.containers = args + for i, co in enumerate(args): + if i == 0: + shape = co.shape + else: + if shape != co.shape: + raise ValueError('Expected shape is {} got {}'.format(shape, co.shape)) self.index = 0 - shape = kwargs.get('shape', None) - if shape is None: - shape = (len(args),1) + #shape = kwargs.get('shape', None) + #if shape is None: + # shape = (len(args),1) + shape = (len(args),1) self.shape = shape - print (self.shape) + #print (self.shape) n_elements = functools.reduce(lambda x,y: x*y, shape, 1) if len(args) != n_elements: raise ValueError( @@ -65,14 +72,12 @@ class BlockDataContainer(object): elif isinstance(other, numpy.ndarray): return self.shape == other.shape return len(self.containers) == len(other.containers) - def get_item(self, row, col=0): + def get_item(self, row): if row > self.shape[0]: raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.containers[index] + return 
self.containers[row] + def __getitem__(self, row): + return self.get_item(row) def add(self, other, *args, **kwargs): assert self.is_compatible(other) @@ -96,6 +101,7 @@ class BlockDataContainer(object): shape=self.shape) def multiply(self, other, *args, **kwargs): + print ("BlockDataContainer" , other) self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): @@ -302,8 +308,8 @@ class BlockDataContainer(object): def __itruediv__(self, other): '''Inline truedivision''' return self.__idiv__(other) - @property - def T(self): - '''return the transposed of self''' - shape = (self.shape[1], self.shape[0]) - return type(self)(*self.containers, shape=shape) + #@property + #def T(self): + # '''return the transposed of self''' + # shape = (self.shape[1], self.shape[0]) + # return type(self)(*self.containers, shape=shape) diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index a83dc8a..a3ba93e 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -10,7 +10,7 @@ from numbers import Number import functools from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer from ccpi.optimisation.operators import Operator, LinearOperator - +from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator class BlockOperator(Operator): @@ -18,7 +18,13 @@ class BlockOperator(Operator): Class to hold a number of Operators in a block. User may specify the shape of the block, by default is a row vector + + BlockOperators have a generic shape M x N, and when applied on an + Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. + Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with + N rows and 1 column. 
''' + __array_priority__ = 1 def __init__(self, *args, **kwargs): ''' Class creator @@ -66,11 +72,9 @@ class BlockOperator(Operator): def direct(self, x, out=None): shape = self.get_output_shape(x.shape) - print ("direct output shape", shape) res = [] for row in range(self.shape[0]): for col in range(self.shape[1]): - print ("row {} col {}".format(row, col)) if col == 0: prod = self.get_item(row,col).direct(x.get_item(col)) else: @@ -79,7 +83,6 @@ class BlockOperator(Operator): return BlockDataContainer(*res, shape=shape) def get_output_shape(self, xshape, adjoint=False): - print ("get_output_shape", self.shape, xshape) sshape = self.shape[1] oshape = self.shape[0] if adjoint: @@ -104,7 +107,9 @@ class BlockOperator(Operator): scalars = [scalar for _ in self.operators] # create a list of ScaledOperator-s ops = [ v * op for v,op in zip(scalars, self.operators)] - return BlockOperator(*ops, shape=self.shape) + #return BlockScaledOperator(self, scalars ,shape=self.shape) + return type(self)(*ops, shape=self.shape) + @property def T(self): '''Return the transposed of self''' shape = (self.shape[1], self.shape[0]) diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py index a47bec2..aeb6c53 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockScaledOperator.py @@ -1,22 +1,67 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" +from numbers import Number import numpy -from ccpi.optimisation.operators import BlockOperator +from ccpi.optimisation.operators import ScaledOperator +import functools + +class BlockScaledOperator(ScaledOperator): + '''ScaledOperator + A class to represent the scalar multiplication of an Operator with a scalar. + It holds an operator and a scalar. Basically it returns the multiplication + of the result of direct and adjoint of the operator with the scalar. + For the rest it behaves like the operator it holds. 
- -class BlockScaledOperator(BlockOperator): - def __init__(self, *args, **kwargs): - super(BlockScaledOperator, self).__init__(*args, **kwargs) - scalar = kwargs.get('scalar',1) - if isinstance (scalar, list) or isinstance(scalar, tuple) or \ - isinstance(scalar, numpy.ndarray): - if len(scalars) != len(self.operators): - raise ValueError('dimensions of scalars and operators do not match') + Args: + operator (Operator): a Operator or LinearOperator + scalar (Number): a scalar multiplier + Example: + The scaled operator behaves like the following: + sop = ScaledOperator(operator, scalar) + sop.direct(x) = scalar * operator.direct(x) + sop.adjoint(x) = scalar * operator.adjoint(x) + sop.norm() = operator.norm() + sop.range_geometry() = operator.range_geometry() + sop.domain_geometry() = operator.domain_geometry() + ''' + def __init__(self, operator, scalar, shape=None): + if shape is None: + shape = operator.shape + + if isinstance(scalar, (list, tuple, numpy.ndarray)): + size = functools.reduce(lambda x,y:x*y, shape, 1) + if len(scalar) != size: + raise ValueError('Scalar and operators size do not match: {}!={}' + .format(len(scalar), len(operator))) + self.scalar = scalar[:] + print ("BlockScaledOperator ", self.scalar) + elif isinstance (scalar, Number): + self.scalar = scalar + else: + raise TypeError('expected scalar to be a number of an iterable: got {}'.format(type(scalar))) + self.operator = operator + self.shape = shape + def direct(self, x, out=None): + print ("BlockScaledOperator self.scalar", self.scalar) + #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array()) + return self.scalar * (self.operator.direct(x, out=out)) + def adjoint(self, x, out=None): + if self.operator.is_linear(): + return self.scalar * self.operator.adjoint(x, out=out) else: - scalar = [scalar for _ in self.operators] - self.operators = [v * op for op in self.operators] \ No newline at end of file + raise TypeError('Operator is not linear') + def norm(self): + return numpy.abs(self.scalar) * self.operator.norm() + def range_geometry(self): + return self.operator.range_geometry() + def domain_geometry(self): + return self.operator.domain_geometry() + @property + def T(self): + '''Return the transposed of self''' + #print ("transpose before" , self.shape) + #shape = (self.shape[1], self.shape[0]) + ##self.shape = shape + ##self.operator.shape = shape + #print ("transpose" , shape) + #return self + return type(self)(self.operator.T, self.scalar) \ No newline at end of file diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index c14a101..190fb21 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -19,7 +19,7 @@ from ccpi.framework import BlockDataContainer import functools class TestBlockDataContainer(unittest.TestCase): - def test_BlockDataContainerShape(self): + def skiptest_BlockDataContainerShape(self): print ("test block data container") ig0 = ImageGeometry(2,3,4) ig1 = ImageGeometry(12,42,55,32) @@ -34,7 +34,7 @@ class TestBlockDataContainer(unittest.TestCase): cp1 = BlockDataContainer(data2,data3) transpose_shape = (cp0.shape[1], cp0.shape[0]) self.assertTrue(cp0.T.shape == transpose_shape) - def test_BlockDataContainerShapeArithmetic(self): + def skiptest_BlockDataContainerShapeArithmetic(self): print ("test block data container") ig0 = ImageGeometry(2,3,4) ig1 = ImageGeometry(12,42,55,32) diff --git a/Wrappers/Python/test/test_BlockOperator.py 
b/Wrappers/Python/test/test_BlockOperator.py index f3b057b..1896978 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -8,64 +8,96 @@ import numpy class TestBlockOperator(unittest.TestCase): def test_BlockOperator(self): + print ("test_BlockOperator") ig = [ ImageGeometry(10,20,30) , \ - ImageGeometry(11,21,31) , \ - ImageGeometry(12,22,32) ] + ImageGeometry(10,20,30) , \ + ImageGeometry(10,20,30) ] x = [ g.allocate() for g in ig ] ops = [ TomoIdentity(g) for g in ig ] K = BlockOperator(*ops) - #X = BlockDataContainer(*x).T + 1 X = BlockDataContainer(x[0]) Y = K.direct(X) - #self.assertTrue(Y.shape == X.shape) + self.assertTrue(Y.shape == K.shape) numpy.testing.assert_array_equal(Y.get_item(0).as_array(),X.get_item(0).as_array()) numpy.testing.assert_array_equal(Y.get_item(1).as_array(),X.get_item(0).as_array()) #numpy.testing.assert_array_equal(Y.get_item(2).as_array(),X.get_item(2).as_array()) - + + X = BlockDataContainer(*x) + 1 + Y = K.T.direct(X) + # K.T (1,3) X (3,1) => output shape (1,1) + self.assertTrue(Y.shape == (1,1)) + zero = numpy.zeros(X.get_item(0).shape) + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),len(x)+zero) + def test_ScaledBlockOperatorSingleScalar(self): ig = [ ImageGeometry(10,20,30) , \ - ImageGeometry(11,21,31) , \ - ImageGeometry(12,22,32) ] + ImageGeometry(10,20,30) , \ + ImageGeometry(10,20,30) ] x = [ g.allocate() for g in ig ] ops = [ TomoIdentity(g) for g in ig ] + val = 1 + # test limit as non Scaled + scalar = 1 + k = BlockOperator(*ops) + K = scalar * k + X = BlockDataContainer(*x) + val + + Y = K.T.direct(X) + self.assertTrue(Y.shape == (1,1)) + zero = numpy.zeros(X.get_item(0).shape) + xx = numpy.asarray([val for _ in x]) + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),((scalar*xx).sum()+zero)) + scalar = 0.5 - K = 0.5 * BlockOperator(*ops) + k = BlockOperator(*ops) + K = scalar * k X = BlockDataContainer(*x) + 1 - print (X.shape) - X = BlockDataContainer(*x).T + 1 - print (X.shape, K.shape) - Y = K.direct(X) - self.assertTrue(Y.shape == X.shape) - - numpy.testing.assert_array_equal(Y.get_item(0).as_array(),scalar * X.get_item(0).as_array()) - numpy.testing.assert_array_equal(Y.get_item(1).as_array(),scalar * X.get_item(1).as_array()) - numpy.testing.assert_array_equal(Y.get_item(2).as_array(),scalar * X.get_item(2).as_array()) - + + Y = K.T.direct(X) + self.assertTrue(Y.shape == (1,1)) + zero = numpy.zeros(X.get_item(0).shape) + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),scalar*(len(x)+zero)) + + def test_ScaledBlockOperatorScalarList(self): - ig = [ImageGeometry(10, 20, 30), - ImageGeometry(11, 21, 31), - ImageGeometry(12, 22, 32)] - x = [g.allocate() for g in ig] - ops = [TomoIdentity(g) for g in ig] + ig = [ ImageGeometry(2,3) , \ + #ImageGeometry(10,20,30) , \ + ImageGeometry(2,3 ) ] + x = [ g.allocate() for g in ig ] + ops = [ TomoIdentity(g) for g in ig ] - scalar = [i*1.2 for i, el in enumerate(ig)] - K = scalar * BlockOperator(*ops) - X = BlockDataContainer(*x).T + 1 - Y = K.direct(X) - self.assertTrue(Y.shape == X.shape) + # test limit as non Scaled + scalar = numpy.asarray([1 for _ in x]) + k = BlockOperator(*ops) + K = scalar * k + val = 1 + X = BlockDataContainer(*x) + val + + Y = K.T.direct(X) + self.assertTrue(Y.shape == (1,1)) + zero = numpy.zeros(X.get_item(0).shape) + xx = numpy.asarray([val for _ in x]) + numpy.testing.assert_array_equal(Y.get_item(0).as_array(),(scalar*xx).sum()+zero) + + scalar = numpy.asarray([i+1 for i,el in 
enumerate(x)]) + #scalar = numpy.asarray([6,0]) + k = BlockOperator(*ops) + K = scalar * k + X = BlockDataContainer(*x) + val + Y = K.T.direct(X) + self.assertTrue(Y.shape == (1,1)) + zero = numpy.zeros(X.get_item(0).shape) + xx = numpy.asarray([val for _ in x]) + numpy.testing.assert_array_equal(Y.get_item(0).as_array(), - scalar[0] * X.get_item(0).as_array()) - numpy.testing.assert_array_equal(Y.get_item(1).as_array(), - scalar[1] * X.get_item(1).as_array()) - numpy.testing.assert_array_equal(Y.get_item(2).as_array(), - scalar[2] * X.get_item(2).as_array()) - + (scalar*xx).sum()+zero) + def test_TomoIdentity(self): ig = ImageGeometry(10,20,30) -- cgit v1.2.3 From 26546cb76c36aba7167a2a7ac705e58c14ff64cd Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 7 Mar 2019 23:50:21 +0000 Subject: py27 fixes --- .../ccpi/optimisation/operators/BlockOperator.py | 353 +++++++++++---------- Wrappers/Python/test/test_BlockDataContainer.py | 178 +++++------ Wrappers/Python/test/test_BlockOperator.py | 180 +++++++++++ 3 files changed, 446 insertions(+), 265 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index a3ba93e..c9bf794 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -167,183 +167,184 @@ class BlockLinearOperator(BlockOperator): if __name__ == '__main__': + pass #from ccpi.optimisation.Algorithms import GradientDescent - from ccpi.optimisation.algorithms import CGLS +# from ccpi.optimisation.algorithms import CGLS - from ccpi.plugins.ops import CCPiProjectorSimple - from ccpi.optimisation.ops import PowerMethodNonsquare - from ccpi.optimisation.ops import TomoIdentity - from ccpi.optimisation.funcs import Norm2sq, Norm1 - from ccpi.framework import ImageGeometry, AcquisitionGeometry - from ccpi.optimisation.Algorithms import GradientDescent - #from ccpi.optimisation.Algorithms import CGLS - import matplotlib.pyplot as plt +# from ccpi.plugins.ops import CCPiProjectorSimple +# from ccpi.optimisation.ops import PowerMethodNonsquare +# from ccpi.optimisation.ops import TomoIdentity +# from ccpi.optimisation.funcs import Norm2sq, Norm1 +# from ccpi.framework import ImageGeometry, AcquisitionGeometry +# from ccpi.optimisation.Algorithms import GradientDescent +# #from ccpi.optimisation.Algorithms import CGLS +# import matplotlib.pyplot as plt - # Set up phantom size N x N x vert by creating ImageGeometry, initialising the - # ImageData object with this geometry and empty array and finally put some - # data into its array, and display one slice as image. 
- - # Image parameters - N = 128 - vert = 4 - - # Set up image geometry - ig = ImageGeometry(voxel_num_x=N, - voxel_num_y=N, - voxel_num_z=vert) - - # Set up empty image data - Phantom = ImageData(geometry=ig, - dimension_labels=['horizontal_x', - 'horizontal_y', - 'vertical']) - Phantom += 0.05 - # Populate image data by looping over and filling slices - i = 0 - while i < vert: - if vert > 1: - x = Phantom.subset(vertical=i).array - else: - x = Phantom.array - x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 - x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 - if vert > 1 : - Phantom.fill(x, vertical=i) - i += 1 - - - perc = 0.02 - # Set up empty image data - noise = ImageData(numpy.random.normal(loc = 0.04 , - scale = perc , - size = Phantom.shape), geometry=ig, - dimension_labels=['horizontal_x', - 'horizontal_y', - 'vertical']) - Phantom += noise - - # Set up AcquisitionGeometry object to hold the parameters of the measurement - # setup geometry: # Number of angles, the actual angles from 0 to - # pi for parallel beam, set the width of a detector - # pixel relative to an object pixe and the number of detector pixels. - angles_num = 20 - det_w = 1.0 - det_num = N - - angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ - 180/numpy.pi - - # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, - # horz detector pixel size, vert detector pixel count, - # vert detector pixel size. - ag = AcquisitionGeometry('parallel', - '3D', - angles, - N, - det_w, - vert, - det_w) - - # Set up Operator object combining the ImageGeometry and AcquisitionGeometry - # wrapping calls to CCPi projector. - A = CCPiProjectorSimple(ig, ag) - - # Forward and backprojection are available as methods direct and adjoint. Here - # generate test data b and some noise - - b = A.direct(Phantom) - - - #z = A.adjoint(b) - - - # Using the test data b, different reconstruction methods can now be set up as - # demonstrated in the rest of this file. In general all methods need an initial - # guess and some algorithm options to be set. Note that 100 iterations for - # some of the methods is a very low number and 1000 or 10000 iterations may be - # needed if one wants to obtain a converged solution. 
- x_init = ImageData(geometry=ig, - dimension_labels=['horizontal_x','horizontal_y','vertical']) - X_init = BlockDataContainer(x_init) - B = BlockDataContainer(b, - ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) - - # setup a tomo identity - Ibig = 1e5 * TomoIdentity(geometry=ig) - Ismall = 1e-5 * TomoIdentity(geometry=ig) - - # composite operator - Kbig = BlockOperator(A, Ibig, shape=(2,1)) - Ksmall = BlockOperator(A, Ismall, shape=(2,1)) - - #out = K.direct(X_init) - - f = Norm2sq(Kbig,B) - f.L = 0.00003 - - fsmall = Norm2sq(Ksmall,B) - f.L = 0.00003 - - simplef = Norm2sq(A, b) - simplef.L = 0.00003 - - gd = GradientDescent( x_init=x_init, objective_function=simplef, - rate=simplef.L) - gd.max_iteration = 10 - - cg = CGLS() - cg.set_up(X_init, Kbig, B ) - cg.max_iteration = 1 - - cgsmall = CGLS() - cgsmall.set_up(X_init, Ksmall, B ) - cgsmall.max_iteration = 1 - - - cgs = CGLS() - cgs.set_up(x_init, A, b ) - cgs.max_iteration = 6 -# - #out.__isub__(B) - #out2 = K.adjoint(out) - - #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - for _ in gd: - print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) - - cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - - cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - - cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) -# for _ in cg: -# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) -# -# fig = plt.figure() -# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) -# plt.title('Composite CGLS') -# plt.show() -# -# for _ in cgs: -# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) -# - fig = plt.figure() - plt.subplot(1,5,1) - plt.imshow(Phantom.subset(vertical=0).as_array()) - plt.title('Simulated Phantom') - plt.subplot(1,5,2) - plt.imshow(gd.get_output().subset(vertical=0).as_array()) - plt.title('Simple Gradient Descent') - plt.subplot(1,5,3) - plt.imshow(cgs.get_output().subset(vertical=0).as_array()) - plt.title('Simple CGLS') - plt.subplot(1,5,4) - plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) - plt.title('Composite CGLS\nbig lambda') - plt.subplot(1,5,5) - plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) - plt.title('Composite CGLS\nsmall lambda') - plt.show() +# # Set up phantom size N x N x vert by creating ImageGeometry, initialising the +# # ImageData object with this geometry and empty array and finally put some +# # data into its array, and display one slice as image. 
+ +# # Image parameters +# N = 128 +# vert = 4 + +# # Set up image geometry +# ig = ImageGeometry(voxel_num_x=N, +# voxel_num_y=N, +# voxel_num_z=vert) + +# # Set up empty image data +# Phantom = ImageData(geometry=ig, +# dimension_labels=['horizontal_x', +# 'horizontal_y', +# 'vertical']) +# Phantom += 0.05 +# # Populate image data by looping over and filling slices +# i = 0 +# while i < vert: +# if vert > 1: +# x = Phantom.subset(vertical=i).array +# else: +# x = Phantom.array +# x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +# x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 +# if vert > 1 : +# Phantom.fill(x, vertical=i) +# i += 1 + + +# perc = 0.02 +# # Set up empty image data +# noise = ImageData(numpy.random.normal(loc = 0.04 , +# scale = perc , +# size = Phantom.shape), geometry=ig, +# dimension_labels=['horizontal_x', +# 'horizontal_y', +# 'vertical']) +# Phantom += noise + +# # Set up AcquisitionGeometry object to hold the parameters of the measurement +# # setup geometry: # Number of angles, the actual angles from 0 to +# # pi for parallel beam, set the width of a detector +# # pixel relative to an object pixe and the number of detector pixels. +# angles_num = 20 +# det_w = 1.0 +# det_num = N + +# angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ +# 180/numpy.pi + +# # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, +# # horz detector pixel size, vert detector pixel count, +# # vert detector pixel size. +# ag = AcquisitionGeometry('parallel', +# '3D', +# angles, +# N, +# det_w, +# vert, +# det_w) + +# # Set up Operator object combining the ImageGeometry and AcquisitionGeometry +# # wrapping calls to CCPi projector. +# A = CCPiProjectorSimple(ig, ag) + +# # Forward and backprojection are available as methods direct and adjoint. Here +# # generate test data b and some noise + +# b = A.direct(Phantom) + + +# #z = A.adjoint(b) + + +# # Using the test data b, different reconstruction methods can now be set up as +# # demonstrated in the rest of this file. In general all methods need an initial +# # guess and some algorithm options to be set. Note that 100 iterations for +# # some of the methods is a very low number and 1000 or 10000 iterations may be +# # needed if one wants to obtain a converged solution. 
+# x_init = ImageData(geometry=ig, +# dimension_labels=['horizontal_x','horizontal_y','vertical']) +# X_init = BlockDataContainer(x_init) +# B = BlockDataContainer(b, +# ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + +# # setup a tomo identity +# Ibig = 1e5 * TomoIdentity(geometry=ig) +# Ismall = 1e-5 * TomoIdentity(geometry=ig) + +# # composite operator +# Kbig = BlockOperator(A, Ibig, shape=(2,1)) +# Ksmall = BlockOperator(A, Ismall, shape=(2,1)) + +# #out = K.direct(X_init) + +# f = Norm2sq(Kbig,B) +# f.L = 0.00003 + +# fsmall = Norm2sq(Ksmall,B) +# f.L = 0.00003 + +# simplef = Norm2sq(A, b) +# simplef.L = 0.00003 + +# gd = GradientDescent( x_init=x_init, objective_function=simplef, +# rate=simplef.L) +# gd.max_iteration = 10 + +# cg = CGLS() +# cg.set_up(X_init, Kbig, B ) +# cg.max_iteration = 1 + +# cgsmall = CGLS() +# cgsmall.set_up(X_init, Ksmall, B ) +# cgsmall.max_iteration = 1 + + +# cgs = CGLS() +# cgs.set_up(x_init, A, b ) +# cgs.max_iteration = 6 +# # +# #out.__isub__(B) +# #out2 = K.adjoint(out) + +# #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + +# for _ in gd: +# print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + +# cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + +# cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + +# cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +# cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +# # for _ in cg: +# # print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) +# # +# # fig = plt.figure() +# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# # plt.title('Composite CGLS') +# # plt.show() +# # +# # for _ in cgs: +# # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) +# # +# fig = plt.figure() +# plt.subplot(1,5,1) +# plt.imshow(Phantom.subset(vertical=0).as_array()) +# plt.title('Simulated Phantom') +# plt.subplot(1,5,2) +# plt.imshow(gd.get_output().subset(vertical=0).as_array()) +# plt.title('Simple Gradient Descent') +# plt.subplot(1,5,3) +# plt.imshow(cgs.get_output().subset(vertical=0).as_array()) +# plt.title('Simple CGLS') +# plt.subplot(1,5,4) +# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# plt.title('Composite CGLS\nbig lambda') +# plt.subplot(1,5,5) +# plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) +# plt.title('Composite CGLS\nsmall lambda') +# plt.show() diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 190fb21..ef11a82 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -21,7 +21,7 @@ import functools class TestBlockDataContainer(unittest.TestCase): def skiptest_BlockDataContainerShape(self): print ("test block data container") - ig0 = ImageGeometry(2,3,4) + ig0 = ImageGeometry(12,42,55,32) ig1 = ImageGeometry(12,42,55,32) data0 = ImageData(geometry=ig0) @@ -37,7 +37,7 @@ class TestBlockDataContainer(unittest.TestCase): def skiptest_BlockDataContainerShapeArithmetic(self): print ("test block data container") ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(12,42,55,32) + ig1 = ImageGeometry(2,3,4) data0 = ImageData(geometry=ig0) data1 = ImageData(geometry=ig1) + 1 @@ -93,7 +93,7 @@ class TestBlockDataContainer(unittest.TestCase): def test_BlockDataContainer(self): print ("test 
block data container") ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(12,42,55,32) + ig1 = ImageGeometry(2,3,4) data0 = ImageData(geometry=ig0) data1 = ImageData(geometry=ig1) + 1 @@ -108,166 +108,166 @@ class TestBlockDataContainer(unittest.TestCase): print (a[0][0].shape) #cp2 = BlockDataContainer(*a) cp2 = cp0.add(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + assert (cp2.get_item(0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1).as_array()[0][0][0] == 4.) cp2 = cp0 + cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 4.) + assert (cp2.get_item(0).as_array()[0][0][0] == 2.) + assert (cp2.get_item(1).as_array()[0][0][0] == 4.) cp2 = cp0 + 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 = cp0 + [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 3., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 3., decimal = 5) cp2 += cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , +3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5) cp2 += 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , +4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +7., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , +4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +7., decimal = 5) cp2 += [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 6., decimal = 5) cp2 = cp0.subtract(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1).as_array()[0][0][0] == -2.) cp2 = cp0 - cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == -2.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(0).as_array()[0][0][0] == -2.) + assert (cp2.get_item(1).as_array()[0][0][0] == -2.) cp2 = cp0 - 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -1. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0, decimal = 5) cp2 = cp0 - [1 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -1. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -1., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -1. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -1., decimal = 5) cp2 -= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -3. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -3. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -4., decimal = 5) cp2 -= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -4. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -5., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -4. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -5., decimal = 5) cp2 -= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -4., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -4., decimal = 5) cp2 = cp0.multiply(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1).as_array()[0][0][0] == 3.) cp2 = cp0 * cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - assert (cp2.get_item(1,0).as_array()[0][0][0] == 3.) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) + assert (cp2.get_item(1).as_array()[0][0][0] == 3.) cp2 = cp0 * 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2, decimal = 5) cp2 = 2 * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2, decimal = 5) cp2 = cp0 * [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 = cp0 * numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. 
, decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 = [3,2] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 = numpy.asarray([3,2]) * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 = [3,2,3] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 2., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) cp2 *= cp1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5) cp2 *= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , +6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5) cp2 *= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -6., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -6., decimal = 5) cp2 = cp0.divide(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1./3., decimal=4) cp2 = cp0/cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1./3., decimal=4) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1./3., decimal=4) cp2 = cp0 / 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5) cp2 = cp0 / [3 ,2] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. 
, decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5) cp2 = cp0 / numpy.asarray([3 ,2]) - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5) cp3 = numpy.asarray([3 ,2]) / (cp0+1) - numpy.testing.assert_almost_equal(cp3.get_item(0,0).as_array()[0][0][0] , 3. , decimal=5) - numpy.testing.assert_almost_equal(cp3.get_item(1,0).as_array()[0][0][0] , 1, decimal = 5) + numpy.testing.assert_almost_equal(cp3.get_item(0).as_array()[0][0][0] , 3. , decimal=5) + numpy.testing.assert_almost_equal(cp3.get_item(1).as_array()[0][0][0] , 1, decimal = 5) cp2 += 1 cp2 /= cp1 # TODO fix inplace division - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 1./2 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1.5/3., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1./2 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 1.5/3., decimal = 5) cp2 /= 1 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0.5 , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0.5 , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 0.5, decimal = 5) cp2 /= [-2,-1] - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , -0.5/2. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , -0.5, decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , -0.5/2. , decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -0.5, decimal = 5) #### cp2 = cp0.power(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4) cp2 = cp0**cp1 - assert (cp2.get_item(0,0).as_array()[0][0][0] == 0.) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + assert (cp2.get_item(0).as_array()[0][0][0] == 0.) 
+ numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4) cp2 = cp0 ** 2 - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0] , 0., decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0] , 1., decimal = 5) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0., decimal=5) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 1., decimal = 5) cp2 = cp0.maximum(cp1) - assert (cp2.get_item(0,0).as_array()[0][0][0] == cp1.get_item(0,0).as_array()[0][0][0]) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], cp2.get_item(1,0).as_array()[0][0][0], decimal=4) + assert (cp2.get_item(0).as_array()[0][0][0] == cp1.get_item(0).as_array()[0][0][0]) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], cp2.get_item(1).as_array()[0][0][0], decimal=4) cp2 = cp0.abs() - numpy.testing.assert_almost_equal(cp2.get_item(0,0).as_array()[0][0][0], 0., decimal=4) - numpy.testing.assert_almost_equal(cp2.get_item(1,0).as_array()[0][0][0], 1., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0], 0., decimal=4) + numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0], 1., decimal=4) cp2 = cp0.subtract(cp1) s = cp2.sign() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], -1., decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(0).as_array()[0][0][0], -1., decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1).as_array()[0][0][0], -1., decimal=4) cp2 = cp0.add(cp1) s = cp2.sqrt() - numpy.testing.assert_almost_equal(s.get_item(0,0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) - numpy.testing.assert_almost_equal(s.get_item(1,0).as_array()[0][0][0], numpy.sqrt(4), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(0).as_array()[0][0][0], numpy.sqrt(2), decimal=4) + numpy.testing.assert_almost_equal(s.get_item(1).as_array()[0][0][0], numpy.sqrt(4), decimal=4) s = cp0.sum() size = functools.reduce(lambda x,y: x*y, data1.shape, 1) @@ -275,9 +275,9 @@ class TestBlockDataContainer(unittest.TestCase): numpy.testing.assert_almost_equal(s, 0 + size, decimal=4) s0 = 1 s1 = 1 - for i in cp0.get_item(0,0).shape: + for i in cp0.get_item(0).shape: s0 *= i - for i in cp0.get_item(1,0).shape: + for i in cp0.get_item(1).shape: s1 *= i #numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py index 1896978..8bd673b 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -108,3 +108,183 @@ class TestBlockOperator(unittest.TestCase): y = Id.direct(img) numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + def skiptest_CGLS_tikhonov(self): + from ccpi.optimisation.algorithms import CGLS + + from ccpi.plugins.ops import CCPiProjectorSimple + from ccpi.optimisation.ops import PowerMethodNonsquare + from ccpi.optimisation.ops import TomoIdentity + from ccpi.optimisation.funcs import Norm2sq, Norm1 + from ccpi.framework import ImageGeometry, AcquisitionGeometry + from ccpi.optimisation.Algorithms import GradientDescent + #from ccpi.optimisation.Algorithms import CGLS + import matplotlib.pyplot as plt + + + # Set up phantom size N x N x vert by creating ImageGeometry, 
initialising the + # ImageData object with this geometry and empty array and finally put some + # data into its array, and display one slice as image. + + # Image parameters + N = 128 + vert = 4 + + # Set up image geometry + ig = ImageGeometry(voxel_num_x=N, + voxel_num_y=N, + voxel_num_z=vert) + + # Set up empty image data + Phantom = ImageData(geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += 0.05 + # Populate image data by looping over and filling slices + i = 0 + while i < vert: + if vert > 1: + x = Phantom.subset(vertical=i).array + else: + x = Phantom.array + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 + if vert > 1 : + Phantom.fill(x, vertical=i) + i += 1 + + + perc = 0.02 + # Set up empty image data + noise = ImageData(numpy.random.normal(loc = 0.04 , + scale = perc , + size = Phantom.shape), geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) + Phantom += noise + + # Set up AcquisitionGeometry object to hold the parameters of the measurement + # setup geometry: # Number of angles, the actual angles from 0 to + # pi for parallel beam, set the width of a detector + # pixel relative to an object pixe and the number of detector pixels. + angles_num = 20 + det_w = 1.0 + det_num = N + + angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ + 180/numpy.pi + + # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, + # horz detector pixel size, vert detector pixel count, + # vert detector pixel size. + ag = AcquisitionGeometry('parallel', + '3D', + angles, + N, + det_w, + vert, + det_w) + + # Set up Operator object combining the ImageGeometry and AcquisitionGeometry + # wrapping calls to CCPi projector. + A = CCPiProjectorSimple(ig, ag) + + # Forward and backprojection are available as methods direct and adjoint. Here + # generate test data b and some noise + + b = A.direct(Phantom) + + + #z = A.adjoint(b) + + + # Using the test data b, different reconstruction methods can now be set up as + # demonstrated in the rest of this file. In general all methods need an initial + # guess and some algorithm options to be set. Note that 100 iterations for + # some of the methods is a very low number and 1000 or 10000 iterations may be + # needed if one wants to obtain a converged solution. 
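The CGLS objects set up below all run the same recursion; a dense NumPy sketch of one standard CGLS formulation (illustrative names, not the ccpi implementation) shows what each iteration does.

import numpy as np

def cgls(A, b, x0, n_iter):
    # Conjugate Gradient Least Squares for min_x ||A x - b||^2,
    # written for dense arrays to mirror what the CGLS algorithm iterates.
    x = x0.copy()
    r = b - A @ x          # residual in data space
    s = A.T @ r            # back-projected residual in image space
    p = s.copy()           # search direction
    gamma = s @ s
    for _ in range(n_iter):
        q = A @ p
        alpha = gamma / (q @ q)
        x += alpha * p
        r -= alpha * q
        s = A.T @ r
        gamma_new = s @ s
        p = s + (gamma_new / gamma) * p
        gamma = gamma_new
    return x

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 20))
b = A @ rng.standard_normal(20)
x = cgls(A, b, np.zeros(20), n_iter=20)
print(np.linalg.norm(A @ x - b))   # residual drops toward zero with the iterations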
+ x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) + X_init = BlockDataContainer(x_init) + B = BlockDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + + # setup a tomo identity + Ibig = 1e5 * TomoIdentity(geometry=ig) + Ismall = 1e-5 * TomoIdentity(geometry=ig) + + # composite operator + Kbig = BlockOperator(A, Ibig, shape=(2,1)) + Ksmall = BlockOperator(A, Ismall, shape=(2,1)) + + #out = K.direct(X_init) + + f = Norm2sq(Kbig,B) + f.L = 0.00003 + + fsmall = Norm2sq(Ksmall,B) + f.L = 0.00003 + + simplef = Norm2sq(A, b) + simplef.L = 0.00003 + + gd = GradientDescent( x_init=x_init, objective_function=simplef, + rate=simplef.L) + gd.max_iteration = 10 + + cg = CGLS() + cg.set_up(X_init, Kbig, B ) + cg.max_iteration = 1 + + cgsmall = CGLS() + cgsmall.set_up(X_init, Ksmall, B ) + cgsmall.max_iteration = 1 + + + cgs = CGLS() + cgs.set_up(x_init, A, b ) + cgs.max_iteration = 6 + # + #out.__isub__(B) + #out2 = K.adjoint(out) + + #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + + #for _ in gd: + # print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + + #cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val)) ) + + #cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + + #cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + #cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + # for _ in cg: + # print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) + # + # fig = plt.figure() + # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) + # plt.title('Composite CGLS') + # plt.show() + # + # for _ in cgs: + # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) + # + fig = plt.figure() + plt.subplot(1,5,1) + plt.imshow(Phantom.subset(vertical=0).as_array()) + plt.title('Simulated Phantom') + plt.subplot(1,5,2) + plt.imshow(gd.get_output().subset(vertical=0).as_array()) + plt.title('Simple Gradient Descent') + plt.subplot(1,5,3) + plt.imshow(cgs.get_output().subset(vertical=0).as_array()) + plt.title('Simple CGLS') + plt.subplot(1,5,4) + plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nbig lambda') + plt.subplot(1,5,5) + plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) + plt.title('Composite CGLS\nsmall lambda') + plt.show() -- cgit v1.2.3 From 5b4e817268009ad63823784c9b51c9bca6a599af Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 8 Mar 2019 02:03:58 +0000 Subject: added CGLS_tikohnov.py --- Wrappers/Python/wip/CGLS_tikhonov.py | 182 +++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 Wrappers/Python/wip/CGLS_tikhonov.py (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py new file mode 100644 index 0000000..7178510 --- /dev/null +++ b/Wrappers/Python/wip/CGLS_tikhonov.py @@ -0,0 +1,182 @@ +from ccpi.optimisation.algorithms import CGLS + +from ccpi.plugins.ops import CCPiProjectorSimple +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData +from ccpi.optimisation.algorithms import GradientDescent +#from ccpi.optimisation.algorithms import CGLS 
+import matplotlib.pyplot as plt +import numpy +from ccpi.framework import BlockDataContainer +from ccpi.optimisation.operators import BlockOperator +from ccpi.optimisation.operators.BlockOperator import BlockLinearOperator + +# Set up phantom size N x N x vert by creating ImageGeometry, initialising the +# ImageData object with this geometry and empty array and finally put some +# data into its array, and display one slice as image. + +# Image parameters +N = 128 +vert = 4 + +# Set up image geometry +ig = ImageGeometry(voxel_num_x=N, + voxel_num_y=N, + voxel_num_z=vert) + +# Set up empty image data +Phantom = ImageData(geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) +Phantom += 0.05 +# Populate image data by looping over and filling slices +i = 0 +while i < vert: + if vert > 1: + x = Phantom.subset(vertical=i).array + else: + x = Phantom.array + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 + if vert > 1 : + Phantom.fill(x, vertical=i) + i += 1 + + +perc = 0.02 +# Set up empty image data +noise = ImageData(numpy.random.normal(loc = 0.04 , + scale = perc , + size = Phantom.shape), geometry=ig, + dimension_labels=['horizontal_x', + 'horizontal_y', + 'vertical']) +Phantom += noise + +# Set up AcquisitionGeometry object to hold the parameters of the measurement +# setup geometry: # Number of angles, the actual angles from 0 to +# pi for parallel beam, set the width of a detector +# pixel relative to an object pixe and the number of detector pixels. +angles_num = 20 +det_w = 1.0 +det_num = N + +angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ + 180/numpy.pi + +# Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, +# horz detector pixel size, vert detector pixel count, +# vert detector pixel size. +ag = AcquisitionGeometry('parallel', + '3D', + angles, + N, + det_w, + vert, + det_w) + +# Set up Operator object combining the ImageGeometry and AcquisitionGeometry +# wrapping calls to CCPi projector. +A = CCPiProjectorSimple(ig, ag) + +# Forward and backprojection are available as methods direct and adjoint. Here +# generate test data b and some noise + +b = A.direct(Phantom) + + +#z = A.adjoint(b) + + +# Using the test data b, different reconstruction methods can now be set up as +# demonstrated in the rest of this file. In general all methods need an initial +# guess and some algorithm options to be set. Note that 100 iterations for +# some of the methods is a very low number and 1000 or 10000 iterations may be +# needed if one wants to obtain a converged solution. 
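The block setup that follows stacks A with a scaled identity, so running CGLS on K = [A; alpha*I] against (b, 0) minimises ||Ax - b||^2 + alpha^2 ||x||^2, i.e. Tikhonov regularisation. A small dense sanity check of that equivalence, in plain NumPy and independent of the framework:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 10))
b = rng.standard_normal(30)
alpha = 10.0

# Stacked least-squares form: minimise ||K x - B||^2 with K = [A; alpha*I], B = [b; 0]
K = np.vstack([A, alpha * np.eye(10)])
B = np.concatenate([b, np.zeros(10)])
x_stacked = np.linalg.lstsq(K, B, rcond=None)[0]

# Equivalent Tikhonov normal equations: (A^T A + alpha^2 I) x = A^T b
x_tikhonov = np.linalg.solve(A.T @ A + alpha**2 * np.eye(10), A.T @ b)

print(np.allclose(x_stacked, x_tikhonov))   # True: both solve the same problem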
+x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) +X_init = BlockDataContainer(x_init) +B = BlockDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + +# setup a tomo identity +Ibig = 1e5 * TomoIdentity(geometry=ig) +Ismall = 1e-5 * TomoIdentity(geometry=ig) + +# composite operator +Kbig = BlockOperator(A, Ibig, shape=(2,1)) +Ksmall = BlockOperator(A, Ismall, shape=(2,1)) + +#out = K.direct(X_init) + +f = Norm2sq(Kbig,B) +f.L = 0.00003 + +fsmall = Norm2sq(Ksmall,B) +f.L = 0.00003 + +simplef = Norm2sq(A, b) +simplef.L = 0.00003 + +gd = GradientDescent( x_init=x_init, objective_function=simplef, + rate=simplef.L) +gd.max_iteration = 10 + +cg = CGLS() +cg.set_up(X_init, Kbig, B ) +cg.max_iteration = 1 + +cgsmall = CGLS() +cgsmall.set_up(X_init, Ksmall, B ) +cgsmall.max_iteration = 1 + + +cgs = CGLS() +cgs.set_up(x_init, A, b ) +cgs.max_iteration = 6 +# # +#out.__isub__(B) +#out2 = K.adjoint(out) + +#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + +for _ in gd: + print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + +cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + +cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) + +cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +# # for _ in cg: +# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) +# # +# # fig = plt.figure() +# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# # plt.title('Composite CGLS') +# # plt.show() +# # +# # for _ in cgs: +# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) +# # +fig = plt.figure() +plt.subplot(1,5,1) +plt.imshow(Phantom.subset(vertical=0).as_array()) +plt.title('Simulated Phantom') +plt.subplot(1,5,2) +plt.imshow(gd.get_output().subset(vertical=0).as_array()) +plt.title('Simple Gradient Descent') +plt.subplot(1,5,3) +plt.imshow(cgs.get_output().subset(vertical=0).as_array()) +plt.title('Simple CGLS') +plt.subplot(1,5,4) +plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +plt.title('Composite CGLS\nbig lambda') +plt.subplot(1,5,5) +plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) +plt.title('Composite CGLS\nsmall lambda') +plt.show() -- cgit v1.2.3 From 4290663849f7f396fa3a6f8b4de8a312372e2556 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 8 Mar 2019 07:47:20 -0500 Subject: working implementation of CGLS Tikhonov example CGLS with Tikhonov regularisation with Block structures --- .../Python/ccpi/framework/BlockDataContainer.py | 19 +++------ .../ccpi/optimisation/operators/BlockOperator.py | 39 +++++++++++-------- Wrappers/Python/wip/CGLS_tikhonov.py | 45 ++++++++++++++-------- 3 files changed, 58 insertions(+), 45 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 8152bff..d509d25 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -19,14 +19,8 @@ class BlockDataContainer(object): '''Class to hold DataContainers as column vector''' __array_priority__ = 1 def __init__(self, *args, **kwargs): - '''containers must be consistent in shape''' + '''''' self.containers = args - for i, co in enumerate(args): - 
if i == 0: - shape = co.shape - else: - if shape != co.shape: - raise ValueError('Expected shape is {} got {}'.format(shape, co.shape)) self.index = 0 #shape = kwargs.get('shape', None) #if shape is None: @@ -38,7 +32,7 @@ class BlockDataContainer(object): if len(args) != n_elements: raise ValueError( 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) + .format(n_elements, len(args))) def __iter__(self): @@ -60,7 +54,6 @@ class BlockDataContainer(object): if isinstance(other, Number): return True elif isinstance(other, list): - # TODO look elements should be numbers for ot in other: if not isinstance(ot, (Number,\ numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ @@ -72,10 +65,12 @@ class BlockDataContainer(object): elif isinstance(other, numpy.ndarray): return self.shape == other.shape return len(self.containers) == len(other.containers) + def get_item(self, row): if row > self.shape[0]: raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) return self.containers[row] + def __getitem__(self, row): return self.get_item(row) @@ -308,8 +303,4 @@ class BlockDataContainer(object): def __itruediv__(self, other): '''Inline truedivision''' return self.__idiv__(other) - #@property - #def T(self): - # '''return the transposed of self''' - # shape = (self.shape[1], self.shape[0]) - # return type(self)(*self.containers, shape=shape) + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index c9bf794..f102f1e 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -81,6 +81,28 @@ class BlockOperator(Operator): prod += self.get_item(row,col).direct(x.get_item(col)) res.append(prod) return BlockDataContainer(*res, shape=shape) + + def adjoint(self, x, out=None): + '''Adjoint operation for the BlockOperator + + BlockOperator may contain both LinearOperator and Operator + This method exists in BlockOperator as it is not known what type of + Operator it will contain. 
+ + Raises: ValueError if the contained Operators are not linear + ''' + if not functools.reduce(lambda x,y: x and y, self.operators.is_linear(), True): + raise ValueError('Not all operators in Block are linear.') + shape = self.get_output_shape(x.shape, adjoint=True) + res = [] + for row in range(self.shape[1]): + for col in range(self.shape[0]): + if col == 0: + prod = self.get_item(col,row).adjoint(x.get_item(row)) + else: + prod += self.get_item(col,row).adjoint(x.get_item(row)) + res.append(prod) + return BlockDataContainer(*res, shape=shape) def get_output_shape(self, xshape, adjoint=False): sshape = self.shape[1] @@ -142,22 +164,7 @@ class BlockLinearOperator(BlockOperator): if not op.is_linear(): raise ValueError('Operator {} must be LinearOperator'.format(i)) super(BlockLinearOperator, self).__init__(*args, **kwargs) - - def adjoint(self, x, out=None): - '''Adjoint operation for the BlockOperator - - only available on BlockLinearOperator - ''' - shape = self.get_output_shape(x.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(col,row).adjoint(x.get_item(row)) - else: - prod += self.get_item(col,row).adjoint(x.get_item(row)) - res.append(prod) - return BlockDataContainer(*res, shape=shape) + diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py index 7178510..f247896 100644 --- a/Wrappers/Python/wip/CGLS_tikhonov.py +++ b/Wrappers/Python/wip/CGLS_tikhonov.py @@ -105,46 +105,57 @@ B = BlockDataContainer(b, # setup a tomo identity Ibig = 1e5 * TomoIdentity(geometry=ig) Ismall = 1e-5 * TomoIdentity(geometry=ig) +Iok = 1e1 * TomoIdentity(geometry=ig) # composite operator Kbig = BlockOperator(A, Ibig, shape=(2,1)) Ksmall = BlockOperator(A, Ismall, shape=(2,1)) - +Kok = BlockOperator(A, Iok, shape=(2,1)) + #out = K.direct(X_init) f = Norm2sq(Kbig,B) f.L = 0.00003 fsmall = Norm2sq(Ksmall,B) -f.L = 0.00003 - +fsmall.L = 0.00003 + +fok = Norm2sq(Kok,B) +fok.L = 0.00003 + simplef = Norm2sq(A, b) simplef.L = 0.00003 gd = GradientDescent( x_init=x_init, objective_function=simplef, rate=simplef.L) gd.max_iteration = 10 - + +Kbig.direct(X_init) +Kbig.adjoint(B) cg = CGLS() cg.set_up(X_init, Kbig, B ) -cg.max_iteration = 1 +cg.max_iteration = 5 cgsmall = CGLS() cgsmall.set_up(X_init, Ksmall, B ) -cgsmall.max_iteration = 1 +cgsmall.max_iteration = 5 cgs = CGLS() cgs.set_up(x_init, A, b ) cgs.max_iteration = 6 -# # + +cgok = CGLS() +cgok.set_up(X_init, Kok, B ) +cgok.max_iteration = 6 +# # #out.__isub__(B) #out2 = K.adjoint(out) #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) for _ in gd: - print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) + print ("iteration {} {}".format(gd.iteration, gd.get_last_loss())) cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) @@ -152,6 +163,7 @@ cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) +cgok.run(10, verbose=True) # # for _ in cg: # print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) # # @@ -164,19 +176,22 @@ cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val) # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) # # fig = plt.figure() -plt.subplot(1,5,1) +plt.subplot(1,6,1) plt.imshow(Phantom.subset(vertical=0).as_array()) 
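This revision adds a middle regularisation weight (Iok, cgok) between the 1e-5 and 1e5 extremes already in the script. The effect of that weight is easy to see on a small dense analogue; the sketch below is illustrative NumPy only, with alpha standing in for the factor placed in front of TomoIdentity.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((40, 20))
x_true = rng.standard_normal(20)
b = A @ x_true + 0.05 * rng.standard_normal(40)

for alpha in (1e-5, 1e1, 1e5):
    # Tikhonov solution for this weight (normal-equations form)
    x = np.linalg.solve(A.T @ A + alpha**2 * np.eye(20), A.T @ b)
    print(f"alpha={alpha:g}  ||x||={np.linalg.norm(x):.3e}  "
          f"||Ax-b||={np.linalg.norm(A @ x - b):.3e}")
# a very small alpha barely regularises, a very large alpha pushes x towards zero,
# and the intermediate value trades the two off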
plt.title('Simulated Phantom') -plt.subplot(1,5,2) +plt.subplot(1,6,2) plt.imshow(gd.get_output().subset(vertical=0).as_array()) plt.title('Simple Gradient Descent') -plt.subplot(1,5,3) +plt.subplot(1,6,3) plt.imshow(cgs.get_output().subset(vertical=0).as_array()) plt.title('Simple CGLS') -plt.subplot(1,5,4) -plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +plt.subplot(1,6,4) +plt.imshow(cg.get_output().get_item(0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nbig lambda') -plt.subplot(1,5,5) -plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) +plt.subplot(1,6,5) +plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nsmall lambda') +plt.subplot(1,6,6) +plt.imshow(cgok.get_output().get_item(0).subset(vertical=0).as_array()) +plt.title('Composite CGLS\nok lambda') plt.show() -- cgit v1.2.3 From af7925fb8b5da9b0b47c1abbc29cd861968dd16c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 8 Mar 2019 07:51:58 -0500 Subject: bugfix BlockOperator adjoint --- Wrappers/Python/ccpi/optimisation/funcs.py | 2 + .../ccpi/optimisation/operators/BlockOperator.py | 220 +-------------------- 2 files changed, 4 insertions(+), 218 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 9b9fc36..99af275 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -154,6 +154,8 @@ class Norm2sq(Function): self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) except AttributeError as ae: pass + except NotImplementedError as noe: + pass def grad(self,x): #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b ) diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index f102f1e..8298c03 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -98,9 +98,9 @@ class BlockOperator(Operator): for row in range(self.shape[1]): for col in range(self.shape[0]): if col == 0: - prod = self.get_item(col,row).adjoint(x.get_item(row)) + prod = self.get_item(row, col).adjoint(x.get_item(col)) else: - prod += self.get_item(col,row).adjoint(x.get_item(row)) + prod += self.get_item(row, col).adjoint(x.get_item(col)) res.append(prod) return BlockDataContainer(*res, shape=shape) @@ -137,221 +137,5 @@ class BlockOperator(Operator): shape = (self.shape[1], self.shape[0]) return type(self)(*self.operators, shape=shape) -class BlockLinearOperator(BlockOperator): - '''Class to hold a block operator - - Class to hold a number of Operators in a block. - User may specify the shape of the block, by default is a row vector - ''' - def __init__(self, *args, **kwargs): - ''' - Class creator - - Note: - Do not include the `self` parameter in the ``Args`` section. - - Args: - vararg (Operator): LinearOperators in the block. - shape (:obj:`tuple`, optional): If shape is passed the Operators in - vararg are considered input in a row-by-row fashion. - Shape and number of Operators must match. 
- - Example: - BlockLinearOperator(op0,op1) results in a row block - BlockLinearOperator(op0,op1,shape=(1,2)) results in a column block - ''' - for i,op in enumerate(args): - if not op.is_linear(): - raise ValueError('Operator {} must be LinearOperator'.format(i)) - super(BlockLinearOperator, self).__init__(*args, **kwargs) - - - - - - - - - if __name__ == '__main__': pass - #from ccpi.optimisation.Algorithms import GradientDescent -# from ccpi.optimisation.algorithms import CGLS - -# from ccpi.plugins.ops import CCPiProjectorSimple -# from ccpi.optimisation.ops import PowerMethodNonsquare -# from ccpi.optimisation.ops import TomoIdentity -# from ccpi.optimisation.funcs import Norm2sq, Norm1 -# from ccpi.framework import ImageGeometry, AcquisitionGeometry -# from ccpi.optimisation.Algorithms import GradientDescent -# #from ccpi.optimisation.Algorithms import CGLS -# import matplotlib.pyplot as plt - - -# # Set up phantom size N x N x vert by creating ImageGeometry, initialising the -# # ImageData object with this geometry and empty array and finally put some -# # data into its array, and display one slice as image. - -# # Image parameters -# N = 128 -# vert = 4 - -# # Set up image geometry -# ig = ImageGeometry(voxel_num_x=N, -# voxel_num_y=N, -# voxel_num_z=vert) - -# # Set up empty image data -# Phantom = ImageData(geometry=ig, -# dimension_labels=['horizontal_x', -# 'horizontal_y', -# 'vertical']) -# Phantom += 0.05 -# # Populate image data by looping over and filling slices -# i = 0 -# while i < vert: -# if vert > 1: -# x = Phantom.subset(vertical=i).array -# else: -# x = Phantom.array -# x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 -# x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.94 -# if vert > 1 : -# Phantom.fill(x, vertical=i) -# i += 1 - - -# perc = 0.02 -# # Set up empty image data -# noise = ImageData(numpy.random.normal(loc = 0.04 , -# scale = perc , -# size = Phantom.shape), geometry=ig, -# dimension_labels=['horizontal_x', -# 'horizontal_y', -# 'vertical']) -# Phantom += noise - -# # Set up AcquisitionGeometry object to hold the parameters of the measurement -# # setup geometry: # Number of angles, the actual angles from 0 to -# # pi for parallel beam, set the width of a detector -# # pixel relative to an object pixe and the number of detector pixels. -# angles_num = 20 -# det_w = 1.0 -# det_num = N - -# angles = numpy.linspace(0,numpy.pi,angles_num,endpoint=False,dtype=numpy.float32)*\ -# 180/numpy.pi - -# # Inputs: Geometry, 2D or 3D, angles, horz detector pixel count, -# # horz detector pixel size, vert detector pixel count, -# # vert detector pixel size. -# ag = AcquisitionGeometry('parallel', -# '3D', -# angles, -# N, -# det_w, -# vert, -# det_w) - -# # Set up Operator object combining the ImageGeometry and AcquisitionGeometry -# # wrapping calls to CCPi projector. -# A = CCPiProjectorSimple(ig, ag) - -# # Forward and backprojection are available as methods direct and adjoint. Here -# # generate test data b and some noise - -# b = A.direct(Phantom) - - -# #z = A.adjoint(b) - - -# # Using the test data b, different reconstruction methods can now be set up as -# # demonstrated in the rest of this file. In general all methods need an initial -# # guess and some algorithm options to be set. Note that 100 iterations for -# # some of the methods is a very low number and 1000 or 10000 iterations may be -# # needed if one wants to obtain a converged solution. 
-# x_init = ImageData(geometry=ig, -# dimension_labels=['horizontal_x','horizontal_y','vertical']) -# X_init = BlockDataContainer(x_init) -# B = BlockDataContainer(b, -# ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) - -# # setup a tomo identity -# Ibig = 1e5 * TomoIdentity(geometry=ig) -# Ismall = 1e-5 * TomoIdentity(geometry=ig) - -# # composite operator -# Kbig = BlockOperator(A, Ibig, shape=(2,1)) -# Ksmall = BlockOperator(A, Ismall, shape=(2,1)) - -# #out = K.direct(X_init) - -# f = Norm2sq(Kbig,B) -# f.L = 0.00003 - -# fsmall = Norm2sq(Ksmall,B) -# f.L = 0.00003 - -# simplef = Norm2sq(A, b) -# simplef.L = 0.00003 - -# gd = GradientDescent( x_init=x_init, objective_function=simplef, -# rate=simplef.L) -# gd.max_iteration = 10 - -# cg = CGLS() -# cg.set_up(X_init, Kbig, B ) -# cg.max_iteration = 1 - -# cgsmall = CGLS() -# cgsmall.set_up(X_init, Ksmall, B ) -# cgsmall.max_iteration = 1 - - -# cgs = CGLS() -# cgs.set_up(x_init, A, b ) -# cgs.max_iteration = 6 -# # -# #out.__isub__(B) -# #out2 = K.adjoint(out) - -# #(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - -# for _ in gd: -# print ("iteration {} {}".format(gd.iteration, gd.get_current_loss())) - -# cg.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - -# cgs.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) - -# cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) -# cgsmall.run(10, lambda it,val: print ("iteration {} objective {}".format(it,val))) -# # for _ in cg: -# # print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) -# # -# # fig = plt.figure() -# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) -# # plt.title('Composite CGLS') -# # plt.show() -# # -# # for _ in cgs: -# # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) -# # -# fig = plt.figure() -# plt.subplot(1,5,1) -# plt.imshow(Phantom.subset(vertical=0).as_array()) -# plt.title('Simulated Phantom') -# plt.subplot(1,5,2) -# plt.imshow(gd.get_output().subset(vertical=0).as_array()) -# plt.title('Simple Gradient Descent') -# plt.subplot(1,5,3) -# plt.imshow(cgs.get_output().subset(vertical=0).as_array()) -# plt.title('Simple CGLS') -# plt.subplot(1,5,4) -# plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) -# plt.title('Composite CGLS\nbig lambda') -# plt.subplot(1,5,5) -# plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) -# plt.title('Composite CGLS\nsmall lambda') -# plt.show() -- cgit v1.2.3 From a3db4f14e0981b0a3cfceee58c810ab4d484c116 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 8 Mar 2019 15:42:11 +0000 Subject: blockFramework --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 102 +++++ .../ccpi/optimisation/functions/BlockFunction.py | 69 +++ .../optimisation/functions/FunctionComposition.py | 175 ++++++++ .../functions/FunctionOperatorComposition.py | 53 +++ .../Python/ccpi/optimisation/functions/L1Norm.py | 75 ++++ .../ccpi/optimisation/functions/L2NormSquared.py | 101 +++++ .../Python/ccpi/optimisation/functions/ZeroFun.py | 40 ++ .../Python/ccpi/optimisation/functions/__init__.py | 10 + .../__pycache__/BlockFunction.cpython-36.pyc | Bin 0 -> 2398 bytes .../__pycache__/FunctionComposition.cpython-36.pyc | Bin 0 -> 5778 bytes .../FunctionOperatorComposition.cpython-36.pyc | Bin 0 -> 2127 bytes .../functions/__pycache__/L1Norm.cpython-36.pyc | Bin 0 -> 2922 bytes .../__pycache__/L2NormSquared.cpython-36.pyc | Bin 
0 -> 3428 bytes .../functions/__pycache__/ZeroFun.cpython-36.pyc | Bin 0 -> 1698 bytes .../functions/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 413 bytes .../__pycache__/mixed_L12Norm.cpython-36.pyc | Bin 0 -> 2294 bytes .../ccpi/optimisation/functions/functions.py | 311 ++++++++++++++ .../ccpi/optimisation/functions/mixed_L12Norm.py | 65 +++ .../ccpi/optimisation/functions/test_functions.py | 474 +++++++++++++++++++++ .../operators/FiniteDifferenceOperator.py | 314 ++++++++++++++ .../optimisation/operators/GradientOperator.py | 125 ++++++ .../optimisation/operators/IdentityOperator.py | 42 ++ .../operators/SymmetrizedGradientOperator.py | 118 +++++ .../ccpi/optimisation/operators/ZeroOperator.py | 39 ++ .../Python/ccpi/optimisation/operators/__init__.py | 8 + Wrappers/Python/setup.py | 3 +- 26 files changed, 2123 insertions(+), 1 deletion(-) create mode 100644 Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/L1Norm.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__init__.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc create mode 100644 Wrappers/Python/ccpi/optimisation/functions/functions.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py create mode 100644 Wrappers/Python/ccpi/optimisation/functions/test_functions.py create mode 100644 Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py create mode 100644 Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py create mode 100644 Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py create mode 100644 Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py create mode 100644 Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py new file mode 100644 index 0000000..7488310 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Mon Feb 4 16:18:06 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData +import numpy as np +import matplotlib.pyplot as plt +import time +from Operators.CompositeOperator import 
CompositeOperator +from Operators.CompositeDataContainer import CompositeDataContainer + +def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): + + # algorithmic parameters + if opt is None: + opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ + 'memopt': False} + + if sigma is None and tau is None: + raise ValueError('Need sigma*tau||K||^2<1') + + niter = opt['niter'] if 'niter' in opt.keys() else 1000 + tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False + stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False + + if isinstance(operator, CompositeOperator): +# if isinstance(operator, CompositeOperator_DataContainer): + x_old = operator.alloc_domain_dim() + y_old = operator.alloc_range_dim() + else: + x_old = ImageData(np.zeros(operator.domain_dim())) + y_old = ImageData(np.zeros(operator.range_dim())) + + + xbar = x_old + x_tmp = x_old + x = x_old + + y_tmp = y_old + y = y_tmp + + # relaxation parameter + theta = 1 + + t = time.time() + + objective = [] + + for i in range(niter): + + # Gradient descent, Dual problem solution + y_tmp = y_old + sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_tmp, sigma) + + # Gradient ascent, Primal problem solution + x_tmp = x_old - tau * operator.adjoint(y) + x = g.proximal(x_tmp, tau) + + #Update + xbar = x + theta * (x - x_old) + + x_old = x + y_old = y + +# pdgap + print(f(x) + g(x) + f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) ) + + + + + +# # TV denoising, pdgap with composite +# +# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ +# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) +# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() + + # TV denoising, pdgap with no composite + + + +# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ +# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) +# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() + + +# print(primal_obj) +# objective = primal_obj +# + + + + t_end = time.time() + + return x, t_end - t, objective + diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py new file mode 100644 index 0000000..bc9b62d --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 8 10:01:31 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import BlockDataContainer + +class BlockFunction(Function): + + def __init__(self, operator, *functions): + + self.functions = functions + self.operator = operator + self.length = len(self.functions) + + super(BlockFunction, self).__init__() + + def __call__(self, x): + + tmp = self.operator.direct(x) + + t = 0 + for i in range(tmp.shape[0]): + t += self.functions[i](tmp.get_item(i)) + return t + + def call_adjoint(self, x): + + tmp = operator.adjoint(x) + + t = 0 + for i in range(tmp.shape[0]): + t += self.functions[i](tmp.get_item(i)) + return t + + def convex_conjugate(self, x): + + ''' Convex_conjugate does not 
take into account the BlockOperator'''
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+
+    def proximal_conjugate(self, x, tau, out = None):
+
+        ''' proximal_conjugate does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def proximal(self, x, tau, out = None):
+
+        ''' proximal does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal(x.get_item(i), tau)
+
+        return BlockDataContainer(*out)
+
+    def gradient(self, x, out=None):
+        pass
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py
new file mode 100644
index 0000000..5b6defc
--- /dev/null
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Mar 6 19:45:06 2019
+
+@author: evangelos
+"""
+
+import numpy as np
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import DataContainer, ImageData, ImageGeometry
+from ccpi.framework import BlockDataContainer
+
+class FunctionOperatorComposition(Function):
+
+    def __init__(self, function, operator):
+
+        self.function = function
+        self.operator = operator
+        self.grad_Lipschitz_cnst = 2*self.function.alpha*operator.norm()**2
+        super(FunctionOperatorComposition, self).__init__()
+
+    def __call__(self, x):
+
+        return self.function(self.operator.direct(x))
+
+    def call_adjoint(self, x):
+
+        return self.function(self.operator.adjoint(x))
+
+    def convex_conjugate(self, x):
+
+        return self.function.convex_conjugate(x)
+
+    def proximal(self, x, tau):
+
+        ''' proximal does not take into account the Operator'''
+
+        return self.function.proximal(x, tau, out=None)
+
+    def proximal_conjugate(self, x, tau):
+
+        ''' proximal conjugate does not take into account the Operator'''
+
+        return self.function.proximal_conjugate(x, tau, out=None)
+
+    def gradient(self, x):
+
+        ''' Gradient takes into account the Operator'''
+
+        return self.operator.adjoint(self.function.gradient(self.operator.direct(x)))
+
+
+class BlockFunction(Function):
+
+    def __init__(self, operator, *functions):
+
+        self.functions = functions
+        self.operator = operator
+        self.length = len(self.functions)
+
+        super(BlockFunction, self).__init__()
+
+    def __call__(self, x):
+
+        tmp = self.operator.direct(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def call_adjoint(self, x):
+
+        tmp = self.operator.adjoint(x)
+
+        t = 0
+        for i in range(tmp.shape[0]):
+            t += self.functions[i](tmp.get_item(i))
+        return t
+
+    def convex_conjugate(self, x):
+
+        ''' Convex_conjugate does not take into account the BlockOperator'''
+        t = 0
+        for i in range(x.shape[0]):
+            t += self.functions[i].convex_conjugate(x.get_item(i))
+        return t
+
+
+    def proximal_conjugate(self, x, tau, out = None):
+
+        ''' proximal_conjugate does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau)
+
+        return CompositeDataContainer(*out)
+
+    def proximal(self, x, tau, out = None):
+
+        ''' proximal does not take into account the BlockOperator'''
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = 
self.functions[i].proximal(x.get_item(i), tau) + + return CompositeDataContainer(*out) + + def gradient(self,x, out=None): + pass + + +class FunctionComposition_new(Function): + + def __init__(self, operator, *functions): + + self.functions = functions + self.operator = operator + self.length = len(self.functions) + + super(FunctionComposition_new, self).__init__() + + def __call__(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) + return t + + def convex_conjugate(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i].convex_conjugate(x.get_item(i)) + return t + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + def proximal(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + +if __name__ == '__main__': + + from operators import Operator + from IdentityOperator import Identity \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py new file mode 100644 index 0000000..a67b884 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 8 09:55:36 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function + + +class FunctionOperatorComposition(Function): + + def __init__(self, operator, function): + + self.function = function + self.operator = operator + self.L = 2*self.function.alpha*operator.norm()**2 + super(FunctionOperatorComposition, self).__init__() + + def __call__(self, x): + + return self.function(self.operator.direct(x)) + + def call_adjoint(self, x): + + return self.function(self.operator.adjoint(x)) + + def convex_conjugate(self, x): + + ''' convex_conjugate does not take into account the Operator''' + return self.function.convex_conjugate(x) + + def proximal(self, x, tau): + + ''' proximal does not take into account the Operator''' + + return self.function.proximal(x, tau, out=None) + + def proximal_conjugate(self, x, tau, out=None): + + ''' proximal conjugate does not take into account the Operator''' + + return self.function.proximal_conjugate(x, tau) + + def gradient(self, x): + + ''' Gradient takes into account the Operator''' + + return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py new file mode 100644 index 0000000..ec7aa5b --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:42:34 2019 + +@author: evangelos +""" + +import numpy as np +from 
ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry + + +############################ L1NORM FUNCTIONS ############################# +class SimpleL1NormEdo(Function): + + def __init__(self, alpha=1): + + super(SimpleL1Norm, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.abs().sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + ''' Soft Threshold''' + return x.sign() * (x.abs() - tau * self.alpha).maximum(0) + + def proximal_conjugate(self, x, tau): + return x.divide((x.abs()/self.alpha).maximum(1.0)) + +class L1Norm(SimpleL1NormEdo): + + def __init__(self, alpha=1, **kwargs): + + super(L1Norm, self).__init__() + self.alpha = alpha + self.b = kwargs.get('b',None) + + def __call__(self, x): + + if self.b is None: + return SimpleL1Norm.__call__(self, x) + else: + return SimpleL1Norm.__call__(self, x - self.b) + + def gradient(self, x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + if self.b is None: + return SimpleL1Norm.convex_conjugate(self, x) + else: + return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal(self, x, tau) + else: + return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) + + def proximal_conjugate(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal_conjugate(self, x, tau) + else: + return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py new file mode 100644 index 0000000..8b73be6 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 7 13:10:56 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry + + +class SimpleL2NormSq(Function): + + def __init__(self, alpha=1): + + super(SimpleL2NormSq, self).__init__() + self.alpha = alpha + + # Lispchitz constant of gradient + self.L = 2*self.alpha + + def __call__(self, x): + return self.alpha * x.power(2).sum() + + def gradient(self,x): + return 2 * self.alpha * x + + def convex_conjugate(self,x): + return (1/(4*self.alpha)) * x.power(2).sum() + + def proximal(self, x, tau): + return x.divide(1+2*tau*self.alpha) + + def proximal_conjugate(self, x, tau): + return x.divide(1 + tau/(2*self.alpha) ) + + +############################ L2NORM FUNCTIONS ############################# +class L2NormSq(SimpleL2NormSq): + + def __init__(self, alpha, **kwargs): + + super(L2NormSq, self).__init__(alpha) + self.alpha = alpha + self.b = kwargs.get('b',None) + + def __call__(self, x): + + if self.b is None: + return SimpleL2NormSq.__call__(self, x) + else: + return SimpleL2NormSq.__call__(self, x - self.b) + + def gradient(self, x): + + if self.b is None: + return 2*self.alpha * x + else: + return 2*self.alpha * (x - self.b) + + def convex_conjugate(self, x): + + ''' The convex conjugate corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + ''' + + if self.b is None: + return SimpleL2NormSq.convex_conjugate(self, x) + else: + return 
SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + ''' The proximal operator corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if self.b is None: + return SimpleL2NormSq.proximal(self, x, tau) + else: + return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + + + def proximal_conjugate(self, x, tau): + + ''' The proximal operator corresponds to the simple convex conjugate + functional i.e., f^{*}(x^{) + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + if self.b is None: + return SimpleL2NormSq.proximal_conjugate(self, x, tau) + else: + return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py new file mode 100644 index 0000000..b41cc26 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:44:10 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData +from ccpi.framework import BlockDataContainer + +class ZeroFun(Function): + + def __init__(self): + super(ZeroFun, self).__init__() + + def __call__(self,x): + return 0 + + def convex_conjugate(self, x): + ''' This is the support function sup which in fact is the + indicator function for the set = {0} + So 0 if x=0, or inf if x neq 0 + ''' + + if x.shape[0]==1: + return x.maximum(0).sum() + else: + if isinstance(x, CompositeDataContainer): + return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() + else: + return x.maximum(0).sum() + x.maximum(0).sum() + + def proximal(self,x,tau): + return x.copy() + + def proximal_conjugate(self, x, tau): + return 0 \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py new file mode 100644 index 0000000..7ce617a --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- + + +from .ZeroFun import ZeroFun +from .L1Norm import * +from .L2NormSquared import * +from .mixed_L12Norm import * +from .FunctionOperatorComposition import FunctionOperatorComposition +from .BlockFunction import BlockFunction + diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc new file mode 100644 index 0000000..660532e Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc new file mode 100644 index 0000000..075cdfb Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc new file mode 100644 index 0000000..f564eff Binary files /dev/null and 
b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc new file mode 100644 index 0000000..4ef959d Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc new file mode 100644 index 0000000..ad1b296 Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc new file mode 100644 index 0000000..f2bf70f Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..1321257 Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc new file mode 100644 index 0000000..d43e3ad Binary files /dev/null and b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/functions.py b/Wrappers/Python/ccpi/optimisation/functions/functions.py new file mode 100644 index 0000000..f40abb9 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/functions.py @@ -0,0 +1,311 @@ +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 7 13:10:56 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry +from operators import CompositeDataContainer, Identity, CompositeOperator +from numbers import Number + + +############################ L2NORM FUNCTIONS ############################# +class SimpleL2NormSq(Function): + + def __init__(self, alpha=1): + + super(SimpleL2NormSq, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.power(2).sum() + + def gradient(self,x): + return 2 * self.alpha * x + + def convex_conjugate(self,x): + return (1/4*self.alpha) * x.power(2).sum() + + def proximal(self, x, tau): + return x.divide(1+2*tau*self.alpha) + + def proximal_conjugate(self, x, tau): + return x.divide(1 + tau/2*self.alpha ) + + +class L2NormSq(SimpleL2NormSq): + + def __init__(self, A, b = None, alpha=1, **kwargs): + + super(L2NormSq, self).__init__(alpha=alpha) + self.alpha = alpha + self.A = A + self.b = b + + def __call__(self, x): + + if self.b is None: + return SimpleL2NormSq.__call__(self, self.A.direct(x)) + else: + return SimpleL2NormSq.__call__(self, self.A.direct(x) - self.b) + + def convex_conjugate(self, x): + + ''' The convex conjugate corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + ''' + + if self.b is 
None: + return SimpleL2NormSq.convex_conjugate(self, x) + else: + return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() + + def gradient(self, x): + + if self.b is None: + return 2*self.alpha * self.A.adjoint(self.A.direct(x)) + else: + return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b) + + def proximal(self, x, tau): + + ''' The proximal operator corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if self.b is None: + return SimpleL2NormSq.proximal(self, x, tau) + else: + return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + + + def proximal_conjugate(self, x, tau): + + ''' The proximal operator corresponds to the simple convex conjugate + functional i.e., f^{*}(x^{) + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + if self.b is None: + return SimpleL2NormSq.proximal_conjugate(self, x, tau) + else: + return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) + + +############################ L1NORM FUNCTIONS ############################# +class SimpleL1Norm(Function): + + def __init__(self, alpha=1): + + super(SimpleL1Norm, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.abs().sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + ''' Soft Threshold''' + return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) + + def proximal_conjugate(self, x, tau): + return x.divide((x.abs()/self.alpha).maximum(1.0)) + +class L1Norm(SimpleL1Norm): + + def __init__(self, A, b = None, alpha=1, **kwargs): + + super(L1Norm, self).__init__() + self.alpha = alpha + self.A = A + self.b = b + + def __call__(self, x): + + if self.b is None: + return SimpleL1Norm.__call__(self, self.A.direct(x)) + else: + return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) + + def gradient(self, x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + if self.b is None: + return SimpleL1Norm.convex_conjugate(self, x) + else: + return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal(self, x, tau) + else: + return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) + + def proximal_conjugate(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal_conjugate(self, x, tau) + else: + return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) + + +############################ mixed_L1,2NORM FUNCTIONS ############################# +class mixed_L12Norm(Function): + + def __init__(self, A, b=None, alpha=1, **kwargs): + + super(mixed_L12Norm, self).__init__() + self.alpha = alpha + self.A = A + self.b = b + + self.sym_grad = kwargs.get('sym_grad',False) + + + + def gradient(self,x): + return ValueError('Not Differentiable') + + + def __call__(self,x): + + y = self.A.direct(x) + eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt() + eucl_norm.__isub__(self.b) + return eucl_norm.sum() * self.alpha + + def convex_conjugate(self,x): + return 0 + + def proximal_conjugate(self, x, tau): + + if self.b is None: + + if self.sym_grad: + tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha + res = x.divide(ImageData(tmp2).maximum(1.0)) + else: + res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) + + else: + res = (x - tau*self.b)/ ((x - 
tau*self.b)).abs().maximum(1.0) + + return res + + +#%% + +class ZeroFun(Function): + + def __init__(self): + super(ZeroFun, self).__init__() + + def __call__(self,x): + return 0 + + def convex_conjugate(self, x): + ''' This is the support function sup which in fact is the + indicator function for the set = {0} + So 0 if x=0, or inf if x neq 0 + ''' + return x.maximum(0).sum() + + def proximal(self,x,tau): + return x.copy() + + def proximal_conjugate(self, x, tau): + return 0 + + +class CompositeFunction(Function): + + def __init__(self, *args): + self.functions = args + self.length = len(self.functions) + + def get_item(self, ind): + return self.functions[ind] + + def __call__(self,x): + + t = 0 + for i in range(self.length): + for j in range(x.shape[0]): + t +=self.functions[i](x.get_item(j)) + return t + + def convex_conjugate(self, x): + + z = 0 + t = 0 + for i in range(x.shape[0]): + t += self.functions[z].convex_conjugate(x.get_item(i)) + z += 1 + + return t + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + + + + +if __name__ == '__main__': + + N = 3 + ig = (N,N) + ag = ig + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = CompositeOperator((2,1), op1, op2 ) + + # Create functions + alpha = 1 + noisy_data = ImageData(np.random.randint(10, size=ag)) + f = CompositeFunction(L1Norm(op1,alpha), \ + L2NormSq(op2, noisy_data, c = 0.5, memopt = False) ) + + u = ImageData(np.random.randint(10, size=ig)) + uComp = CompositeDataContainer(u) + + print(f(uComp)) # This is f(Kx) = f1(K1*u) + f2(K2*u) + + f1 = L1Norm(op1,alpha) + f2 = L2NormSq(op2, noisy_data, c = 0.5, memopt = False) + + print(f1(u) + f2(u)) + + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py new file mode 100644 index 0000000..0ce1d28 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:43:12 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry + + + +############################ mixed_L1,2NORM FUNCTIONS ############################# +class mixed_L12Norm(Function): + + def __init__(self, alpha, **kwargs): + + super(mixed_L12Norm, self).__init__() + + self.alpha = alpha + self.b = kwargs.get('b',None) + self.sym_grad = kwargs.get('sym_grad',False) + + def __call__(self,x): + + if self.b is None: + tmp1 = x + else: + tmp1 = x - self.b +# + if self.sym_grad: + tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) + else: + tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() + + return self.alpha*tmp.sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + pass + + def proximal_conjugate(self, x, tau): + + if 
self.sym_grad: + tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha + res = x.divide(ImageData(tmp2).maximum(1.0)) + else: + res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) + + return res + + def composition_with(self, operator): + + if self.b is None: + return FunctionComposition(mixed_L12Norm(self.alpha), operator) + else: + return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) + diff --git a/Wrappers/Python/ccpi/optimisation/functions/test_functions.py b/Wrappers/Python/ccpi/optimisation/functions/test_functions.py new file mode 100644 index 0000000..54a952a --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/test_functions.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sat Mar 2 19:24:37 2019 + +@author: evangelos +""" + +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 7 13:10:56 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry +from operators import CompositeDataContainer, Identity, CompositeOperator +from numbers import Number +from GradientOperator import Gradient + + +class SimpleL2NormSq(Function): + + def __init__(self, alpha=1): + + super(SimpleL2NormSq, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.power(2).sum() + + def gradient(self,x): + return 2 * self.alpha * x + + def convex_conjugate(self,x): + return (1/(4*self.alpha)) * x.power(2).sum() + + def proximal(self, x, tau): + return x.divide(1+2*tau*self.alpha) + + def proximal_conjugate(self, x, tau): + return x.divide(1 + tau/(2*self.alpha) ) + + +############################ L2NORM FUNCTIONS ############################# +class L2NormSq(SimpleL2NormSq): + + def __init__(self, alpha, **kwargs): + + super(L2NormSq, self).__init__(alpha) + self.alpha = alpha + self.b = kwargs.get('b',None) + self.L = 1 + + def __call__(self, x): + + if self.b is None: + return SimpleL2NormSq.__call__(self, x) + else: + return SimpleL2NormSq.__call__(self, x - self.b) + + def gradient(self, x): + + if self.b is None: + return 2*self.alpha * x + else: + return 2*self.alpha * (x - self.b) + + def composition_with(self, operator): + + if self.b is None: + return FunctionComposition(L2NormSq(self.alpha), operator) + else: + return FunctionComposition(L2NormSq(self.alpha, b=self.b), operator) + + def convex_conjugate(self, x): + + ''' The convex conjugate corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + ''' + + if self.b is None: + return SimpleL2NormSq.convex_conjugate(self, x) + else: + return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + ''' The proximal operator corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if self.b is None: + return SimpleL2NormSq.proximal(self, x, tau) + else: + return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + + + def proximal_conjugate(self, x, tau): + + ''' The proximal operator corresponds to the simple convex conjugate + functional i.e., f^{*}(x^{) + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + if self.b is None: + return SimpleL2NormSq.proximal_conjugate(self, x, tau) + else: + return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) + + 
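# --- Editor's note: illustrative sketch, not part of the committed files. ---
# The shifted proximal operators above rely on the translation identity
# prox_{tau f(. - b)}(x) = b + prox_{tau f}(x - b).  A NumPy-only check of the
# closed form used by SimpleL2NormSq / L2NormSq; all names below are local to
# this sketch.
import numpy as np

alpha, tau = 0.7, 0.3
x = np.random.rand(5)
b = np.random.rand(5)

# prox of f(u) = alpha*||u||_2^2 has the closed form u / (1 + 2*tau*alpha)
p_simple = (x - b) / (1.0 + 2.0 * tau * alpha)
# translation rule gives the prox of g(u) = alpha*||u - b||_2^2
p = b + p_simple

# optimality condition of argmin_p { 0.5*||p - x||^2 + tau*alpha*||p - b||^2 }
residual = (p - x) + 2.0 * tau * alpha * (p - b)
assert np.allclose(residual, 0.0)
# --- end of editor's note ---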
+############################ L1NORM FUNCTIONS ############################# +class SimpleL1Norm(Function): + + def __init__(self, alpha=1): + + super(SimpleL1Norm, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.abs().sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + ''' Soft Threshold''' + return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) + + def proximal_conjugate(self, x, tau): + return x.divide((x.abs()/self.alpha).maximum(1.0)) + +class L1Norm(SimpleL1Norm): + + def __init__(self, alpha=1, **kwargs): + + super(L1Norm, self).__init__() + self.alpha = alpha + + self.A = kwargs.get('A',None) + self.b = kwargs.get('b',None) + + def __call__(self, x): + + if self.b is None: + return SimpleL1Norm.__call__(self, self.A.direct(x)) + else: + return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) + + def eval_norm(self, x): + + return SimpleL1Norm.__call__(self, x) + + def gradient(self, x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + if self.b is None: + return SimpleL1Norm.convex_conjugate(self, x) + else: + return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal(self, x, tau) + else: + return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) + + def proximal_conjugate(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal_conjugate(self, x, tau) + else: + return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) + + +############################ mixed_L1,2NORM FUNCTIONS ############################# +class mixed_L12Norm(Function): + + def __init__(self, alpha, **kwargs): + + super(mixed_L12Norm, self).__init__() + + self.alpha = alpha + self.b = kwargs.get('b',None) + self.sym_grad = kwargs.get('sym_grad',False) + + def __call__(self,x): + + if self.b is None: + tmp1 = x + else: + tmp1 = x - self.b +# + if self.sym_grad: + tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) + else: + tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() + + return self.alpha*tmp.sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + pass + + def proximal_conjugate(self, x, tau): + + if self.sym_grad: + tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha + res = x.divide(ImageData(tmp2).maximum(1.0)) + else: + res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) + + return res + + def composition_with(self, operator): + + if self.b is None: + return FunctionComposition(mixed_L12Norm(self.alpha), operator) + else: + return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) + + +#%% + +class ZeroFun(Function): + + def __init__(self): + super(ZeroFun, self).__init__() + + def __call__(self,x): + return 0 + + def convex_conjugate(self, x): + ''' This is the support function sup which in fact is the + indicator function for the set = {0} + So 0 if x=0, or inf if x neq 0 + ''' + + if x.shape[0]==1: + return x.maximum(0).sum() + else: + return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() + + def proximal(self,x,tau): + return x.copy() + + def proximal_conjugate(self, x, tau): + return 0 + + +class CompositeFunction(Function): + + def __init__(self, *functions, blockMatrix): 
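+        # Editor's note (added commentary, not part of the original commit):
+        # because `blockMatrix` follows the starred `*functions` argument it is a
+        # keyword-only parameter in Python 3, so instances must be built as
+        # CompositeFunction(f1, f2, blockMatrix=B); CompositeFunction(f1, f2, B)
+        # raises a TypeError.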
+ + self.blockMatrix = blockMatrix + self.functions = functions + self.length = len(self.functions) + + def get_item(self, ind): + return self.functions[ind] + + def __call__(self,x): + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) + return t + + + def convex_conjugate(self, x): + + z = 0 + t = 0 + for i in range(x.shape[0]): + t += self.functions[z].convex_conjugate(x.get_item(i)) + z += 1 + + return t + + def proximal(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + +# +class FunctionComposition(Function): + + def __init__(self, function, operator): + + self.function = function + self.alpha = self.function.alpha + self.b = self.function.b + self.operator = operator + + + super(FunctionComposition, self).__init__() + + ''' overide call and gradient ''' + def __call__(self, x): + return self.function(x) + + def gradient(self,x): + return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) + + ''' Same as in the parent class''' + def proximal(self,x, tau): + return self.function.proximal(x, tau) + + def proximal_conjugate(self,x, tau): + return self.function.proximal_conjugate(x, tau) + + def convex_conjugate(self,x): + return self.function.convex_conjugate(x) + + +class FunctionComposition_new(Function): + + def __init__(self, operator, *functions): + + self.functions = functions + self.operator = operator + self.length = len(self.functions) + +# if self.length==1: +# self.L = self.functions[0].alpha*(self.operator.norm()**2) + + # length == to operator.shape[0]# + super(FunctionComposition_new, self).__init__() + + def __call__(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) + return t + + def convex_conjugate(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i].convex_conjugate(x.get_item(i)) + return t + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + def proximal(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + +if __name__ == '__main__': + + N = 3 + ig = (N,N) + ag = ig + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = CompositeOperator((2,1), op1, op2 ) + + # Create functions + noisy_data = ImageData(np.random.randint(10, size=ag)) + + d = ImageData(np.random.randint(10, size=ag)) + + f = 
mixed_L12Norm(alpha = 1).composition_with(op1) + g = L2NormSq(alpha=0.5, b=noisy_data) + + # Compare call of f + a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum() + print(a1, f(d)) + + # Compare call of g + a2 = g.alpha*(d - noisy_data).power(2).sum() + print(a2, g(d)) + + # Compare convex conjugate of g + a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() + print( a3, g.convex_conjugate(d)) + + + + + +# +# f1 = L2NormSq(alpha=1, b=noisy_data) +# print(f1(noisy_data)) +# +# f2 = L2NormSq(alpha=5, b=noisy_data).composition_with(op2) +# print(f2(noisy_data)) +# +# print(f1.gradient(noisy_data).as_array()) +# print(f2.gradient(noisy_data).as_array()) +## +# print(f1.proximal(noisy_data,1).as_array()) +# print(f2.proximal(noisy_data,1).as_array()) +# +# +# f3 = mixed_L12Norm(alpha = 1).composition_with(op1) +# print(f3(noisy_data)) +# +# print(ImageData(op1.direct(noisy_data).power(2).sum(axis=0)).sqrt().sum()) +# +# print( 5*(op2.direct(d) - noisy_data).power(2).sum(), f2(d)) +# +# from functions import mixed_L12Norm as mixed_L12Norm_old +# +# print(mixed_L12Norm_old(op1,None,alpha)(noisy_data)) + + + # + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py new file mode 100644 index 0000000..16cd215 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -0,0 +1,314 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 1 22:51:17 2019 + +@author: evangelos +""" + +from ccpi.optimisation.operators import Operator +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.framework import ImageData, BlockDataContainer +import numpy as np + +class FiniteDiff(Operator): + + # Works for Neum/Symmetric & periodic boundary conditions + # TODO add central differences??? 
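# --- Editor's note: illustrative sketch, not part of the committed file. ---
# What the two supported boundary conditions mean for the forward difference
# implemented below, shown on a 1-D NumPy array with the same conventions
# (the last entry is left at zero for 'Neumann' and wraps around for 'Periodic'):
import numpy as np

def forward_diff_1d(x, bnd_cond='Neumann'):
    d = np.zeros_like(x, dtype=float)
    d[:-1] = x[1:] - x[:-1]              # interior differences d[i] = x[i+1] - x[i]
    if bnd_cond == 'Neumann':
        pass                             # last entry stays 0 (zero-flux boundary)
    elif bnd_cond == 'Periodic':
        d[-1] = x[0] - x[-1]             # wrap around to the first entry
    else:
        raise ValueError('No valid boundary conditions')
    return d

x = np.array([1., 4., 2., 7.])
print(forward_diff_1d(x, 'Neumann'))     # [ 3. -2.  5.  0.]
print(forward_diff_1d(x, 'Periodic'))    # [ 3. -2.  5. -6.]
# --- end of editor's note ---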
+ # TODO not very well optimised, too many conditions + # TODO add discretisation step, should get that from imageGeometry + + # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] + # Grad_order = ['channels', 'direction_y', 'direction_x'] + # Grad_order = ['direction_z', 'direction_y', 'direction_x'] + # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] + + def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): + + super(FiniteDiff, self).__init__() + + self.gm_domain = gm_domain + self.gm_range = gm_range + self.direction = direction + self.bnd_cond = bnd_cond + + if self.gm_range is None: + self.gm_range = self.gm_domain + + if self.direction + 1 > len(gm_domain): + raise ValueError('Gradient directions more than geometry domain') + + def direct(self, x, out=None): + +# x_asarr = x.as_array() + x_asarr = x + x_sz = len(x.shape) + + if out is None: + out = np.zeros(x.shape) + + fd_arr = out + + ######################## Direct for 2D ############################### + if x_sz == 2: + + if self.direction == 1: + + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 0: + + np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Direct for 3D ############################### + elif x_sz == 3: + + if self.direction == 0: + + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + + if self.direction == 2: + + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Direct for 4D ############################### + elif x_sz == 4: + + if self.direction == 0: + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] ) + 
+ if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 3: + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + else: + raise NotImplementedError + + res = out + return res + + def adjoint(self, x, out=None): + +# x_asarr = x.as_array() + x_asarr = x + x_sz = len(x.shape) + + if out is None: + out = np.zeros(x.shape) + + fd_arr = out + + ######################## Adjoint for 2D ############################### + if x_sz == 2: + + if self.direction == 1: + + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) + np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] ) + + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 0: + + np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) + np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] ) + + else: + raise ValueError('No valid boundary conditions') + + ######################## Adjoint for 3D ############################### + elif x_sz == 3: + + if self.direction == 0: + + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) + np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) + np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] ) + np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Adjoint for 4D ############################### + elif x_sz == 4: + + if self.direction == 0: + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) + np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if 
self.direction == 1: + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) + np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] ) + np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 3: + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] ) + np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) + else: + raise ValueError('No valid boundary conditions') + + else: + raise NotImplementedError + + res = out + return res + + def range_dim(self): + return self.gm_range + + def domain_dim(self): + return self.gm_domain + + def norm(self): + x0 = ImageData(np.random.random_sample(self.domain_dim())) + self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + return self.s1 + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py new file mode 100644 index 0000000..3dcc1bd --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 1 22:50:04 2019 + +@author: evangelos +""" + +from ccpi.optimisation.operators import Operator +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.framework import ImageData, BlockDataContainer +import numpy as np +from ccpi.optimisation.operators import FiniteDiff + +#%% + +class Gradient(Operator): + + def __init__(self, gm_domain, gm_range=None, bnd_cond = 'Neumann', **kwargs): + + super(Gradient, self).__init__() + + self.gm_domain = gm_domain # Domain of Grad Operator + self.gm_range = gm_range # Range of Grad Operator + self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences + + + if self.gm_range is None: + self.gm_range = ((len(self.gm_domain),)+self.gm_domain) + + # Kwargs Default options + self.memopt = kwargs.get('memopt',False) + self.correlation = kwargs.get('correlation','Space') + + #TODO not tested yet, operator norm??? 
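+        # Editor's note (added commentary, not part of the original commit): for
+        # unit voxel sizes the forward-difference gradient norm is bounded by
+        # sqrt(4*d) for d gradient directions, i.e. sqrt(8) in 2D and sqrt(12)
+        # in 3D; norm() below estimates the same quantity numerically with
+        # PowerMethodNonsquare rather than using this analytic bound.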
+ self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) + + + def direct(self, x, out=None): + + tmp = np.zeros(self.gm_range) + for i in range(len(self.gm_domain)): + tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] +# return type(x)(tmp) + return type(x)(tmp) + + def adjoint(self, x, out=None): + + tmp = np.zeros(self.gm_domain) + for i in range(len(self.gm_domain)): + tmp+=FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).adjoint(x.as_array()[i])/self.voxel_size[i] + return type(x)(-tmp) + + def alloc_domain_dim(self): + return ImageData(np.zeros(self.gm_domain)) + + def alloc_range_dim(self): + return ImageData(np.zeros(self.range_dim)) + + def domain_dim(self): + return self.gm_domain + + def range_dim(self): + return self.gm_range + + def norm(self): +# return np.sqrt(4*len(self.domainDim())) + #TODO this takes time for big ImageData + # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) + x0 = ImageData(np.random.random_sample(self.domain_dim())) + self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + return self.s1 + + +if __name__ == '__main__': + + N, M = (200,300) + ig = (N,M) + G = Gradient(ig) + u = DataContainer(np.random.randint(10, size=G.domain_dim())) + w = DataContainer(np.random.randint(10, size=G.range_dim())) +# w = [DataContainer(np.random.randint(10, size=G.domain_dim())),\ +# DataContainer(np.random.randint(10, size=G.domain_dim()))] + + # domain_dim + print('Domain {}'.format(G.domain_dim())) + + # range_dim + print('Range {}'.format(G.range_dim())) + + # Direct + z = G.direct(u) + + # Adjoint + z1 = G.adjoint(w) + + print(z) + print(z1) + + LHS = (G.direct(u)*w).sum() + RHS = (u * G.adjoint(w)).sum() +# + print(LHS,RHS) + print(G.norm()) + +# print(G.adjoint(G.direct(u))) + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py new file mode 100644 index 0000000..d49cb30 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:30:51 2019 + +@author: evangelos +""" + +from ccpi.optimisation.operators import Operator + + +class Identity(Operator): + + def __init__(self, gm_domain, gm_range=None): + + self.gm_domain = gm_domain + self.gm_range = gm_range + if self.gm_range is None: + self.gm_range = self.gm_domain + + super(Identity, self).__init__() + + def direct(self,x,out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def adjoint(self,x, out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def norm(self): + return 1.0 + + def domain_dim(self): + return self.gm_domain + + def range_dim(self): + return self.gm_range \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py new file mode 100644 index 0000000..d908e49 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 1 22:53:55 2019 + +@author: evangelos +""" + +from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import FiniteDiff +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.framework 
import ImageData, DataContainer +import numpy as np + + +class SymmetrizedGradient(Operator): + + def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs): + + super(SymmetrizedGradient, self).__init__() + + self.gm_domain = gm_domain # Domain of Grad Operator + self.gm_range = gm_range # Range of Grad Operator + self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences + + # Kwargs Default options + self.memopt = kwargs.get('memopt',False) + self.correlation = kwargs.get('correlation','Space') + + #TODO not tested yet, operator norm??? + self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) + + + def direct(self, x, out=None): + + tmp = np.zeros(self.gm_range) + tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) + tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + + FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) + + return type(x)(tmp) + + + def adjoint(self, x, out=None): + + tmp = np.zeros(self.gm_domain) + + tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ + FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + + tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ + FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) + + return type(x)(tmp) + + def alloc_domain_dim(self): + return ImageData(np.zeros(self.gm_domain)) + + def alloc_range_dim(self): + return ImageData(np.zeros(self.range_dim)) + + def domain_dim(self): + return self.gm_domain + + def range_dim(self): + return self.gm_range + + def norm(self): +# return np.sqrt(4*len(self.domainDim())) + #TODO this takes time for big ImageData + # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) + x0 = ImageData(np.random.random_sample(self.domain_dim())) + self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + return self.s1 + + + +if __name__ == '__main__': + + ########################################################################### + ## Symmetrized Gradient + + N, M = 2, 3 + ig = (N,M) + ig2 = (2,) + ig + ig3 = (3,) + ig + u1 = DataContainer(np.random.randint(10, size=ig2)) + w1 = DataContainer(np.random.randint(10, size=ig3)) + + E = SymmetrizedGradient(ig2,ig3) + + d1 = E.direct(u1) + d2 = E.adjoint(w1) + + LHS = (d1.as_array()[0]*w1.as_array()[0] + \ + d1.as_array()[1]*w1.as_array()[1] + \ + 2*d1.as_array()[2]*w1.as_array()[2]).sum() + + RHS = (u1.as_array()[0]*d2.as_array()[0] + \ + u1.as_array()[1]*d2.as_array()[1]).sum() + + + print(LHS, RHS, E.norm()) + + +# + + + + + + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py new file mode 100644 index 0000000..a7c5f09 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/ZeroOperator.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:25:53 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.framework import ImageData +from ccpi.optimisation.operators import Operator + +class ZeroOp(Operator): + + def __init__(self, gm_domain, gm_range): + self.gm_domain = gm_domain + 
self.gm_range = gm_range + super(ZeroOp, self).__init__() + + def direct(self,x,out=None): + if out is None: + return ImageData(np.zeros(self.gm_range)) + else: + return ImageData(np.zeros(self.gm_range)) + + def adjoint(self,x, out=None): + if out is None: + return ImageData(np.zeros(self.gm_domain)) + else: + return ImageData(np.zeros(self.gm_domain)) + + def norm(self): + return 0 + + def domain_dim(self): + return self.gm_domain + + def range_dim(self): + return self.gm_range \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index cc307e0..1e86efc 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -10,3 +10,11 @@ from .LinearOperator import LinearOperator from .ScaledOperator import ScaledOperator from .BlockOperator import BlockOperator from .BlockScaledOperator import BlockScaledOperator + + +from .FiniteDifferenceOperator import FiniteDiff +from .GradientOperator import Gradient +from .SymmetrizedGradientOperator import SymmetrizedGradient +from .IdentityOperator import Identity +from .ZeroOperator import ZeroOp + diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index 630e33e..87930b5 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -34,7 +34,8 @@ setup( packages=['ccpi' , 'ccpi.io', 'ccpi.framework', 'ccpi.optimisation', 'ccpi.optimisation.operators', - 'ccpi.optimisation.algorithms'], + 'ccpi.optimisation.algorithms', + 'ccpi.optimisation.functions'], # Project uses reStructuredText, so ensure that the docutils get # installed or upgraded on the target machine -- cgit v1.2.3 From fa64464acfb747c4e606a28193514711e4eefcc6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 8 Mar 2019 10:52:58 -0500 Subject: removed comment --- .../Python/ccpi/framework/BlockDataContainer.py | 1 - Wrappers/Python/wip/CGLS_tikhonov.py | 25 +++++++++++----------- 2 files changed, 12 insertions(+), 14 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index d509d25..b9f5c5f 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -96,7 +96,6 @@ class BlockDataContainer(object): shape=self.shape) def multiply(self, other, *args, **kwargs): - print ("BlockDataContainer" , other) self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py index f247896..e9bbcd9 100644 --- a/Wrappers/Python/wip/CGLS_tikhonov.py +++ b/Wrappers/Python/wip/CGLS_tikhonov.py @@ -11,8 +11,7 @@ import matplotlib.pyplot as plt import numpy from ccpi.framework import BlockDataContainer from ccpi.optimisation.operators import BlockOperator -from ccpi.optimisation.operators.BlockOperator import BlockLinearOperator - + # Set up phantom size N x N x vert by creating ImageGeometry, initialising the # ImageData object with this geometry and empty array and finally put some # data into its array, and display one slice as image. 
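# --- Editor's note: illustrative sketch, not part of the committed script. ---
# The wip script modified in the next hunk compares CGLS on stacked (block)
# operators with different regularisation weights.  The underlying identity,
# checked here with dense NumPy matrices (all names local to this sketch), is
# that Tikhonov regularisation is ordinary least squares on a stacked system:
#   min_x ||A x - b||^2 + lam^2 ||x||^2   <=>   min_x ||[A; lam*I] x - [b; 0]||^2
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((20, 8))
b = rng.random(20)
lam = 0.5

A_stacked = np.vstack([A, lam * np.eye(8)])
b_stacked = np.concatenate([b, np.zeros(8)])

x_stacked = np.linalg.lstsq(A_stacked, b_stacked, rcond=None)[0]
x_normal = np.linalg.solve(A.T @ A + lam**2 * np.eye(8), A.T @ b)
assert np.allclose(x_stacked, x_normal)
# --- end of editor's note ---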
@@ -128,26 +127,26 @@ simplef.L = 0.00003 gd = GradientDescent( x_init=x_init, objective_function=simplef, rate=simplef.L) -gd.max_iteration = 10 +gd.max_iteration = 50 Kbig.direct(X_init) Kbig.adjoint(B) cg = CGLS() cg.set_up(X_init, Kbig, B ) -cg.max_iteration = 5 +cg.max_iteration = 10 cgsmall = CGLS() cgsmall.set_up(X_init, Ksmall, B ) -cgsmall.max_iteration = 5 +cgsmall.max_iteration = 10 cgs = CGLS() cgs.set_up(x_init, A, b ) -cgs.max_iteration = 6 +cgs.max_iteration = 10 cgok = CGLS() cgok.set_up(X_init, Kok, B ) -cgok.max_iteration = 6 +cgok.max_iteration = 10 # # #out.__isub__(B) #out2 = K.adjoint(out) @@ -176,22 +175,22 @@ cgok.run(10, verbose=True) # print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) # # fig = plt.figure() -plt.subplot(1,6,1) +plt.subplot(2,3,1) plt.imshow(Phantom.subset(vertical=0).as_array()) plt.title('Simulated Phantom') -plt.subplot(1,6,2) +plt.subplot(2,3,2) plt.imshow(gd.get_output().subset(vertical=0).as_array()) plt.title('Simple Gradient Descent') -plt.subplot(1,6,3) +plt.subplot(2,3,3) plt.imshow(cgs.get_output().subset(vertical=0).as_array()) plt.title('Simple CGLS') -plt.subplot(1,6,4) +plt.subplot(2,3,5) plt.imshow(cg.get_output().get_item(0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nbig lambda') -plt.subplot(1,6,5) +plt.subplot(2,3,6) plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nsmall lambda') -plt.subplot(1,6,6) +plt.subplot(2,3,4) plt.imshow(cgok.get_output().get_item(0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nok lambda') plt.show() -- cgit v1.2.3 From 431cc82f3b09c337ec4d46e7c1d21a0a1b0dbc35 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:27:58 -0400 Subject: run default verbose True excludes callback --- Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py index 680b268..cc99344 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py @@ -140,7 +140,7 @@ class Algorithm(object): raise ValueError('Update objective interval must be an integer >= 1') else: raise ValueError('Update objective interval must be an integer >= 1') - def run(self, iterations, verbose=False, callback=None): + def run(self, iterations, verbose=True, callback=None): '''run n iterations and update the user with the callback if specified''' if self.should_stop(): print ("Stop cryterion has been reached.") @@ -149,8 +149,9 @@ class Algorithm(object): if verbose: print ("Iteration {}/{}, objective {}".format(self.iteration, self.max_iteration, self.get_last_objective()) ) - if callback is not None: - callback(self.iteration, self.get_last_objective()) + else: + if callback is not None: + callback(self.iteration, self.get_last_objective()) i += 1 if i == iterations: break -- cgit v1.2.3 From 8c248538a839dd34fb5599bcb126fe8d2f395fa5 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:30:07 -0400 Subject: bugfix in adjoint --- Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 8298c03..21ea104 100755 --- 
a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -91,7 +91,7 @@ class BlockOperator(Operator): Raises: ValueError if the contained Operators are not linear ''' - if not functools.reduce(lambda x,y: x and y, self.operators.is_linear(), True): + if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): raise ValueError('Not all operators in Block are linear.') shape = self.get_output_shape(x.shape, adjoint=True) res = [] -- cgit v1.2.3 From cbb6e2ce2baa3a9c18f1d8ad537f1498348f827d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:31:11 -0400 Subject: add dimension_labels to geometries to allocate on specific axis order --- Wrappers/Python/ccpi/framework/framework.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 1413e21..029a80d 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -54,7 +54,8 @@ class ImageGeometry(object): center_x=0, center_y=0, center_z=0, - channels=1): + channels=1, + dimension_labels=None): self.voxel_num_x = voxel_num_x self.voxel_num_y = voxel_num_y @@ -66,6 +67,7 @@ class ImageGeometry(object): self.center_y = center_y self.center_z = center_z self.channels = channels + self.dimension_labels = dimension_labels def get_min_x(self): return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x @@ -113,6 +115,8 @@ class ImageGeometry(object): return repres def allocate(self, value=0, dimension_labels=None): '''allocates an ImageData according to the size expressed in the instance''' + if dimension_labels is None: + dimension_labels = self.dimension_labels out = ImageData(geometry=self, dimension_labels=dimension_labels) if value != 0: out += value @@ -130,7 +134,8 @@ class AcquisitionGeometry(object): dist_source_center=None, dist_center_detector=None, channels=1, - angle_unit='degree' + angle_unit='degree', + dimension_labels=None ): """ General inputs for standard type projection geometries @@ -171,6 +176,7 @@ class AcquisitionGeometry(object): self.pixel_size_v = pixel_size_v self.channels = channels + self.dimension_labels = dimension_labels def clone(self): '''returns a copy of the AcquisitionGeometry''' @@ -198,6 +204,8 @@ class AcquisitionGeometry(object): return repres def allocate(self, value=0, dimension_labels=None): '''allocates an AcquisitionData according to the size expressed in the instance''' + if dimension_labels is None: + dimension_labels = self.dimension_labels out = AcquisitionData(geometry=self, dimension_labels=dimension_labels) if value != 0: out += value -- cgit v1.2.3 From 78d97a226ede52ccd7386a8bf4097c9f83f6c4a6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:31:54 -0400 Subject: deprecate grad and prox Norm2sq fixes for memopt --- Wrappers/Python/ccpi/optimisation/funcs.py | 45 +++++++++++++++++++----------- 1 file changed, 29 insertions(+), 16 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 99af275..4f84889 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -20,6 +20,7 @@ from ccpi.optimisation.ops import Identity, FiniteDiff2D import numpy from ccpi.framework import DataContainer +import warnings def isSizeCorrect(data1 ,data2): @@ 
-40,8 +41,12 @@ class Function(object): def __init__(self): self.L = None def __call__(self,x, out=None): raise NotImplementedError - def grad(self, x): raise NotImplementedError - def prox(self, x, tau): raise NotImplementedError + def grad(self, x): + warnings.warn("grad method is deprecated. use gradient instead", DeprecationWarning) + return self.gradient(x, out=None) + def prox(self, x, tau): + warnings.warn("prox method is deprecated. use proximal instead", DeprecationWarning) + return self.proximal(x,tau,out=None) def gradient(self, x, out=None): raise NotImplementedError def proximal(self, x, tau, out=None): raise NotImplementedError @@ -141,12 +146,20 @@ class Norm2sq(Function): self.A = A # Should be an operator, default identity self.b = b # Default zero DataSet? self.c = c # Default 1. - self.memopt = memopt if memopt: - #self.direct_placehold = A.adjoint(b) - self.direct_placehold = A.allocate_direct() - self.adjoint_placehold = A.allocate_adjoint() - + try: + self.adjoint_placehold = A.range_geometry().allocate() + self.direct_placehold = A.domain_geometry().allocate() + self.memopt = True + except NameError as ne: + warnings.warn(str(ne)) + self.memopt = False + except NotImplementedError as nie: + print (nie) + warnings.warn(str(nie)) + self.memopt = False + else: + self.memopt = False # Compute the Lipschitz parameter from the operator if possible # Leave it initialised to None otherwise @@ -157,10 +170,9 @@ class Norm2sq(Function): except NotImplementedError as noe: pass - def grad(self,x): - #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - + #def grad(self,x): + # return self.gradient(x, out=None) + def __call__(self,x): #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) #if out is None: @@ -178,12 +190,13 @@ class Norm2sq(Function): self.A.direct(x, out=self.adjoint_placehold) self.adjoint_placehold.__isub__( self.b ) self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold) - self.direct_placehold.__imul__(2.0 * self.c) - # can this be avoided? - out.fill(self.direct_placehold) + #self.direct_placehold.__imul__(2.0 * self.c) + ## can this be avoided? 
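# The memopt branch of Norm2sq.gradient edited in this hunk evaluates
# 2*c*A^T(A x - b) while reusing preallocated buffers instead of creating
# temporaries on every call.  A rough NumPy analogue of that pattern
# (dense A_mat, b_vec and the buffer names are illustrative stand-ins,
# not the ccpi operator API):
import numpy as np
np.random.seed(1)
A_mat, b_vec, c = np.random.randn(6, 4), np.random.randn(6), 1.0
x = np.random.randn(4)
range_buf, out = np.empty(6), np.empty(4)    # allocated once, reused on every gradient call
np.dot(A_mat, x, out=range_buf)              # A x
range_buf -= b_vec                           # A x - b, in place
np.dot(A_mat.T, range_buf, out=out)          # A^T (A x - b)
out *= 2.0 * c                               # 2 c A^T (A x - b)
assert np.allclose(out, 2.0 * c * A_mat.T @ (A_mat @ x - b_vec))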
+ #out.fill(self.direct_placehold) + self.direct_placehold.multiply(2.0*self.c, out=out) else: - return self.grad(x) - + return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + class ZeroFun(Function): -- cgit v1.2.3 From 6a76bd07171ccf4e95372e7d84f6b381aad9e557 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:33:07 -0400 Subject: fix initialisation for memopt --- .../Python/ccpi/optimisation/algorithms/GradientDescent.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py index 7794b4d..f1e4132 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py @@ -51,13 +51,17 @@ class GradientDescent(Algorithm): def set_up(self, x_init, objective_function, rate): '''initialisation of the algorithm''' self.x = x_init.copy() - if self.memopt: - self.x_update = x_init.copy() self.objective_function = objective_function self.rate = rate self.loss.append(objective_function(x_init)) self.iteration = 0 - + try: + self.memopt = self.objective_function.memopt + except AttributeError as ae: + self.memopt = False + if self.memopt: + self.x_update = x_init.copy() + def update(self): '''Single iteration''' if self.memopt: @@ -65,7 +69,7 @@ class GradientDescent(Algorithm): self.x_update *= -self.rate self.x += self.x_update else: - self.x += -self.rate * self.objective_function.grad(self.x) + self.x += -self.rate * self.objective_function.gradient(self.x) def update_objective(self): self.loss.append(self.objective_function(self.x)) -- cgit v1.2.3 From 42f1020414fc9e722484f626e07f5eaf3f689a92 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:34:09 -0400 Subject: change default method to range_geometry and domain_geometry --- Wrappers/Python/ccpi/optimisation/ops.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index e9e7f44..6afb97a 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -49,9 +49,9 @@ class Operator(object): def allocate_adjoint(self): '''Allocates memory on the X space''' raise NotImplementedError - def range_dim(self): + def range_geometry(self): raise NotImplementedError - def domain_dim(self): + def domain_geometry(self): raise NotImplementedError def __rmul__(self, other): '''reverse multiplication of Operator with number sets the variable scalar in the Operator''' @@ -97,7 +97,8 @@ class TomoIdentity(Operator): self.s1 = 1.0 self.geometry = geometry - + def is_linear(self): + return True def direct(self,x,out=None): if out is None: @@ -128,6 +129,10 @@ class TomoIdentity(Operator): raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry)) def allocate_adjoint(self): return self.allocate_direct() + def range_geometry(self): + return self.geometry + def domain_geometry(self): + return self.geometry -- cgit v1.2.3 From b6c6f1187a6c337698401f348c938d6b6dfb29fd Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 11 Mar 2019 13:35:29 -0400 Subject: create and reconstruct tomophantom dataset --- Wrappers/Python/wip/CreatePhantom.py | 242 +++++++++++++++++++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 
Wrappers/Python/wip/CreatePhantom.py (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/CreatePhantom.py b/Wrappers/Python/wip/CreatePhantom.py new file mode 100644 index 0000000..4bf6ea4 --- /dev/null +++ b/Wrappers/Python/wip/CreatePhantom.py @@ -0,0 +1,242 @@ +import numpy +import tomophantom +from tomophantom import TomoP3D +from tomophantom.supp.artifacts import ArtifactsClass as Artifact +import os + +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import CGLS +from ccpi.plugins.ops import CCPiProjectorSimple +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData +from ccpi.optimisation.algorithms import GradientDescent +from ccpi.framework import BlockDataContainer +from ccpi.optimisation.operators import BlockOperator + + +model = 13 # select a model number from tomophantom library +N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom) +path = os.path.dirname(tomophantom.__file__) +path_library3D = os.path.join(path, "Phantom3DLibrary.dat") + +#This will generate a N_size x N_size x N_size phantom (3D) +phantom_tm = TomoP3D.Model(model, N_size, path_library3D) + +# detector column count (horizontal) +detector_horiz = int(numpy.sqrt(2)*N_size) +# detector row count (vertical) (no reason for it to be > N) +detector_vert = N_size +# number of projection angles +angles_num = int(0.5*numpy.pi*N_size) +# angles are expressed in degrees +angles = numpy.linspace(0.0, 179.9, angles_num, dtype='float32') + + +acquisition_data_array = TomoP3D.ModelSino(model, N_size, + detector_horiz, detector_vert, + angles, + path_library3D) + +tomophantom_acquisition_axes_order = ['vertical', 'angle', 'horizontal'] + +artifacts = Artifact(acquisition_data_array) + + +tp_acq_data = AcquisitionData(artifacts.noise(0.2, 'Gaussian'), + dimension_labels=tomophantom_acquisition_axes_order) +#print ("size", acquisition_data.shape) +print ("horiz", detector_horiz) +print ("vert", detector_vert) +print ("angles", angles_num) + +tp_acq_geometry = AcquisitionGeometry(geom_type='parallel', dimension='3D', + angles=angles, + pixel_num_h=detector_horiz, + pixel_num_v=detector_vert, + channels=1, + ) + +acq_data = tp_acq_geometry.allocate() +#print (tp_acq_geometry) +print ("AcquisitionData", acq_data.shape) +print ("TomoPhantom", tp_acq_data.shape, tp_acq_data.dimension_labels) + +default_acquisition_axes_order = ['angle', 'vertical', 'horizontal'] + +acq_data2 = tp_acq_data.subset(dimensions=default_acquisition_axes_order) +print ("AcquisitionData", acq_data2.shape, acq_data2.dimension_labels) +print ("AcquisitionData {} TomoPhantom {}".format(id(acq_data2.as_array()), + id(acquisition_data_array))) + +fig = plt.figure() +plt.subplot(1,2,1) +plt.imshow(acquisition_data_array[20]) +plt.title('Sinogram') +plt.subplot(1,2,2) +plt.imshow(tp_acq_data.as_array()[20]) +plt.title('Sinogram + noise') +plt.show() + +# Set up Operator object combining the ImageGeometry and AcquisitionGeometry +# wrapping calls to CCPi projector. + +ig = ImageGeometry(voxel_num_x=detector_horiz, + voxel_num_y=detector_horiz, + voxel_num_z=detector_vert) +A = CCPiProjectorSimple(ig, tp_acq_geometry) +# Forward and backprojection are available as methods direct and adjoint. 
Here +# generate test data b and some noise + +#b = A.direct(Phantom) +b = acq_data2 + +#z = A.adjoint(b) + + +# Using the test data b, different reconstruction methods can now be set up as +# demonstrated in the rest of this file. In general all methods need an initial +# guess and some algorithm options to be set. Note that 100 iterations for +# some of the methods is a very low number and 1000 or 10000 iterations may be +# needed if one wants to obtain a converged solution. +x_init = ImageData(geometry=ig, + dimension_labels=['horizontal_x','horizontal_y','vertical']) +X_init = BlockDataContainer(x_init) +B = BlockDataContainer(b, + ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical'])) + +# setup a tomo identity +Ibig = 4e1 * TomoIdentity(geometry=ig) +Ismall = 1e-3 * TomoIdentity(geometry=ig) +Iok = 7.6e0 * TomoIdentity(geometry=ig) + +# composite operator +Kbig = BlockOperator(A, Ibig, shape=(2,1)) +Ksmall = BlockOperator(A, Ismall, shape=(2,1)) +Kok = BlockOperator(A, Iok, shape=(2,1)) + +#out = K.direct(X_init) +#x0 = x_init.copy() +#x0.fill(numpy.random.randn(*x0.shape)) +#lipschitz = PowerMethodNonsquare(A, 5, x0) +#print("lipschitz", lipschitz) + +#%% + +simplef = Norm2sq(A, b, memopt=False) +#simplef.L = lipschitz[0]/3000. +simplef.L = 0.00003 + +f = Norm2sq(Kbig,B) +f.L = 0.00003 + +fsmall = Norm2sq(Ksmall,B) +fsmall.L = 0.00003 + +fok = Norm2sq(Kok,B) +fok.L = 0.00003 + +print("setup gradient descent") +gd = GradientDescent( x_init=x_init, objective_function=simplef, + rate=simplef.L) +gd.max_iteration = 5 +simplef2 = Norm2sq(A, b, memopt=True) +#simplef.L = lipschitz[0]/3000. +simplef2.L = 0.00003 +print("setup gradient descent") +gd2 = GradientDescent( x_init=x_init, objective_function=simplef2, + rate=simplef2.L) +gd2.max_iteration = 5 + +Kbig.direct(X_init) +Kbig.adjoint(B) +print("setup CGLS") +cg = CGLS() +cg.set_up(X_init, Kbig, B ) +cg.max_iteration = 10 + +print("setup CGLS") +cgsmall = CGLS() +cgsmall.set_up(X_init, Ksmall, B ) +cgsmall.max_iteration = 10 + + +print("setup CGLS") +cgs = CGLS() +cgs.set_up(x_init, A, b ) +cgs.max_iteration = 10 + +print("setup CGLS") +cgok = CGLS() +cgok.set_up(X_init, Kok, B ) +cgok.max_iteration = 10 +# # +#out.__isub__(B) +#out2 = K.adjoint(out) + +#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) + + +for _ in gd: + print ("GradientDescent iteration {} {}".format(gd.iteration, gd.get_last_loss())) +#gd2.run(5,verbose=True) +print("CGLS block lambda big") +cg.run(10, lambda it,val: print ("CGLS big iteration {} objective {}".format(it,val))) + +print("CGLS standard") +cgs.run(10, lambda it,val: print ("CGLS standard iteration {} objective {}".format(it,val))) + +print("CGLS block lambda small") +cgsmall.run(10, lambda it,val: print ("CGLS small iteration {} objective {}".format(it,val))) +print("CGLS block lambdaok") +cgok.run(10, verbose=True) +# # for _ in cg: +# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss())) +# # +# # fig = plt.figure() +# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array()) +# # plt.title('Composite CGLS') +# # plt.show() +# # +# # for _ in cgs: +# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss())) +# # +Phantom = ImageData(phantom_tm) + +theslice=40 + +fig = plt.figure() +plt.subplot(2,3,1) +plt.imshow(numpy.flip(Phantom.subset(vertical=theslice).as_array(),axis=0), cmap='gray') +plt.clim(0,0.7) +plt.title('Simulated Phantom') +plt.subplot(2,3,2) 
+plt.imshow(gd.get_output().subset(vertical=theslice).as_array(), cmap='gray') +plt.clim(0,0.7) +plt.title('Simple Gradient Descent') +plt.subplot(2,3,3) +plt.imshow(cgs.get_output().subset(vertical=theslice).as_array(), cmap='gray') +plt.clim(0,0.7) +plt.title('Simple CGLS') +plt.subplot(2,3,5) +plt.imshow(cg.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray') +plt.clim(0,0.7) +plt.title('Composite CGLS\nbig lambda') +plt.subplot(2,3,6) +plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray') +plt.clim(0,0.7) +plt.title('Composite CGLS\nsmall lambda') +plt.subplot(2,3,4) +plt.imshow(cgok.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray') +plt.clim(0,0.7) +plt.title('Composite CGLS\nok lambda') +plt.show() + + +#Ibig = 7e1 * TomoIdentity(geometry=ig) +#Kbig = BlockOperator(A, Ibig, shape=(2,1)) +#cg2 = CGLS(x_init=X_init, operator=Kbig, data=B) +#cg2.max_iteration = 10 +#cg2.run(10, verbose=True) -- cgit v1.2.3 From 5da0059afe72a23f5cc814e562f95791c853f13b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 10:23:20 +0000 Subject: removed __pycache__ from git --- .../functions/__pycache__/BlockFunction.cpython-36.pyc | Bin 2398 -> 0 bytes .../__pycache__/FunctionComposition.cpython-36.pyc | Bin 5778 -> 0 bytes .../FunctionOperatorComposition.cpython-36.pyc | Bin 2127 -> 0 bytes .../functions/__pycache__/L1Norm.cpython-36.pyc | Bin 2922 -> 0 bytes .../functions/__pycache__/L2NormSquared.cpython-36.pyc | Bin 3428 -> 0 bytes .../functions/__pycache__/ZeroFun.cpython-36.pyc | Bin 1698 -> 0 bytes .../functions/__pycache__/__init__.cpython-36.pyc | Bin 413 -> 0 bytes .../functions/__pycache__/mixed_L12Norm.cpython-36.pyc | Bin 2294 -> 0 bytes 8 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc deleted file mode 100644 index 660532e..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/BlockFunction.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc deleted file mode 100644 index 075cdfb..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionComposition.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc 
b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc deleted file mode 100644 index f564eff..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/FunctionOperatorComposition.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc deleted file mode 100644 index 4ef959d..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L1Norm.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc deleted file mode 100644 index ad1b296..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/L2NormSquared.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc deleted file mode 100644 index f2bf70f..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/ZeroFun.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 1321257..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc deleted file mode 100644 index d43e3ad..0000000 Binary files a/Wrappers/Python/ccpi/optimisation/functions/__pycache__/mixed_L12Norm.cpython-36.pyc and /dev/null differ -- cgit v1.2.3 From dd909b7aae1126e19003a247454c262c477217d8 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 10:25:25 +0000 Subject: moved test functions file to test dir --- .../ccpi/optimisation/functions/test_functions.py | 474 --------------------- Wrappers/Python/test/test_functions.py | 474 +++++++++++++++++++++ 2 files changed, 474 insertions(+), 474 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/test_functions.py create mode 100644 Wrappers/Python/test/test_functions.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/test_functions.py b/Wrappers/Python/ccpi/optimisation/functions/test_functions.py deleted file mode 100644 index 54a952a..0000000 --- a/Wrappers/Python/ccpi/optimisation/functions/test_functions.py +++ /dev/null @@ -1,474 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Sat Mar 2 19:24:37 2019 - -@author: evangelos -""" - -# -*- coding: utf-8 -*- - -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 - -@author: evangelos -""" - -import numpy as np -from ccpi.optimisation.funcs import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry -from operators import CompositeDataContainer, Identity, CompositeOperator -from numbers import Number -from GradientOperator import Gradient - - -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): - - super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return 
self.alpha * x.power(2).sum() - - def gradient(self,x): - return 2 * self.alpha * x - - def convex_conjugate(self,x): - return (1/(4*self.alpha)) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) - - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/(2*self.alpha) ) - - -############################ L2NORM FUNCTIONS ############################# -class L2NormSq(SimpleL2NormSq): - - def __init__(self, alpha, **kwargs): - - super(L2NormSq, self).__init__(alpha) - self.alpha = alpha - self.b = kwargs.get('b',None) - self.L = 1 - - def __call__(self, x): - - if self.b is None: - return SimpleL2NormSq.__call__(self, x) - else: - return SimpleL2NormSq.__call__(self, x - self.b) - - def gradient(self, x): - - if self.b is None: - return 2*self.alpha * x - else: - return 2*self.alpha * (x - self.b) - - def composition_with(self, operator): - - if self.b is None: - return FunctionComposition(L2NormSq(self.alpha), operator) - else: - return FunctionComposition(L2NormSq(self.alpha, b=self.b), operator) - - def convex_conjugate(self, x): - - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - - - def proximal_conjugate(self, x, tau): - - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - - self.A = kwargs.get('A',None) - self.b = kwargs.get('b',None) - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, self.A.direct(x)) - else: - return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) - - def eval_norm(self, x): - - return SimpleL1Norm.__call__(self, x) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) - - def proximal_conjugate(self, x, tau): - - 
if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, alpha, **kwargs): - - super(mixed_L12Norm, self).__init__() - - self.alpha = alpha - self.b = kwargs.get('b',None) - self.sym_grad = kwargs.get('sym_grad',False) - - def __call__(self,x): - - if self.b is None: - tmp1 = x - else: - tmp1 = x - self.b -# - if self.sym_grad: - tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) - else: - tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() - - return self.alpha*tmp.sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - pass - - def proximal_conjugate(self, x, tau): - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - return res - - def composition_with(self, operator): - - if self.b is None: - return FunctionComposition(mixed_L12Norm(self.alpha), operator) - else: - return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) - - -#%% - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - - if x.shape[0]==1: - return x.maximum(0).sum() - else: - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - - def proximal(self,x,tau): - return x.copy() - - def proximal_conjugate(self, x, tau): - return 0 - - -class CompositeFunction(Function): - - def __init__(self, *functions, blockMatrix): - - self.blockMatrix = blockMatrix - self.functions = functions - self.length = len(self.functions) - - def get_item(self, ind): - return self.functions[ind] - - def __call__(self,x): - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - - def convex_conjugate(self, x): - - z = 0 - t = 0 - for i in range(x.shape[0]): - t += self.functions[z].convex_conjugate(x.get_item(i)) - z += 1 - - return t - - def proximal(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - -# -class FunctionComposition(Function): - - def __init__(self, function, operator): - - self.function = function - self.alpha = self.function.alpha - self.b = self.function.b - self.operator = operator - - - super(FunctionComposition, self).__init__() - - ''' overide call and gradient ''' - def __call__(self, x): - return self.function(x) - - def gradient(self,x): - return 
self.operator.adjoint(self.function.gradient(self.operator.direct(x))) - - ''' Same as in the parent class''' - def proximal(self,x, tau): - return self.function.proximal(x, tau) - - def proximal_conjugate(self,x, tau): - return self.function.proximal_conjugate(x, tau) - - def convex_conjugate(self,x): - return self.function.convex_conjugate(x) - - -class FunctionComposition_new(Function): - - def __init__(self, operator, *functions): - - self.functions = functions - self.operator = operator - self.length = len(self.functions) - -# if self.length==1: -# self.L = self.functions[0].alpha*(self.operator.norm()**2) - - # length == to operator.shape[0]# - super(FunctionComposition_new, self).__init__() - - def __call__(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - def convex_conjugate(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - def proximal(self, x, tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) - - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - -if __name__ == '__main__': - - N = 3 - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - - # Form Composite Operator - operator = CompositeOperator((2,1), op1, op2 ) - - # Create functions - noisy_data = ImageData(np.random.randint(10, size=ag)) - - d = ImageData(np.random.randint(10, size=ag)) - - f = mixed_L12Norm(alpha = 1).composition_with(op1) - g = L2NormSq(alpha=0.5, b=noisy_data) - - # Compare call of f - a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum() - print(a1, f(d)) - - # Compare call of g - a2 = g.alpha*(d - noisy_data).power(2).sum() - print(a2, g(d)) - - # Compare convex conjugate of g - a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() - print( a3, g.convex_conjugate(d)) - - - - - -# -# f1 = L2NormSq(alpha=1, b=noisy_data) -# print(f1(noisy_data)) -# -# f2 = L2NormSq(alpha=5, b=noisy_data).composition_with(op2) -# print(f2(noisy_data)) -# -# print(f1.gradient(noisy_data).as_array()) -# print(f2.gradient(noisy_data).as_array()) -## -# print(f1.proximal(noisy_data,1).as_array()) -# print(f2.proximal(noisy_data,1).as_array()) -# -# -# f3 = mixed_L12Norm(alpha = 1).composition_with(op1) -# print(f3(noisy_data)) -# -# print(ImageData(op1.direct(noisy_data).power(2).sum(axis=0)).sqrt().sum()) -# -# print( 5*(op2.direct(d) - noisy_data).power(2).sum(), f2(d)) -# -# from functions import mixed_L12Norm as mixed_L12Norm_old -# -# print(mixed_L12Norm_old(op1,None,alpha)(noisy_data)) - - - # - - diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py new file mode 100644 index 0000000..54a952a --- /dev/null +++ b/Wrappers/Python/test/test_functions.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sat Mar 2 19:24:37 2019 + +@author: evangelos +""" 
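# The SimpleL2NormSq class re-created below implements the closed forms for
# f(x) = alpha*||x||_2^2: its proximal map is x/(1 + 2*tau*alpha) and the
# proximal map of the convex conjugate f*(x) = ||x||^2/(4*alpha) is
# x/(1 + tau/(2*alpha)).  A small NumPy check, independent of the ccpi
# classes and with tau = 1, that these two maps satisfy Moreau's identity
# prox_f(x) + prox_f*(x) = x:
import numpy as np
np.random.seed(0)
alpha, x = 0.5, np.random.randn(5)
prox_f = x / (1.0 + 2.0 * alpha)               # proximal of alpha*||.||^2, tau = 1
prox_fstar = x / (1.0 + 1.0 / (2.0 * alpha))   # proximal of the conjugate, tau = 1
assert np.allclose(prox_f + prox_fstar, x)     # Moreau decomposition holds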
+ +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Feb 7 13:10:56 2019 + +@author: evangelos +""" + +import numpy as np +from ccpi.optimisation.funcs import Function +from ccpi.framework import DataContainer, ImageData, ImageGeometry +from operators import CompositeDataContainer, Identity, CompositeOperator +from numbers import Number +from GradientOperator import Gradient + + +class SimpleL2NormSq(Function): + + def __init__(self, alpha=1): + + super(SimpleL2NormSq, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.power(2).sum() + + def gradient(self,x): + return 2 * self.alpha * x + + def convex_conjugate(self,x): + return (1/(4*self.alpha)) * x.power(2).sum() + + def proximal(self, x, tau): + return x.divide(1+2*tau*self.alpha) + + def proximal_conjugate(self, x, tau): + return x.divide(1 + tau/(2*self.alpha) ) + + +############################ L2NORM FUNCTIONS ############################# +class L2NormSq(SimpleL2NormSq): + + def __init__(self, alpha, **kwargs): + + super(L2NormSq, self).__init__(alpha) + self.alpha = alpha + self.b = kwargs.get('b',None) + self.L = 1 + + def __call__(self, x): + + if self.b is None: + return SimpleL2NormSq.__call__(self, x) + else: + return SimpleL2NormSq.__call__(self, x - self.b) + + def gradient(self, x): + + if self.b is None: + return 2*self.alpha * x + else: + return 2*self.alpha * (x - self.b) + + def composition_with(self, operator): + + if self.b is None: + return FunctionComposition(L2NormSq(self.alpha), operator) + else: + return FunctionComposition(L2NormSq(self.alpha, b=self.b), operator) + + def convex_conjugate(self, x): + + ''' The convex conjugate corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + ''' + + if self.b is None: + return SimpleL2NormSq.convex_conjugate(self, x) + else: + return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + ''' The proximal operator corresponds to the simple functional i.e., + f(x) = alpha * ||x - b||_{2}^{2} + + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if self.b is None: + return SimpleL2NormSq.proximal(self, x, tau) + else: + return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + + + def proximal_conjugate(self, x, tau): + + ''' The proximal operator corresponds to the simple convex conjugate + functional i.e., f^{*}(x^{) + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + if self.b is None: + return SimpleL2NormSq.proximal_conjugate(self, x, tau) + else: + return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) + + +############################ L1NORM FUNCTIONS ############################# +class SimpleL1Norm(Function): + + def __init__(self, alpha=1): + + super(SimpleL1Norm, self).__init__() + self.alpha = alpha + + def __call__(self, x): + return self.alpha * x.abs().sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + ''' Soft Threshold''' + return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) + + def proximal_conjugate(self, x, tau): + return x.divide((x.abs()/self.alpha).maximum(1.0)) + +class L1Norm(SimpleL1Norm): + + def __init__(self, alpha=1, **kwargs): + + super(L1Norm, self).__init__() + self.alpha = alpha + + self.A = kwargs.get('A',None) + self.b = kwargs.get('b',None) + + def __call__(self, x): + + if self.b is None: + return SimpleL1Norm.__call__(self, self.A.direct(x)) + else: + 
return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) + + def eval_norm(self, x): + + return SimpleL1Norm.__call__(self, x) + + def gradient(self, x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + if self.b is None: + return SimpleL1Norm.convex_conjugate(self, x) + else: + return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() + + def proximal(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal(self, x, tau) + else: + return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) + + def proximal_conjugate(self, x, tau): + + if self.b is None: + return SimpleL1Norm.proximal_conjugate(self, x, tau) + else: + return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) + + +############################ mixed_L1,2NORM FUNCTIONS ############################# +class mixed_L12Norm(Function): + + def __init__(self, alpha, **kwargs): + + super(mixed_L12Norm, self).__init__() + + self.alpha = alpha + self.b = kwargs.get('b',None) + self.sym_grad = kwargs.get('sym_grad',False) + + def __call__(self,x): + + if self.b is None: + tmp1 = x + else: + tmp1 = x - self.b +# + if self.sym_grad: + tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) + else: + tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() + + return self.alpha*tmp.sum() + + def gradient(self,x): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + return 0 + + def proximal(self, x, tau): + pass + + def proximal_conjugate(self, x, tau): + + if self.sym_grad: + tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha + res = x.divide(ImageData(tmp2).maximum(1.0)) + else: + res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) + + return res + + def composition_with(self, operator): + + if self.b is None: + return FunctionComposition(mixed_L12Norm(self.alpha), operator) + else: + return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) + + +#%% + +class ZeroFun(Function): + + def __init__(self): + super(ZeroFun, self).__init__() + + def __call__(self,x): + return 0 + + def convex_conjugate(self, x): + ''' This is the support function sup which in fact is the + indicator function for the set = {0} + So 0 if x=0, or inf if x neq 0 + ''' + + if x.shape[0]==1: + return x.maximum(0).sum() + else: + return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() + + def proximal(self,x,tau): + return x.copy() + + def proximal_conjugate(self, x, tau): + return 0 + + +class CompositeFunction(Function): + + def __init__(self, *functions, blockMatrix): + + self.blockMatrix = blockMatrix + self.functions = functions + self.length = len(self.functions) + + def get_item(self, ind): + return self.functions[ind] + + def __call__(self,x): + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) + return t + + + def convex_conjugate(self, x): + + z = 0 + t = 0 + for i in range(x.shape[0]): + t += self.functions[z].convex_conjugate(x.get_item(i)) + z += 1 + + return t + + def proximal(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(tau, Number): + tau = CompositeDataContainer(tau) + if isinstance(x, ImageData): + x = CompositeDataContainer(x) 
+ out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) + return CompositeDataContainer(*out) + +# +class FunctionComposition(Function): + + def __init__(self, function, operator): + + self.function = function + self.alpha = self.function.alpha + self.b = self.function.b + self.operator = operator + + + super(FunctionComposition, self).__init__() + + ''' overide call and gradient ''' + def __call__(self, x): + return self.function(x) + + def gradient(self,x): + return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) + + ''' Same as in the parent class''' + def proximal(self,x, tau): + return self.function.proximal(x, tau) + + def proximal_conjugate(self,x, tau): + return self.function.proximal_conjugate(x, tau) + + def convex_conjugate(self,x): + return self.function.convex_conjugate(x) + + +class FunctionComposition_new(Function): + + def __init__(self, operator, *functions): + + self.functions = functions + self.operator = operator + self.length = len(self.functions) + +# if self.length==1: +# self.L = self.functions[0].alpha*(self.operator.norm()**2) + + # length == to operator.shape[0]# + super(FunctionComposition_new, self).__init__() + + def __call__(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) + return t + + def convex_conjugate(self, x): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + t = 0 + for i in range(x.shape[0]): + t += self.functions[i].convex_conjugate(x.get_item(i)) + return t + + def proximal_conjugate(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + def proximal(self, x, tau, out = None): + + if isinstance(x, ImageData): + x = CompositeDataContainer(x) + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + + if self.length==1: + return ImageData(*out) + else: + return CompositeDataContainer(*out) + + +if __name__ == '__main__': + + N = 3 + ig = (N,N) + ag = ig + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = CompositeOperator((2,1), op1, op2 ) + + # Create functions + noisy_data = ImageData(np.random.randint(10, size=ag)) + + d = ImageData(np.random.randint(10, size=ag)) + + f = mixed_L12Norm(alpha = 1).composition_with(op1) + g = L2NormSq(alpha=0.5, b=noisy_data) + + # Compare call of f + a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum() + print(a1, f(d)) + + # Compare call of g + a2 = g.alpha*(d - noisy_data).power(2).sum() + print(a2, g(d)) + + # Compare convex conjugate of g + a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() + print( a3, g.convex_conjugate(d)) + + + + + +# +# f1 = L2NormSq(alpha=1, b=noisy_data) +# print(f1(noisy_data)) +# +# f2 = L2NormSq(alpha=5, b=noisy_data).composition_with(op2) +# print(f2(noisy_data)) +# +# print(f1.gradient(noisy_data).as_array()) +# print(f2.gradient(noisy_data).as_array()) +## +# print(f1.proximal(noisy_data,1).as_array()) +# print(f2.proximal(noisy_data,1).as_array()) +# +# +# f3 = mixed_L12Norm(alpha = 1).composition_with(op1) +# print(f3(noisy_data)) +# +# 
print(ImageData(op1.direct(noisy_data).power(2).sum(axis=0)).sqrt().sum()) +# +# print( 5*(op2.direct(d) - noisy_data).power(2).sum(), f2(d)) +# +# from functions import mixed_L12Norm as mixed_L12Norm_old +# +# print(mixed_L12Norm_old(op1,None,alpha)(noisy_data)) + + + # + + -- cgit v1.2.3 From 5338f1c6936584c867107a6512f204b2b3778afc Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 11:26:38 +0000 Subject: refactoring add unittest --- .../ccpi/optimisation/functions/BlockFunction.py | 3 +- .../functions/FunctionOperatorComposition.py | 3 +- .../Python/ccpi/optimisation/functions/L1Norm.py | 7 +- .../ccpi/optimisation/functions/L2NormSquared.py | 3 +- .../Python/ccpi/optimisation/functions/ZeroFun.py | 10 +- .../Python/ccpi/optimisation/functions/__init__.py | 2 +- .../ccpi/optimisation/functions/mixed_L12Norm.py | 9 +- Wrappers/Python/test/test_functions.py | 459 ++------------------- 8 files changed, 67 insertions(+), 429 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index bc9b62d..d6c98c4 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -7,7 +7,8 @@ Created on Fri Mar 8 10:01:31 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer class BlockFunction(Function): diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py index a67b884..0f3defe 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -7,7 +7,8 @@ Created on Fri Mar 8 09:55:36 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function class FunctionOperatorComposition(Function): diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py index ec7aa5b..f83de6f 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py @@ -7,12 +7,13 @@ Created on Wed Mar 6 19:42:34 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry ############################ L1NORM FUNCTIONS ############################# -class SimpleL1NormEdo(Function): +class SimpleL1Norm(Function): def __init__(self, alpha=1): @@ -35,7 +36,7 @@ class SimpleL1NormEdo(Function): def proximal_conjugate(self, x, tau): return x.divide((x.abs()/self.alpha).maximum(1.0)) -class L1Norm(SimpleL1NormEdo): +class L1Norm(SimpleL1Norm): def __init__(self, alpha=1, **kwargs): diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 8b73be6..5817317 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -9,7 +9,8 @@ Created on Thu Feb 7 13:10:56 2019 """ import numpy as np 
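# The hunks in this commit move the Function base class import from
# ccpi.optimisation.funcs to the new ccpi.optimisation.functions package.
# A minimal plain-Python stand-in sketching the interface such a function
# object exposes in these patches (__call__, gradient and proximal with an
# optional out argument); the name MyQuadratic and its details are
# illustrative only and do not subclass the library's own Function:
import numpy as np

class MyQuadratic(object):
    '''0.5*||x||^2 with the __call__/gradient/proximal interface used here'''
    def __init__(self):
        self.L = 1.0                          # Lipschitz constant of the gradient
    def __call__(self, x):
        return 0.5 * float(np.sum(x * x))
    def gradient(self, x, out=None):
        if out is None:
            return x.copy()                   # gradient of 0.5*||x||^2 is x
        out[...] = x
    def proximal(self, x, tau, out=None):
        res = x / (1.0 + tau)                 # argmin_u 0.5*||u||^2 + ||u - x||^2/(2*tau)
        if out is None:
            return res
        out[...] = res

f = MyQuadratic()
x = np.ones(4)
assert np.isclose(f(x), 2.0) and np.allclose(f.proximal(x, 1.0), 0.5 * x)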
-from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py index b41cc26..9def741 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py @@ -7,7 +7,8 @@ Created on Wed Mar 6 19:44:10 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData from ccpi.framework import BlockDataContainer @@ -33,8 +34,11 @@ class ZeroFun(Function): else: return x.maximum(0).sum() + x.maximum(0).sum() - def proximal(self,x,tau): - return x.copy() + def proximal(self,x,tau, out=None): + if out is None: + return x.copy() + else: + out.fill(x) def proximal_conjugate(self, x, tau): return 0 \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 7ce617a..c4ba0a6 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- - +from .Function import Function from .ZeroFun import ZeroFun from .L1Norm import * from .L2NormSquared import * diff --git a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py index 0ce1d28..8fe8620 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py @@ -7,9 +7,10 @@ Created on Wed Mar 6 19:43:12 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry - +from ccpi.optimisation.functions.FunctionOperatorComposition import FunctionOperatorComposition ############################ mixed_L1,2NORM FUNCTIONS ############################# @@ -59,7 +60,7 @@ class mixed_L12Norm(Function): def composition_with(self, operator): if self.b is None: - return FunctionComposition(mixed_L12Norm(self.alpha), operator) + return FunctionOperatorComposition(operator, mixed_L12Norm(self.alpha)) else: - return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) + return FunctionOperatorComposition(operator, mixed_L12Norm(self.alpha, b=self.b)) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 54a952a..0741d1c 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -6,438 +6,67 @@ Created on Sat Mar 2 19:24:37 2019 @author: evangelos """ -# -*- coding: utf-8 -*- - -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 - -@author: evangelos -""" import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry -from operators import CompositeDataContainer, Identity, CompositeOperator +from ccpi.optimisation.operators import Identity +from ccpi.optimisation.operators 
import BlockOperator +from ccpi.framework import BlockDataContainer from numbers import Number -from GradientOperator import Gradient - - -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): - - super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.power(2).sum() - - def gradient(self,x): - return 2 * self.alpha * x - - def convex_conjugate(self,x): - return (1/(4*self.alpha)) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) - - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/(2*self.alpha) ) - - -############################ L2NORM FUNCTIONS ############################# -class L2NormSq(SimpleL2NormSq): - - def __init__(self, alpha, **kwargs): - - super(L2NormSq, self).__init__(alpha) - self.alpha = alpha - self.b = kwargs.get('b',None) - self.L = 1 - - def __call__(self, x): - - if self.b is None: - return SimpleL2NormSq.__call__(self, x) - else: - return SimpleL2NormSq.__call__(self, x - self.b) - - def gradient(self, x): - - if self.b is None: - return 2*self.alpha * x - else: - return 2*self.alpha * (x - self.b) - - def composition_with(self, operator): - - if self.b is None: - return FunctionComposition(L2NormSq(self.alpha), operator) - else: - return FunctionComposition(L2NormSq(self.alpha, b=self.b), operator) - - def convex_conjugate(self, x): - - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - - - def proximal_conjugate(self, x, tau): - - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - - self.A = kwargs.get('A',None) - self.b = kwargs.get('b',None) - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, self.A.direct(x)) - else: - return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) - - def eval_norm(self, x): - - return SimpleL1Norm.__call__(self, x) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return 
SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, alpha, **kwargs): +from ccpi.optimisation.operators import Gradient - super(mixed_L12Norm, self).__init__() - - self.alpha = alpha - self.b = kwargs.get('b',None) - self.sym_grad = kwargs.get('sym_grad',False) - - def __call__(self,x): - - if self.b is None: - tmp1 = x - else: - tmp1 = x - self.b -# - if self.sym_grad: - tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) - else: - tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() - - return self.alpha*tmp.sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - pass - - def proximal_conjugate(self, x, tau): - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - return res - - def composition_with(self, operator): - - if self.b is None: - return FunctionComposition(mixed_L12Norm(self.alpha), operator) - else: - return FunctionComposition(mixed_L12Norm(self.alpha, b=self.b), operator) - +from ccpi.optimisation.functions import SimpleL2NormSq +from ccpi.optimisation.functions import L2NormSq +from ccpi.optimisation.functions import SimpleL1Norm +from ccpi.optimisation.functions import L1Norm +# from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq +# from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm +from ccpi.optimisation.functions import mixed_L12Norm +from ccpi.optimisation.functions import ZeroFun -#%% - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - - if x.shape[0]==1: - return x.maximum(0).sum() - else: - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - - def proximal(self,x,tau): - return x.copy() - - def proximal_conjugate(self, x, tau): - return 0 - - -class CompositeFunction(Function): - - def __init__(self, *functions, blockMatrix): - - self.blockMatrix = blockMatrix - self.functions = functions - self.length = len(self.functions) - - def get_item(self, ind): - return self.functions[ind] - - def __call__(self,x): - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - - def convex_conjugate(self, x): - - z = 0 - t = 0 - for i in range(x.shape[0]): - t += self.functions[z].convex_conjugate(x.get_item(i)) - z += 1 - - return t - - def proximal(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = 
self.functions[i].proximal(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) +from ccpi.optimisation.functions import FunctionOperatorComposition +import unittest - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - # -class FunctionComposition(Function): - - def __init__(self, function, operator): - - self.function = function - self.alpha = self.function.alpha - self.b = self.function.b - self.operator = operator - - - super(FunctionComposition, self).__init__() - - ''' overide call and gradient ''' - def __call__(self, x): - return self.function(x) - - def gradient(self,x): - return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) - - ''' Same as in the parent class''' - def proximal(self,x, tau): - return self.function.proximal(x, tau) - - def proximal_conjugate(self,x, tau): - return self.function.proximal_conjugate(x, tau) - def convex_conjugate(self,x): - return self.function.convex_conjugate(x) - -class FunctionComposition_new(Function): +class TestFunction(unittest.TestCase): + def test_Function(self): + - def __init__(self, operator, *functions): + N = 3 + ig = (N,N) + ag = ig + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator((2,1), op1, op2 ) - self.functions = functions - self.operator = operator - self.length = len(self.functions) + # Create functions + noisy_data = ImageData(np.random.randint(10, size=ag)) -# if self.length==1: -# self.L = self.functions[0].alpha*(self.operator.norm()**2) - - # length == to operator.shape[0]# - super(FunctionComposition_new, self).__init__() - - def __call__(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) + d = ImageData(np.random.randint(10, size=ag)) - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - def convex_conjugate(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) + f = mixed_L12Norm(alpha = 1).composition_with(op1) + g = L2NormSq(alpha=0.5, b=noisy_data) - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + # Compare call of f + a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum() + #print(a1, f(d)) + self.assertEqual (a1, f(d)) - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - def proximal(self, x, tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) + # Compare call of g + a2 = g.alpha*(d - noisy_data).power(2).sum() + #print(a2, g(d)) + self.assertEqual(a2, g(d)) - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - -if __name__ == '__main__': - - N = 3 - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - - # Form Composite Operator - operator = CompositeOperator((2,1), op1, op2 ) - - # 
Create functions - noisy_data = ImageData(np.random.randint(10, size=ag)) - - d = ImageData(np.random.randint(10, size=ag)) - - f = mixed_L12Norm(alpha = 1).composition_with(op1) - g = L2NormSq(alpha=0.5, b=noisy_data) - - # Compare call of f - a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum() - print(a1, f(d)) - - # Compare call of g - a2 = g.alpha*(d - noisy_data).power(2).sum() - print(a2, g(d)) - - # Compare convex conjugate of g - a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() - print( a3, g.convex_conjugate(d)) + # Compare convex conjugate of g + a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() + self.assertEqual(a3, g.convex_conjugate(d)) + #print( a3, g.convex_conjugate(d)) -- cgit v1.2.3 From 2dabf103a7d56de9afcf252dab881b097a9cd259 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 11:37:06 +0000 Subject: added corrections to files which shoudl go? --- .../optimisation/functions/FunctionComposition.py | 60 ++-------------------- .../ccpi/optimisation/functions/functions.py | 3 +- 2 files changed, 5 insertions(+), 58 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py index 5b6defc..f24dc10 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py @@ -7,9 +7,11 @@ Created on Wed Mar 6 19:45:06 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry from ccpi.framework import BlockDataContainer +from ccpi.optimisation.functions import BlockFunction class FunctionOperatorComposition(Function): @@ -51,63 +53,7 @@ class FunctionOperatorComposition(Function): return self.adjoint(self.function.gradient(self.operator.direct(x))) -class BlockFunction(Function): - - def __init__(self, operator, *functions): - - self.functions = functions - self.operator = operator - self.length = len(self.functions) - - super(BlockFunction, self).__init__() - - def __call__(self, x): - - tmp = operator.direct(x) - - t = 0 - for i in range(tmp.shape[0]): - t += self.functions[i](tmp.get_item(i)) - return t - - def call_adjoint(self, x): - - tmp = operator.adjoint(x) - - t = 0 - for i in range(tmp.shape[0]): - t += self.functions[i](tmp.get_item(i)) - return t - - def convex_conjugate(self, x): - - ''' Convex_conjugate does not take into account the BlockOperator''' - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - - def proximal_conjugate(self, x, tau, out = None): - - ''' proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - - return CompositeDataContainer(*out) - - def proximal(self, x, tau, out = None): - - ''' proximal does not take into account the BlockOperator''' - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) - return CompositeDataContainer(*out) - - def gradient(self,x, out=None): - pass class FunctionComposition_new(Function): diff --git a/Wrappers/Python/ccpi/optimisation/functions/functions.py b/Wrappers/Python/ccpi/optimisation/functions/functions.py index f40abb9..8632920 100644 --- 
a/Wrappers/Python/ccpi/optimisation/functions/functions.py +++ b/Wrappers/Python/ccpi/optimisation/functions/functions.py @@ -9,7 +9,8 @@ Created on Thu Feb 7 13:10:56 2019 """ import numpy as np -from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.funcs import Function +from ccpi.optimisation.functions import Function from ccpi.framework import DataContainer, ImageData, ImageGeometry from operators import CompositeDataContainer, Identity, CompositeOperator from numbers import Number -- cgit v1.2.3 From 2101861fa2075fa12abb0f0d4dcccd64e15c1853 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 11:47:31 +0000 Subject: line endings --- Wrappers/Python/ccpi/processors.py | 1026 ++++++++++++++++++------------------ 1 file changed, 513 insertions(+), 513 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py index 6a9057a..611c8c6 100755 --- a/Wrappers/Python/ccpi/processors.py +++ b/Wrappers/Python/ccpi/processors.py @@ -1,514 +1,514 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ - AcquisitionGeometry, ImageGeometry, ImageData -from ccpi.reconstruction.parallelbeam import alg as pbalg -import numpy -from scipy import ndimage - -import matplotlib.pyplot as plt - - -class Normalizer(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): - kwargs = { - 'flat_field' : flat_field, - 'dark_field' : dark_field, - # very small number. 
Used when there is a division by zero - 'tolerance' : tolerance - } - - #DataProcessor.__init__(self, **kwargs) - super(Normalizer, self).__init__(**kwargs) - if not flat_field is None: - self.set_flat_field(flat_field) - if not dark_field is None: - self.set_dark_field(dark_field) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3 or\ - dataset.number_of_dimensions == 2: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def set_dark_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Dark Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.dark_field = df - elif issubclass(type(df), DataContainer): - self.dark_field = self.set_dark_field(df.as_array()) - - def set_flat_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Flat Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.flat_field = df - elif issubclass(type(df), DataContainer): - self.flat_field = self.set_flat_field(df.as_array()) - - @staticmethod - def normalize_projection(projection, flat, dark, tolerance): - a = (projection - dark) - b = (flat-dark) - with numpy.errstate(divide='ignore', invalid='ignore'): - c = numpy.true_divide( a, b ) - c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0 - return c - - @staticmethod - def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): - '''returns the estimated relative error of the normalised projection - - n = (projection - dark) / (flat - dark) - Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) - ''' - a = (projection - dark) - b = (flat-dark) - df = delta_flat / flat - dd = delta_dark / dark - rel_norm_error = (b + a) / (b * a) * (df + dd) - return rel_norm_error - - def process(self): - - projections = self.get_input() - dark = self.dark_field - flat = self.flat_field - - if projections.number_of_dimensions == 3: - if not (projections.shape[1:] == dark.shape and \ - projections.shape[1:] == flat.shape): - raise ValueError('Flats/Dark and projections size do not match.') - - - a = numpy.asarray( - [ Normalizer.normalize_projection( - projection, flat, dark, self.tolerance) \ - for projection in projections.as_array() ] - ) - elif projections.number_of_dimensions == 2: - a = Normalizer.normalize_projection(projections.as_array(), - flat, dark, self.tolerance) - y = type(projections)( a , True, - dimension_labels=projections.dimension_labels, - geometry=projections.geometry) - return y - - -class CenterOfRotationFinder(DataProcessor): - '''Processor to find the center of rotation in a parallel beam experiment - - This processor read in a AcquisitionDataSet and finds the center of rotation - based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078 - - Input: AcquisitionDataSet - - Output: float. 
center of rotation in pixel coordinate - ''' - - def __init__(self): - kwargs = { - - } - - #DataProcessor.__init__(self, **kwargs) - super(CenterOfRotationFinder, self).__init__(**kwargs) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3: - if dataset.geometry.geom_type == 'parallel': - return True - else: - raise ValueError('{0} is suitable only for parallel beam geometry'\ - .format(self.__class__.__name__)) - else: - raise ValueError("Expected input dimensions is 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - - # ######################################################################### - # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. # - # # - # Copyright 2015. UChicago Argonne, LLC. This software was produced # - # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # - # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # - # U.S. Department of Energy. The U.S. Government has rights to use, # - # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # - # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # - # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is # - # modified to produce derivative works, such modified software should # - # be clearly marked, so as not to confuse it with the version available # - # from ANL. # - # # - # Additionally, redistribution and use in source and binary forms, with # - # or without modification, are permitted provided that the following # - # conditions are met: # - # # - # * Redistributions of source code must retain the above copyright # - # notice, this list of conditions and the following disclaimer. # - # # - # * Redistributions in binary form must reproduce the above copyright # - # notice, this list of conditions and the following disclaimer in # - # the documentation and/or other materials provided with the # - # distribution. # - # # - # * Neither the name of UChicago Argonne, LLC, Argonne National # - # Laboratory, ANL, the U.S. Government, nor the names of its # - # contributors may be used to endorse or promote products derived # - # from this software without specific prior written permission. # - # # - # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # - # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # - # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # - # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # - # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # - # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # - # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # - # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # - # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # - # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # - # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # - # POSSIBILITY OF SUCH DAMAGE. 
# - # ######################################################################### - - @staticmethod - def as_ndarray(arr, dtype=None, copy=False): - if not isinstance(arr, numpy.ndarray): - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_dtype(arr, dtype, copy=False): - if not arr.dtype == dtype: - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_float32(arr): - arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32) - return CenterOfRotationFinder.as_dtype(arr, numpy.float32) - - - - - @staticmethod - def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5, - ratio=2., drop=20): - """ - Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. - - Parameters - ---------- - tomo : ndarray - 3D tomographic data. - ind : int, optional - Index of the slice to be used for reconstruction. - smin, smax : int, optional - Reference to the horizontal center of the sinogram. - srad : float, optional - Fine search radius. - step : float, optional - Step of fine searching. - ratio : float, optional - The ratio between the FOV of the camera and the size of object. - It's used to generate the mask. - drop : int, optional - Drop lines around vertical center of the mask. - - Returns - ------- - float - Rotation axis location. - - Notes - ----- - The function may not yield a correct estimate, if: - - - the sample size is bigger than the field of view of the camera. - In this case the ``ratio`` argument need to be set larger - than the default of 2.0. - - - there is distortion in the imaging hardware. If there's - no correction applied, the center of the projection image may - yield a better estimate. - - - the sample contrast is weak. Paganin's filter need to be applied - to overcome this. - - - the sample was changed during the scan. - """ - tomo = CenterOfRotationFinder.as_float32(tomo) - - if ind is None: - ind = tomo.shape[1] // 2 - _tomo = tomo[:, ind, :] - - - - # Reduce noise by smooth filters. Use different filters for coarse and fine search - _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) - _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) - - # Coarse and fine searches for finding the rotation center. - if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k) - #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :] - #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop) - #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin, - smax, ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - else: - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, - smin, smax, - ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - - #logger.debug('Rotation center search finished: %i', fine_cen) - return fine_cen - - - @staticmethod - def _search_coarse(sino, smin, smax, ratio, drop): - """ - Coarse search for finding the rotation center. 
- """ - (Nrow, Ncol) = sino.shape - centerfliplr = (Ncol - 1.0) / 2.0 - - # Copy the sinogram and flip left right, the purpose is to - # make a full [0;2Pi] sinogram - _copy_sino = numpy.fliplr(sino[1:]) - - # This image is used for compensating the shift of sinogram 2 - temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32') - temp_img[:] = sino[-1] - - # Start coarse search in which the shift step is 1 - listshift = numpy.arange(smin, smax + 1) - listmetric = numpy.zeros(len(listshift), dtype='float32') - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol, - 0.5 * ratio * Ncol, drop) - for i in listshift: - _sino = numpy.roll(_copy_sino, i, axis=1) - if i >= 0: - _sino[:, 0:i] = temp_img[:, 0:i] - else: - _sino[:, i:] = temp_img[:, i:] - listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # numpy.vstack((sino, _sino))) - numpy.fft.fft2(numpy.vstack((sino, _sino))) - )) * mask) - minpos = numpy.argmin(listmetric) - return centerfliplr + listshift[minpos] / 2.0 - - @staticmethod - def _search_fine(sino, srad, step, init_cen, ratio, drop): - """ - Fine search for finding the rotation center. - """ - Nrow, Ncol = sino.shape - centerfliplr = (Ncol + 1.0) / 2.0 - 1.0 - # Use to shift the sinogram 2 to the raw CoR. - shiftsino = numpy.int16(2 * (init_cen - centerfliplr)) - _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1) - if init_cen <= centerfliplr: - lefttake = numpy.int16(numpy.ceil(srad + 1)) - righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1)) - else: - lefttake = numpy.int16(numpy.ceil( - init_cen - (Ncol - 1 - init_cen) + srad + 1)) - righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1)) - Ncol1 = righttake - lefttake + 1 - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1, - 0.5 * ratio * Ncol, drop) - numshift = numpy.int16((2 * srad) / step) + 1 - listshift = numpy.linspace(-srad, srad, num=numshift) - listmetric = numpy.zeros(len(listshift), dtype='float32') - factor1 = numpy.mean(sino[-1, lefttake:righttake]) - num1 = 0 - for i in listshift: - _sino = ndimage.interpolation.shift( - _copy_sino, (0, i), prefilter=False) - factor2 = numpy.mean(_sino[0,lefttake:righttake]) - _sino = _sino * factor1 / factor2 - sinojoin = numpy.vstack((sino, _sino)) - listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # sinojoin[:, lefttake:righttake + 1]) - numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1]) - )) * mask) - num1 = num1 + 1 - minpos = numpy.argmin(listmetric) - return init_cen + listshift[minpos] / 2.0 - - @staticmethod - def _create_mask(nrow, ncol, radius, drop): - du = 1.0 / ncol - dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi) - centerrow = numpy.ceil(nrow / 2) - 1 - centercol = numpy.ceil(ncol / 2) - 1 - # added by Edoardo Pasca - centerrow = int(centerrow) - centercol = int(centercol) - mask = numpy.zeros((nrow, ncol), dtype='float32') - for i in range(nrow): - num1 = numpy.round(((i - centerrow) * dv / radius) / du) - (p1, p2) = numpy.int16(numpy.clip(numpy.sort( - (-num1 + centercol, num1 + centercol)), 0, ncol - 1)) - mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32') - if drop < centerrow: - mask[centerrow - drop:centerrow + drop + 1, - :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32') - mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32') - return mask - - def process(self): - - projections = self.get_input() - - cor = CenterOfRotationFinder.find_center_vo(projections.as_array()) - - 
return cor - - -class AcquisitionDataPadder(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, - center_of_rotation = None, - acquisition_geometry = None, - pad_value = 1e-5): - kwargs = { - 'acquisition_geometry' : acquisition_geometry, - 'center_of_rotation' : center_of_rotation, - 'pad_value' : pad_value - } - - super(AcquisitionDataPadder, self).__init__(**kwargs) - - def check_input(self, dataset): - if self.acquisition_geometry is None: - self.acquisition_geometry = dataset.geometry - if dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def process(self): - projections = self.get_input() - w = projections.get_dimension_size('horizontal') - delta = w - 2 * self.center_of_rotation - - padded_width = int ( - numpy.ceil(abs(delta)) + w - ) - delta_pix = padded_width - w - - voxel_per_pixel = 1 - geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), - self.acquisition_geometry.angles, - self.center_of_rotation, - voxel_per_pixel ) - - padded_geometry = self.acquisition_geometry.clone() - - padded_geometry.pixel_num_h = geom['n_h'] - padded_geometry.pixel_num_v = geom['n_v'] - - delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h - delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v - - if delta_pix_h == 0: - delta_pix_h = delta_pix - padded_geometry.pixel_num_h = padded_width - #initialize a new AcquisitionData with values close to 0 - out = AcquisitionData(geometry=padded_geometry) - out = out + self.pad_value - - - #pad in the horizontal-vertical plane -> slice on angles - if delta > 0: - #pad left of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - else: - #pad right of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(0, w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - #cleaned = eval(command) - exec(command) +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018 Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License + +from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ + AcquisitionGeometry, ImageGeometry, ImageData +from ccpi.reconstruction.parallelbeam import alg as pbalg +import numpy +from scipy import ndimage + +import matplotlib.pyplot as plt + + +class Normalizer(DataProcessor): + '''Normalization based on flat and dark + + This processor read in a AcquisitionData and normalises it based on + the instrument reading with and without incident photons or neutrons. + + Input: AcquisitionData + Parameter: 2D projection with flat field (or stack) + 2D projection with dark field (or stack) + Output: AcquisitionDataSetn + ''' + + def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): + kwargs = { + 'flat_field' : flat_field, + 'dark_field' : dark_field, + # very small number. Used when there is a division by zero + 'tolerance' : tolerance + } + + #DataProcessor.__init__(self, **kwargs) + super(Normalizer, self).__init__(**kwargs) + if not flat_field is None: + self.set_flat_field(flat_field) + if not dark_field is None: + self.set_dark_field(dark_field) + + def check_input(self, dataset): + if dataset.number_of_dimensions == 3 or\ + dataset.number_of_dimensions == 2: + return True + else: + raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ + .format(dataset.number_of_dimensions)) + + def set_dark_field(self, df): + if type(df) is numpy.ndarray: + if len(numpy.shape(df)) == 3: + raise ValueError('Dark Field should be 2D') + elif len(numpy.shape(df)) == 2: + self.dark_field = df + elif issubclass(type(df), DataContainer): + self.dark_field = self.set_dark_field(df.as_array()) + + def set_flat_field(self, df): + if type(df) is numpy.ndarray: + if len(numpy.shape(df)) == 3: + raise ValueError('Flat Field should be 2D') + elif len(numpy.shape(df)) == 2: + self.flat_field = df + elif issubclass(type(df), DataContainer): + self.flat_field = self.set_flat_field(df.as_array()) + + @staticmethod + def normalize_projection(projection, flat, dark, tolerance): + a = (projection - dark) + b = (flat-dark) + with numpy.errstate(divide='ignore', invalid='ignore'): + c = numpy.true_divide( a, b ) + c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0 + return c + + @staticmethod + def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): + '''returns the estimated relative error of the normalised projection + + n = (projection - dark) / (flat - dark) + Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) + ''' + a = (projection - dark) + b = (flat-dark) + df = delta_flat / flat + dd = delta_dark / dark + rel_norm_error = (b + a) / (b * a) * (df + dd) + return rel_norm_error + + def process(self): + + projections = self.get_input() + dark = self.dark_field + flat = self.flat_field + + if projections.number_of_dimensions == 3: + if not (projections.shape[1:] == dark.shape and \ + projections.shape[1:] == flat.shape): + raise ValueError('Flats/Dark and projections size do not match.') + + + a = numpy.asarray( + [ Normalizer.normalize_projection( + projection, flat, dark, self.tolerance) \ + for 
projection in projections.as_array() ] + ) + elif projections.number_of_dimensions == 2: + a = Normalizer.normalize_projection(projections.as_array(), + flat, dark, self.tolerance) + y = type(projections)( a , True, + dimension_labels=projections.dimension_labels, + geometry=projections.geometry) + return y + + +class CenterOfRotationFinder(DataProcessor): + '''Processor to find the center of rotation in a parallel beam experiment + + This processor read in a AcquisitionDataSet and finds the center of rotation + based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078 + + Input: AcquisitionDataSet + + Output: float. center of rotation in pixel coordinate + ''' + + def __init__(self): + kwargs = { + + } + + #DataProcessor.__init__(self, **kwargs) + super(CenterOfRotationFinder, self).__init__(**kwargs) + + def check_input(self, dataset): + if dataset.number_of_dimensions == 3: + if dataset.geometry.geom_type == 'parallel': + return True + else: + raise ValueError('{0} is suitable only for parallel beam geometry'\ + .format(self.__class__.__name__)) + else: + raise ValueError("Expected input dimensions is 3, got {0}"\ + .format(dataset.number_of_dimensions)) + + + # ######################################################################### + # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. # + # # + # Copyright 2015. UChicago Argonne, LLC. This software was produced # + # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # + # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # + # U.S. Department of Energy. The U.S. Government has rights to use, # + # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # + # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # + # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is # + # modified to produce derivative works, such modified software should # + # be clearly marked, so as not to confuse it with the version available # + # from ANL. # + # # + # Additionally, redistribution and use in source and binary forms, with # + # or without modification, are permitted provided that the following # + # conditions are met: # + # # + # * Redistributions of source code must retain the above copyright # + # notice, this list of conditions and the following disclaimer. # + # # + # * Redistributions in binary form must reproduce the above copyright # + # notice, this list of conditions and the following disclaimer in # + # the documentation and/or other materials provided with the # + # distribution. # + # # + # * Neither the name of UChicago Argonne, LLC, Argonne National # + # Laboratory, ANL, the U.S. Government, nor the names of its # + # contributors may be used to endorse or promote products derived # + # from this software without specific prior written permission. # + # # + # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # + # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL UChicago # + # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # + # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # + # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # + # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # + # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # + # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # + # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # + # POSSIBILITY OF SUCH DAMAGE. # + # ######################################################################### + + @staticmethod + def as_ndarray(arr, dtype=None, copy=False): + if not isinstance(arr, numpy.ndarray): + arr = numpy.array(arr, dtype=dtype, copy=copy) + return arr + + @staticmethod + def as_dtype(arr, dtype, copy=False): + if not arr.dtype == dtype: + arr = numpy.array(arr, dtype=dtype, copy=copy) + return arr + + @staticmethod + def as_float32(arr): + arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32) + return CenterOfRotationFinder.as_dtype(arr, numpy.float32) + + + + + @staticmethod + def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5, + ratio=2., drop=20): + """ + Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. + + Parameters + ---------- + tomo : ndarray + 3D tomographic data. + ind : int, optional + Index of the slice to be used for reconstruction. + smin, smax : int, optional + Reference to the horizontal center of the sinogram. + srad : float, optional + Fine search radius. + step : float, optional + Step of fine searching. + ratio : float, optional + The ratio between the FOV of the camera and the size of object. + It's used to generate the mask. + drop : int, optional + Drop lines around vertical center of the mask. + + Returns + ------- + float + Rotation axis location. + + Notes + ----- + The function may not yield a correct estimate, if: + + - the sample size is bigger than the field of view of the camera. + In this case the ``ratio`` argument need to be set larger + than the default of 2.0. + + - there is distortion in the imaging hardware. If there's + no correction applied, the center of the projection image may + yield a better estimate. + + - the sample contrast is weak. Paganin's filter need to be applied + to overcome this. + + - the sample was changed during the scan. + """ + tomo = CenterOfRotationFinder.as_float32(tomo) + + if ind is None: + ind = tomo.shape[1] // 2 + _tomo = tomo[:, ind, :] + + + + # Reduce noise by smooth filters. Use different filters for coarse and fine search + _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) + _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) + + # Coarse and fine searches for finding the rotation center. 
+ if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k) + #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :] + #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop) + #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) + init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin, + smax, ratio, drop) + fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, + step, init_cen, + ratio, drop) + else: + init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, + smin, smax, + ratio, drop) + fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, + step, init_cen, + ratio, drop) + + #logger.debug('Rotation center search finished: %i', fine_cen) + return fine_cen + + + @staticmethod + def _search_coarse(sino, smin, smax, ratio, drop): + """ + Coarse search for finding the rotation center. + """ + (Nrow, Ncol) = sino.shape + centerfliplr = (Ncol - 1.0) / 2.0 + + # Copy the sinogram and flip left right, the purpose is to + # make a full [0;2Pi] sinogram + _copy_sino = numpy.fliplr(sino[1:]) + + # This image is used for compensating the shift of sinogram 2 + temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32') + temp_img[:] = sino[-1] + + # Start coarse search in which the shift step is 1 + listshift = numpy.arange(smin, smax + 1) + listmetric = numpy.zeros(len(listshift), dtype='float32') + mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol, + 0.5 * ratio * Ncol, drop) + for i in listshift: + _sino = numpy.roll(_copy_sino, i, axis=1) + if i >= 0: + _sino[:, 0:i] = temp_img[:, 0:i] + else: + _sino[:, i:] = temp_img[:, i:] + listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift( + #pyfftw.interfaces.numpy_fft.fft2( + # numpy.vstack((sino, _sino))) + numpy.fft.fft2(numpy.vstack((sino, _sino))) + )) * mask) + minpos = numpy.argmin(listmetric) + return centerfliplr + listshift[minpos] / 2.0 + + @staticmethod + def _search_fine(sino, srad, step, init_cen, ratio, drop): + """ + Fine search for finding the rotation center. + """ + Nrow, Ncol = sino.shape + centerfliplr = (Ncol + 1.0) / 2.0 - 1.0 + # Use to shift the sinogram 2 to the raw CoR. 
+ shiftsino = numpy.int16(2 * (init_cen - centerfliplr)) + _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1) + if init_cen <= centerfliplr: + lefttake = numpy.int16(numpy.ceil(srad + 1)) + righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1)) + else: + lefttake = numpy.int16(numpy.ceil( + init_cen - (Ncol - 1 - init_cen) + srad + 1)) + righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1)) + Ncol1 = righttake - lefttake + 1 + mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1, + 0.5 * ratio * Ncol, drop) + numshift = numpy.int16((2 * srad) / step) + 1 + listshift = numpy.linspace(-srad, srad, num=numshift) + listmetric = numpy.zeros(len(listshift), dtype='float32') + factor1 = numpy.mean(sino[-1, lefttake:righttake]) + num1 = 0 + for i in listshift: + _sino = ndimage.interpolation.shift( + _copy_sino, (0, i), prefilter=False) + factor2 = numpy.mean(_sino[0,lefttake:righttake]) + _sino = _sino * factor1 / factor2 + sinojoin = numpy.vstack((sino, _sino)) + listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift( + #pyfftw.interfaces.numpy_fft.fft2( + # sinojoin[:, lefttake:righttake + 1]) + numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1]) + )) * mask) + num1 = num1 + 1 + minpos = numpy.argmin(listmetric) + return init_cen + listshift[minpos] / 2.0 + + @staticmethod + def _create_mask(nrow, ncol, radius, drop): + du = 1.0 / ncol + dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi) + centerrow = numpy.ceil(nrow / 2) - 1 + centercol = numpy.ceil(ncol / 2) - 1 + # added by Edoardo Pasca + centerrow = int(centerrow) + centercol = int(centercol) + mask = numpy.zeros((nrow, ncol), dtype='float32') + for i in range(nrow): + num1 = numpy.round(((i - centerrow) * dv / radius) / du) + (p1, p2) = numpy.int16(numpy.clip(numpy.sort( + (-num1 + centercol, num1 + centercol)), 0, ncol - 1)) + mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32') + if drop < centerrow: + mask[centerrow - drop:centerrow + drop + 1, + :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32') + mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32') + return mask + + def process(self): + + projections = self.get_input() + + cor = CenterOfRotationFinder.find_center_vo(projections.as_array()) + + return cor + + +class AcquisitionDataPadder(DataProcessor): + '''Normalization based on flat and dark + + This processor read in a AcquisitionData and normalises it based on + the instrument reading with and without incident photons or neutrons. 
+ + Input: AcquisitionData + Parameter: 2D projection with flat field (or stack) + 2D projection with dark field (or stack) + Output: AcquisitionDataSetn + ''' + + def __init__(self, + center_of_rotation = None, + acquisition_geometry = None, + pad_value = 1e-5): + kwargs = { + 'acquisition_geometry' : acquisition_geometry, + 'center_of_rotation' : center_of_rotation, + 'pad_value' : pad_value + } + + super(AcquisitionDataPadder, self).__init__(**kwargs) + + def check_input(self, dataset): + if self.acquisition_geometry is None: + self.acquisition_geometry = dataset.geometry + if dataset.number_of_dimensions == 3: + return True + else: + raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ + .format(dataset.number_of_dimensions)) + + def process(self): + projections = self.get_input() + w = projections.get_dimension_size('horizontal') + delta = w - 2 * self.center_of_rotation + + padded_width = int ( + numpy.ceil(abs(delta)) + w + ) + delta_pix = padded_width - w + + voxel_per_pixel = 1 + geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), + self.acquisition_geometry.angles, + self.center_of_rotation, + voxel_per_pixel ) + + padded_geometry = self.acquisition_geometry.clone() + + padded_geometry.pixel_num_h = geom['n_h'] + padded_geometry.pixel_num_v = geom['n_v'] + + delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h + delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v + + if delta_pix_h == 0: + delta_pix_h = delta_pix + padded_geometry.pixel_num_h = padded_width + #initialize a new AcquisitionData with values close to 0 + out = AcquisitionData(geometry=padded_geometry) + out = out + self.pad_value + + + #pad in the horizontal-vertical plane -> slice on angles + if delta > 0: + #pad left of middle + command = "out.array[" + for i in range(out.number_of_dimensions): + if out.dimension_labels[i] == 'horizontal': + value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) + command = command + str(value) + else: + if out.dimension_labels[i] == 'vertical' : + value = '{0}:'.format(delta_pix_v) + command = command + str(value) + else: + command = command + ":" + if i < out.number_of_dimensions -1: + command = command + ',' + command = command + '] = projections.array' + #print (command) + else: + #pad right of middle + command = "out.array[" + for i in range(out.number_of_dimensions): + if out.dimension_labels[i] == 'horizontal': + value = '{0}:{1}'.format(0, w) + command = command + str(value) + else: + if out.dimension_labels[i] == 'vertical' : + value = '{0}:'.format(delta_pix_v) + command = command + str(value) + else: + command = command + ":" + if i < out.number_of_dimensions -1: + command = command + ',' + command = command + '] = projections.array' + #print (command) + #cleaned = eval(command) + exec(command) return out \ No newline at end of file -- cgit v1.2.3 From 2bc9cce049c6ae588562ac88e089553a3dcc6d19 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 14:51:11 +0000 Subject: added ScaledFunction --- .../Python/ccpi/optimisation/functions/Function.py | 48 +++++++++++++++++ .../ccpi/optimisation/functions/ScaledFunction.py | 60 ++++++++++++++++++++++ 2 files changed, 108 insertions(+) create mode 100755 Wrappers/Python/ccpi/optimisation/functions/Function.py create mode 100755 Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py 
b/Wrappers/Python/ccpi/optimisation/functions/Function.py new file mode 100755 index 0000000..43ce900 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +class Function(object): + '''Abstract class representing a function + + + ''' + def __init__(self): + self.L = None + def __call__(self,x, out=None): + raise NotImplementedError + def call_adjoint(self, x, out=None): + raise NotImplementedError + def convex_conjugate(self, x, out=None): + raise NotImplementedError + def proximal_conjugate(self, x, tau, out = None): + raise NotImplementedError + def grad(self, x): + warnings.warn('''This method will disappear in following + versions of the CIL. Use gradient instead''', DeprecationWarning) + return self.gradient(x, out=None) + def prox(self, x, tau): + warnings.warn('''This method will disappear in following + versions of the CIL. Use proximal instead''', DeprecationWarning) + return self.proximal(x, out=None) + def gradient(self, x, out=None): + raise NotImplementedError + def proximal(self, x, tau, out=None): + raise NotImplementedError \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py new file mode 100755 index 0000000..f2e39fb --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -0,0 +1,60 @@ +from numbers import Number +import numpy + +class ScaledFunction(object): + '''ScaledFunction + + A class to represent the scalar multiplication of an Operator with a scalar. + It holds an operator and a scalar. Basically it returns the multiplication + of the result of direct and adjoint of the operator with the scalar. + For the rest it behaves like the operator it holds. 
+ + Args: + operator (Operator): a Operator or LinearOperator + scalar (Number): a scalar multiplier + Example: + The scaled operator behaves like the following: + sop = ScaledOperator(operator, scalar) + sop.direct(x) = scalar * operator.direct(x) + sop.adjoint(x) = scalar * operator.adjoint(x) + sop.norm() = operator.norm() + sop.range_geometry() = operator.range_geometry() + sop.domain_geometry() = operator.domain_geometry() + ''' + def __init__(self, function, scalar): + super(ScaledFunction, self).__init__() + self.L = None + if not isinstance (scalar, Number): + raise TypeError('expected scalar: got {}'.format(type(scalar))) + self.scalar = scalar + self.function = function + + def __call__(self,x, out=None): + return self.scalar * self.function(x) + + def call_adjoint(self, x, out=None): + return self.scalar * self.function.call_adjoint(x, out=out) + + def convex_conjugate(self, x, out=None): + return self.scalar * self.function.convex_conjugate(x, out=out) + + def proximal_conjugate(self, x, tau, out = None): + '''TODO check if this is mathematically correct''' + return self.function.proximal_conjugate(x, tau, out=out) + + def grad(self, x): + warnings.warn('''This method will disappear in following + versions of the CIL. Use gradient instead''', DeprecationWarning) + return self.gradient(x, out=None) + + def prox(self, x, tau): + warnings.warn('''This method will disappear in following + versions of the CIL. Use proximal instead''', DeprecationWarning) + return self.proximal(x, out=None) + + def gradient(self, x, out=None): + return self.scalar * self.function.gradient(x, out=out) + + def proximal(self, x, tau, out=None): + '''TODO check if this is mathematically correct''' + return self.function.proximal(x, tau, out=out) -- cgit v1.2.3 From 53689e374625441867c6169829b1ee9b167547f4 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 14:51:47 +0000 Subject: added Function.py --- Wrappers/Python/ccpi/optimisation/functions/Function.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py b/Wrappers/Python/ccpi/optimisation/functions/Function.py index 43ce900..27a5f01 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/Function.py +++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py @@ -22,7 +22,9 @@ import warnings class Function(object): '''Abstract class representing a function - + Members: + L is the Lipschitz constant of the gradient of the Function + alpha is scaling parameter of the function. 
     '''
     def __init__(self):
         self.L = None
-- cgit v1.2.3


From b3be9080f736964486c8f647a68720d2836eb89d Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 14 Mar 2019 14:52:36 +0000
Subject: use ScaledFunction

---
 .../functions/FunctionOperatorComposition.py       | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
index 0f3defe..3ac4358 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
@@ -9,16 +9,20 @@ Created on Fri Mar 8 09:55:36 2019
 import numpy as np
 #from ccpi.optimisation.funcs import Function
 from ccpi.optimisation.functions import Function
+from ccpi.optimisation.functions import ScaledFunction
 
 class FunctionOperatorComposition(Function):
 
     def __init__(self, operator, function):
-
+        super(FunctionOperatorComposition, self).__init__()
         self.function = function
         self.operator = operator
-        self.L = 2*self.function.alpha*operator.norm()**2
-        super(FunctionOperatorComposition, self).__init__()
+        alpha = 1
+        if isinstance (function, ScaledFunction):
+            alpha = function.scalar
+        self.L = 2 * alpha * operator.norm()**2
+
 
     def __call__(self, x):
@@ -45,10 +49,17 @@ class FunctionOperatorComposition(Function):
 
         return self.function.proximal_conjugate(x, tau)
 
-    def gradient(self, x):
+    def gradient(self, x, out=None):
 
         ''' Gradient takes into account the Operator'''
-
-        return self.operator.adjoint(self.function.gradient(self.operator.direct(x)))
+        if out is None:
+            return self.operator.adjoint(
+                self.function.gradient(self.operator.direct(x))
+                )
+        else:
+            self.operator.adjoint(
+                self.function.gradient(self.operator.direct(x),
+                    out=out)
+                )
\ No newline at end of file
-- cgit v1.2.3


From 86efea890fd591c3b67a07aa308f8f6fa26de3b1 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 14 Mar 2019 14:53:21 +0000
Subject: remove * import

---
 Wrappers/Python/ccpi/optimisation/functions/__init__.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index c4ba0a6..f775e52 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -2,9 +2,8 @@
 
 from .Function import Function
 from .ZeroFun import ZeroFun
-from .L1Norm import *
-from .L2NormSquared import *
-from .mixed_L12Norm import *
-from .FunctionOperatorComposition import FunctionOperatorComposition
+from .L1Norm import SimpleL1Norm, L1Norm
+from .L2NormSquared import L2NormSq, SimpleL2NormSq
+from .mixed_L12Norm import mixed_L12Norm
 from .BlockFunction import BlockFunction
-
+from .FunctionOperatorComposition import FunctionOperatorComposition
-- cgit v1.2.3


From 11af3e2d27d521f1305eb6622d5d614449e7378e Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 14 Mar 2019 14:53:59 +0000
Subject: removed composition_with

---
 Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py | 10 ----------
 1 file changed, 10 deletions(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py
index 8fe8620..ffeb32e 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py
@@ -10,8 +10,6 @@
 import numpy as np
 #from ccpi.optimisation.funcs import Function
 from ccpi.optimisation.functions import Function
 from ccpi.framework import DataContainer, ImageData, ImageGeometry
-from ccpi.optimisation.functions.FunctionOperatorComposition import FunctionOperatorComposition
-
 ############################ mixed_L1,2NORM FUNCTIONS #############################
 class mixed_L12Norm(Function):
@@ -56,11 +54,3 @@ class mixed_L12Norm(Function):
             res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0))
 
         return res
-
-    def composition_with(self, operator):
-
-        if self.b is None:
-            return FunctionOperatorComposition(operator, mixed_L12Norm(self.alpha))
-        else:
-            return FunctionOperatorComposition(operator, mixed_L12Norm(self.alpha, b=self.b))
-
-- cgit v1.2.3


From 3879c21f0cf2a7cf7d885ea20f1cc9363c9ecbe8 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 14 Mar 2019 14:54:15 +0000
Subject: fixed test_functions.py

---
 Wrappers/Python/test/test_functions.py | 6 ------
 1 file changed, 6 deletions(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py
index 0741d1c..554d400 100644
--- a/Wrappers/Python/test/test_functions.py
+++ b/Wrappers/Python/test/test_functions.py
@@ -50,14 +50,8 @@ class TestFunction(unittest.TestCase):
 
         d = ImageData(np.random.randint(10, size=ag))
 
-        f = mixed_L12Norm(alpha = 1).composition_with(op1)
         g = L2NormSq(alpha=0.5, b=noisy_data)
 
-        # Compare call of f
-        a1 = ImageData(op1.direct(d).power(2).sum(axis=0)).sqrt().sum()
-        #print(a1, f(d))
-        self.assertEqual (a1, f(d))
-
         # Compare call of g
         a2 = g.alpha*(d - noisy_data).power(2).sum()
         #print(a2, g(d))
-- cgit v1.2.3


From 0f1b5e9ef2619eff4cc158d8e2e05a9d19b8393a Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Thu, 14 Mar 2019 15:06:59 +0000
Subject: added docstrings

---
 .../Python/ccpi/optimisation/functions/Function.py | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py b/Wrappers/Python/ccpi/optimisation/functions/Function.py
index 27a5f01..fa81dfc 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Function.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py
@@ -23,28 +23,40 @@ class Function(object):
     '''Abstract class representing a function
 
     Members:
-    L is the Lipschitz constant of the gradient of the Function
-    alpha is scaling parameter of the function.
+    L is the Lipschitz constant of the gradient of the Function
     '''
     def __init__(self):
         self.L = None
+
     def __call__(self,x, out=None):
+        '''Evaluates the function at x '''
         raise NotImplementedError
-    def call_adjoint(self, x, out=None):
+
+    def gradient(self, x, out=None):
+        '''Returns the gradient of the function at x, if the function is differentiable'''
         raise NotImplementedError
+
+    def proximal(self, x, tau, out=None):
+        '''This returns the proximal operator for the function at x, tau'''
+        raise NotImplementedError
+
     def convex_conjugate(self, x, out=None):
+        '''This evaluates the convex conjugate of the function at x'''
         raise NotImplementedError
+
     def proximal_conjugate(self, x, tau, out = None):
+        '''This returns the proximal operator for the convex conjugate of the function at x, tau'''
         raise NotImplementedError
+
     def grad(self, x):
+        '''Alias of gradient(x,None)'''
         warnings.warn('''This method will disappear in following
                        versions of the CIL. 
Use gradient instead''', DeprecationWarning) return self.gradient(x, out=None) + def prox(self, x, tau): + '''Alias of proximal(x, tau, None)''' warnings.warn('''This method will disappear in following versions of the CIL. Use proximal instead''', DeprecationWarning) return self.proximal(x, out=None) - def gradient(self, x, out=None): - raise NotImplementedError - def proximal(self, x, tau, out=None): - raise NotImplementedError \ No newline at end of file + -- cgit v1.2.3 From 9769759d3f7f1eab53631627474eade8e4c6f96a Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 15:08:03 +0000 Subject: removed FunctionComposition.py --- .../optimisation/functions/FunctionComposition.py | 121 --------------------- 1 file changed, 121 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py deleted file mode 100644 index f24dc10..0000000 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionComposition.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:45:06 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry -from ccpi.framework import BlockDataContainer -from ccpi.optimisation.functions import BlockFunction - -class FunctionOperatorComposition(Function): - - def __init__(self, function, operator): - - self.function = functions - self.operator = operator - self.grad_Lipschitz_cnst = 2*self.function.alpha*operator.norm()**2 - super(FunctionOperatorComposition, self).__init__() - - def __call__(self, x): - - return self.function(operator.direct(x)) - - def call_adjoint(self, x): - - return self.function(operator.adjoint(x)) - - def convex_conjugate(self, x): - - return self.function.convex_conjugate(x) - - def proximal(self, x, tau): - - ''' proximal does not take into account the Operator''' - - return self.function.proximal(x, tau, out=None) - - def proximal_conjugate(self, x, tau): - - ''' proximal conjugate does not take into account the Operator''' - - return self.function.proximal_conjugate(x, tau, out=None) - - def gradient(self, x): - - ''' Gradient takes into account the Operator''' - - return self.adjoint(self.function.gradient(self.operator.direct(x))) - - - - - -class FunctionComposition_new(Function): - - def __init__(self, operator, *functions): - - self.functions = functions - self.operator = operator - self.length = len(self.functions) - - super(FunctionComposition_new, self).__init__() - - def __call__(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - def convex_conjugate(self, x): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - def proximal(self, x, 
tau, out = None): - - if isinstance(x, ImageData): - x = CompositeDataContainer(x) - - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) - - if self.length==1: - return ImageData(*out) - else: - return CompositeDataContainer(*out) - - -if __name__ == '__main__': - - from operators import Operator - from IdentityOperator import Identity \ No newline at end of file -- cgit v1.2.3 From f2b62709a1e4a9529dcee17b2cf7de87a5f02d2c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 15:34:04 +0000 Subject: removed alpha parameter --- .../ccpi/optimisation/functions/L2NormSquared.py | 39 +++++++++++++--------- .../ccpi/optimisation/functions/ScaledFunction.py | 39 ++++++++++++---------- 2 files changed, 45 insertions(+), 33 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 5817317..54c947a 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -19,34 +19,41 @@ class SimpleL2NormSq(Function): def __init__(self, alpha=1): super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - # Lispchitz constant of gradient - self.L = 2*self.alpha + self.L = 2 def __call__(self, x): return self.alpha * x.power(2).sum() - def gradient(self,x): - return 2 * self.alpha * x + def gradient(self,x, out=None): + if out is None: + return 2 * x + else: + out.fill(2*x) def convex_conjugate(self,x): - return (1/(4*self.alpha)) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) + return (1/4) * x.squared_norm() + + def proximal(self, x, tau, out=None): + if out is None: + return x.divide(1+2*tau) + else: + x.divide(1+2*tau, out=out) - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/(2*self.alpha) ) + def proximal_conjugate(self, x, tau, out=None): + if out is None: + return x.divide(1 + tau/2) + else: + x.divide(1+tau/2, out=out) + ############################ L2NORM FUNCTIONS ############################# class L2NormSq(SimpleL2NormSq): - def __init__(self, alpha, **kwargs): + def __init__(self, **kwargs): - super(L2NormSq, self).__init__(alpha) - self.alpha = alpha + super(L2NormSq, self).__init__() self.b = kwargs.get('b',None) def __call__(self, x): @@ -59,9 +66,9 @@ class L2NormSq(SimpleL2NormSq): def gradient(self, x): if self.b is None: - return 2*self.alpha * x + return 2 * x else: - return 2*self.alpha * (x - self.b) + return 2 * (x - self.b) def convex_conjugate(self, x): diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index f2e39fb..7e2f20a 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -4,22 +4,17 @@ import numpy class ScaledFunction(object): '''ScaledFunction - A class to represent the scalar multiplication of an Operator with a scalar. - It holds an operator and a scalar. Basically it returns the multiplication - of the result of direct and adjoint of the operator with the scalar. - For the rest it behaves like the operator it holds. + A class to represent the scalar multiplication of an Function with a scalar. + It holds a function and a scalar. Basically it returns the multiplication + of the product of the function __call__, convex_conjugate and gradient with the scalar. 
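A minimal usage sketch of the scaling behaviour described here, assuming the ScaledFunction and L2NormSq classes as they stand at the end of this series (not part of the patch itself):

from ccpi.framework import ImageGeometry
from ccpi.optimisation.functions import L2NormSq, ScaledFunction

ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3)
data = ig.allocate(2)                 # constant ImageData used as b
x = ig.allocate(5)

f = L2NormSq(b=data)                  # squared L2 distance to data
g = ScaledFunction(f, 0.5)            # behaves like 0.5 * f
assert abs(g(x) - 0.5 * f(x)) < 1e-10
# gradient and convex_conjugate are rescaled in the same way:
#   g.gradient(x)         == 0.5 * f.gradient(x)
#   g.convex_conjugate(x) == 0.5 * f.convex_conjugate(x / 0.5)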
+ For the rest it behaves like the function it holds. Args: - operator (Operator): a Operator or LinearOperator + function (Function): a Function or BlockOperator scalar (Number): a scalar multiplier Example: The scaled operator behaves like the following: - sop = ScaledOperator(operator, scalar) - sop.direct(x) = scalar * operator.direct(x) - sop.adjoint(x) = scalar * operator.adjoint(x) - sop.norm() = operator.norm() - sop.range_geometry() = operator.range_geometry() - sop.domain_geometry() = operator.domain_geometry() + ''' def __init__(self, function, scalar): super(ScaledFunction, self).__init__() @@ -30,31 +25,41 @@ class ScaledFunction(object): self.function = function def __call__(self,x, out=None): + '''Evaluates the function at x ''' return self.scalar * self.function(x) - def call_adjoint(self, x, out=None): - return self.scalar * self.function.call_adjoint(x, out=out) - def convex_conjugate(self, x, out=None): - return self.scalar * self.function.convex_conjugate(x, out=out) + '''returns the convex_conjugate of the scaled function ''' + if out is None: + return self.scalar * self.function.convex_conjugate(x/self.scalar, out=out) + else: + out.fill(self.function.convex_conjugate(x/self.scalar)) + out *= self.scalar def proximal_conjugate(self, x, tau, out = None): - '''TODO check if this is mathematically correct''' + '''This returns the proximal operator for the function at x, tau + + TODO check if this is mathematically correct''' return self.function.proximal_conjugate(x, tau, out=out) def grad(self, x): + '''Alias of gradient(x,None)''' warnings.warn('''This method will disappear in following versions of the CIL. Use gradient instead''', DeprecationWarning) return self.gradient(x, out=None) def prox(self, x, tau): + '''Alias of proximal(x, tau, None)''' warnings.warn('''This method will disappear in following versions of the CIL. 
Use proximal instead''', DeprecationWarning) return self.proximal(x, out=None) def gradient(self, x, out=None): + '''Returns the gradient of the function at x, if the function is differentiable''' return self.scalar * self.function.gradient(x, out=out) def proximal(self, x, tau, out=None): - '''TODO check if this is mathematically correct''' + '''This returns the proximal operator for the function at x, tau + + TODO check if this is mathematically correct''' return self.function.proximal(x, tau, out=out) -- cgit v1.2.3 From b404c63042a496611de3ed03ca24d4b705feca7d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 14 Mar 2019 16:39:51 +0000 Subject: work with Vaggelis --- Wrappers/Python/ccpi/framework/__init__.py | 1 + .../ccpi/optimisation/functions/L2NormSquared.py | 20 +++++--------------- .../operators/FiniteDifferenceOperator.py | 12 +++++++----- .../ccpi/optimisation/operators/GradientOperator.py | 15 +++++++++------ 4 files changed, 22 insertions(+), 26 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py index 4683c21..66e2f56 100755 --- a/Wrappers/Python/ccpi/framework/__init__.py +++ b/Wrappers/Python/ccpi/framework/__init__.py @@ -22,3 +22,4 @@ from .framework import find_key, message from .framework import DataProcessor from .framework import AX, PixelByPixelDataProcessor, CastDataContainer from .BlockDataContainer import BlockDataContainer +from .BlockGeometry import BlockGeometry diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 54c947a..9267565 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -52,51 +52,43 @@ class SimpleL2NormSq(Function): class L2NormSq(SimpleL2NormSq): def __init__(self, **kwargs): - super(L2NormSq, self).__init__() self.b = kwargs.get('b',None) - + def __call__(self, x): - if self.b is None: return SimpleL2NormSq.__call__(self, x) else: return SimpleL2NormSq.__call__(self, x - self.b) def gradient(self, x): - if self.b is None: return 2 * x else: return 2 * (x - self.b) - + def convex_conjugate(self, x): - ''' The convex conjugate corresponds to the simple functional i.e., f(x) = alpha * ||x - b||_{2}^{2} ''' - if self.b is None: return SimpleL2NormSq.convex_conjugate(self, x) else: return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - + def proximal(self, x, tau): - + ''' The proximal operator corresponds to the simple functional i.e., f(x) = alpha * ||x - b||_{2}^{2} argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - + ''' if self.b is None: return SimpleL2NormSq.proximal(self, x, tau) else: return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - def proximal_conjugate(self, x, tau): - ''' The proximal operator corresponds to the simple convex conjugate functional i.e., f^{*}(x^{) argmin_x { 0.5||x - u||^{2} + tau f(x) } @@ -105,5 +97,3 @@ class L2NormSq(SimpleL2NormSq): return SimpleL2NormSq.proximal_conjugate(self, x, tau) else: return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 16cd215..999975c 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -24,9 
+24,9 @@ class FiniteDiff(Operator): # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): - + '''''' super(FiniteDiff, self).__init__() - + '''FIXME: domain and range should be geometries''' self.gm_domain = gm_domain self.gm_range = gm_range self.direction = direction @@ -297,14 +297,16 @@ class FiniteDiff(Operator): res = out return res - def range_dim(self): + def range_geometry(self): return self.gm_range - def domain_dim(self): + def domain_geometry(self): + '''currently is a tuple''' return self.gm_domain def norm(self): - x0 = ImageData(np.random.random_sample(self.domain_dim())) + x0 = self.gm_domain.allocate() + x0 = np.random.random_sample(x0.shape) self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) return self.s1 diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 3dcc1bd..2eb77ce 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -11,6 +11,7 @@ from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.framework import ImageData, BlockDataContainer import numpy as np from ccpi.optimisation.operators import FiniteDiff +from ccpi.framework import BlockGeometry #%% @@ -57,11 +58,12 @@ class Gradient(Operator): def alloc_range_dim(self): return ImageData(np.zeros(self.range_dim)) - def domain_dim(self): + def domain_geometry(self): return self.gm_domain - def range_dim(self): - return self.gm_range + def range_geometry(self): + '''fix this''' + return BlockGeometry(self.gm_range, self.gm_range) def norm(self): # return np.sqrt(4*len(self.domainDim())) @@ -83,10 +85,10 @@ if __name__ == '__main__': # DataContainer(np.random.randint(10, size=G.domain_dim()))] # domain_dim - print('Domain {}'.format(G.domain_dim())) + print('Domain {}'.format(G.domain_geometry())) # range_dim - print('Range {}'.format(G.range_dim())) + print('Range {}'.format(G.range_geometry())) # Direct z = G.direct(u) @@ -104,7 +106,8 @@ if __name__ == '__main__': print(G.norm()) # print(G.adjoint(G.direct(u))) - + + -- cgit v1.2.3 From 071f9b65e552ba338fa35e0798116a6ccd00802a Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Mar 2019 16:36:41 +0000 Subject: squared_norm returns dot with self --- Wrappers/Python/ccpi/framework/framework.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 029a80d..499a2cd 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -755,10 +755,11 @@ class DataContainer(object): return self.as_array().sum(*args, **kwargs) def squared_norm(self): '''return the squared euclidean norm of the DataContainer viewed as a vector''' - shape = self.shape - size = reduce(lambda x,y:x*y, shape, 1) - y = numpy.reshape(self.as_array(), (size, )) - return numpy.dot(y, y.conjugate()) + #shape = self.shape + #size = reduce(lambda x,y:x*y, shape, 1) + #y = numpy.reshape(self.as_array(), (size, )) + #return numpy.dot(y, y.conjugate()) + return self.dot(self) def norm(self): '''return the euclidean norm of the DataContainer viewed as a vector''' return numpy.sqrt(self.squared_norm()) -- cgit v1.2.3 From 5585a3caf6832908ea64089fe47666a4e1b72c76 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 
15 Mar 2019 16:37:21 +0000 Subject: add docstring --- Wrappers/Python/ccpi/optimisation/operators/Operator.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py index 95082f4..cdf15a7 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py @@ -12,12 +12,19 @@ class Operator(object): '''Returns if the operator is linear''' return False def direct(self,x, out=None): + '''Returns the application of the Operator on x''' raise NotImplementedError def norm(self): + '''Returns the norm of the Operator''' raise NotImplementedError def range_geometry(self): + '''Returns the range of the Operator: Y space''' raise NotImplementedError def domain_geometry(self): + '''Returns the domain of the Operator: X space''' raise NotImplementedError def __rmul__(self, scalar): + '''Defines the multiplication by a scalar on the left + + returns a ScaledOperator''' return ScaledOperator(self, scalar) -- cgit v1.2.3 From bee897122ec1d39a097ba795585e24ec8da4104f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 15 Mar 2019 17:21:39 +0000 Subject: added first implementation --- Wrappers/Python/ccpi/framework/BlockGeometry.py | 34 +++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100755 Wrappers/Python/ccpi/framework/BlockGeometry.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py new file mode 100755 index 0000000..87dfe92 --- /dev/null +++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py @@ -0,0 +1,34 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +from numbers import Number +import functools +#from ccpi.framework import AcquisitionData, ImageData +#from ccpi.optimisation.operators import Operator, LinearOperator + +class BlockGeometry(object): + '''Class to hold Geometry as column vector''' + #__array_priority__ = 1 + def __init__(self, *args, **kwargs): + '''''' + self.geometries = args + self.index = 0 + #shape = kwargs.get('shape', None) + #if shape is None: + # shape = (len(args),1) + shape = (len(args),1) + self.shape = shape + #print (self.shape) + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements, len(args))) + + def allocate(self): + containers = [geom.allocate() for geom in self.geometries] + BlockDataContainer(*containers) + -- cgit v1.2.3 From bfa7d487866ad5196466b8f4f6975a369961c8cd Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 18 Mar 2019 13:14:51 +0000 Subject: fix algebra for BlockDataContainer and add test --- .../Python/ccpi/framework/BlockDataContainer.py | 35 +++++++-------- Wrappers/Python/test/test_BlockDataContainer.py | 50 +++++++++++++++++++++- 2 files changed, 67 insertions(+), 18 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index b9f5c5f..358ba2d 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -77,8 +77,9 @@ class BlockDataContainer(object): def add(self, other, *args, **kwargs): assert 
self.is_compatible(other) out = kwargs.get('out', None) + #print ("args" , *args) if isinstance(other, Number): - return type(self)(*[ el.add(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) return type(self)( @@ -99,52 +100,52 @@ class BlockDataContainer(object): self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.multiply(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.multiply(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def divide(self, other, *args, **kwargs): self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.divide(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.divide(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def power(self, other, *args, **kwargs): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.power(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.power(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) + return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def maximum(self,other, *args, **kwargs): assert self.is_compatible(other) out = kwargs.get('out', None) if isinstance(other, 
Number): - return type(self)(*[ el.maximum(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.maximum(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) + return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) ## unary operations def abs(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.abs(out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) def sign(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.sign(out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) def sqrt(self, *args, **kwargs): out = kwargs.get('out', None) - return type(self)(*[ el.sqrt(out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) def conjugate(self, out=None): return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index ef11a82..ec69225 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -281,4 +281,52 @@ class TestBlockDataContainer(unittest.TestCase): s1 *= i #numpy.testing.assert_almost_equal(s[1], cp0.get_item(0,0).as_array()[0][0][0]*s0 +cp0.get_item(1,0).as_array()[0][0][0]*s1, decimal=4) - \ No newline at end of file + def test_Nested_BlockDataContainer(self): + print ("test_Nested_BlockDataContainer") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(2,3,4) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + cp1 = BlockDataContainer(data2,data3) + + nbdc = BlockDataContainer(cp0, cp1) + nbdc2 = nbdc + 2 + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(1).as_array()[0][0][0] , 3. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 4. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 5. , decimal=5) + + nbdc2 = 2 + nbdc + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(0).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(1).as_array()[0][0][0] , 3. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 4. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 5. 
, decimal=5) + + + nbdc2 = nbdc * 2 + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(1).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 4. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 6. , decimal=5) + + nbdc2 = 2 * nbdc + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(1).as_array()[0][0][0] , 2. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 4. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 6. , decimal=5) + + nbdc2 = nbdc / 2 + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(0).as_array()[0][0][0] , 0. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(0).get_item(1).as_array()[0][0][0] , .5 , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 1. , decimal=5) + numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 3./2 , decimal=5) + + + print ("test_Nested_BlockDataContainer OK") + -- cgit v1.2.3 From aa628c3f02f2246f4e1b2982b9497802d615f2e7 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 19 Mar 2019 13:21:50 +0000 Subject: Geometries create default dimension labels and shape allocate method can return a different shape of Image/AcquisitionData add reverse multiplication by scalar L2NormSq uses Scaled Function add scaled function BlockOperator to act on DataContainers by wrapping in BlockDataContainer modifications from block_function branch --- Wrappers/Python/ccpi/framework/framework.py | 86 +++++++++++--- .../Python/ccpi/optimisation/functions/Function.py | 131 +++++++++++---------- .../ccpi/optimisation/functions/L2NormSquared.py | 4 +- .../ccpi/optimisation/functions/ScaledFunction.py | 13 +- .../Python/ccpi/optimisation/functions/__init__.py | 1 + .../ccpi/optimisation/operators/BlockOperator.py | 30 +++-- .../operators/FiniteDifferenceOperator.py | 34 +++--- .../optimisation/operators/GradientOperator.py | 12 +- 8 files changed, 201 insertions(+), 110 deletions(-) mode change 100755 => 100644 Wrappers/Python/ccpi/optimisation/functions/Function.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 499a2cd..0c43737 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -27,6 +27,7 @@ import sys from datetime import timedelta, datetime import warnings from functools import reduce +from numbers import Number def find_key(dic, val): """return the key of dictionary dic given the value""" @@ -54,8 +55,7 @@ class ImageGeometry(object): center_x=0, center_y=0, center_z=0, - channels=1, - dimension_labels=None): + channels=1): self.voxel_num_x = voxel_num_x self.voxel_num_y = voxel_num_y @@ -67,7 +67,28 @@ class ImageGeometry(object): self.center_y = center_y self.center_z = center_z self.channels = channels - self.dimension_labels = dimension_labels + + # this is some code repetition + if self.channels > 1: + if self.voxel_num_z>1: + self.length = 4 + self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) + dim_labels = ['channel' ,'vertical' , 
'horizontal_y' , 'horizontal_x'] + else: + self.length = 3 + self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x) + dim_labels = ['channel' , 'horizontal_y' , 'horizontal_x'] + else: + if self.voxel_num_z>1: + self.length = 3 + self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) + dim_labels = ['vertical', 'horizontal_y' , 'horizontal_x'] + else: + self.length = 2 + self.shape = (self.voxel_num_y, self.voxel_num_x) + dim_labels = ['horizontal_y' , 'horizontal_x'] + + self.dimension_labels = dim_labels def get_min_x(self): return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x @@ -115,11 +136,18 @@ class ImageGeometry(object): return repres def allocate(self, value=0, dimension_labels=None): '''allocates an ImageData according to the size expressed in the instance''' - if dimension_labels is None: - dimension_labels = self.dimension_labels - out = ImageData(geometry=self, dimension_labels=dimension_labels) - if value != 0: - out += value + out = ImageData(geometry=self) + if isinstance(value, Number): + if value != 0: + out += value + else: + if value == 'random': + out.fill(numpy.random.random_sample(self.shape)) + elif value == 'random_int': + out.fill(numpy.random.randint(1, 10 + 1,size=self.shape)) + if dimension_labels is not None: + if dimension_labels != self.dimension_labels: + return out.subset(dimensions=dimension_labels) return out class AcquisitionGeometry(object): @@ -135,7 +163,6 @@ class AcquisitionGeometry(object): dist_center_detector=None, channels=1, angle_unit='degree', - dimension_labels=None ): """ General inputs for standard type projection geometries @@ -166,6 +193,7 @@ class AcquisitionGeometry(object): self.geom_type = geom_type # 'parallel' or 'cone' self.dimension = dimension # 2D or 3D self.angles = angles + num_of_angles = len (angles) self.dist_source_center = dist_source_center self.dist_center_detector = dist_center_detector @@ -176,7 +204,24 @@ class AcquisitionGeometry(object): self.pixel_size_v = pixel_size_v self.channels = channels - self.dimension_labels = dimension_labels + + if channels > 1: + if pixel_num_v > 1: + shape = (channels, num_of_angles , pixel_num_v, pixel_num_h) + dim_labels = ['channel' , 'angle' , 'vertical' , 'horizontal'] + else: + shape = (channels , num_of_angles, pixel_num_h) + dim_labels = ['channel' , 'angle' , 'horizontal'] + else: + if pixel_num_v > 1: + shape = (num_of_angles, pixel_num_v, pixel_num_h) + dim_labels = ['angle' , 'vertical' , 'horizontal'] + else: + shape = (num_of_angles, pixel_num_h) + dim_labels = ['angle' , 'horizontal'] + self.shape = shape + + self.dimension_labels = dim_labels def clone(self): '''returns a copy of the AcquisitionGeometry''' @@ -204,11 +249,18 @@ class AcquisitionGeometry(object): return repres def allocate(self, value=0, dimension_labels=None): '''allocates an AcquisitionData according to the size expressed in the instance''' - if dimension_labels is None: - dimension_labels = self.dimension_labels - out = AcquisitionData(geometry=self, dimension_labels=dimension_labels) - if value != 0: - out += value + out = AcquisitionData(geometry=self) + if isinstance(value, Number): + if value != 0: + out += value + else: + if value == 'random': + out.fill(numpy.random.random_sample(self.shape)) + elif value == 'random_int': + out.fill(numpy.random.out.fill(numpy.random.randint(1, 10 + 1,size=self.shape))) + if dimension_labels is not None: + if dimension_labels != self.dimension_labels: + return out.subset(dimensions=dimension_labels) return out class 
DataContainer(object): '''Generic class to hold data @@ -907,7 +959,7 @@ class AcquisitionData(DataContainer): if channels > 1: if vert > 1: shape = (channels, num_of_angles , vert, horiz) - dim_labels = ['channel' , ' angle' , + dim_labels = ['channel' , 'angle' , 'vertical' , 'horizontal'] else: shape = (channels , num_of_angles, horiz) @@ -936,7 +988,7 @@ class AcquisitionData(DataContainer): elif dim == 'horizontal': shape.append(horiz) if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes.\nExpected{1} got {2}}'\ + raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ .format( len(dimension_labels) - len(shape), dimension_labels, shape) diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py b/Wrappers/Python/ccpi/optimisation/functions/Function.py old mode 100755 new mode 100644 index fa81dfc..82f24a6 --- a/Wrappers/Python/ccpi/optimisation/functions/Function.py +++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py @@ -1,62 +1,69 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings - -class Function(object): - '''Abstract class representing a function - - Members: - L is the Lipschitz constant of the gradient of the Function - ''' - def __init__(self): - self.L = None - - def __call__(self,x, out=None): - '''Evaluates the function at x ''' - raise NotImplementedError - - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - raise NotImplementedError - - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau''' - raise NotImplementedError - - def convex_conjugate(self, x, out=None): - '''This evaluates the convex conjugate of the function at x''' - raise NotImplementedError - - def proximal_conjugate(self, x, tau, out = None): - '''This returns the proximal operator for the convex conjugate of the function at x, tau''' - raise NotImplementedError - - def grad(self, x): - '''Alias of gradient(x,None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use gradient instead''', DeprecationWarning) - return self.gradient(x, out=None) - - def prox(self, x, tau): - '''Alias of proximal(x, tau, None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. 
Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) - +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction + +class Function(object): + '''Abstract class representing a function + + Members: + L is the Lipschitz constant of the gradient of the Function + ''' + def __init__(self): + self.L = None + + def __call__(self,x, out=None): + '''Evaluates the function at x ''' + raise NotImplementedError + + def gradient(self, x, out=None): + '''Returns the gradient of the function at x, if the function is differentiable''' + raise NotImplementedError + + def proximal(self, x, tau, out=None): + '''This returns the proximal operator for the function at x, tau''' + raise NotImplementedError + + def convex_conjugate(self, x, out=None): + '''This evaluates the convex conjugate of the function at x''' + raise NotImplementedError + + def proximal_conjugate(self, x, tau, out = None): + '''This returns the proximal operator for the convex conjugate of the function at x, tau''' + raise NotImplementedError + + def grad(self, x): + '''Alias of gradient(x,None)''' + warnings.warn('''This method will disappear in following + versions of the CIL. Use gradient instead''', DeprecationWarning) + return self.gradient(x, out=None) + + def prox(self, x, tau): + '''Alias of proximal(x, tau, None)''' + warnings.warn('''This method will disappear in following + versions of the CIL. 
Use proximal instead''', DeprecationWarning) + return self.proximal(x, out=None) + + def __rmul__(self, scalar): + '''Defines the multiplication by a scalar on the left + + returns a ScaledFunction''' + return ScaledFunction(self, scalar) + diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 9267565..1baf365 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -23,7 +23,7 @@ class SimpleL2NormSq(Function): self.L = 2 def __call__(self, x): - return self.alpha * x.power(2).sum() + return x.power(2).sum() def gradient(self,x, out=None): if out is None: @@ -61,7 +61,7 @@ class L2NormSq(SimpleL2NormSq): else: return SimpleL2NormSq.__call__(self, x - self.b) - def gradient(self, x): + def gradient(self, x, out=None): if self.b is None: return 2 * x else: diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 7e2f20a..8a52566 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -28,13 +28,14 @@ class ScaledFunction(object): '''Evaluates the function at x ''' return self.scalar * self.function(x) - def convex_conjugate(self, x, out=None): + def convex_conjugate(self, x): '''returns the convex_conjugate of the scaled function ''' - if out is None: - return self.scalar * self.function.convex_conjugate(x/self.scalar, out=out) - else: - out.fill(self.function.convex_conjugate(x/self.scalar)) - out *= self.scalar + # if out is None: + # return self.scalar * self.function.convex_conjugate(x/self.scalar) + # else: + # out.fill(self.function.convex_conjugate(x/self.scalar)) + # out *= self.scalar + return self.scalar * self.function.convex_conjugate(x/self.scalar) def proximal_conjugate(self, x, tau, out = None): '''This returns the proximal operator for the function at x, tau diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index f775e52..9030454 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -6,4 +6,5 @@ from .L1Norm import SimpleL1Norm, L1Norm from .L2NormSquared import L2NormSq, SimpleL2NormSq from .mixed_L12Norm import mixed_L12Norm from .BlockFunction import BlockFunction +from .ScaledFunction import ScaledFunction from .FunctionOperatorComposition import FunctionOperatorComposition diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 21ea104..4ff38c6 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -11,7 +11,7 @@ import functools from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer from ccpi.optimisation.operators import Operator, LinearOperator from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator - +from ccpi.framework import BlockGeometry class BlockOperator(Operator): '''Class to hold a block operator @@ -71,14 +71,23 @@ class BlockOperator(Operator): return numpy.asarray(b) def direct(self, x, out=None): - shape = self.get_output_shape(x.shape) + '''Direct operation for the BlockOperator + + BlockOperator work on BlockDataContainer, but they 
will work on DataContainers + and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) + ''' + if not isinstance (x, BlockDataContainer): + x_b = BlockDataContainer(x) + else: + x_b = x + shape = self.get_output_shape(x_b.shape) res = [] for row in range(self.shape[0]): for col in range(self.shape[1]): if col == 0: - prod = self.get_item(row,col).direct(x.get_item(col)) + prod = self.get_item(row,col).direct(x_b.get_item(col)) else: - prod += self.get_item(row,col).direct(x.get_item(col)) + prod += self.get_item(row,col).direct(x_b.get_item(col)) res.append(prod) return BlockDataContainer(*res, shape=shape) @@ -89,18 +98,25 @@ class BlockOperator(Operator): This method exists in BlockOperator as it is not known what type of Operator it will contain. + BlockOperator work on BlockDataContainer, but they will work on DataContainers + and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) + Raises: ValueError if the contained Operators are not linear ''' if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): raise ValueError('Not all operators in Block are linear.') - shape = self.get_output_shape(x.shape, adjoint=True) + if not isinstance (x, BlockDataContainer): + x_b = BlockDataContainer(x) + else: + x_b = x + shape = self.get_output_shape(x_b.shape, adjoint=True) res = [] for row in range(self.shape[1]): for col in range(self.shape[0]): if col == 0: - prod = self.get_item(row, col).adjoint(x.get_item(col)) + prod = self.get_item(row, col).adjoint(x_b.get_item(col)) else: - prod += self.get_item(row, col).adjoint(x.get_item(col)) + prod += self.get_item(row, col).adjoint(x_b.get_item(col)) res.append(prod) return BlockDataContainer(*res, shape=shape) diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 999975c..24c4e4b 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -32,16 +32,21 @@ class FiniteDiff(Operator): self.direction = direction self.bnd_cond = bnd_cond + # Domain Geometry = Range Geometry if not stated if self.gm_range is None: self.gm_range = self.gm_domain - - if self.direction + 1 > len(gm_domain): + # check direction and "length" of geometry + if self.direction + 1 > len(self.gm_domain.shape): raise ValueError('Gradient directions more than geometry domain') - + + #self.voxel_size = kwargs.get('voxel_size',1) + # this wrongly assumes a homogeneous voxel size + self.voxel_size = self.gm_domain.voxel_size_x + + def direct(self, x, out=None): - -# x_asarr = x.as_array() - x_asarr = x + + x_asarr = x.as_array() x_sz = len(x.shape) if out is None: @@ -157,13 +162,13 @@ class FiniteDiff(Operator): else: raise NotImplementedError - res = out - return res + res = out/self.voxel_size + return type(x)(res) def adjoint(self, x, out=None): -# x_asarr = x.as_array() - x_asarr = x + x_asarr = x.as_array() + #x_asarr = x x_sz = len(x.shape) if out is None: @@ -294,19 +299,20 @@ class FiniteDiff(Operator): else: raise NotImplementedError - res = out - return res + res = out/self.voxel_size + return type(x)(-res) def range_geometry(self): + '''Returns the range geometry''' return self.gm_range def domain_geometry(self): - '''currently is a tuple''' + '''Returns the domain geometry''' return self.gm_domain def norm(self): x0 = self.gm_domain.allocate() - x0 = 
np.random.random_sample(x0.shape) + x0.fill( np.random.random_sample(x0.shape) ) self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) return self.s1 diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 2eb77ce..d0d0f43 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -27,6 +27,7 @@ class Gradient(Operator): if self.gm_range is None: + #FIXME this should be a BlockGeometry self.gm_range = ((len(self.gm_domain),)+self.gm_domain) # Kwargs Default options @@ -39,9 +40,16 @@ class Gradient(Operator): def direct(self, x, out=None): - tmp = np.zeros(self.gm_range) + #tmp = np.zeros(self.gm_range) + tmp = self.gm_range.allocate() for i in range(len(self.gm_domain)): - tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] + #tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] + if self.correlation == 'Space': + if i == 0 : + i+=1 + tmp[i].fill( + FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] + ) # return type(x)(tmp) return type(x)(tmp) -- cgit v1.2.3 From 174d0ace64decac39340c7b160ffdaf37676a6d2 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 19 Mar 2019 13:31:12 +0000 Subject: added and updated unittests --- Wrappers/Python/test/test_BlockDataContainer.py | 39 +++++++++++++++++++++++-- Wrappers/Python/test/test_BlockOperator.py | 23 +++++++++++++++ Wrappers/Python/test/test_DataContainer.py | 11 +++---- Wrappers/Python/test/test_functions.py | 13 ++++++--- 4 files changed, 75 insertions(+), 11 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index ec69225..6c0bede 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -18,6 +18,8 @@ from ccpi.framework import BlockDataContainer #from ccpi.optimisation.Algorithms import CGLS import functools +from ccpi.optimisation.operators import Gradient, Identity, BlockOperator + class TestBlockDataContainer(unittest.TestCase): def skiptest_BlockDataContainerShape(self): print ("test block data container") @@ -327,6 +329,39 @@ class TestBlockDataContainer(unittest.TestCase): numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(0).as_array()[0][0][0] , 1. 
, decimal=5) numpy.testing.assert_almost_equal(nbdc2.get_item(1).get_item(1).as_array()[0][0][0] , 3./2 , decimal=5) - + c5 = nbdc.get_item(0).power(2).sum() + c5a = nbdc.power(2).sum() + print ("sum", c5a, c5) + print ("test_Nested_BlockDataContainer OK") - + def stest_NestedBlockDataContainer2(self): + M, N = 2, 3 + ig = ImageGeometry(voxel_num_x = M, voxel_num_y = N) + ag = ig + u = ig.allocate(1) + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + operator = BlockOperator(op1, op2, shape=(2,1)) + + d1 = op1.direct(u) + d2 = op2.direct(u) + + d = operator.direct(u) + + dd = operator.domain_geometry() + ww = operator.range_geometry() + + print(d.get_item(0).get_item(0).as_array()) + print(d.get_item(0).get_item(1).as_array()) + print(d.get_item(1).as_array()) + + c1 = d + d + + c2 = 2*d + + c3 = d / (d+0.0001) + + + c5 = d.get_item(0).power(2).sum() + diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py index 8bd673b..951aa0a 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -4,6 +4,7 @@ from ccpi.framework import BlockDataContainer from ccpi.optimisation.ops import TomoIdentity from ccpi.framework import ImageGeometry, ImageData import numpy +from ccpi.optimisation.operators import FiniteDiff class TestBlockOperator(unittest.TestCase): @@ -102,6 +103,7 @@ class TestBlockOperator(unittest.TestCase): def test_TomoIdentity(self): ig = ImageGeometry(10,20,30) img = ig.allocate() + print (img.shape, ig.shape) self.assertTrue(img.shape == (30,20,10)) self.assertEqual(img.sum(), 0) Id = TomoIdentity(ig) @@ -288,3 +290,24 @@ class TestBlockOperator(unittest.TestCase): plt.imshow(cgsmall.get_output().get_item(0,0).subset(vertical=0).as_array()) plt.title('Composite CGLS\nsmall lambda') plt.show() + + def test_FiniteDiffOperator(self): + N, M = 200, 300 + + + ig = ImageGeometry(voxel_num_x = M, voxel_num_y = N) + u = ig.allocate('random_int') + G = FiniteDiff(ig, direction=0, bnd_cond = 'Neumann') + print(type(u), u.as_array()) + print(G.direct(u).as_array()) + + # Gradient Operator norm, for one direction should be close to 2 + numpy.testing.assert_allclose(G.norm(), numpy.sqrt(4), atol=0.1) + + M1, N1, K1 = 200, 300, 2 + ig1 = ImageGeometry(voxel_num_x = M1, voxel_num_y = N1, channels = K1) + u1 = ig1.allocate('random_int') + G1 = FiniteDiff(ig1, direction=2, bnd_cond = 'Periodic') + print(ig1.shape==u1.shape) + print (G1.norm()) + numpy.testing.assert_allclose(G1.norm(), numpy.sqrt(4), atol=0.1) \ No newline at end of file diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index 47feb95..7a7e6a0 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -495,9 +495,10 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[1], image.dimension_labels[1]) self.assertEqual(order[2], image.dimension_labels[2]) def test_AcquisitionGeometry_allocate(self): - ageometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10), - geom_type='parallel', pixel_num_v=3, - pixel_num_h=5, channels=2) + ageometry = AcquisitionGeometry(dimension=2, + angles=numpy.linspace(0, 180, num=10), + geom_type='parallel', pixel_num_v=3, + pixel_num_h=5, channels=2) sino = ageometry.allocate() shape = sino.shape print ("shape", shape) @@ -509,8 +510,8 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(1,sino.as_array()[shape[0]-1][shape[1]-1][shape[2]-1][shape[3]-1]) print 
(sino.dimension_labels, sino.shape, ageometry) - default_order = ['channel' , ' angle' , - 'vertical' , 'horizontal'] + default_order = ['channel' , 'angle' , + 'vertical' , 'horizontal'] self.assertEqual(default_order[0], sino.dimension_labels[0]) self.assertEqual(default_order[1], sino.dimension_labels[1]) self.assertEqual(default_order[2], sino.dimension_labels[2]) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 554d400..6a44641 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -49,11 +49,12 @@ class TestFunction(unittest.TestCase): noisy_data = ImageData(np.random.randint(10, size=ag)) d = ImageData(np.random.randint(10, size=ag)) - - g = L2NormSq(alpha=0.5, b=noisy_data) + alpha = 0.5 + # scaled function + g = alpha * L2NormSq(b=noisy_data) # Compare call of g - a2 = g.alpha*(d - noisy_data).power(2).sum() + a2 = alpha*(d - noisy_data).power(2).sum() #print(a2, g(d)) self.assertEqual(a2, g(d)) @@ -63,7 +64,11 @@ class TestFunction(unittest.TestCase): #print( a3, g.convex_conjugate(d)) - + def stest_ScaledFunctin(self): + ig = (N,N) + ag = ig + op1 = Gradient(ig) + op2 = Identity(ig, ag) # -- cgit v1.2.3 From 1b34498aaa93b95925991258fe542b62a9155aff Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 15:16:39 +0000 Subject: BlockDataContainer can do algebra with DataContainers --- .../Python/ccpi/framework/BlockDataContainer.py | 27 ++++++++++++++----- Wrappers/Python/test/test_BlockDataContainer.py | 30 ++++++++++++++++++++-- 2 files changed, 49 insertions(+), 8 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 358ba2d..f29f839 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -12,6 +12,7 @@ from __future__ import unicode_literals import numpy from numbers import Number import functools +from ccpi.framework import DataContainer #from ccpi.framework import AcquisitionData, ImageData #from ccpi.optimisation.operators import Operator, LinearOperator @@ -64,6 +65,8 @@ class BlockDataContainer(object): return len(self.containers) == len(other) elif isinstance(other, numpy.ndarray): return self.shape == other.shape + elif issubclass(other.__class__, DataContainer): + return self.get_item(0).shape == other.shape return len(self.containers) == len(other.containers) def get_item(self, row): @@ -75,24 +78,33 @@ class BlockDataContainer(object): return self.get_item(row) def add(self, other, *args, **kwargs): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for add') out = kwargs.get('out', None) #print ("args" , *args) if isinstance(other, Number): return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. 
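In rough terms, the broadcasting enabled by this hunk looks like the sketch below, which mirrors the unit test added further down (names are illustrative only):

from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer

ig = ImageGeometry(2, 3, 4)
data0 = ImageData(geometry=ig)            # zeros
data2 = ImageData(geometry=ig) + 2
cp2 = BlockDataContainer(data0 + 1, data2 + 1)

d = cp2 + data0        # data0 is broadcast and added to every element of cp2
m = cp2 * data2        # element-wise multiply, data2 broadcast to both entries
# a DataContainer whose shape differs from cp2.get_item(0) raises ValueError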
Will raise error if not compatible + return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)( *[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def subtract(self, other, *args, **kwargs): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for add') out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) @@ -105,6 +117,9 @@ class BlockDataContainer(object): return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) elif isinstance(other, numpy.ndarray): return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) @@ -115,6 +130,9 @@ class BlockDataContainer(object): return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. 
Will raise error if not compatible + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) @@ -138,13 +156,10 @@ class BlockDataContainer(object): ## unary operations def abs(self, *args, **kwargs): - out = kwargs.get('out', None) return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) def sign(self, *args, **kwargs): - out = kwargs.get('out', None) return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) def sqrt(self, *args, **kwargs): - out = kwargs.get('out', None) return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) def conjugate(self, out=None): return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 6c0bede..51d07fa 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -95,7 +95,7 @@ class TestBlockDataContainer(unittest.TestCase): def test_BlockDataContainer(self): print ("test block data container") ig0 = ImageGeometry(2,3,4) - ig1 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(2,3,5) data0 = ImageData(geometry=ig0) data1 = ImageData(geometry=ig1) + 1 @@ -105,7 +105,33 @@ class TestBlockDataContainer(unittest.TestCase): cp0 = BlockDataContainer(data0,data1) cp1 = BlockDataContainer(data2,data3) - # + + cp2 = BlockDataContainer(data0+1, data2+1) + d = cp2 + data0 + self.assertEqual(d.get_item(0).as_array()[0][0][0], 1) + try: + d = cp2 + data1 + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) + d = cp2 - data0 + self.assertEqual(d.get_item(0).as_array()[0][0][0], 1) + try: + d = cp2 - data1 + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) + d = cp2 * data2 + self.assertEqual(d.get_item(0).as_array()[0][0][0], 2) + try: + d = cp2 * data1 + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) + a = [ (el, ot) for el,ot in zip(cp0.containers,cp1.containers)] print (a[0][0].shape) #cp2 = BlockDataContainer(*a) -- cgit v1.2.3 From 5477469d504997da61d3e1da779bb7ffcd019191 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 15:18:23 +0000 Subject: add value to allocate pars --- Wrappers/Python/ccpi/framework/BlockGeometry.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py index 87dfe92..632d320 100755 --- a/Wrappers/Python/ccpi/framework/BlockGeometry.py +++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py @@ -28,7 +28,7 @@ class BlockGeometry(object): 'Dimension and size do not match: expected {} got {}' .format(n_elements, len(args))) - def allocate(self): - containers = [geom.allocate() for geom in self.geometries] + def allocate(self, value=0): + containers = [geom.allocate(value) for geom in self.geometries] BlockDataContainer(*containers) -- cgit v1.2.3 From 757de9d5f43fd05cc05dd9e3bb96078a86948475 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 16:20:10 +0000 Subject: added strings as class members --- Wrappers/Python/ccpi/framework/framework.py | 89 ++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 20 
deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 0c43737..3707c07 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -44,6 +44,13 @@ def message(cls, msg, *args): class ImageGeometry(object): + RANDOM = 'random' + RANDOM_INT = 'random_int' + CHANNEL = 'channel' + ANGLE = 'angle' + VERTICAL = 'vertical' + HORIZONTAL_X = 'horizontal_x' + HORIZONTAL_Y = 'horizontal_y' def __init__(self, voxel_num_x=0, @@ -73,20 +80,22 @@ class ImageGeometry(object): if self.voxel_num_z>1: self.length = 4 self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = ['channel' ,'vertical' , 'horizontal_y' , 'horizontal_x'] + dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] else: self.length = 3 self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x) - dim_labels = ['channel' , 'horizontal_y' , 'horizontal_x'] + dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] else: if self.voxel_num_z>1: self.length = 3 self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = ['vertical', 'horizontal_y' , 'horizontal_x'] + dim_labels = [ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] else: self.length = 2 self.shape = (self.voxel_num_y, self.voxel_num_x) - dim_labels = ['horizontal_y' , 'horizontal_x'] + dim_labels = [ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] self.dimension_labels = dim_labels @@ -134,23 +143,49 @@ class ImageGeometry(object): repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) return repres - def allocate(self, value=0, dimension_labels=None): + def allocate(self, value=0, dimension_labels=None, **kwargs): '''allocates an ImageData according to the size expressed in the instance''' out = ImageData(geometry=self) if isinstance(value, Number): if value != 0: out += value else: - if value == 'random': + if value == ImageData.RANDOM: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) out.fill(numpy.random.random_sample(self.shape)) - elif value == 'random_int': - out.fill(numpy.random.randint(1, 10 + 1,size=self.shape)) + elif value == ImageData.RANDOM_INT: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) + max_value = kwargs.get('max_value', 100) + out.fill(numpy.random.randint(max_value,size=self.shape)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) return out + # The following methods return 2 members of the class, therefore I + # don't think we need to implement them. + # Additionally using __len__ is confusing as one would think this is + # an iterable. 
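As an aside to the allocate changes above, a minimal usage sketch (not part of the patch) of the intended interface; it relies on the ImageData.RANDOM comparisons being corrected to ImageGeometry.RANDOM, which a later commit in this series does, and on the seed/max_value keywords behaving exactly as coded in the hunk:

    from ccpi.framework import ImageGeometry

    ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2)
    # the same seed gives reproducible content
    x = ig.allocate(ImageGeometry.RANDOM, seed=5)
    y = ig.allocate(ImageGeometry.RANDOM_INT, seed=5, max_value=10)
    print(x.shape, y.as_array().max())   # integer entries lie in [0, max_value)
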
+ #def __len__(self): + # '''returns the length of the geometry''' + # return self.length + #def shape(self): + # '''Returns the shape of the array of the ImageData it describes''' + # return self.shape + class AcquisitionGeometry(object): - + RANDOM = 'random' + RANDOM_INT = 'random_int' + ANGLE_UNIT = 'angle_unit' + DEGREE = 'degree' + RADIAN = 'radian' + CHANNEL = 'channel' + ANGLE = 'angle' + VERTICAL = 'vertical' + HORIZONTAL = 'horizontal' def __init__(self, geom_type, dimension, @@ -162,7 +197,7 @@ class AcquisitionGeometry(object): dist_source_center=None, dist_center_detector=None, channels=1, - angle_unit='degree', + **kwargs ): """ General inputs for standard type projection geometries @@ -204,21 +239,26 @@ class AcquisitionGeometry(object): self.pixel_size_v = pixel_size_v self.channels = channels - + self.angle_unit=kwargs.get(AcquisitionGeometry.ANGLE_UNIT, + AcquisitionGeometry.DEGREE) if channels > 1: if pixel_num_v > 1: shape = (channels, num_of_angles , pixel_num_v, pixel_num_h) - dim_labels = ['channel' , 'angle' , 'vertical' , 'horizontal'] + dim_labels = [AcquisitionGeometry.CHANNEL , + AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , + AcquisitionGeometry.HORIZONTAL] else: shape = (channels , num_of_angles, pixel_num_h) - dim_labels = ['channel' , 'angle' , 'horizontal'] + dim_labels = [AcquisitionGeometry.CHANNEL , + AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] else: if pixel_num_v > 1: shape = (num_of_angles, pixel_num_v, pixel_num_h) - dim_labels = ['angle' , 'vertical' , 'horizontal'] + dim_labels = [AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , + AcquisitionGeometry.HORIZONTAL] else: shape = (num_of_angles, pixel_num_h) - dim_labels = ['angle' , 'horizontal'] + dim_labels = [AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] self.shape = shape self.dimension_labels = dim_labels @@ -254,10 +294,17 @@ class AcquisitionGeometry(object): if value != 0: out += value else: - if value == 'random': + if value == AcquisitionData.RANDOM: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) out.fill(numpy.random.random_sample(self.shape)) - elif value == 'random_int': - out.fill(numpy.random.out.fill(numpy.random.randint(1, 10 + 1,size=self.shape))) + elif value == AcquisitionData.RANDOM_INT: + seed = kwargs.get('seed', None) + if seed is not None: + numpy.random.seed(seed) + max_value = kwargs.get('max_value', 100) + out.fill(numpy.random.randint(max_value,size=self.shape)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) @@ -810,8 +857,8 @@ class DataContainer(object): #shape = self.shape #size = reduce(lambda x,y:x*y, shape, 1) #y = numpy.reshape(self.as_array(), (size, )) - #return numpy.dot(y, y.conjugate()) - return self.dot(self) + return self.dot(self.conjugate()) + #return self.dot(self) def norm(self): '''return the euclidean norm of the DataContainer viewed as a vector''' return numpy.sqrt(self.squared_norm()) @@ -828,6 +875,7 @@ class DataContainer(object): class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' + def __init__(self, array = None, deep_copy=False, @@ -939,6 +987,7 @@ class ImageData(DataContainer): class AcquisitionData(DataContainer): '''DataContainer for holding 2D or 3D sinogram''' + def __init__(self, array = None, deep_copy=True, -- cgit v1.2.3 From 27ea6509eaa294529c86567bf50def2d27966bb7 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 
16:23:14 +0000 Subject: add Vaggelis fix, missing docstrings are added --- .../ccpi/optimisation/functions/BlockFunction.py | 34 +++++++--------------- 1 file changed, 10 insertions(+), 24 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index d6c98c4..89dd9eb 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -12,45 +12,31 @@ from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer class BlockFunction(Function): - + '''missing docstring''' def __init__(self, operator, *functions): - + '''missing docstring''' self.functions = functions - self.operator = operator self.length = len(self.functions) super(BlockFunction, self).__init__() def __call__(self, x): - - tmp = self.operator.direct(x) - + '''missing docstring''' t = 0 - for i in range(tmp.shape[0]): - t += self.functions[i](tmp.get_item(i)) + for i in range(x.shape[0]): + t += self.functions[i](x.get_item(i)) return t - def call_adjoint(self, x): - - tmp = operator.adjoint(x) - - t = 0 - for i in range(tmp.shape[0]): - t += self.functions[i](tmp.get_item(i)) - return t - def convex_conjugate(self, x): - - ''' Convex_conjugate does not take into account the BlockOperator''' + '''Convex_conjugate does not take into account the BlockOperator''' t = 0 for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) + t += self.functions[i].convex_conjugate(x.get_item(i)) return t def proximal_conjugate(self, x, tau, out = None): - - ''' proximal_conjugate does not take into account the BlockOperator''' + '''proximal_conjugate does not take into account the BlockOperator''' out = [None]*self.length for i in range(self.length): out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) @@ -58,8 +44,7 @@ class BlockFunction(Function): return BlockDataContainer(*out) def proximal(self, x, tau, out = None): - - ''' proximal does not take into account the BlockOperator''' + '''proximal does not take into account the BlockOperator''' out = [None]*self.length for i in range(self.length): out[i] = self.functions[i].proximal(x.get_item(i), tau) @@ -67,4 +52,5 @@ class BlockFunction(Function): return BlockDataContainer(*out) def gradient(self,x, out=None): + '''missing docstring''' pass \ No newline at end of file -- cgit v1.2.3 From 4f4303a5d49e8d330442c74d710b98bd14bb78c0 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 16:23:58 +0000 Subject: add out --- .../ccpi/optimisation/functions/FunctionOperatorComposition.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py index 3ac4358..34b7e35 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -37,17 +37,17 @@ class FunctionOperatorComposition(Function): ''' convex_conjugate does not take into account the Operator''' return self.function.convex_conjugate(x) - def proximal(self, x, tau): + def proximal(self, x, tau, out=None): - ''' proximal does not take into account the Operator''' + '''proximal does not take into account the Operator''' - return self.function.proximal(x, tau, out=None) + 
return self.function.proximal(x, tau, out=out) def proximal_conjugate(self, x, tau, out=None): ''' proximal conjugate does not take into account the Operator''' - return self.function.proximal_conjugate(x, tau) + return self.function.proximal_conjugate(x, tau, out=out) def gradient(self, x, out=None): -- cgit v1.2.3 From 311a5a82e4ae5bd7bc5661c4e441458deabc17d6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 16:41:50 +0000 Subject: use class strings --- Wrappers/Python/ccpi/framework/framework.py | 47 ++++++++++++++++++----------- 1 file changed, 29 insertions(+), 18 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 3707c07..7afd2c1 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -896,32 +896,36 @@ class ImageData(DataContainer): if channels > 1: if vert > 1: shape = (channels, vert, horiz_y, horiz_x) - dim_labels = ['channel' ,'vertical' , 'horizontal_y' , - 'horizontal_x'] + dim_labels = [ImageGeometry.CHANNEL, + ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] else: shape = (channels , horiz_y, horiz_x) - dim_labels = ['channel' , 'horizontal_y' , - 'horizontal_x'] + dim_labels = [ImageGeometry.CHANNEL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] else: if vert > 1: shape = (vert, horiz_y, horiz_x) - dim_labels = ['vertical' , 'horizontal_y' , - 'horizontal_x'] + dim_labels = [ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] else: shape = (horiz_y, horiz_x) - dim_labels = ['horizontal_y' , - 'horizontal_x'] + dim_labels = [ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] dimension_labels = dim_labels else: shape = [] for dim in dimension_labels: - if dim == 'channel': + if dim == ImageGeometry.CHANNEL: shape.append(channels) - elif dim == 'horizontal_y': + elif dim == ImageGeometry.HORIZONTAL_Y: shape.append(horiz_y) - elif dim == 'vertical': + elif dim == ImageGeometry.VERTICAL: shape.append(vert) - elif dim == 'horizontal_x': + elif dim == ImageGeometry.HORIZONTAL_X: shape.append(horiz_x) if len(shape) != len(dimension_labels): raise ValueError('Missing {0} axes'.format( @@ -956,14 +960,17 @@ class ImageData(DataContainer): if dimension_labels is None: if array.ndim == 4: - dimension_labels = ['channel' ,'vertical' , 'horizontal_y' , - 'horizontal_x'] + dimension_labels = [ImageGeometry.CHANNEL, + ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] elif array.ndim == 3: - dimension_labels = ['vertical' , 'horizontal_y' , - 'horizontal_x'] + dimension_labels = [ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] else: - dimension_labels = ['horizontal_y' , - 'horizontal_x'] + dimension_labels = [ ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) super(ImageData, self).__init__(array, deep_copy, @@ -979,6 +986,10 @@ class ImageData(DataContainer): self.spacing = value def subset(self, dimensions=None, **kw): + # FIXME: this is clearly not rigth + # it should be something like + # out = DataContainer.subset(self, dimensions, **kw) + # followed by regeneration of the proper geometry. 
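A small sketch (not part of the patch) of what the class-level label strings buy in user code, assuming allocate's dimension_labels handling shown earlier in this file and the axis reordering exercised by the allocate tests above:

    from ccpi.framework import ImageGeometry

    ig = ImageGeometry(voxel_num_x=4, voxel_num_y=3, voxel_num_z=2)
    # request a non-default axis order using the constants instead of string literals
    labels = [ImageGeometry.HORIZONTAL_X,
              ImageGeometry.VERTICAL,
              ImageGeometry.HORIZONTAL_Y]
    data = ig.allocate(0, dimension_labels=labels)
    print(data.dimension_labels)
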
out = super(ImageData, self).subset(dimensions, **kw) #out.geometry = self.recalculate_geometry(dimensions , **kw) out.geometry = self.geometry -- cgit v1.2.3 From ac236bc60402df87cd80ab841f3f31158bf0679e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 21 Mar 2019 17:07:32 +0000 Subject: added equality of geometry --- Wrappers/Python/ccpi/framework/framework.py | 19 +++++++++++++++++++ Wrappers/Python/test/test_DataContainer.py | 13 +++++++++++++ 2 files changed, 32 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 7afd2c1..98e9a8c 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -175,6 +175,15 @@ class ImageGeometry(object): #def shape(self): # '''Returns the shape of the array of the ImageData it describes''' # return self.shape + def __eq__(self, other): + '''Returns whether two geometries are equal''' + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) class AcquisitionGeometry(object): RANDOM = 'random' @@ -309,6 +318,16 @@ class AcquisitionGeometry(object): if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) return out + def __eq__(self, other): + '''Returns whether two geometries are equal''' + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + class DataContainer(object): '''Generic class to hold data diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index 7a7e6a0..cb09a1f 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -494,6 +494,8 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[0], image.dimension_labels[0]) self.assertEqual(order[1], image.dimension_labels[1]) self.assertEqual(order[2], image.dimension_labels[2]) + + #vgeometry.allocate('') def test_AcquisitionGeometry_allocate(self): ageometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10), @@ -523,6 +525,17 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[1], sino.dimension_labels[1]) self.assertEqual(order[2], sino.dimension_labels[2]) self.assertEqual(order[2], sino.dimension_labels[2]) + + def test_ImageGeometry_equal(self): + vg1 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) + vg2 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) + self.assertTrue(vg1 == vg2) + self.assertFalse(vg1 != vg2) + + vg2 = ImageGeometry(voxel_num_z=3,voxel_num_x=4, voxel_num_y=3, channels=2) + self.assertTrue(vg1 != vg2) + self.assertFalse(vg1 == vg2) + def assertNumpyArrayEqual(self, first, second): res = True -- cgit v1.2.3 From eb1165092a1b85384740c0d57482d32020ddee95 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 27 Mar 2019 15:50:47 +0000 Subject: remove hard coded strings in AcquisitionData --- Wrappers/Python/ccpi/framework/framework.py | 50 +++++++++++------------------ 1 file changed, 19 insertions(+), 31 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 98e9a8c..5bc01e5 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -175,15 +175,6 @@ class 
ImageGeometry(object): #def shape(self): # '''Returns the shape of the array of the ImageData it describes''' # return self.shape - def __eq__(self, other): - '''Returns whether two geometries are equal''' - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) class AcquisitionGeometry(object): RANDOM = 'random' @@ -318,16 +309,7 @@ class AcquisitionGeometry(object): if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) return out - def __eq__(self, other): - '''Returns whether two geometries are equal''' - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - else: - return False - - def __ne__(self, other): - return not self.__eq__(other) - + class DataContainer(object): '''Generic class to hold data @@ -1038,33 +1020,39 @@ class AcquisitionData(DataContainer): if channels > 1: if vert > 1: shape = (channels, num_of_angles , vert, horiz) - dim_labels = ['channel' , 'angle' , - 'vertical' , 'horizontal'] + dim_labels = [AcquisitionGeometry.CHANNEL, + AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL] else: shape = (channels , num_of_angles, horiz) - dim_labels = ['channel' , 'angle' , - 'horizontal'] + dim_labels = [AcquisitionGeometry.CHANNEL, + AcquisitionGeometry.ANGLE, + AcquisitionGeometry.HORIZONTAL] else: if vert > 1: shape = (num_of_angles, vert, horiz) - dim_labels = ['angle' , 'vertical' , - 'horizontal'] + dim_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL + ] else: shape = (num_of_angles, horiz) - dim_labels = ['angle' , - 'horizontal'] + dim_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.HORIZONTAL + ] dimension_labels = dim_labels else: shape = [] for dim in dimension_labels: - if dim == 'channel': + if dim == AcquisitionGeometry.CHANNEL: shape.append(channels) - elif dim == 'angle': + elif dim == AcquisitionGeometry.ANGLE: shape.append(num_of_angles) - elif dim == 'vertical': + elif dim == AcquisitionGeometry.VERTICAL: shape.append(vert) - elif dim == 'horizontal': + elif dim == AcquisitionGeometry.HORIZONTAL: shape.append(horiz) if len(shape) != len(dimension_labels): raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ -- cgit v1.2.3 From 1f68f4053c0b72f34168edc07e76c76aaa71ee96 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 27 Mar 2019 16:58:01 +0000 Subject: updates --- Wrappers/Python/ccpi/framework/framework.py | 4 +- .../ccpi/optimisation/operators/BlockOperator.py | 49 ++++++++++++++++++++++ Wrappers/Python/test/test_BlockOperator.py | 49 +++++++++++++++++++++- 3 files changed, 99 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 5bc01e5..bf8273b 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -150,12 +150,12 @@ class ImageGeometry(object): if value != 0: out += value else: - if value == ImageData.RANDOM: + if value == ImageGeometry.RANDOM: seed = kwargs.get('seed', None) if seed is not None: numpy.random.seed(seed) out.fill(numpy.random.random_sample(self.shape)) - elif value == ImageData.RANDOM_INT: + elif value == ImageGeometry.RANDOM_INT: seed = kwargs.get('seed', None) if seed is not None: numpy.random.seed(seed) diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py 
b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 4ff38c6..3679136 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -23,6 +23,9 @@ class BlockOperator(Operator): Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with N rows and 1 column. + + Operators in a Block are required to have the same domain column-wise and the + same range row-wise. ''' __array_priority__ = 1 def __init__(self, *args, **kwargs): @@ -52,6 +55,39 @@ class BlockOperator(Operator): raise ValueError( 'Dimension and size do not match: expected {} got {}' .format(n_elements,len(args))) + # test if operators are compatible + if not self.column_wise_compatible(): + raise ValueError('Operators in each column must have the same domain') + if not self.row_wise_compatible(): + raise ValueError('Operators in each row must have the same range') + + def column_wise_compatible(self): + '''Operators in a Block should have the same domain per column''' + rows, cols = self.shape + compatible = True + for col in range(cols): + row_compatible = True + for row in range(1,rows): + dg0 = self.get_item(row-1,col).domain_geometry() + dg1 = self.get_item(row,col).domain_geometry() + row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible + compatible = compatible and row_compatible + return compatible + + def row_wise_compatible(self): + '''Operators in a Block should have the same range per row''' + rows, cols = self.shape + compatible = True + for row in range(rows): + column_compatible = True + for col in range(1,cols): + dg0 = self.get_item(row,col-1).range_geometry() + dg1 = self.get_item(row,col).range_geometry() + column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible + print ("column_compatible" , column_compatible, dg0.shape, dg1.shape) + compatible = compatible and column_compatible + return compatible + def get_item(self, row, col): if row > self.shape[0]: raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) @@ -153,5 +189,18 @@ class BlockOperator(Operator): shape = (self.shape[1], self.shape[0]) return type(self)(*self.operators, shape=shape) + def domain_geometry(self): + if self.shape[1] == 1: + # column BlockOperator + return self[0].domain_geometry() + else: + shape = (self.shape[0], 1) + return BlockGeometry(*[el.domain_geometry() for el in self.operators], + shape=shape) + + def range_geometry(self): + shape = (self.shape[1], 1) + return BlockGeometry(*[el.range_geometry() for el in self.operators], + shape=shape) if __name__ == '__main__': pass diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py index 951aa0a..c042d04 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -6,6 +6,13 @@ from ccpi.framework import ImageGeometry, ImageData import numpy from ccpi.optimisation.operators import FiniteDiff +class TestOperator(TomoIdentity): + def __init__(self, *args, **kwargs): + super(TestOperator, self).__init__(*args, **kwargs) + self.range = kwargs.get('range', self.geometry) + def range_geometry(self): + return self.range + class TestBlockOperator(unittest.TestCase): def test_BlockOperator(self): @@ -32,7 +39,47 @@ class TestBlockOperator(unittest.TestCase): zero = numpy.zeros(X.get_item(0).shape) numpy.testing.assert_array_equal(Y.get_item(0).as_array(),len(x)+zero) - + try: + # this 
should fail as the domain is not compatible + ig = [ ImageGeometry(10,20,31) , \ + ImageGeometry(10,20,30) , \ + ImageGeometry(10,20,30) ] + x = [ g.allocate() for g in ig ] + ops = [ TomoIdentity(g) for g in ig ] + + K = BlockOperator(*ops) + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) + + try: + # this should fail as the range is not compatible + ig = [ ImageGeometry(10,20,30) , \ + ImageGeometry(10,20,30) , \ + ImageGeometry(10,20,30) ] + rg0 = [ ImageGeometry(10,20,31) , \ + ImageGeometry(10,20,31) , \ + ImageGeometry(10,20,31) ] + rg1 = [ ImageGeometry(10,22,31) , \ + ImageGeometry(10,22,31) , \ + ImageGeometry(10,20,31) ] + x = [ g.allocate() for g in ig ] + ops = [ TestOperator(g, range=r) for g,r in zip(ig, rg0) ] + ops += [ TestOperator(g, range=r) for g,r in zip(ig, rg1) ] + print (len(ops)) + K = BlockOperator(*ops) + print ("K col comp? " , K.column_wise_compatible()) + print ("K row comp? " , K.row_wise_compatible()) + for op in ops: + print ("range" , op.range_geometry().shape) + for op in ops: + print ("domain" , op.domain_geometry().shape) + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) + def test_ScaledBlockOperatorSingleScalar(self): ig = [ ImageGeometry(10,20,30) , \ ImageGeometry(10,20,30) , \ -- cgit v1.2.3 From a2c82a21f4cc498fd8f7cd8a8a1256f10524df14 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 28 Mar 2019 11:58:14 +0000 Subject: BlockOperator checks compatibility of Operators --- .../ccpi/optimisation/operators/BlockOperator.py | 37 +++++++++++++++------- Wrappers/Python/test/test_BlockOperator.py | 4 +-- 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 3679136..323efcd 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -14,15 +14,22 @@ from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator from ccpi.framework import BlockGeometry class BlockOperator(Operator): - '''Class to hold a block operator + '''A Block matrix containing Operators + + The Block Framework is a generic strategy to treat variational problems in the + following form: + + .. math:: + + min Regulariser + Fidelity - Class to hold a number of Operators in a block. - User may specify the shape of the block, by default is a row vector BlockOperators have a generic shape M x N, and when applied on an Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with N rows and 1 column. + + User may specify the shape of the block, by default is a row vector Operators in a Block are required to have the same domain column-wise and the same range row-wise. @@ -36,8 +43,8 @@ class BlockOperator(Operator): Do not include the `self` parameter in the ``Args`` section. Args: - vararg (Operator): Operators in the block. - shape (:obj:`tuple`, optional): If shape is passed the Operators in + :param: vararg (Operator): Operators in the block. + :param: shape (:obj:`tuple`, optional): If shape is passed the Operators in vararg are considered input in a row-by-row fashion. Shape and number of Operators must match. 
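To make the compatibility rules concrete, a short sketch (not part of the patch; it assumes TomoIdentity is importable from ccpi.optimisation.ops, as in the tests above):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.ops import TomoIdentity
    from ccpi.optimisation.operators import BlockOperator

    ig = ImageGeometry(10, 20, 30)
    # a 3x1 column of identities over one geometry is accepted
    K = BlockOperator(*[TomoIdentity(ig) for _ in range(3)], shape=(3, 1))

    # mixing domains within a column is rejected at construction time
    try:
        BlockOperator(TomoIdentity(ImageGeometry(10, 20, 30)),
                      TomoIdentity(ImageGeometry(10, 20, 31)),
                      shape=(2, 1))
    except ValueError as ve:
        print(ve)   # Operators in each column must have the same domain
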
@@ -66,12 +73,12 @@ class BlockOperator(Operator): rows, cols = self.shape compatible = True for col in range(cols): - row_compatible = True + column_compatible = True for row in range(1,rows): dg0 = self.get_item(row-1,col).domain_geometry() dg1 = self.get_item(row,col).domain_geometry() - row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible - compatible = compatible and row_compatible + column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible + compatible = compatible and column_compatible return compatible def row_wise_compatible(self): @@ -79,16 +86,16 @@ class BlockOperator(Operator): rows, cols = self.shape compatible = True for row in range(rows): - column_compatible = True + row_compatible = True for col in range(1,cols): dg0 = self.get_item(row,col-1).range_geometry() dg1 = self.get_item(row,col).range_geometry() - column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible - print ("column_compatible" , column_compatible, dg0.shape, dg1.shape) - compatible = compatible and column_compatible + row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible + compatible = compatible and row_compatible return compatible def get_item(self, row, col): + '''returns the Operator at specified row and col''' if row > self.shape[0]: raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) if col > self.shape[1]: @@ -190,6 +197,11 @@ class BlockOperator(Operator): return type(self)(*self.operators, shape=shape) def domain_geometry(self): + '''returns the domain of the BlockOperator + + If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry. + Otherwise it is a BlockGeometry. + ''' if self.shape[1] == 1: # column BlockOperator return self[0].domain_geometry() @@ -199,6 +211,7 @@ class BlockOperator(Operator): shape=shape) def range_geometry(self): + '''returns the range of the BlockOperator''' shape = (self.shape[1], 1) return BlockGeometry(*[el.range_geometry() for el in self.operators], shape=shape) diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py index c042d04..4eb84bb 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -67,8 +67,8 @@ class TestBlockOperator(unittest.TestCase): x = [ g.allocate() for g in ig ] ops = [ TestOperator(g, range=r) for g,r in zip(ig, rg0) ] ops += [ TestOperator(g, range=r) for g,r in zip(ig, rg1) ] - print (len(ops)) - K = BlockOperator(*ops) + + K = BlockOperator(*ops, shape=(2,3)) print ("K col comp? " , K.column_wise_compatible()) print ("K row comp? 
" , K.row_wise_compatible()) for op in ops: -- cgit v1.2.3 From bb8d70e564139b10ce0a3eae327f0b5f91c38368 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 28 Mar 2019 16:00:10 +0000 Subject: added docstring to Function --- .../ccpi/optimisation/functions/BlockFunction.py | 24 +++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 89dd9eb..21cd82b 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -12,16 +12,30 @@ from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer class BlockFunction(Function): - '''missing docstring''' - def __init__(self, operator, *functions): - '''missing docstring''' + '''A Block vector of Functions + + .. math:: + + f = [f_1,f_2,f_3] + f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3) + + ''' + def __init__(self, *functions): + '''Creator''' self.functions = functions self.length = len(self.functions) super(BlockFunction, self).__init__() def __call__(self, x): - '''missing docstring''' + '''evaluates the BlockFunction on the BlockDataContainer + + :param: x (BlockDataContainer): must have as many rows as self.length + + returns sum(f_i(x_i)) + ''' + if self.length != x.shape[0]: + raise ValueError('BlockFunction and BlockDataContainer have incompatible size') t = 0 for i in range(x.shape[0]): t += self.functions[i](x.get_item(i)) @@ -52,5 +66,5 @@ class BlockFunction(Function): return BlockDataContainer(*out) def gradient(self,x, out=None): - '''missing docstring''' + '''gradient returns pass''' pass \ No newline at end of file -- cgit v1.2.3 From d9588f62186a9532890907d11536ebe3824a3bb9 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 29 Mar 2019 17:07:46 +0000 Subject: merge Vaggelis branch block_function --- .../Python/ccpi/framework/BlockDataContainer.py | 36 +-- Wrappers/Python/ccpi/framework/BlockGeometry.py | 6 +- Wrappers/Python/ccpi/framework/framework.py | 4 + .../Python/ccpi/optimisation/algorithms/PDHG.py | 49 ++-- .../ccpi/optimisation/algorithms/__init__.py | 2 +- .../ccpi/optimisation/functions/BlockFunction.py | 2 +- .../ccpi/optimisation/functions/L2NormSquared.py | 247 +++++++++++++++------ .../ccpi/optimisation/functions/ScaledFunction.py | 21 +- .../Python/ccpi/optimisation/functions/__init__.py | 6 +- .../ccpi/optimisation/operators/BlockOperator.py | 16 +- .../optimisation/operators/GradientOperator.py | 140 ++++-------- .../optimisation/operators/IdentityOperator.py | 8 +- .../Python/ccpi/optimisation/operators/__init__.py | 1 - Wrappers/Python/test/test_BlockDataContainer.py | 17 +- Wrappers/Python/test/test_DataContainer.py | 11 - Wrappers/Python/test/test_Gradient.py | 90 ++++++++ Wrappers/Python/test/test_functions.py | 4 +- Wrappers/Python/wip/pdhg_TV_denoising.py | 115 ++++++++++ 18 files changed, 514 insertions(+), 261 deletions(-) create mode 100755 Wrappers/Python/test/test_Gradient.py create mode 100755 Wrappers/Python/wip/pdhg_TV_denoising.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index f29f839..b4041e4 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -64,7 +64,7 @@ class BlockDataContainer(object): .format(type(ot))) return 
len(self.containers) == len(other) elif isinstance(other, numpy.ndarray): - return self.shape == other.shape + return len(self.containers) == len(other) elif issubclass(other.__class__, DataContainer): return self.get_item(0).shape == other.shape return len(self.containers) == len(other.containers) @@ -91,25 +91,26 @@ class BlockDataContainer(object): return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) return type(self)( - *[ el.add(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def subtract(self, other, *args, **kwargs): if not self.is_compatible(other): - raise ValueError('Incompatible for add') + raise ValueError('Incompatible for subtract') out = kwargs.get('out', None) if isinstance(other, Number): - return type(self)(*[ el.subtract(other, out, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) elif issubclass(other.__class__, DataContainer): # try to do algebra with one DataContainer. Will raise error if not compatible return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.subtract(ot, out, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def multiply(self, other, *args, **kwargs): - self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('{} Incompatible for multiply'.format(other)) out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) @@ -124,7 +125,8 @@ class BlockDataContainer(object): shape=self.shape) def divide(self, other, *args, **kwargs): - self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for divide') out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) @@ -137,7 +139,8 @@ class BlockDataContainer(object): shape=self.shape) def power(self, other, *args, **kwargs): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for power') out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) @@ -146,7 +149,8 @@ class BlockDataContainer(object): return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) def maximum(self,other, *args, **kwargs): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for maximum') out = kwargs.get('out', None) if isinstance(other, Number): return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) @@ -265,7 +269,8 @@ class BlockDataContainer(object): for el 
in self.containers: el += other elif isinstance(other, list) or isinstance(other, numpy.ndarray): - self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for __iadd__') for el,ot in zip(self.containers, other): el += ot return self @@ -280,7 +285,8 @@ class BlockDataContainer(object): for el in self.containers: el -= other elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for __isub__') for el,ot in zip(self.containers, other): el -= ot return self @@ -295,7 +301,8 @@ class BlockDataContainer(object): for el in self.containers: el *= other elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for __imul__') for el,ot in zip(self.containers, other): el *= ot return self @@ -310,7 +317,8 @@ class BlockDataContainer(object): for el in self.containers: el /= other elif isinstance(other, list) or isinstance(other, numpy.ndarray): - assert self.is_compatible(other) + if not self.is_compatible(other): + raise ValueError('Incompatible for __idiv__') for el,ot in zip(self.containers, other): el /= ot return self diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py index 632d320..d336305 100755 --- a/Wrappers/Python/ccpi/framework/BlockGeometry.py +++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py @@ -6,7 +6,7 @@ from __future__ import unicode_literals import numpy from numbers import Number import functools -#from ccpi.framework import AcquisitionData, ImageData +from ccpi.framework import BlockDataContainer #from ccpi.optimisation.operators import Operator, LinearOperator class BlockGeometry(object): @@ -28,7 +28,7 @@ class BlockGeometry(object): 'Dimension and size do not match: expected {} got {}' .format(n_elements, len(args))) - def allocate(self, value=0): + def allocate(self, value=0, dimension_labels=None): containers = [geom.allocate(value) for geom in self.geometries] - BlockDataContainer(*containers) + return BlockDataContainer(*containers) diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index bf8273b..ae9faf7 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -161,6 +161,8 @@ class ImageGeometry(object): numpy.random.seed(seed) max_value = kwargs.get('max_value', 100) out.fill(numpy.random.randint(max_value,size=self.shape)) + else: + raise ValueError('Value {} unknown'.format(value)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) @@ -305,6 +307,8 @@ class AcquisitionGeometry(object): numpy.random.seed(seed) max_value = kwargs.get('max_value', 100) out.fill(numpy.random.randint(max_value,size=self.shape)) + else: + raise ValueError('Value {} unknown'.format(value)) if dimension_labels is not None: if dimension_labels != self.dimension_labels: return out.subset(dimensions=dimension_labels) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 7488310..7e55ee8 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -10,8 +10,8 @@ from ccpi.framework import ImageData import numpy as np import matplotlib.pyplot as plt import 
time -from Operators.CompositeOperator import CompositeOperator -from Operators.CompositeDataContainer import CompositeDataContainer +from ccpi.optimisation.operators import BlockOperator +from ccpi.framework import BlockDataContainer def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): @@ -29,13 +29,12 @@ def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - if isinstance(operator, CompositeOperator): -# if isinstance(operator, CompositeOperator_DataContainer): - x_old = operator.alloc_domain_dim() - y_old = operator.alloc_range_dim() + if isinstance(operator, BlockOperator): + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() else: - x_old = ImageData(np.zeros(operator.domain_dim())) - y_old = ImageData(np.zeros(operator.range_dim())) + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() xbar = x_old @@ -68,34 +67,12 @@ def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y -# pdgap - print(f(x) + g(x) + f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) ) - - - - - -# # TV denoising, pdgap with composite -# -# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ -# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) -# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() - - # TV denoising, pdgap with no composite - - - -# primal_obj = f.get_item(0).alpha * ImageData(operator.compMat[0][0].direct(x.get_item(0)).power(2).sum(axis=0)).sqrt().sum() +\ -# 0.5*( (operator.compMat[1][0].direct(x.get_item(0)) - f.get_item(1).b).power(2).sum()) -# dual_obj = 0.5 * ((y.get_item(1).power(2)).sum()) + ( y.get_item(1)*f.get_item(1).b ).sum() - - -# print(primal_obj) -# objective = primal_obj -# - - - +# if i%100==0: +# +# plt.imshow(x.as_array()[100]) +# plt.show() +# print(f(operator.direct(x)) + g(x), i) + t_end = time.time() return x, t_end - t, objective diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py index 7e500e8..443bc78 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -27,4 +27,4 @@ from .CGLS import CGLS from .GradientDescent import GradientDescent from .FISTA import FISTA from .FBPD import FBPD - +from .PDHG import PDHG diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 21cd82b..70216a9 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -66,5 +66,5 @@ class BlockFunction(Function): return BlockDataContainer(*out) def gradient(self,x, out=None): - '''gradient returns pass''' + '''FIXME: gradient returns pass''' pass \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 1baf365..54f8859 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -8,92 +8,203 @@ Created on Thu Feb 7 
13:10:56 2019 @author: evangelos """ -import numpy as np -#from ccpi.optimisation.funcs import Function +import numpy from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.framework import DataContainer, ImageData, ImageGeometry +############################ L2NORM FUNCTION ############################# +class L2NormSquared(Function): -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): + def __init__(self, **kwargs): - super(SimpleL2NormSq, self).__init__() - # Lispchitz constant of gradient - self.L = 2 + ''' L2NormSquared class + f : ImageGeometry --> R + + Cases: f(x) = ||x||^{2}_{2} + f(x) = || x - b ||^{2}_{2} - def __call__(self, x): - return x.power(2).sum() - - def gradient(self,x, out=None): + ''' + + #TODO need x, b to live in the same geometry if b is not None + + super(L2NormSquared, self).__init__() + self.b = kwargs.get('b',None) + + def __call__(self, x, out=None): + + ''' Evaluates L2NormSq at point x''' + + y = x + if self.b is not None: +# x.subtract(self.b, out = x) + y = x - self.b +# else: +# y +# if out is None: +# return x.squared_norm() +# else: + return y.squared_norm() + + + + def gradient(self, x, out=None): + + ''' Evaluates gradient of L2NormSq at point x''' + + if self.b is not None: +# x.subtract(self.b, out=x) + y = x - self.b if out is None: - return 2 * x + return 2*y else: - out.fill(2*x) - - def convex_conjugate(self,x): - return (1/4) * x.squared_norm() + return out.fill(2*y) + + def convex_conjugate(self, x, out=None): - def proximal(self, x, tau, out=None): + ''' Evaluate convex conjugate of L2NormSq ''' + + tmp = 0 + if self.b is not None: + tmp = (self.b * x).sum() + if out is None: - return x.divide(1+2*tau) + return (1/4) * x.squared_norm() + tmp else: - x.divide(1+2*tau, out=out) + out.fill((1/4) * x.squared_norm() + tmp) + + + def proximal(self, x, tau, out = None): + + ''' The proximal operator ( prox_\{tau * f\}(x) ) evaluates i.e., + argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' + + if out is None: + if self.b is not None: + return (x - self.b)/(1+2*tau) + self.b + else: + return x/(1+2*tau) + else: + if self.b is not None: + out.fill((x - self.b)/(1+2*tau) + self.b) + else: + out.fill(x/(1+2*tau)) + def proximal_conjugate(self, x, tau, out=None): + if out is None: - return x.divide(1 + tau/2) + if self.b is not None: + return (x - tau*self.b)/(1 + tau/2) + else: + return x/(1 + tau/2 ) else: - x.divide(1+tau/2, out=out) + if self.b is not None: + out.fill((x - tau*self.b)/(1 + tau/2)) + else: + out.fill(x/(1 + tau/2 )) + + def __rmul__(self, scalar): + return ScaledFunction(self, scalar) - -############################ L2NORM FUNCTIONS ############################# -class L2NormSq(SimpleL2NormSq): +if __name__ == '__main__': - def __init__(self, **kwargs): - super(L2NormSq, self).__init__() - self.b = kwargs.get('b',None) + + # TESTS for L2 and scalar * L2 + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate('random_int') + b = ig.allocate('random_int') + + # check grad/call no data + f = L2NormSquared() + a1 = f.gradient(u) + a2 = 2 * u + numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) + numpy.testing.assert_equal(f(u), u.squared_norm()) - def __call__(self, x): - if self.b is None: - return SimpleL2NormSq.__call__(self, x) - else: - return SimpleL2NormSq.__call__(self, x - self.b) + # check 
grad/call with data + f1 = L2NormSquared(b=b) + b1 = f1.gradient(u) + b2 = 2 * (u-b) - def gradient(self, x, out=None): - if self.b is None: - return 2 * x - else: - return 2 * (x - self.b) - - def convex_conjugate(self, x): - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) + + #check convex conjuagate no data + c1 = f.convex_conjugate(u) + c2 = 1/4 * u.squared_norm() + numpy.testing.assert_equal(c1, c2) + + #check convex conjuagate with data + d1 = f1.convex_conjugate(u) + d2 = (1/4) * u.squared_norm() + (u*b).sum() + numpy.testing.assert_equal(d1, d2) + + # check proximal no data + tau = 5 + e1 = f.proximal(u, tau) + e2 = u/(1+2*tau) + numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) + + # check proximal with data + tau = 5 + h1 = f1.proximal(u, tau) + h2 = (u-b)/(1+2*tau) + b + numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) + + # check proximal conjugate no data + tau = 0.2 + k1 = f.proximal_conjugate(u, tau) + k2 = u/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) + + # check proximal conjugate with data + l1 = f1.proximal_conjugate(u, tau) + l2 = (u - tau * b)/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) + - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) + # check scaled function properties + + # scalar + scalar = 100 + f_scaled_no_data = scalar * L2NormSquared() + f_scaled_data = scalar * L2NormSquared(b=b) + + # call + numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) + numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) + + # grad + numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) + + # conj + numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ + f.convex_conjugate(u/scalar) * scalar, decimal=4) + + numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ + scalar * f1.convex_conjugate(u/scalar), decimal=4) + + # proximal + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ + f.proximal(u, tau*scalar).as_array()) + + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ + f1.proximal(u, tau*scalar).as_array()) + + + # proximal conjugate + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \ + (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ + ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + - def proximal_conjugate(self, x, tau): - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., 
f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 8a52566..b48135d 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -39,9 +39,11 @@ class ScaledFunction(object): def proximal_conjugate(self, x, tau, out = None): '''This returns the proximal operator for the function at x, tau - - TODO check if this is mathematically correct''' - return self.function.proximal_conjugate(x, tau, out=out) + ''' + if out is None: + return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) + else: + out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) def grad(self, x): '''Alias of gradient(x,None)''' @@ -57,10 +59,15 @@ class ScaledFunction(object): def gradient(self, x, out=None): '''Returns the gradient of the function at x, if the function is differentiable''' - return self.scalar * self.function.gradient(x, out=out) + if out is None: + return self.scalar * self.function.gradient(x) + else: + out.fill( self.scalar * self.function.gradient(x) ) def proximal(self, x, tau, out=None): '''This returns the proximal operator for the function at x, tau - - TODO check if this is mathematically correct''' - return self.function.proximal(x, tau, out=out) + ''' + if out is None: + return self.function.proximal(x, tau*self.scalar) + else: + out.fill( self.function.proximal(x, tau*self.scalar) ) diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 9030454..d6edd03 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -3,8 +3,10 @@ from .Function import Function from .ZeroFun import ZeroFun from .L1Norm import SimpleL1Norm, L1Norm -from .L2NormSquared import L2NormSq, SimpleL2NormSq -from .mixed_L12Norm import mixed_L12Norm +#from .L2NormSquared import L2NormSq, SimpleL2NormSq +from .L2NormSquared import L2NormSquared from .BlockFunction import BlockFunction from .ScaledFunction import ScaledFunction from .FunctionOperatorComposition import FunctionOperatorComposition +from .MixedL21Norm import MixedL21Norm + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 323efcd..1240b31 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -105,13 +105,8 @@ class BlockOperator(Operator): return self.operators[index] def norm(self): - norm = [op.norm() for op in self.operators] - b = [] - for i in range(self.shape[0]): - b.append([]) - for j in range(self.shape[1]): - b[-1].append(norm[i*self.shape[1]+j]) - return numpy.asarray(b) + norm = [op.norm()**2 for op in self.operators] + return numpy.sqrt(sum(norm)) def direct(self, x, out=None): '''Direct operation for the BlockOperator @@ -161,7 +156,10 @@ class BlockOperator(Operator): else: prod += self.get_item(row, col).adjoint(x_b.get_item(col)) res.append(prod) - return BlockDataContainer(*res, shape=shape) + if self.shape[1]==1: + return ImageData(*res) + else: + return 
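As a rough sanity check on the norm changes in this commit (not part of the patch): BlockOperator.norm now combines the component norms as the square root of the sum of squares. For a column block of a 2D Gradient over an Identity, using the usual bound ||Gradient||^2 <= 8 in two dimensions and ||Identity|| = 1, the estimate is sqrt(8 + 1) = 3:

    import numpy

    component_norms = [numpy.sqrt(8), 1.0]   # 2D gradient bound, identity
    print(numpy.sqrt(sum(n**2 for n in component_norms)))   # 3.0
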
BlockDataContainer(*res, shape=shape) def get_output_shape(self, xshape, adjoint=False): sshape = self.shape[1] @@ -204,7 +202,7 @@ class BlockOperator(Operator): ''' if self.shape[1] == 1: # column BlockOperator - return self[0].domain_geometry() + return self.get_item(0,0).domain_geometry() else: shape = (self.shape[0], 1) return BlockGeometry(*[el.domain_geometry() for el in self.operators], diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index d0d0f43..ec14b8f 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -6,131 +6,73 @@ Created on Fri Mar 1 22:50:04 2019 @author: evangelos """ -from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np +from ccpi.framework import ImageData, ImageGeometry, BlockGeometry +import numpy from ccpi.optimisation.operators import FiniteDiff -from ccpi.framework import BlockGeometry #%% -class Gradient(Operator): +class Gradient(LinearOperator): - def __init__(self, gm_domain, gm_range=None, bnd_cond = 'Neumann', **kwargs): + def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): super(Gradient, self).__init__() - + self.gm_domain = gm_domain # Domain of Grad Operator - self.gm_range = gm_range # Range of Grad Operator - self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences - - if self.gm_range is None: - #FIXME this should be a BlockGeometry - self.gm_range = ((len(self.gm_domain),)+self.gm_domain) - - # Kwargs Default options - self.memopt = kwargs.get('memopt',False) - self.correlation = kwargs.get('correlation','Space') + self.correlation = kwargs.get('correlation','Space') - #TODO not tested yet, operator norm??? 
- self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) - + if self.correlation=='Space': + if self.gm_domain.channels>1: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] ) + self.ind = numpy.arange(1,self.gm_domain.length) + else: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] ) + self.ind = numpy.arange(self.gm_domain.length) + elif self.correlation=='SpaceChannels': + if self.gm_domain.channels>1: + self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)]) + self.ind = range(self.gm_domain.length) + else: + raise ValueError('No channels to correlate') + + self.bnd_cond = bnd_cond + def direct(self, x, out=None): - #tmp = np.zeros(self.gm_range) tmp = self.gm_range.allocate() - for i in range(len(self.gm_domain)): - #tmp[i] = FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] - if self.correlation == 'Space': - if i == 0 : - i+=1 - tmp[i].fill( - FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).direct(x.as_array())/self.voxel_size[i] - ) -# return type(x)(tmp) - return type(x)(tmp) - + + + for i in range(tmp.shape[0]): + tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) + return tmp + def adjoint(self, x, out=None): - - tmp = np.zeros(self.gm_domain) - for i in range(len(self.gm_domain)): - tmp+=FiniteDiff(self.gm_domain, direction = i, bnd_cond = self.bnd_cond).adjoint(x.as_array()[i])/self.voxel_size[i] - return type(x)(-tmp) - def alloc_domain_dim(self): - return ImageData(np.zeros(self.gm_domain)) - - def alloc_range_dim(self): - return ImageData(np.zeros(self.range_dim)) + tmp = self.gm_domain.allocate() + for i in range(x.shape[0]): + tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) + return tmp + def domain_geometry(self): return self.gm_domain def range_geometry(self): - '''fix this''' - return BlockGeometry(self.gm_range, self.gm_range) + return self.gm_range def norm(self): -# return np.sqrt(4*len(self.domainDim())) - #TODO this takes time for big ImageData - # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) - x0 = ImageData(np.random.random_sample(self.domain_dim())) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + + x0 = self.gm_domain.allocate('random') + self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0) return self.s1 + def __rmul__(self, scalar): + return ScaledOperator(self, scalar) if __name__ == '__main__': - N, M = (200,300) - ig = (N,M) - G = Gradient(ig) - u = DataContainer(np.random.randint(10, size=G.domain_dim())) - w = DataContainer(np.random.randint(10, size=G.range_dim())) -# w = [DataContainer(np.random.randint(10, size=G.domain_dim())),\ -# DataContainer(np.random.randint(10, size=G.domain_dim()))] - - # domain_dim - print('Domain {}'.format(G.domain_geometry())) - - # range_dim - print('Range {}'.format(G.range_geometry())) - - # Direct - z = G.direct(u) - - # Adjoint - z1 = G.adjoint(w) - - print(z) - print(z1) - - LHS = (G.direct(u)*w).sum() - RHS = (u * G.adjoint(w)).sum() -# - print(LHS,RHS) - print(G.norm()) - -# print(G.adjoint(G.direct(u))) - - - - - - - - - - - - - - - - - - - - \ No newline at end of file + pass diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py index d49cb30..0f50e82 100644 --- 
a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py @@ -6,10 +6,10 @@ Created on Wed Mar 6 19:30:51 2019 @author: evangelos """ -from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import LinearOperator -class Identity(Operator): +class Identity(LinearOperator): def __init__(self, gm_domain, gm_range=None): @@ -35,8 +35,8 @@ class Identity(Operator): def norm(self): return 1.0 - def domain_dim(self): + def domain_geometry(self): return self.gm_domain - def range_dim(self): + def range_geometry(self): return self.gm_range \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index 1e86efc..1c09faf 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -17,4 +17,3 @@ from .GradientOperator import Gradient from .SymmetrizedGradientOperator import SymmetrizedGradient from .IdentityOperator import Identity from .ZeroOperator import ZeroOp - diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 51d07fa..2ee0e94 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -214,10 +214,15 @@ class TestBlockDataContainer(unittest.TestCase): cp2 = numpy.asarray([3,2]) * cp0 numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) - cp2 = [3,2,3] * cp0 - numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) - numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) + try: + cp2 = [3,2,3] * cp0 + #numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) + #numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) cp2 *= cp1 numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0 , decimal=5) numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , +6., decimal = 5) @@ -230,6 +235,12 @@ class TestBlockDataContainer(unittest.TestCase): numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 0. , decimal=5) numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , -6., decimal = 5) + try: + cp2 *= [2,3,5] + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) cp2 = cp0.divide(cp1) assert (cp2.get_item(0).as_array()[0][0][0] == 0.) 
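The two try/except blocks added to test_BlockDataContainer above encode the new expectation that element-wise multiplication of a BlockDataContainer by a Python list is only valid when the list supplies one value per container; a length mismatch should raise ValueError rather than being broadcast silently. Below is a minimal sketch of the kind of length check that gives this behaviour; the helper name is hypothetical and only the containers attribute, used elsewhere in this patch series, is assumed:

    # hypothetical sketch of the per-container length check exercised above
    def _as_per_container_values(block, values):
        '''Return one value per container, raising ValueError on a length mismatch.'''
        if isinstance(values, (list, tuple)):
            if len(values) != len(block.containers):
                raise ValueError('Incompatible length: {} != {}'.format(
                    len(values), len(block.containers)))
            return list(values)
        # a plain scalar applies to every container
        return [values] * len(block.containers)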
diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index cb09a1f..8edfd8b 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -526,17 +526,6 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[2], sino.dimension_labels[2]) self.assertEqual(order[2], sino.dimension_labels[2]) - def test_ImageGeometry_equal(self): - vg1 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) - vg2 = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) - self.assertTrue(vg1 == vg2) - self.assertFalse(vg1 != vg2) - - vg2 = ImageGeometry(voxel_num_z=3,voxel_num_x=4, voxel_num_y=3, channels=2) - self.assertTrue(vg1 != vg2) - self.assertFalse(vg1 == vg2) - - def assertNumpyArrayEqual(self, first, second): res = True try: diff --git a/Wrappers/Python/test/test_Gradient.py b/Wrappers/Python/test/test_Gradient.py new file mode 100755 index 0000000..1d6485c --- /dev/null +++ b/Wrappers/Python/test/test_Gradient.py @@ -0,0 +1,90 @@ +import unittest +import numpy +#from ccpi.plugins.ops import CCPiProjectorSimple +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.framework import ImageGeometry, AcquisitionGeometry +from ccpi.framework import ImageData, AcquisitionData +#from ccpi.optimisation.algorithms import GradientDescent +from ccpi.framework import BlockDataContainer +#from ccpi.optimisation.Algorithms import CGLS +import functools + +from ccpi.optimisation.operators import Gradient, Identity, BlockOperator + +class TestGradient(unittest.TestCase): + def test_Gradient(self): + N, M, K = 20, 30, 40 + channels = 10 + + # check range geometry, examples + + ig1 = ImageGeometry(voxel_num_x = M, voxel_num_y = N) + ig2 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, voxel_num_z = K) + ig3 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels) + ig4 = ImageGeometry(voxel_num_x = M, voxel_num_y = N, channels = channels, voxel_num_z= K) + + G1 = Gradient(ig1, correlation = 'Space') + print(G1.range_geometry().shape, '2D no channels') + + G4 = Gradient(ig3, correlation = 'SpaceChannels') + print(G4.range_geometry().shape, '2D with channels corr') + G5 = Gradient(ig3, correlation = 'Space') + print(G5.range_geometry().shape, '2D with channels no corr') + + G6 = Gradient(ig4, correlation = 'Space') + print(G6.range_geometry().shape, '3D with channels no corr') + G7 = Gradient(ig4, correlation = 'SpaceChannels') + print(G7.range_geometry().shape, '3D with channels with corr') + + + u = ig1.allocate(ImageGeometry.RANDOM) + w = G1.range_geometry().allocate(ImageGeometry.RANDOM_INT) + + LHS = (G1.direct(u)*w).sum() + RHS = (u * G1.adjoint(w)).sum() + numpy.testing.assert_approx_equal(LHS, RHS, significant = 1) + numpy.testing.assert_approx_equal(G1.norm(), numpy.sqrt(2*4), significant = 1) + + + u1 = ig3.allocate('random') + w1 = G4.range_geometry().allocate('random') + LHS1 = (G4.direct(u1) * w1).sum() + RHS1 = (u1 * G4.adjoint(w1)).sum() + numpy.testing.assert_approx_equal(LHS1, RHS1, significant=1) + numpy.testing.assert_almost_equal(G4.norm(), numpy.sqrt(3*4), decimal = 0) + + u2 = ig4.allocate('random') + w2 = G7.range_geometry().allocate('random') + LHS2 = (G7.direct(u2) * w2).sum() + RHS2 = (u2 * G7.adjoint(w2)).sum() + numpy.testing.assert_approx_equal(LHS2, RHS2, significant = 3) + numpy.testing.assert_approx_equal(G7.norm(), numpy.sqrt(3*4), significant = 1) + + 
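The assertions above check the defining property of the adjoint, (G.direct(u) * w).sum() == (u * G.adjoint(w)).sum(), and that the power-method estimate of the operator norm approaches the analytic bound sqrt(4 * number_of_directions) for a unit-spacing forward-difference gradient (sqrt(8) in 2D, sqrt(12) for three directions), the same values quoted in the old GradientOperator comments. A small standalone numpy sketch of the same inner-product test for a 1D forward difference with a Neumann boundary follows; it is illustrative only and uses plain arrays rather than the ccpi geometries:

    # check <D u, w> == <u, D^T w> for a forward difference (illustrative sketch)
    import numpy as np

    def fwd_diff(u):
        '''Forward difference; the Neumann boundary makes the last entry zero.'''
        d = np.zeros_like(u)
        d[:-1] = u[1:] - u[:-1]
        return d

    def fwd_diff_adjoint(w):
        '''Adjoint of fwd_diff: a negated backward difference with matching boundary rows.'''
        a = np.zeros_like(w)
        a[0] = -w[0]
        a[1:-1] = w[:-2] - w[1:-1]
        a[-1] = w[-2]
        return a

    rng = np.random.default_rng(0)
    u, w = rng.random(20), rng.random(20)
    assert np.isclose(fwd_diff(u) @ w, u @ fwd_diff_adjoint(w))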
+ #check direct/adjoint for space/channels correlation + + ig_channel = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3, channels = 2) + G_no_channel = Gradient(ig_channel, correlation = 'Space') + G_channel = Gradient(ig_channel, correlation = 'SpaceChannels') + + u3 = ig_channel.allocate('random_int') + res_no_channel = G_no_channel.direct(u3) + res_channel = G_channel.direct(u3) + + print(" Derivative for 3 directions, first is wrt Channel direction\n") + print(res_channel[0].as_array()) + print(res_channel[1].as_array()) + print(res_channel[2].as_array()) + + print(" Derivative for 2 directions, no Channel direction\n") + print(res_no_channel[0].as_array()) + print(res_no_channel[1].as_array()) + + ig2D = ImageGeometry(voxel_num_x = 2, voxel_num_y = 3) + u4 = ig2D.allocate('random_int') + G2D = Gradient(ig2D) + res = G2D.direct(u4) + print(res[0].as_array()) + print(res[1].as_array()) diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 6a44641..384e351 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -37,13 +37,13 @@ class TestFunction(unittest.TestCase): N = 3 - ig = (N,N) + ig = ImageGeometry(N,N) ag = ig op1 = Gradient(ig) op2 = Identity(ig, ag) # Form Composite Operator - operator = BlockOperator((2,1), op1, op2 ) + operator = BlockOperator(op1, op2 , shape=(2,1) ) # Create functions noisy_data = ImageData(np.random.randint(10, size=ag)) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py new file mode 100755 index 0000000..3819de5 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction + +from skimage.util import random_noise + +# ############################################################################ +# Create phantom for TV denoising + +N = 200 +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode='gaussian', seed=10) +noisy_data = ImageData(n1) + + +#%% + +# Regularisation Parameter +alpha = 200 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '0' +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions +# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ +# L2NormSq(0.5, b = noisy_data) ) + + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) + + f = BlockFunction(f1, f2 ) + g = ZeroFun() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = 0.5 * L2NormSquared(b = noisy_data) + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() +print ("normK", normK) +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) + + +#%% +## Number of iterations +opt = {'niter':2000} +## +### Run algorithm +result, total_time, objective = PDHG(f, g, operator, \ + tau = tau, sigma = sigma, opt = opt) +#%% +###Show results +if isinstance(result, BlockDataContainer): + sol = result.get_item(0).as_array() +else: + sol = result.as_array() + +#sol = result.as_array() +# +plt.imshow(sol) +plt.colorbar() +plt.show() +# +### +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.show() +## +plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +plt.legend() +plt.show() + + +#%% +# -- cgit v1.2.3 From d76a4d10b5fd2038eeac4eda85361e5b92dabc30 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 14:51:40 +0100 Subject: code and test updates --- .../ccpi/optimisation/functions/L2NormSquared.py | 42 ++++--- .../Python/ccpi/optimisation/functions/ZeroFun.py | 2 +- .../ccpi/optimisation/operators/BlockOperator.py | 12 +- .../Python/ccpi/optimisation/operators/Operator.py | 2 +- Wrappers/Python/test/test_BlockOperator.py | 5 + Wrappers/Python/test/test_functions.py | 137 +++++++++++++++++++-- 6 files changed, 167 insertions(+), 33 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 54f8859..5489d92 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -31,8 +31,7 @@ class L2NormSquared(Function): super(L2NormSquared, self).__init__() self.b = kwargs.get('b',None) - def __call__(self, x, out=None): - + def __call__(self, x): ''' Evaluates L2NormSq at point x''' y = x @@ -44,33 +43,41 @@ class L2NormSquared(Function): # if out is None: # return x.squared_norm() # else: - return y.squared_norm() - + try: + return y.squared_norm() + except AttributeError as ae: + # added for compatibility with SIRF + return (y.norm()**2) + def gradient(self, x, out=None): - ''' Evaluates gradient of L2NormSq at point x''' - + if out is not None: + out.fill(x) + if self.b is not None: + out -= self.b + out *= 2 + else: + y = x if self.b is not None: # x.subtract(self.b, out=x) y = x - self.b - if out is None: - return 2*y - else: - return out.fill(2*y) + return 2*y + 
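The in-place gradient above, together with the convex_conjugate, proximal and proximal_conjugate methods that follow, relies on the closed forms for f(x) = ||x - b||_2^2 that the new test_L2NormSquared test exercises: grad f(x) = 2(x - b), f*(y) = ||y||^2/4 + <y, b>, prox_{tau f}(x) = (x + 2 tau b)/(1 + 2 tau) (equivalently (x - b)/(1 + 2 tau) + b), and prox_{tau f*}(y) = (y - tau b)/(1 + tau/2). A small standalone numpy sketch of these identities follows; it is illustrative only and does not touch the ccpi classes:

    # standalone numpy check of the closed forms used above (illustrative sketch)
    import numpy as np

    rng = np.random.default_rng(1)
    x, b = rng.random(10), rng.random(10)
    tau = 0.3

    f = lambda v: np.sum((v - b) ** 2)                # f(v) = ||v - b||^2
    grad = 2 * (x - b)                                # gradient of f at x
    fstar = lambda y: 0.25 * np.sum(y ** 2) + y @ b   # convex conjugate f*(y)
    prox = (x + 2 * tau * b) / (1 + 2 * tau)          # prox_{tau f}(x)
    prox_conj = (x - tau * b) / (1 + tau / 2)         # prox_{tau f*}(x)

    # Fenchel-Young holds with equality at y = grad f(x)
    assert np.isclose(f(x) + fstar(grad), x @ grad)
    # prox optimality condition: x - prox = tau * grad f(prox)
    assert np.allclose(x - prox, 2 * tau * (prox - b))
    # Moreau identity: prox_{tau f*}(x) + tau * prox_{f/tau}(x / tau) = x
    assert np.allclose(prox_conj + tau * (x + 2 * b) / (tau + 2), x)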
def convex_conjugate(self, x, out=None): - - ''' Evaluate convex conjugate of L2NormSq ''' + ''' Evaluate convex conjugate of L2NormSq''' tmp = 0 if self.b is not None: tmp = (self.b * x).sum() if out is None: + # FIXME: this is a number return (1/4) * x.squared_norm() + tmp else: + # FIXME: this is a DataContainer out.fill((1/4) * x.squared_norm() + tmp) @@ -86,10 +93,15 @@ class L2NormSquared(Function): else: return x/(1+2*tau) else: + out.fill(x) if self.b is not None: - out.fill((x - self.b)/(1+2*tau) + self.b) - else: - out.fill(x/(1+2*tau)) + out -= self.b + out /= (1+2*tau) + if self.b is not None: + out += self.b + #out.fill((x - self.b)/(1+2*tau) + self.b) + #else: + # out.fill(x/(1+2*tau)) def proximal_conjugate(self, x, tau, out=None): diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py index 9def741..8c28874 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py @@ -29,7 +29,7 @@ class ZeroFun(Function): if x.shape[0]==1: return x.maximum(0).sum() else: - if isinstance(x, CompositeDataContainer): + if isinstance(x, BlockDataContainer): return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() else: return x.maximum(0).sum() + x.maximum(0).sum() diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 1240b31..ee8f609 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -190,9 +190,15 @@ class BlockOperator(Operator): return type(self)(*ops, shape=self.shape) @property def T(self): - '''Return the transposed of self''' - shape = (self.shape[1], self.shape[0]) - return type(self)(*self.operators, shape=shape) + '''Return the transposed of self + + input in a row-by-row''' + newshape = (self.shape[1], self.shape[0]) + oplist = [] + for col in range(newshape[1]): + for row in range(newshape[0]): + oplist.append(self.get_item(col,row)) + return type(self)(*oplist, shape=newshape) def domain_geometry(self): '''returns the domain of the BlockOperator diff --git a/Wrappers/Python/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/ccpi/optimisation/operators/Operator.py index cdf15a7..2d2089b 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/Operator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/Operator.py @@ -4,7 +4,7 @@ Created on Tue Mar 5 15:55:56 2019 @author: ofn77899 """ -from ccpi.optimisation.operators import ScaledOperator +from ccpi.optimisation.operators.ScaledOperator import ScaledOperator class Operator(object): '''Operator that maps from a space X -> Y''' diff --git a/Wrappers/Python/test/test_BlockOperator.py b/Wrappers/Python/test/test_BlockOperator.py index 4eb84bb..e1c05fb 100644 --- a/Wrappers/Python/test/test_BlockOperator.py +++ b/Wrappers/Python/test/test_BlockOperator.py @@ -39,6 +39,11 @@ class TestBlockOperator(unittest.TestCase): zero = numpy.zeros(X.get_item(0).shape) numpy.testing.assert_array_equal(Y.get_item(0).as_array(),len(x)+zero) + K2 = BlockOperator(*(ops+ops), shape=(3,2)) + Y = K2.T.direct(X) + # K.T (2,3) X (3,1) => output shape (2,1) + self.assertTrue(Y.shape == (2,1)) + try: # this should fail as the domain is not compatible ig = [ ImageGeometry(10,20,31) , \ diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 384e351..3e5f26f 100644 --- 
a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -17,17 +17,20 @@ from ccpi.framework import BlockDataContainer from numbers import Number from ccpi.optimisation.operators import Gradient -from ccpi.optimisation.functions import SimpleL2NormSq -from ccpi.optimisation.functions import L2NormSq +#from ccpi.optimisation.functions import SimpleL2NormSq +from ccpi.optimisation.functions import L2NormSquared from ccpi.optimisation.functions import SimpleL1Norm from ccpi.optimisation.functions import L1Norm + +from ccpi.optimisation.funcs import Norm2sq # from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq # from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm -from ccpi.optimisation.functions import mixed_L12Norm +#from ccpi.optimisation.functions import mixed_L12Norm from ccpi.optimisation.functions import ZeroFun from ccpi.optimisation.functions import FunctionOperatorComposition -import unittest +import unittest +import numpy # @@ -46,12 +49,12 @@ class TestFunction(unittest.TestCase): operator = BlockOperator(op1, op2 , shape=(2,1) ) # Create functions - noisy_data = ImageData(np.random.randint(10, size=ag)) + noisy_data = ag.allocate(ImageGeometry.RANDOM_INT) - d = ImageData(np.random.randint(10, size=ag)) + d = ag.allocate(ImageGeometry.RANDOM_INT) alpha = 0.5 # scaled function - g = alpha * L2NormSq(b=noisy_data) + g = alpha * L2NormSquared(b=noisy_data) # Compare call of g a2 = alpha*(d - noisy_data).power(2).sum() @@ -63,13 +66,121 @@ class TestFunction(unittest.TestCase): self.assertEqual(a3, g.convex_conjugate(d)) #print( a3, g.convex_conjugate(d)) + def test_L2NormSquared(self): + # TESTS for L2 and scalar * L2 - def stest_ScaledFunctin(self): - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate(ImageGeometry.RANDOM_INT) + b = ig.allocate(ImageGeometry.RANDOM_INT) + + # check grad/call no data + f = L2NormSquared() + a1 = f.gradient(u) + a2 = 2 * u + numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) + numpy.testing.assert_equal(f(u), u.squared_norm()) + + # check grad/call with data + f1 = L2NormSquared(b=b) + b1 = f1.gradient(u) + b2 = 2 * (u-b) + + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) + + #check convex conjuagate no data + c1 = f.convex_conjugate(u) + c2 = 1/4 * u.squared_norm() + numpy.testing.assert_equal(c1, c2) + + #check convex conjuagate with data + d1 = f1.convex_conjugate(u) + d2 = (1/4) * u.squared_norm() + (u*b).sum() + numpy.testing.assert_equal(d1, d2) + + # check proximal no data + tau = 5 + e1 = f.proximal(u, tau) + e2 = u/(1+2*tau) + numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) + + # check proximal with data + tau = 5 + h1 = f1.proximal(u, tau) + h2 = (u-b)/(1+2*tau) + b + numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) + + # check proximal conjugate no data + tau = 0.2 + k1 = f.proximal_conjugate(u, tau) + k2 = u/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) + + # check proximal conjugate with data + l1 = f1.proximal_conjugate(u, tau) + l2 = (u - tau * b)/(1 + tau/2 ) + numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) + + + # check scaled function properties + + # scalar + scalar = 100 
+ f_scaled_no_data = scalar * L2NormSquared() + f_scaled_data = scalar * L2NormSquared(b=b) + + # call + numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) + numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) + + # grad + numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) + + # conj + numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ + f.convex_conjugate(u/scalar) * scalar, decimal=4) + + numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ + scalar * f1.convex_conjugate(u/scalar), decimal=4) + + # proximal + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ + f.proximal(u, tau*scalar).as_array()) + + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ + f1.proximal(u, tau*scalar).as_array()) + + + # proximal conjugate + numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \ + (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ + ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + + def test_Norm2sq_as_FunctionOperatorComposition(self): + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate(ImageGeometry.RANDOM_INT) + b = ig.allocate(ImageGeometry.RANDOM_INT) + + A = 0.5 * Identity(ig) + old_chisq = Norm2sq(A, b, 1.0) + new_chisq = FunctionOperatorComposition(A, L2NormSquared(b=b)) + + yold = old_chisq(u) + ynew = new_chisq(u) + self.assertEqual(yold, ynew) + + yold = old_chisq.gradient(u) + ynew = new_chisq.gradient(u) + numpy.testing.assert_array_equal(yold.as_array(), ynew.as_array()) + + # # f1 = L2NormSq(alpha=1, b=noisy_data) -- cgit v1.2.3 From b12088bc999838c7c57e960639f9798acefc23c6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:32:19 +0100 Subject: add fill to BlockDataContainer --- .../Python/ccpi/framework/BlockDataContainer.py | 3 + .../ccpi/optimisation/functions/functions.py | 312 --------------------- .../ccpi/optimisation/functions/mixed_L12Norm.py | 56 ---- 3 files changed, 3 insertions(+), 368 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/functions.py delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index b4041e4..8e55b67 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -181,6 +181,9 @@ class BlockDataContainer(object): return self.clone() def clone(self): return type(self)(*[el.copy() for el in self.containers], shape=self.shape) + def fill(self, x): + for el,ot in zip(self.containers, x): + el.fill(ot) def __add__(self, other): return self.add( other ) diff --git a/Wrappers/Python/ccpi/optimisation/functions/functions.py b/Wrappers/Python/ccpi/optimisation/functions/functions.py deleted file mode 100644 index 8632920..0000000 --- a/Wrappers/Python/ccpi/optimisation/functions/functions.py +++ /dev/null @@ -1,312 +0,0 @@ -# -*- coding: utf-8 -*- - -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 - 
-@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry -from operators import CompositeDataContainer, Identity, CompositeOperator -from numbers import Number - - -############################ L2NORM FUNCTIONS ############################# -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): - - super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.power(2).sum() - - def gradient(self,x): - return 2 * self.alpha * x - - def convex_conjugate(self,x): - return (1/4*self.alpha) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) - - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/2*self.alpha ) - - -class L2NormSq(SimpleL2NormSq): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L2NormSq, self).__init__(alpha=alpha) - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL2NormSq.__call__(self, self.A.direct(x)) - else: - return SimpleL2NormSq.__call__(self, self.A.direct(x) - self.b) - - def convex_conjugate(self, x): - - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def gradient(self, x): - - if self.b is None: - return 2*self.alpha * self.A.adjoint(self.A.direct(x)) - else: - return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b) - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - - - def proximal_conjugate(self, x, tau): - - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, self.A.direct(x)) - else: - return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return 
SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, A, b=None, alpha=1, **kwargs): - - super(mixed_L12Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - self.sym_grad = kwargs.get('sym_grad',False) - - - - def gradient(self,x): - return ValueError('Not Differentiable') - - - def __call__(self,x): - - y = self.A.direct(x) - eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt() - eucl_norm.__isub__(self.b) - return eucl_norm.sum() * self.alpha - - def convex_conjugate(self,x): - return 0 - - def proximal_conjugate(self, x, tau): - - if self.b is None: - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - else: - res = (x - tau*self.b)/ ((x - tau*self.b)).abs().maximum(1.0) - - return res - - -#%% - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - return x.maximum(0).sum() - - def proximal(self,x,tau): - return x.copy() - - def proximal_conjugate(self, x, tau): - return 0 - - -class CompositeFunction(Function): - - def __init__(self, *args): - self.functions = args - self.length = len(self.functions) - - def get_item(self, ind): - return self.functions[ind] - - def __call__(self,x): - - t = 0 - for i in range(self.length): - for j in range(x.shape[0]): - t +=self.functions[i](x.get_item(j)) - return t - - def convex_conjugate(self, x): - - z = 0 - t = 0 - for i in range(x.shape[0]): - t += self.functions[z].convex_conjugate(x.get_item(i)) - z += 1 - - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - - -if __name__ == '__main__': - - N = 3 - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - - # Form Composite Operator - operator = CompositeOperator((2,1), op1, op2 ) - - # Create functions - alpha = 1 - noisy_data = ImageData(np.random.randint(10, size=ag)) - f = CompositeFunction(L1Norm(op1,alpha), \ - L2NormSq(op2, noisy_data, c = 0.5, memopt = False) ) - - u = ImageData(np.random.randint(10, size=ig)) - uComp = CompositeDataContainer(u) - - print(f(uComp)) # This is f(Kx) = f1(K1*u) + f2(K2*u) - - f1 = L1Norm(op1,alpha) - f2 = L2NormSq(op2, noisy_data, c = 
0.5, memopt = False) - - print(f1(u) + f2(u)) - - - diff --git a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py deleted file mode 100644 index ffeb32e..0000000 --- a/Wrappers/Python/ccpi/optimisation/functions/mixed_L12Norm.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:43:12 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, alpha, **kwargs): - - super(mixed_L12Norm, self).__init__() - - self.alpha = alpha - self.b = kwargs.get('b',None) - self.sym_grad = kwargs.get('sym_grad',False) - - def __call__(self,x): - - if self.b is None: - tmp1 = x - else: - tmp1 = x - self.b -# - if self.sym_grad: - tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) - else: - tmp = ImageData(tmp1.power(2).sum(axis=0)).sqrt() - - return self.alpha*tmp.sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - pass - - def proximal_conjugate(self, x, tau): - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - return res -- cgit v1.2.3 From 7c82130aa8e28e31af56265186d22879d2436790 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:33:10 +0100 Subject: updated reference to ZeroFun --- Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py | 2 +- Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py index 798fb61..445ba7a 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py @@ -23,7 +23,7 @@ Created on Thu Feb 21 11:09:03 2019 """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.funcs import ZeroFun +from ccpi.optimisation.functions import ZeroFun class FBPD(Algorithm): '''FBPD Algorithm diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index bc4489e..93ba178 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -6,7 +6,7 @@ Created on Thu Feb 21 11:07:30 2019 """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.funcs import ZeroFun +from ccpi.optimisation.functions import ZeroFun import numpy class FISTA(Algorithm): -- cgit v1.2.3 From ad9e67c197aa347a83f59f3a0d7ab96745bef8ad Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:33:33 +0100 Subject: update verbose every update_objective_interval --- Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py 
b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py index cc99344..ed95c3f 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py @@ -146,7 +146,7 @@ class Algorithm(object): print ("Stop cryterion has been reached.") i = 0 for _ in self: - if verbose: + if verbose and self.iteration % self.update_objective_interval == 0: print ("Iteration {}/{}, objective {}".format(self.iteration, self.max_iteration, self.get_last_objective()) ) else: -- cgit v1.2.3 From 93517aa9f1472458fa962beae1abebb3e1223a6c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:33:54 +0100 Subject: PDHG as Algorithm --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 115 +++++++++++---------- 1 file changed, 60 insertions(+), 55 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 7e55ee8..fb2bfd8 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -5,6 +5,8 @@ Created on Mon Feb 4 16:18:06 2019 @author: evangelos """ +from ccpi.optimisation.algorithms import Algorithm + from ccpi.framework import ImageData import numpy as np @@ -13,67 +15,70 @@ import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer -def PDHG(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ - 'memopt': False} - - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - - niter = opt['niter'] if 'niter' in opt.keys() else 1000 - tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False - stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False +class PDHG(Algorithm): + '''Primal Dual Hybrid Gradient''' - if isinstance(operator, BlockOperator): - x_old = operator.domain_geometry().allocate() - y_old = operator.range_geometry().allocate() - else: - x_old = operator.domain_geometry().allocate() - y_old = operator.range_geometry().allocate() - - - xbar = x_old - x_tmp = x_old - x = x_old - - y_tmp = y_old - y = y_tmp - - # relaxation parameter - theta = 1 - - t = time.time() - - objective = [] + def __init__(self, **kwargs): + super(PDHG, self).__init__() + self.f = kwargs.get('f', None) + self.operator = kwargs.get('operator', None) + self.g = kwargs.get('g', None) + self.tau = kwargs.get('tau', None) + self.sigma = kwargs.get('sigma', None) + + if self.f is not None and self.operator is not None and \ + self.g is not None: + print ("Calling from creator") + self.set_up(self.f, + self.operator, + self.g, + self.tau, + self.sigma) + + def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs): + # algorithmic parameters + + if sigma is None and tau is None: + raise ValueError('Need sigma*tau||K||^2<1') + - for i in range(niter): + self.x_old = self.operator.domain_geometry().allocate() + self.y_old = self.operator.range_geometry().allocate() + self.xbar = self.x_old.copy() + #x_tmp = x_old + self.x = self.x_old.copy() + self.y = self.y_old.copy() + #y_tmp = y_old + #y = y_tmp + + # relaxation parameter + self.theta = 1 + + def update(self): # Gradient descent, Dual problem solution - y_tmp = y_old + sigma * 
operator.direct(xbar) - y = f.proximal_conjugate(y_tmp, sigma) + self.y_old += self.sigma * self.operator.direct(self.xbar) + self.y = self.f.proximal_conjugate(self.y_old, self.sigma) # Gradient ascent, Primal problem solution - x_tmp = x_old - tau * operator.adjoint(y) - x = g.proximal(x_tmp, tau) + self.x_old -= self.tau * self.operator.adjoint(self.y) + self.x = self.g.proximal(self.x_old, self.tau) #Update - xbar = x + theta * (x - x_old) - - x_old = x - y_old = y - -# if i%100==0: -# -# plt.imshow(x.as_array()[100]) -# plt.show() -# print(f(operator.direct(x)) + g(x), i) - - t_end = time.time() - - return x, t_end - t, objective + #xbar = x + theta * (x - x_old) + self.xbar.fill(self.x) + self.xbar -= self.x_old + self.xbar *= self.theta + self.xbar += self.x + + self.x_old.fill(self.x) + self.y_old.fill(self.y) + #self.y_old = y.copy() + #self.y = self.y_old + + def update_objective(self): + self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x), + -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) + ]) + -- cgit v1.2.3 From 0e8dac47faf88379175310552a4611ca34f407ea Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:34:38 +0100 Subject: massive clean --- Wrappers/Python/ccpi/optimisation/funcs.py | 193 ++--------------------------- 1 file changed, 12 insertions(+), 181 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 8ce54c7..6741020 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -21,108 +21,9 @@ from ccpi.optimisation.ops import Identity, FiniteDiff2D import numpy from ccpi.framework import DataContainer import warnings +from ccpi.optimisation.functions import Function -def isSizeCorrect(data1 ,data2): - if issubclass(type(data1), DataContainer) and \ - issubclass(type(data2), DataContainer): - # check dimensionality - if data1.check_dimensions(data2): - return True - elif issubclass(type(data1) , numpy.ndarray) and \ - issubclass(type(data2) , numpy.ndarray): - return data1.shape == data2.shape - else: - raise ValueError("{0}: getting two incompatible types: {1} {2}"\ - .format('Function', type(data1), type(data2))) - return False - -class Function(object): - def __init__(self): - self.L = None - def __call__(self,x, out=None): raise NotImplementedError - def grad(self, x): - warnings.warn("grad method is deprecated. use gradient instead", DeprecationWarning) - return self.gradient(x, out=None) - def prox(self, x, tau): - warnings.warn("prox method is deprecated. 
use proximal instead", DeprecationWarning) - return self.proximal(x,tau,out=None) - def gradient(self, x, out=None): raise NotImplementedError - def proximal(self, x, tau, out=None): raise NotImplementedError - - -class Norm2(Function): - - def __init__(self, - gamma=1.0, - direction=None): - super(Norm2, self).__init__() - self.gamma = gamma; - self.direction = direction; - - def __call__(self, x, out=None): - - if out is None: - xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, - keepdims=True)) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - arr = out.as_array() - numpy.square(x.as_array(), out=arr) - xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - p = numpy.sum(self.gamma*xx) - - return p - - def prox(self, x, tau): - - xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - p = x.as_array() * xx - - return type(x)(p,geometry=x.geometry) - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x,tau) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - numpy.square(x.as_array(), out = out.as_array()) - xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out.as_array()) - - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - -class TV2D(Norm2): - - def __init__(self, gamma): - super(TV2D,self).__init__(gamma, 0) - self.op = FiniteDiff2D() - self.L = self.op.get_max_sing_val() - - # Define a class for squared 2-norm class Norm2sq(Function): ''' @@ -148,8 +49,8 @@ class Norm2sq(Function): self.c = c # Default 1. if memopt: try: - self.adjoint_placehold = A.range_geometry().allocate() - self.direct_placehold = A.domain_geometry().allocate() + self.range_tmp = A.range_geometry().allocate() + self.domain_tmp = A.domain_geometry().allocate() self.memopt = True except NameError as ne: warnings.warn(str(ne)) @@ -164,7 +65,7 @@ class Norm2sq(Function): # Compute the Lipschitz parameter from the operator if possible # Leave it initialised to None otherwise try: - self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) + self.L = 2.0*self.c*(self.A.norm()**2) except AttributeError as ae: pass except NotImplementedError as noe: @@ -192,88 +93,16 @@ class Norm2sq(Function): if self.memopt: #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - self.A.direct(x, out=self.adjoint_placehold) - self.adjoint_placehold.__isub__( self.b ) - self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold) - #self.direct_placehold.__imul__(2.0 * self.c) - ## can this be avoided? 
- #out.fill(self.direct_placehold) - self.direct_placehold.multiply(2.0*self.c, out=out) + self.A.direct(x, out=self.range_tmp) + self.range_tmp -= self.b + self.A.adjoint(self.range_tmp, out=out) + #self.direct_placehold.multiply(2.0*self.c, out=out) + out *= (self.c * 2.0) else: return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) -class ZeroFun(Function): - - def __init__(self,gamma=0,L=1): - self.gamma = gamma - self.L = L - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def prox(self,x,tau): - return x.copy() - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - out.fill(x) - - elif issubclass(type(out) , numpy.ndarray): - out[:] = x - else: - raise ValueError ('Wrong size: x{0} out{1}' - .format(x.shape,out.shape) ) - -# A more interesting example, least squares plus 1-norm minimization. -# Define class to represent 1-norm including prox function -class Norm1(Function): - - def __init__(self,gamma): - super(Norm1, self).__init__() - self.gamma = gamma - self.L = 1 - self.sign_x = None - - def __call__(self,x,out=None): - if out is None: - return self.gamma*(x.abs().sum()) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - x.abs(out=out) - return out.sum() * self.gamma - - def prox(self,x,tau): - return (x.abs() - tau*self.gamma).maximum(0) * x.sign() - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if isSizeCorrect(x,out): - # check dimensionality - if issubclass(type(out), DataContainer): - v = (x.abs() - tau*self.gamma).maximum(0) - x.sign(out=out) - out *= v - #out.fill(self.prox(x,tau)) - elif issubclass(type(out) , numpy.ndarray): - v = (x.abs() - tau*self.gamma).maximum(0) - out[:] = x.sign() - out *= v - #out[:] = self.prox(x,tau) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - # Box constraints indicator function. Calling returns 0 if argument is within # the box. The prox operator is projection onto the box. Only implements one # scalar lower and one upper as constraint on all elements. 
Should generalise @@ -282,9 +111,10 @@ class IndicatorBox(Function): def __init__(self,lower=-numpy.inf,upper=numpy.inf): # Do nothing + super(IndicatorBox, self).__init__() self.lower = lower self.upper = upper - super(IndicatorBox, self).__init__() + def __call__(self,x): @@ -315,3 +145,4 @@ class IndicatorBox(Function): x.sign(out=self.sign_x) out.__imul__( self.sign_x ) + -- cgit v1.2.3 From c1ce6acc73edd30115575cd13e4e3f2e1bb108e6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:36:27 +0100 Subject: added header --- .../Python/ccpi/optimisation/functions/L1Norm.py | 18 +++++++++++++- .../ccpi/optimisation/functions/ScaledFunction.py | 18 ++++++++++++++ .../Python/ccpi/optimisation/functions/ZeroFun.py | 28 +++++++++++++++++----- 3 files changed, 57 insertions(+), 7 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py index f83de6f..5a47edd 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py @@ -1,5 +1,21 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Created on Wed Mar 6 19:42:34 2019 diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index b48135d..046a4a6 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -1,3 +1,21 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from numbers import Number import numpy diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py index 8c28874..88d9b64 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py @@ -1,10 +1,21 @@ -#!/usr/bin/env python3 # -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:44:10 2019 +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC -@author: evangelos -""" +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import numpy as np #from ccpi.optimisation.funcs import Function @@ -41,4 +52,9 @@ class ZeroFun(Function): out.fill(x) def proximal_conjugate(self, x, tau): - return 0 \ No newline at end of file + return 0 + + def domain_geometry(self): + pass + def range_geometry(self): + pass \ No newline at end of file -- cgit v1.2.3 From c3ac82e9f3beda552ee8d3e6ee35e4d768851fd7 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:37:39 +0100 Subject: added to functions --- .../ccpi/optimisation/functions/IndicatorBox.py | 65 ++++++++++ .../ccpi/optimisation/functions/MixedL21Norm.py | 136 +++++++++++++++++++++ .../Python/ccpi/optimisation/functions/Norm2Sq.py | 98 +++++++++++++++ .../Python/ccpi/optimisation/functions/__init__.py | 3 +- 4 files changed, 301 insertions(+), 1 deletion(-) create mode 100755 Wrappers/Python/ccpi/optimisation/functions/IndicatorBox.py create mode 100755 Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py create mode 100755 Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/IndicatorBox.py b/Wrappers/Python/ccpi/optimisation/functions/IndicatorBox.py new file mode 100755 index 0000000..df8dc89 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/IndicatorBox.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ccpi.optimisation.functions import Function +import numpy + +class IndicatorBox(Function): + '''Box constraints indicator function. 
+ + Calling returns 0 if argument is within the box. The prox operator is projection onto the box. + Only implements one scalar lower and one upper as constraint on all elements. Should generalise + to vectors to allow different constraints one elements. +''' + + def __init__(self,lower=-numpy.inf,upper=numpy.inf): + # Do nothing + super(IndicatorBox, self).__init__() + self.lower = lower + self.upper = upper + + + def __call__(self,x): + + if (numpy.all(x.array>=self.lower) and + numpy.all(x.array <= self.upper) ): + val = 0 + else: + val = numpy.inf + return val + + def prox(self,x,tau=None): + return (x.maximum(self.lower)).minimum(self.upper) + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + #(x.abs() - tau*self.gamma).maximum(0) * x.sign() + x.abs(out = out) + out.__isub__(tau*self.gamma) + out.maximum(0, out=out) + if self.sign_x is None or not x.shape == self.sign_x.shape: + self.sign_x = x.sign() + else: + x.sign(out=self.sign_x) + + out.__imul__( self.sign_x ) diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py new file mode 100755 index 0000000..1c51236 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
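# Note on the IndicatorBox class added above: it is the indicator function of the box
# [lower, upper], so its prox is simply elementwise clipping into that interval and does
# not depend on tau. A minimal numpy-only sketch of the same projection (illustrative
# values, not part of the committed file):
import numpy as np
x = np.array([-0.5, 0.3, 1.7])
lower, upper = 0.0, 1.0
projected = np.minimum(np.maximum(x, lower), upper)
# projected == array([0. , 0.3, 1. ]), i.e. what IndicatorBox(lower, upper).prox returns
# elementwise via (x.maximum(lower)).minimum(upper).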
+ +import numpy as np +from ccpi.optimisation.functions import Function, ScaledFunction +from ccpi.framework import DataContainer, ImageData, \ + ImageGeometry, BlockDataContainer + +############################ mixed_L1,2NORM FUNCTIONS ##################### +class MixedL21Norm(Function): + + def __init__(self, **kwargs): + + super(MixedL21Norm, self).__init__() + self.SymTensor = kwargs.get('SymTensor',False) + + def __call__(self, x, out=None): + + ''' Evaluates L1,2Norm at point x + + :param: x is a BlockDataContainer + + ''' + if self.SymTensor: + + param = [1]*x.shape[0] + param[-1] = 2 + tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] + res = sum(tmp).sqrt().sum() + else: + +# tmp = [ x[i]**2 for i in range(x.shape[0])] + tmp = [ el**2 for el in x.containers ] + +# print(x.containers) +# print(tmp) +# print(type(sum(tmp))) +# print(type(tmp)) + res = sum(tmp).sqrt().sum() +# print(res) + return res + + def gradient(self, x, out=None): + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + + ''' This is the Indicator function of ||\cdot||_{2, \infty} + which is either 0 if ||x||_{2, \infty} or \infty + ''' + return 0.0 + + def proximal(self, x, tau, out=None): + + ''' + For this we need to define a MixedL2,2 norm acting on BDC, + different form L2NormSquared which acts on DC + + ''' + + pass + + def proximal_conjugate(self, x, tau, out=None): + + if self.SymTensor: + + param = [1]*x.shape[0] + param[-1] = 2 + tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] + frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] + res = BlockDataContainer(*frac) + + return res + +# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha +# res = x.divide(ImageData(tmp2).maximum(1.0)) + else: + + tmp = [ el*el for el in x] + res = (sum(tmp).sqrt()).maximum(1.0) + frac = [x[i]/res for i in range(x.shape[0])] + res = BlockDataContainer(*frac) + + return res + + def __rmul__(self, scalar): + return ScaledFunction(self, scalar) + +#class MixedL21Norm_tensor(Function): +# +# def __init__(self): +# print("feerf") +# +# +if __name__ == '__main__': + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) + u1 = ig.allocate('random_int') + u2 = ig.allocate('random_int') + + U = BlockDataContainer(u1, u2, shape=(2,1)) + + # Define no scale and scaled + f_no_scaled = MixedL21Norm() + f_scaled = 0.5 * MixedL21Norm() + + # call + + a1 = f_no_scaled(U) + a2 = f_scaled(U) + + z = f_no_scaled.proximal_conjugate(U, 1) + + f_no_scaled = MixedL21Norm() + + tmp = [el*el for el in U] + + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py new file mode 100755 index 0000000..b553d7c --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from ccpi.optimisation.functions import Function +import numpy +import warnings + +# Define a class for squared 2-norm +class Norm2sq(Function): + ''' + f(x) = c*||A*x-b||_2^2 + + which has + + grad[f](x) = 2*c*A^T*(A*x-b) + + and Lipschitz constant + + L = 2*c*||A||_2^2 = 2*s1(A)^2 + + where s1(A) is the largest singular value of A. + + ''' + + def __init__(self,A,b,c=1.0,memopt=False): + super(Norm2sq, self).__init__() + + self.A = A # Should be an operator, default identity + self.b = b # Default zero DataSet? + self.c = c # Default 1. + if memopt: + try: + self.range_tmp = A.range_geometry().allocate() + self.domain_tmp = A.domain_geometry().allocate() + self.memopt = True + except NameError as ne: + warnings.warn(str(ne)) + self.memopt = False + except NotImplementedError as nie: + print (nie) + warnings.warn(str(nie)) + self.memopt = False + else: + self.memopt = False + + # Compute the Lipschitz parameter from the operator if possible + # Leave it initialised to None otherwise + try: + self.L = 2.0*self.c*(self.A.norm()**2) + except AttributeError as ae: + pass + except NotImplementedError as noe: + pass + + #def grad(self,x): + # return self.gradient(x, out=None) + + def __call__(self,x): + #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) + #if out is None: + # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) + #else: + y = self.A.direct(x) + y.__isub__(self.b) + #y.__imul__(y) + #return y.sum() * self.c + try: + return y.squared_norm() * self.c + except AttributeError as ae: + # added for compatibility with SIRF + return (y.norm()**2) * self.c + + def gradient(self, x, out = None): + if self.memopt: + #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) + + self.A.direct(x, out=self.range_tmp) + self.range_tmp -= self.b + self.A.adjoint(self.range_tmp, out=out) + #self.direct_placehold.multiply(2.0*self.c, out=out) + out *= (self.c * 2.0) + else: + return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index d6edd03..2ed36f5 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -9,4 +9,5 @@ from .BlockFunction import BlockFunction from .ScaledFunction import ScaledFunction from .FunctionOperatorComposition import FunctionOperatorComposition from .MixedL21Norm import MixedL21Norm - +from .IndicatorBox import IndicatorBox +from .Norm2Sq import Norm2sq -- cgit v1.2.3 From 12ccc249a722a64c02d97e8e1513c065d4a7bf48 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:38:12 +0100 Subject: updated example with PDHG algorithm class --- Wrappers/Python/wip/pdhg_TV_denoising.py | 38 +++++++++++++++----------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index 3819de5..a8e721f 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -19,10 
+19,12 @@ from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ from skimage.util import random_noise + + # ############################################################################ # Create phantom for TV denoising -N = 200 +N = 600 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -38,7 +40,7 @@ noisy_data = ImageData(n1) #%% # Regularisation Parameter -alpha = 200 +alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") method = '0' @@ -79,31 +81,27 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 2000 +pdhg.update_objective_interval = 10 + +pdhg.run(2000) -#%% -## Number of iterations -opt = {'niter':2000} -## -### Run algorithm -result, total_time, objective = PDHG(f, g, operator, \ - tau = tau, sigma = sigma, opt = opt) -#%% -###Show results -if isinstance(result, BlockDataContainer): - sol = result.get_item(0).as_array() -else: - sol = result.as_array() + +sol = pdhg.get_output().as_array() #sol = result.as_array() # +fig = plt.figure() +plt.subplot(1,2,1) +plt.imshow(noisy_data.as_array()) +#plt.colorbar() +plt.subplot(1,2,2) plt.imshow(sol) -plt.colorbar() +#plt.colorbar() plt.show() # -### -plt.imshow(noisy_data.as_array()) -plt.colorbar() -plt.show() + ## plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') -- cgit v1.2.3 From cf36fb59af5806506a6b7b75edb7a5f7bebb8070 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:49:59 +0100 Subject: jenkins errors --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 4 +- Wrappers/Python/ccpi/optimisation/algs.py | 4 +- Wrappers/Python/ccpi/optimisation/funcs.py | 124 +++++++++++++++++++++ 3 files changed, 127 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index fb2bfd8..043fe38 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -6,11 +6,9 @@ Created on Mon Feb 4 16:18:06 2019 @author: evangelos """ from ccpi.optimisation.algorithms import Algorithm - - from ccpi.framework import ImageData import numpy as np -import matplotlib.pyplot as plt +#import matplotlib.pyplot as plt import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 15638a9..6b6ae2c 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -20,8 +20,8 @@ import numpy import time -from ccpi.optimisation.funcs import Function -from ccpi.optimisation.funcs import ZeroFun +from ccpi.optimisation.functions import Function +from ccpi.optimisation.functions import ZeroFun from ccpi.framework import ImageData from ccpi.framework import AcquisitionData from ccpi.optimisation.spdhg import spdhg diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 6741020..efc465c 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -22,7 +22,90 @@ import numpy from ccpi.framework import DataContainer import warnings from ccpi.optimisation.functions import Function 
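# Quick numerical sanity check of the Norm2sq formulas used in this module and in
# functions/Norm2Sq.py added earlier in this series: f(x) = c*||A x - b||_2^2,
# grad f(x) = 2c*A^T(A x - b), L = 2c*s1(A)^2. Plain-numpy sketch with arbitrary
# illustrative values for A, b, c, x (not part of the committed file):
import numpy as np
A = np.array([[1., 2.], [0., 1.]])
b = np.array([1., 1.])
c, x = 0.5, np.array([2., -1.])
f_x = c * np.sum((A.dot(x) - b) ** 2)
grad_x = 2 * c * A.T.dot(A.dot(x) - b)
L = 2 * c * np.linalg.norm(A, 2) ** 2      # 2*c*(largest singular value of A)^2
eps = 1e-6
fd0 = (c * np.sum((A.dot(x + np.array([eps, 0.])) - b) ** 2) - f_x) / eps
assert abs(fd0 - grad_x[0]) < 1e-3         # finite difference agrees with grad f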
+def isSizeCorrect(data1 ,data2): + if issubclass(type(data1), DataContainer) and \ + issubclass(type(data2), DataContainer): + # check dimensionality + if data1.check_dimensions(data2): + return True + elif issubclass(type(data1) , numpy.ndarray) and \ + issubclass(type(data2) , numpy.ndarray): + return data1.shape == data2.shape + else: + raise ValueError("{0}: getting two incompatible types: {1} {2}"\ + .format('Function', type(data1), type(data2))) + return False +class Norm2(Function): + + def __init__(self, + gamma=1.0, + direction=None): + super(Norm2, self).__init__() + self.gamma = gamma; + self.direction = direction; + + def __call__(self, x, out=None): + + if out is None: + xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, + keepdims=True)) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + arr = out.as_array() + numpy.square(x.as_array(), out=arr) + xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + + p = numpy.sum(self.gamma*xx) + + return p + + def prox(self, x, tau): + + xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction, + keepdims=True )) + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + p = x.as_array() * xx + + return type(x)(p,geometry=x.geometry) + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x,tau) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + numpy.square(x.as_array(), out = out.as_array()) + xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, + keepdims=True )) + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + x.multiply(xx, out= out.as_array()) + + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + x.multiply(xx, out= out) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + +class TV2D(Norm2): + + def __init__(self, gamma): + super(TV2D,self).__init__(gamma, 0) + self.op = FiniteDiff2D() + self.L = self.op.get_max_sing_val() + # Define a class for squared 2-norm class Norm2sq(Function): @@ -146,3 +229,44 @@ class IndicatorBox(Function): out.__imul__( self.sign_x ) +# A more interesting example, least squares plus 1-norm minimization. 
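# (The prox used for that 1-norm example is elementwise soft-thresholding,
#  prox_{tau*gamma*|.|}(x) = sign(x) * max(|x| - tau*gamma, 0); a quick numpy
#  illustration of the formula, separate from the DataContainer-based class below:)
import numpy as np
x = np.array([-2.0, 0.3, 1.5])
tau, gamma = 0.5, 1.0
shrunk = np.sign(x) * np.maximum(np.abs(x) - tau * gamma, 0)
# shrunk == array([-1.5, 0. , 1. ]), matching the prox of the Norm1 class defined next.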
+# Define class to represent 1-norm including prox function +class Norm1(Function): + + def __init__(self,gamma): + super(Norm1, self).__init__() + self.gamma = gamma + self.L = 1 + self.sign_x = None + + def __call__(self,x,out=None): + if out is None: + return self.gamma*(x.abs().sum()) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + x.abs(out=out) + return out.sum() * self.gamma + + def prox(self,x,tau): + return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if isSizeCorrect(x,out): + # check dimensionality + if issubclass(type(out), DataContainer): + v = (x.abs() - tau*self.gamma).maximum(0) + x.sign(out=out) + out *= v + #out.fill(self.prox(x,tau)) + elif issubclass(type(out) , numpy.ndarray): + v = (x.abs() - tau*self.gamma).maximum(0) + out[:] = x.sign() + out *= v + #out[:] = self.prox(x,tau) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) -- cgit v1.2.3 From ad4ba705e2c9265c829c00ff96306070bf045988 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 16:59:48 +0100 Subject: added norm --- Wrappers/Python/ccpi/optimisation/ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 6afb97a..fcd0d9e 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -115,8 +115,8 @@ class TomoIdentity(Operator): def adjoint(self,x, out=None): return self.direct(x, out) - def size(self): - return NotImplemented + def norm(self): + return self.s1 def get_max_sing_val(self): return self.s1 -- cgit v1.2.3 From 2b483605262a756a3bbbcab689f1e6db6e36b8d3 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 17:00:50 +0100 Subject: fixed tests --- Wrappers/Python/test/test_algorithms.py | 1 + Wrappers/Python/test/test_run_test.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py index b5959b5..a35ffc1 100755 --- a/Wrappers/Python/test/test_algorithms.py +++ b/Wrappers/Python/test/test_algorithms.py @@ -86,6 +86,7 @@ class TestAlgorithms(unittest.TestCase): identity = TomoIdentity(geometry=ig) norm2sq = Norm2sq(identity, b) + norm2sq.L = 2 * norm2sq.c * identity.norm()**2 opt = {'tol': 1e-4, 'memopt':False} alg = FISTA(x_init=x_init, f=norm2sq, g=None, opt=opt) alg.max_iteration = 2 diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py index 3c7d9ab..8cef925 100755 --- a/Wrappers/Python/test/test_run_test.py +++ b/Wrappers/Python/test/test_run_test.py @@ -9,7 +9,7 @@ from ccpi.framework import AcquisitionGeometry from ccpi.optimisation.algs import FISTA from ccpi.optimisation.algs import FBPD from ccpi.optimisation.funcs import Norm2sq -from ccpi.optimisation.funcs import ZeroFun +from ccpi.optimisation.functions import ZeroFun from ccpi.optimisation.funcs import Norm1 from ccpi.optimisation.funcs import TV2D from ccpi.optimisation.funcs import Norm2 -- cgit v1.2.3 From aaa4eae2f43df1a1ed3c15ba2dacdc4dce5a43d6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 17:03:52 +0100 Subject: added tau to call to proximal --- Wrappers/Python/ccpi/optimisation/functions/Function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 
'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/Function.py b/Wrappers/Python/ccpi/optimisation/functions/Function.py index 82f24a6..ba33666 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/Function.py +++ b/Wrappers/Python/ccpi/optimisation/functions/Function.py @@ -59,7 +59,7 @@ class Function(object): '''Alias of proximal(x, tau, None)''' warnings.warn('''This method will disappear in following versions of the CIL. Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) + return self.proximal(x, tau, out=None) def __rmul__(self, scalar): '''Defines the multiplication by a scalar on the left -- cgit v1.2.3 From 47e743cf3ff3474b516d492b0c5b3d47d4b73848 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 1 Apr 2019 17:40:08 +0100 Subject: python2.7 fixes --- .../ccpi/optimisation/functions/L2NormSquared.py | 25 +++++++++++++++------- Wrappers/Python/test/test_functions.py | 6 +++--- 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 5489d92..597d4d8 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -1,12 +1,21 @@ # -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 -@author: evangelos -""" +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import numpy from ccpi.optimisation.functions import Function @@ -75,10 +84,10 @@ class L2NormSquared(Function): if out is None: # FIXME: this is a number - return (1/4) * x.squared_norm() + tmp + return (1./4.) * x.squared_norm() + tmp else: # FIXME: this is a DataContainer - out.fill((1/4) * x.squared_norm() + tmp) + out.fill((1./4.) * x.squared_norm() + tmp) def proximal(self, x, tau, out = None): diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 3e5f26f..54dfa57 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -62,7 +62,7 @@ class TestFunction(unittest.TestCase): self.assertEqual(a2, g(d)) # Compare convex conjugate of g - a3 = 0.5 * d.power(2).sum() + (d*noisy_data).sum() + a3 = 0.5 * d.squared_norm() + d.dot(noisy_data) self.assertEqual(a3, g.convex_conjugate(d)) #print( a3, g.convex_conjugate(d)) @@ -91,12 +91,12 @@ class TestFunction(unittest.TestCase): #check convex conjuagate no data c1 = f.convex_conjugate(u) - c2 = 1/4 * u.squared_norm() + c2 = 1/4. * u.squared_norm() numpy.testing.assert_equal(c1, c2) #check convex conjuagate with data d1 = f1.convex_conjugate(u) - d2 = (1/4) * u.squared_norm() + (u*b).sum() + d2 = (1./4.) 
* u.squared_norm() + (u*b).sum() numpy.testing.assert_equal(d1, d2) # check proximal no data -- cgit v1.2.3 From f620d650de2c5e6ac16b799913dfcfd6f101ed35 Mon Sep 17 00:00:00 2001 From: Vaggelis Date: Mon, 1 Apr 2019 19:47:29 +0100 Subject: add pdhg TV tomo 2D --- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 108 ++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 Wrappers/Python/wip/pdhg_TV_tomography2D.py (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py new file mode 100644 index 0000000..640e776 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, AcquisitionGeometry, AcquisitionData + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, BlockFunction, ScaledFunction + +from ccpi.astra.ops import AstraProjectorSimple +from skimage.util import random_noise + + +#%%############################################################################### +# Create phantom for TV tomography + +N = 150 +x = np.zeros((N,N)) +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +data = ImageData(x) +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) + +detectors = 100 +angles = np.linspace(0,np.pi,100) + +ag = AcquisitionGeometry('parallel','2D',angles, detectors) +Aop = AstraProjectorSimple(ig, ag, 'cpu') +sin = Aop.direct(data) + +plt.imshow(sin.as_array()) +plt.title('Sinogram') +plt.colorbar() +plt.show() + +# Add Gaussian noise to the sinogram data +np.random.seed(10) +n1 = np.random.random(sin.shape) + +noisy_data = sin + ImageData(5*n1) + +plt.imshow(noisy_data.as_array()) +plt.title('Noisy Sinogram') +plt.colorbar() +plt.show() + +#%% Works only with Composite Operator Structure of PDHG + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) + +# Create operators +op1 = Gradient(ig) +op2 = Aop + +# Form Composite Operator +operator = BlockOperator(op1, op2, shape=(2,1) ) + +alpha = 50 +f = BlockFunction( alpha * MixedL21Norm(), \ + 0.5 * L2NormSquared(b = noisy_data) ) +g = ZeroFun() + +# Compute operator Norm +normK = operator.norm() + +## Primal & dual stepsizes + +sigma = 10 +tau = 1/(sigma*normK**2) + +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 5000 +pdhg.update_objective_interval = 500 + +pdhg.run(5000) + +#%% +sol = pdhg.get_output().as_array() +fig = plt.figure() +plt.subplot(1,2,1) +plt.imshow(noisy_data.as_array()) +#plt.colorbar() +plt.subplot(1,2,2) +plt.imshow(sol) +#plt.colorbar() +plt.show() + + +#%% +plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') +plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +plt.legend() +plt.show() + + -- cgit v1.2.3 From 4ad78278713683e4c7ba225d303f5e3d1690f979 Mon Sep 17 00:00:00 2001 From: Vaggelis Date: Mon, 1 Apr 2019 20:22:40 +0100 Subject: update pdhg TV tomo 2D --- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git 
a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index 640e776..52b7922 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -26,6 +26,22 @@ from skimage.util import random_noise #%%############################################################################### # Create phantom for TV tomography +import os +import tomophantom +from tomophantom import TomoP2D +from tomophantom.supp.qualitymetrics import QualityTools + +#model = 1 # select a model number from the library +#N = 150 # set dimension of the phantom +## one can specify an exact path to the parameters file +## path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat' +#path = os.path.dirname(tomophantom.__file__) +#path_library2D = os.path.join(path, "Phantom2DLibrary.dat") +##This will generate a N_size x N_size phantom (2D) +#phantom_2D = TomoP2D.Model(model, N, path_library2D) +#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +#data = ImageData(phantom_2D, geometry=ig) + N = 150 x = np.zeros((N,N)) x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 @@ -34,7 +50,8 @@ x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 data = ImageData(x) ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) -detectors = 100 + +detectors = 150 angles = np.linspace(0,np.pi,100) ag = AcquisitionGeometry('parallel','2D',angles, detectors) @@ -57,9 +74,10 @@ plt.title('Noisy Sinogram') plt.colorbar() plt.show() + #%% Works only with Composite Operator Structure of PDHG -ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) # Create operators op1 = Gradient(ig) @@ -83,7 +101,7 @@ tau = 1/(sigma*normK**2) pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) pdhg.max_iteration = 5000 -pdhg.update_objective_interval = 500 +pdhg.update_objective_interval = 250 pdhg.run(5000) -- cgit v1.2.3 From 4702a82d1e5db55e8a1017eedab79cd0504b42ed Mon Sep 17 00:00:00 2001 From: Vaggelis Date: Tue, 2 Apr 2019 00:05:00 +0100 Subject: add pdhg TV tomo 2D & channels --- Wrappers/Python/wip/pdhg_TV_tomography2D_time.py | 137 +++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 Wrappers/Python/wip/pdhg_TV_tomography2D_time.py (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py new file mode 100644 index 0000000..7ac1566 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, AcquisitionGeometry, AcquisitionData + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, BlockFunction, ScaledFunction + +from ccpi.astra.ops import AstraProjectorSimple, AstraProjectorMC +from skimage.util import random_noise + + +#%%############################################################################### +# Create phantom for TV tomography + +import numpy as np +import matplotlib.pyplot as plt +import os +import tomophantom +from tomophantom import TomoP2D + +model = 102 # note that the selected model is temporal (2D + time) +N = 150 # set 
dimension of the phantom +# one can specify an exact path to the parameters file +# path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat' +path = os.path.dirname(tomophantom.__file__) +path_library2D = os.path.join(path, "Phantom2DLibrary.dat") +#This will generate a N_size x N_size x Time frames phantom (2D + time) +phantom_2Dt = TomoP2D.ModelTemporal(model, N, path_library2D) + +plt.close('all') +plt.figure(1) +plt.rcParams.update({'font.size': 21}) +plt.title('{}''{}'.format('2D+t phantom using model no.',model)) +for sl in range(0,np.shape(phantom_2Dt)[0]): + im = phantom_2Dt[sl,:,:] + plt.imshow(im, vmin=0, vmax=1) + plt.pause(.1) + plt.draw + +#N = 150 +#x = np.zeros((N,N)) +#x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +#x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +#%% +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N, channels = np.shape(phantom_2Dt)[0]) +data = ImageData(phantom_2Dt, geometry=ig) + + + +detectors = 150 +angles = np.linspace(0,np.pi,100) + +ag = AcquisitionGeometry('parallel','2D',angles, detectors, channels = np.shape(phantom_2Dt)[0]) +Aop = AstraProjectorMC(ig, ag, 'cpu') +sin = Aop.direct(data) + +plt.imshow(sin.as_array()[10]) +plt.title('Sinogram') +plt.colorbar() +plt.show() + +# Add Gaussian noise to the sinogram data +np.random.seed(10) +n1 = np.random.random(sin.shape) + +noisy_data = sin + ImageData(5*n1) + +plt.imshow(noisy_data.as_array()[10]) +plt.title('Noisy Sinogram') +plt.colorbar() +plt.show() + + +#%% Works only with Composite Operator Structure of PDHG + +#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) + +# Create operators +op1 = Gradient(ig) +op2 = Aop + +# Form Composite Operator +operator = BlockOperator(op1, op2, shape=(2,1) ) + +alpha = 50 +f = BlockFunction( alpha * MixedL21Norm(), \ + 0.5 * L2NormSquared(b = noisy_data) ) +g = ZeroFun() + +# Compute operator Norm +normK = operator.norm() + +## Primal & dual stepsizes + +sigma = 10 +tau = 1/(sigma*normK**2) + +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 5000 +pdhg.update_objective_interval = 20 + +pdhg.run(5000) + +#%% +sol = pdhg.get_output().as_array() +fig = plt.figure() +plt.subplot(1,2,1) +plt.imshow(noisy_data.as_array()) +#plt.colorbar() +plt.subplot(1,2,2) +plt.imshow(sol) +#plt.colorbar() +plt.show() + + +#%% +plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') +plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +plt.legend() +plt.show() + + -- cgit v1.2.3 From ab2d7b0f23c1851ab85203583d8cdff0b2b8341f Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Apr 2019 12:41:29 +0100 Subject: add old pdhg, test gap --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 69 +++++++++++ .../ccpi/optimisation/algorithms/__init__.py | 1 + Wrappers/Python/wip/test_pdhg_gap.py | 129 +++++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100644 Wrappers/Python/wip/test_pdhg_gap.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 043fe38..8600e07 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -13,6 +13,9 @@ import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer + +import matplotlib.pyplot as plt + class PDHG(Algorithm): '''Primal Dual Hybrid Gradient''' @@ -80,3 +83,69 @@ class PDHG(Algorithm): ]) + +def PDHG_old(f, 
g, operator, tau = None, sigma = None, opt = None, **kwargs): + + # algorithmic parameters + if opt is None: + opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ + 'memopt': False} + + if sigma is None and tau is None: + raise ValueError('Need sigma*tau||K||^2<1') + + niter = opt['niter'] if 'niter' in opt.keys() else 1000 + tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False + stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False + + + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() + + + xbar = x_old + x_tmp = x_old + x = x_old + + y_tmp = y_old + y = y_tmp + + # relaxation parameter + theta = 1 + + t = time.time() + + objective = [] + + for i in range(niter): + + # Gradient descent, Dual problem solution + y_tmp = y_old + sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_tmp, sigma) + + # Gradient ascent, Primal problem solution + x_tmp = x_old - tau * operator.adjoint(y) + x = g.proximal(x_tmp, tau) + + #Update + xbar = x + theta * (x - x_old) + + x_old = x + y_old = y + + if i%100==0: + + primal = f(operator.direct(x)) + g(x) + dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) + print( i, primal, dual) + + plt.imshow(x.as_array()) + plt.show() +# print(f(operator.direct(x)) + g(x), i) + + t_end = time.time() + + return x, t_end - t, objective + diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py index 443bc78..a28c0bf 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -28,3 +28,4 @@ from .GradientDescent import GradientDescent from .FISTA import FISTA from .FBPD import FBPD from .PDHG import PDHG +from .PDHG import PDHG_old diff --git a/Wrappers/Python/wip/test_pdhg_gap.py b/Wrappers/Python/wip/test_pdhg_gap.py new file mode 100644 index 0000000..b196e36 --- /dev/null +++ b/Wrappers/Python/wip/test_pdhg_gap.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Apr 2 12:26:24 2019 + +@author: vaggelis +""" + + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, AcquisitionGeometry, AcquisitionData + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, BlockFunction, ScaledFunction + +from ccpi.astra.ops import AstraProjectorSimple +from skimage.util import random_noise + + +#%%############################################################################### +# Create phantom for TV tomography + +#import os +#import tomophantom +#from tomophantom import TomoP2D +#from tomophantom.supp.qualitymetrics import QualityTools + +#model = 1 # select a model number from the library +#N = 150 # set dimension of the phantom +## one can specify an exact path to the parameters file +## path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat' +#path = os.path.dirname(tomophantom.__file__) +#path_library2D = os.path.join(path, "Phantom2DLibrary.dat") +##This will generate a N_size x N_size phantom (2D) +#phantom_2D = TomoP2D.Model(model, N, path_library2D) +#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +#data = ImageData(phantom_2D, 
geometry=ig) + +N = 150 +x = np.zeros((N,N)) +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +data = ImageData(x) +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) + + +detectors = 150 +angles = np.linspace(0,np.pi,100) + +ag = AcquisitionGeometry('parallel','2D',angles, detectors) +Aop = AstraProjectorSimple(ig, ag, 'cpu') +sin = Aop.direct(data) + +plt.imshow(sin.as_array()) +plt.title('Sinogram') +plt.colorbar() +plt.show() + +# Add Gaussian noise to the sinogram data +np.random.seed(10) +n1 = np.random.random(sin.shape) + +noisy_data = sin + ImageData(5*n1) + +plt.imshow(noisy_data.as_array()) +plt.title('Noisy Sinogram') +plt.colorbar() +plt.show() + + +#%% Works only with Composite Operator Structure of PDHG + +#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) + +# Create operators +op1 = Gradient(ig) +op2 = Aop + +# Form Composite Operator +operator = BlockOperator(op1, op2, shape=(2,1) ) + +alpha = 50 +f = BlockFunction( alpha * MixedL21Norm(), \ + 0.5 * L2NormSquared(b = noisy_data) ) +g = ZeroFun() + +# Compute operator Norm +normK = operator.norm() + +## Primal & dual stepsizes + +sigma = 10 +tau = 1/(sigma*normK**2) + +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 2000 +pdhg.update_objective_interval = 100 + +#pdhg.run(5000) + +opt = {'niter':2000} + +res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +#%% +#sol = pdhg.get_output().as_array() +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +# +# +##%% +#plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() + + -- cgit v1.2.3 From 42d01d8f150fd893509a408e233ad0b19480b22d Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Apr 2019 13:12:25 +0100 Subject: add old pdhg, test gap --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 7 ++--- Wrappers/Python/wip/test_pdhg_gap.py | 31 +++++++++++++++------- 2 files changed, 25 insertions(+), 13 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 8600e07..1229c4e 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -72,9 +72,10 @@ class PDHG(Algorithm): self.xbar *= self.theta self.xbar += self.x - self.x_old.fill(self.x) - self.y_old.fill(self.y) - #self.y_old = y.copy() +# self.x_old.fill(self.x) +# self.y_old.fill(self.y) + self.y_old = self.y.copy() + self.x_old = self.x.copy() #self.y = self.y_old def update_objective(self): diff --git a/Wrappers/Python/wip/test_pdhg_gap.py b/Wrappers/Python/wip/test_pdhg_gap.py index b196e36..6c7ccc9 100644 --- a/Wrappers/Python/wip/test_pdhg_gap.py +++ b/Wrappers/Python/wip/test_pdhg_gap.py @@ -102,22 +102,33 @@ pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) pdhg.max_iteration = 2000 pdhg.update_objective_interval = 100 -#pdhg.run(5000) +pdhg.run(5000) +#%% opt = {'niter':2000} res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) #%% -#sol = pdhg.get_output().as_array() -#fig = plt.figure() -#plt.subplot(1,2,1) -#plt.imshow(noisy_data.as_array()) -##plt.colorbar() -#plt.subplot(1,2,2) -#plt.imshow(sol) -##plt.colorbar() -#plt.show() +sol = pdhg.get_output().as_array() 
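# (PDHG_old above prints the primal and dual objective values every 100 iterations;
#  their difference is the duality gap this script is meant to inspect. A small helper
#  sketch of that same computation, assuming f, g and operator as defined above and a
#  primal/dual pair x, y:)
def duality_gap(f, g, operator, x, y):
    primal = f(operator.direct(x)) + g(x)
    dual = -(f.convex_conjugate(y) + g(-1 * operator.adjoint(y)))
    return primal - dual   # non-negative by weak duality, shrinks towards zero at convergence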
+sol_old = res[0].as_array() +fig = plt.figure(figsize=(20,10)) +plt.subplot(1,3,1) +plt.imshow(noisy_data.as_array()) +#plt.colorbar() +plt.subplot(1,3,2) +plt.imshow(sol) +#plt.colorbar() + +plt.subplot(1,3,3) +plt.imshow(sol_old) +plt.show() + +plt.imshow(np.abs(sol-sol_old)) +plt.colorbar() +plt.show() + + # # ##%% -- cgit v1.2.3 From d2b119495c42182530c4f0329613c24b32d395fa Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 2 Apr 2019 17:47:18 +0100 Subject: add methods for precond pdhg --- .../optimisation/operators/GradientOperator.py | 57 ++++++++- .../optimisation/operators/SparseFiniteDiff.py | 140 +++++++++++++++++++++ .../Python/ccpi/optimisation/operators/__init__.py | 2 + 3 files changed, 195 insertions(+), 4 deletions(-) create mode 100644 Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index ec14b8f..87723f0 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -8,9 +8,9 @@ Created on Fri Mar 1 22:50:04 2019 from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry +from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer import numpy -from ccpi.optimisation.operators import FiniteDiff +from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff #%% @@ -45,7 +45,6 @@ class Gradient(LinearOperator): tmp = self.gm_range.allocate() - for i in range(tmp.shape[0]): tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) return tmp @@ -73,6 +72,56 @@ class Gradient(LinearOperator): def __rmul__(self, scalar): return ScaledOperator(self, scalar) + + def matrix(self): + + tmp = self.gm_range.allocate() + + mat = [] + for i in range(tmp.shape[0]): + + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + mat.append(spMat.matrix()) + + return BlockDataContainer(*mat) + + + def sum_abs_row(self): + + tmp = self.gm_range.allocate() + res = self.gm_domain.allocate() + for i in range(tmp.shape[0]): + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + res += spMat.sum_abs_row() + return res + + def sum_abs_col(self): + + tmp = self.gm_range.allocate() + res = [] + for i in range(tmp.shape[0]): + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + res.append(spMat.sum_abs_col()) + return BlockDataContainer(*res) + + if __name__ == '__main__': - pass + M, N = 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int' ) + + G_neum = Gradient(ig) + + d = G_neum.matrix() + print(d[1]) + + d1 = G_neum.sum_abs_row() + print(d1.as_array()) + + d2 = G_neum.sum_abs_col() + print(d2) + + + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py new file mode 100644 index 0000000..0fb5efb --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Apr 2 14:06:15 2019 + +@author: vaggelis +""" + +import scipy.sparse as sp +import numpy as np +from ccpi.framework import 
ImageData + +class SparseFiniteDiff(): + + def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): + + super(SparseFiniteDiff, self).__init__() + self.gm_domain = gm_domain + self.gm_range = gm_range + self.direction = direction + self.bnd_cond = bnd_cond + + if self.gm_range is None: + self.gm_range = self.gm_domain + + self.get_dims = [i for i in gm_domain.shape] + + if self.direction + 1 > len(self.gm_domain.shape): + raise ValueError('Gradient directions more than geometry domain') + + def matrix(self): + + i = self.direction + + mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil') + + if self.bnd_cond == 'Neumann': + mat[-1,:] = 0 + elif self.bnd_cond == 'Periodic': + mat[-1,0] = 1 + + tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0]) + + for j in range(1, self.gm_domain.length): + + tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad ) + + return tmpGrad + + def T(self): + return self.matrix().T + + def direct(self, x): + + x_asarr = x.as_array() + res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F') + return type(x)(res) + + def adjoint(self, x): + + x_asarr = x.as_array() + res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F') + return type(x)(res) + + def sum_abs_row(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + + def sum_abs_col(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + from ccpi.optimisation.operators import FiniteDiff + + # 2D + M, N= 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + for i in [0,1]: + + # Neumann + sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann') + G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann') + + # Periodic + sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic') + G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic') + + u_neum_direct = G_neum.direct(arr) + u_neum_sp_direct = sFD_neum.direct(arr) + np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4) + + u_neum_adjoint = G_neum.adjoint(arr) + u_neum_sp_adjoint = sFD_neum.adjoint(arr) + np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4) + + u_per_direct = G_neum.direct(arr) + u_per_sp_direct = sFD_neum.direct(arr) + np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4) + + u_per_adjoint = G_per.adjoint(arr) + u_per_sp_adjoint = sFD_per.adjoint(arr) + np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4) + + # 3D + M, N, K = 2, 3, 4 + ig3D = ImageGeometry(M, N, K) + arr3D = ig3D.allocate('random_int') + + for i in [0,1,2]: + + # Neumann + sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann') + G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann') + + # Periodic + sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic') + G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic') + + u_neum_direct3D = G_neum3D.direct(arr3D) + u_neum_sp_direct3D = sFD_neum3D.direct(arr3D) + np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4) + + 
u_neum_adjoint3D = G_neum3D.adjoint(arr3D) + u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D) + np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4) + + u_per_direct3D = G_neum3D.direct(arr3D) + u_per_sp_direct3D = sFD_neum3D.direct(arr3D) + np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4) + + u_per_adjoint3D = G_per3D.adjoint(arr3D) + u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D) + np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4) + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index 1c09faf..57d89ad 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -17,3 +17,5 @@ from .GradientOperator import Gradient from .SymmetrizedGradientOperator import SymmetrizedGradient from .IdentityOperator import Identity from .ZeroOperator import ZeroOp + +from .SparseFiniteDiff import SparseFiniteDiff -- cgit v1.2.3 From 7a204d9a30ac9fc3e7b27b1076037faf10df06df Mon Sep 17 00:00:00 2001 From: Vaggelis Date: Tue, 2 Apr 2019 23:49:52 +0100 Subject: work for precond pdhg --- .../ccpi/optimisation/operators/BlockOperator.py | 52 +++++++++++++++++++++- .../optimisation/operators/IdentityOperator.py | 39 +++++++++++++++- .../Python/ccpi/optimisation/operators/__init__.py | 3 +- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 2 +- Wrappers/Python/wip/pdhg_TV_tomography2D_time.py | 2 +- 5 files changed, 93 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index ee8f609..19da3d4 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -219,5 +219,55 @@ class BlockOperator(Operator): shape = (self.shape[1], 1) return BlockGeometry(*[el.range_geometry() for el in self.operators], shape=shape) + + def sum_abs_row(self): + + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).sum_abs_row() + else: + prod += self.get_item(row,col).sum_abs_row() + res.append(prod) + + if self.shape[1]==1: + tmp = sum(res) + return ImageData(tmp) + else: + return BlockDataContainer(*res) + + if __name__ == '__main__': - pass + + from ccpi.framework import ImageGeometry + from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff + + from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer + from ccpi.optimisation.operators import Operator, LinearOperator + + + M, N= 4, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + G = Gradient(ig) + Id = Identity(ig) + + B = BlockOperator(G, Id) + + print(B.sum_abs_row().as_array()) + + Gx = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann') + Gy = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann') + + d1 = abs(Gx.matrix()).toarray().sum(axis=0) + d2 = abs(Gy.matrix()).toarray().sum(axis=0) + d3 = abs(Id.matrix()).toarray().sum(axis=0) + + d_res = numpy.reshape(d1 + d2 + d3, ig.shape, 'F') + + print(d_res) + + + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py index 0f50e82..df6c076 100644 --- 
a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py @@ -7,6 +7,9 @@ Created on Wed Mar 6 19:30:51 2019 """ from ccpi.optimisation.operators import LinearOperator +import scipy.sparse as sp +import numpy as np +from ccpi.framework import ImageData class Identity(LinearOperator): @@ -39,4 +42,38 @@ class Identity(LinearOperator): return self.gm_domain def range_geometry(self): - return self.gm_range \ No newline at end of file + return self.gm_range + + def matrix(self): + + return sp.eye(np.prod(self.gm_domain.shape)) + + def sum_abs_row(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + + def sum_abs_col(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + + M, N= 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + Id = Identity(ig) + d = Id.matrix() + print(d.toarray()) + + d1 = Id.sum_abs_col() + print(d1.as_array()) + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index 57d89ad..63c1320 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -11,6 +11,7 @@ from .ScaledOperator import ScaledOperator from .BlockOperator import BlockOperator from .BlockScaledOperator import BlockScaledOperator +from .SparseFiniteDiff import SparseFiniteDiff from .FiniteDifferenceOperator import FiniteDiff from .GradientOperator import Gradient @@ -18,4 +19,4 @@ from .SymmetrizedGradientOperator import SymmetrizedGradient from .IdentityOperator import Identity from .ZeroOperator import ZeroOp -from .SparseFiniteDiff import SparseFiniteDiff + diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index 52b7922..8bd63c2 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -55,7 +55,7 @@ detectors = 150 angles = np.linspace(0,np.pi,100) ag = AcquisitionGeometry('parallel','2D',angles, detectors) -Aop = AstraProjectorSimple(ig, ag, 'cpu') +Aop = AstraProjectorSimple(ig, ag, 'gpu') sin = Aop.direct(data) plt.imshow(sin.as_array()) diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py index 7ac1566..e9a85cc 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py @@ -66,7 +66,7 @@ detectors = 150 angles = np.linspace(0,np.pi,100) ag = AcquisitionGeometry('parallel','2D',angles, detectors, channels = np.shape(phantom_2Dt)[0]) -Aop = AstraProjectorMC(ig, ag, 'cpu') +Aop = AstraProjectorMC(ig, ag, 'gpu') sin = Aop.direct(data) plt.imshow(sin.as_array()[10]) -- cgit v1.2.3 From c8eeb3b9f202c16535f3c056a09fb74f638c43f2 Mon Sep 17 00:00:00 2001 From: Vaggelis Date: Wed, 3 Apr 2019 00:10:52 +0100 Subject: add precond test blockOperator --- .../ccpi/optimisation/operators/BlockOperator.py | 34 ++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 19da3d4..752fd21 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py 
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -236,7 +236,21 @@ class BlockOperator(Operator): return ImageData(tmp) else: return BlockDataContainer(*res) - + + def sum_abs_col(self): + + res = [] + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row, col).sum_abs_col() + else: + prod += self.get_item(row, col).sum_abs_col() + res.append(prod) + + return BlockDataContainer(*res) + + if __name__ == '__main__': @@ -247,7 +261,7 @@ if __name__ == '__main__': from ccpi.optimisation.operators import Operator, LinearOperator - M, N= 4, 3 + M, N = 4, 3 ig = ImageGeometry(M, N) arr = ig.allocate('random_int') G = Gradient(ig) @@ -263,11 +277,27 @@ if __name__ == '__main__': d1 = abs(Gx.matrix()).toarray().sum(axis=0) d2 = abs(Gy.matrix()).toarray().sum(axis=0) d3 = abs(Id.matrix()).toarray().sum(axis=0) + d_res = numpy.reshape(d1 + d2 + d3, ig.shape, 'F') print(d_res) + z1 = abs(Gx.matrix()).toarray().sum(axis=1) + z2 = abs(Gy.matrix()).toarray().sum(axis=1) + z3 = abs(Id.matrix()).toarray().sum(axis=1) + + z_res = BlockDataContainer(BlockDataContainer(ImageData(numpy.reshape(z2, ig.shape, 'F')),\ + ImageData(numpy.reshape(z1, ig.shape, 'F'))),\ + ImageData(numpy.reshape(z3, ig.shape, 'F'))) + + ttt = B.sum_abs_col() + + numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4) + + -- cgit v1.2.3 From 91b455964f71b1cb612f6e12af4b7faf2fe5c76e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Apr 2019 12:11:48 +0100 Subject: precond test --- .../ccpi/optimisation/operators/BlockOperator.py | 16 +-- .../optimisation/operators/GradientOperator.py | 1 + Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 134 +++++++++++++++++++++ 3 files changed, 143 insertions(+), 8 deletions(-) create mode 100644 Wrappers/Python/wip/pdhg_TV_denoising_precond.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 752fd21..484dc61 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -8,7 +8,7 @@ Created on Thu Feb 14 12:36:40 2019 import numpy from numbers import Number import functools -from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer +from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer, DataContainer from ccpi.optimisation.operators import Operator, LinearOperator from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator from ccpi.framework import BlockGeometry @@ -224,7 +224,7 @@ class BlockOperator(Operator): res = [] for row in range(self.shape[0]): - for col in range(self.shape[1]): + for col in range(self.shape[1]): if col == 0: prod = self.get_item(row,col).sum_abs_row() else: @@ -269,8 +269,8 @@ if __name__ == '__main__': B = BlockOperator(G, Id) - print(B.sum_abs_row().as_array()) - + print(B.sum_abs_row()) +# Gx = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann') Gy = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann') @@ -282,17 +282,17 @@ if __name__ == '__main__': d_res = numpy.reshape(d1 + d2 + d3, ig.shape, 'F') print(d_res) - +# z1 = abs(Gx.matrix()).toarray().sum(axis=1) z2 = 
abs(Gy.matrix()).toarray().sum(axis=1) z3 = abs(Id.matrix()).toarray().sum(axis=1) - +# z_res = BlockDataContainer(BlockDataContainer(ImageData(numpy.reshape(z2, ig.shape, 'F')),\ ImageData(numpy.reshape(z1, ig.shape, 'F'))),\ ImageData(numpy.reshape(z3, ig.shape, 'F'))) - +# ttt = B.sum_abs_col() - +# numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4) numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4) numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4) diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 87723f0..cd65ee4 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -122,6 +122,7 @@ if __name__ == '__main__': d2 = G_neum.sum_abs_col() print(d2) + d1 * d2 diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py new file mode 100644 index 0000000..518ead2 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction + +from skimage.util import random_noise + + + +# ############################################################################ +# Create phantom for TV denoising + +N = 100 +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode='gaussian', seed=10) +noisy_data = ImageData(n1) + + +#%% + +# Regularisation Parameter +alpha = 2 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '0' +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions +# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ +# L2NormSq(0.5, b = noisy_data) ) + + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) + + f = BlockFunction(f1, f2 ) + g = ZeroFun() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = 0.5 * L2NormSquared(b = noisy_data) + ########################################################################### +#%% + +diag_precon = True + +if diag_precon: + tmp_tau = operator.sum_abs_row() + tmp_sigma = operator.sum_abs_col() + + tmp_sigma[0][0].as_array()[tmp_sigma[0][0].as_array()==0]=1 + tmp_sigma[0][1].as_array()[tmp_sigma[0][1].as_array()==0]=1 + tmp_sigma[1].as_array()[tmp_sigma[1].as_array()==0]=1 + + tau = 1/tmp_tau + sigma = 1/tmp_sigma + +else: + # Compute operator Norm + normK = operator.norm() + print ("normK", normK) + # Primal & dual stepsizes + sigma = 1 + tau = 1/(sigma*normK**2) + +#%% + +#opt = {'niter':2000} + +#res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + + +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 10 +# +#pdhg.run(2000) +# +# +# +#sol = pdhg.get_output().as_array() +##sol = result.as_array() +## +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +## +# +### +#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() +# + +#%% +# -- cgit v1.2.3 From 6922cdc16cf1a852c07adea49b7792f68d1ffe37 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 3 Apr 2019 17:31:54 +0100 Subject: add matrix identity --- .../optimisation/operators/IdentityOperator.py | 39 +++++- .../optimisation/operators/SparseFiniteDiff.py | 140 +++++++++++++++++++++ .../optimisation/operators/IdentityOperator.py | 2 +- 3 files changed, 179 insertions(+), 2 deletions(-) create mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py (limited to 'Wrappers') diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py index 0f50e82..52c7c3b 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py @@ -7,6 +7,9 @@ Created on Wed Mar 6 19:30:51 2019 """ from ccpi.optimisation.operators import LinearOperator +import scipy.sparse as sp +import numpy as np +from ccpi.framework import ImageData class Identity(LinearOperator): @@ -39,4 +42,38 @@ class Identity(LinearOperator): return self.gm_domain def range_geometry(self): - return self.gm_range \ No newline at end of file + return self.gm_range + + def matrix(self): + + 
return sp.eye(np.prod(self.gm_domain.shape)) + + def sum_abs_row(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + + def sum_abs_col(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + + M, N = 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + Id = Identity(ig) + d = Id.matrix() + print(d.toarray()) + + d1 = Id.sum_abs_col() + print(d1.as_array()) + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py new file mode 100644 index 0000000..0fb5efb --- /dev/null +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Apr 2 14:06:15 2019 + +@author: vaggelis +""" + +import scipy.sparse as sp +import numpy as np +from ccpi.framework import ImageData + +class SparseFiniteDiff(): + + def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): + + super(SparseFiniteDiff, self).__init__() + self.gm_domain = gm_domain + self.gm_range = gm_range + self.direction = direction + self.bnd_cond = bnd_cond + + if self.gm_range is None: + self.gm_range = self.gm_domain + + self.get_dims = [i for i in gm_domain.shape] + + if self.direction + 1 > len(self.gm_domain.shape): + raise ValueError('Gradient directions more than geometry domain') + + def matrix(self): + + i = self.direction + + mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil') + + if self.bnd_cond == 'Neumann': + mat[-1,:] = 0 + elif self.bnd_cond == 'Periodic': + mat[-1,0] = 1 + + tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0]) + + for j in range(1, self.gm_domain.length): + + tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad ) + + return tmpGrad + + def T(self): + return self.matrix().T + + def direct(self, x): + + x_asarr = x.as_array() + res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F') + return type(x)(res) + + def adjoint(self, x): + + x_asarr = x.as_array() + res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F') + return type(x)(res) + + def sum_abs_row(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + + def sum_abs_col(self): + + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + from ccpi.optimisation.operators import FiniteDiff + + # 2D + M, N= 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + for i in [0,1]: + + # Neumann + sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann') + G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann') + + # Periodic + sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic') + G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic') + + u_neum_direct = G_neum.direct(arr) + u_neum_sp_direct = sFD_neum.direct(arr) + np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4) + + u_neum_adjoint = G_neum.adjoint(arr) + u_neum_sp_adjoint = 
sFD_neum.adjoint(arr) + np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4) + + u_per_direct = G_neum.direct(arr) + u_per_sp_direct = sFD_neum.direct(arr) + np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4) + + u_per_adjoint = G_per.adjoint(arr) + u_per_sp_adjoint = sFD_per.adjoint(arr) + np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4) + + # 3D + M, N, K = 2, 3, 4 + ig3D = ImageGeometry(M, N, K) + arr3D = ig3D.allocate('random_int') + + for i in [0,1,2]: + + # Neumann + sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann') + G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann') + + # Periodic + sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic') + G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic') + + u_neum_direct3D = G_neum3D.direct(arr3D) + u_neum_sp_direct3D = sFD_neum3D.direct(arr3D) + np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4) + + u_neum_adjoint3D = G_neum3D.adjoint(arr3D) + u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D) + np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4) + + u_per_direct3D = G_neum3D.direct(arr3D) + u_per_sp_direct3D = sFD_neum3D.direct(arr3D) + np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4) + + u_per_adjoint3D = G_per3D.adjoint(arr3D) + u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D) + np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4) + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py index df6c076..52c7c3b 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py @@ -61,7 +61,7 @@ if __name__ == '__main__': from ccpi.framework import ImageGeometry - M, N= 2, 3 + M, N = 2, 3 ig = ImageGeometry(M, N) arr = ig.allocate('random_int') -- cgit v1.2.3 From 7a35a4c8d29455bb268b1e6f01f923c81f9c863a Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:35:22 +0100 Subject: reverse add order proximal conjugate --- Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 597d4d8..889d703 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -80,7 +80,8 @@ class L2NormSquared(Function): tmp = 0 if self.b is not None: - tmp = (self.b * x).sum() +# tmp = (self.b * x).sum() + tmp = (x * self.b).sum() if out is None: # FIXME: this is a number @@ -117,7 +118,8 @@ class L2NormSquared(Function): if out is None: if self.b is not None: - return (x - tau*self.b)/(1 + tau/2) + # change the order cannot add ImageData + NestedBlock + return (-1* tau*self.b + x)/(1 + tau/2) else: return x/(1 + tau/2 ) else: -- cgit v1.2.3 From c102b119a1dd9444fba0c244ebcfe260cd679a7f Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:36:04 +0100 Subject: add methods for precond --- 
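The absolute row/column sums added below are precisely the quantities needed for diagonally preconditioned PDHG (in the spirit of Pock and Chambolle, ICCV 2011): each dual element i gets sigma_i = 1/sum_j |K_ij| and each primal element j gets tau_j = 1/sum_i |K_ij|. A minimal usage sketch, assuming f, g and operator are assembled as in the wip denoising script of this series:

    tau   = 1/operator.sum_abs_row()   # element-wise primal step sizes (domain side)
    sigma = 1/operator.sum_abs_col()   # element-wise dual step sizes (range side)
    res = PDHG_old(f, g, operator, tau=tau, sigma=sigma, opt={'niter': 2000})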
.../optimisation/operators/GradientOperator.py | 74 +++++++++++++++++++--- .../optimisation/operators/IdentityOperator.py | 4 +- .../optimisation/operators/SparseFiniteDiff.py | 2 +- 3 files changed, 69 insertions(+), 11 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index cd65ee4..e00de0c 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -107,22 +107,80 @@ class Gradient(LinearOperator): if __name__ == '__main__': + + from ccpi.optimisation.operators import Identity, BlockOperator + M, N = 2, 3 ig = ImageGeometry(M, N) arr = ig.allocate('random_int' ) - G_neum = Gradient(ig) + # check direct of Gradient and sparse matrix + G = Gradient(ig) + G_sp = G.matrix() + + res1 = G.direct(arr) + res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') + + print(res1[0].as_array()) + print(res1y) + + res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') + + print(res1[1].as_array()) + print(res1x) - d = G_neum.matrix() - print(d[1]) + #check sum abs row + conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))) + print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F')) + print(G.sum_abs_row().as_array()) - d1 = G_neum.sum_abs_row() - print(d1.as_array()) + print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F')) - d2 = G_neum.sum_abs_col() - print(d2) + print(G.sum_abs_col()[0].as_array()) + print(G.sum_abs_col()[1].as_array()) - d1 * d2 + # Check Blockoperator sum abs col and row + op1 = Gradient(ig) + op2 = Identity(ig) + B = BlockOperator( op1, op2) + Brow = B.sum_abs_row() + Bcol = B.sum_abs_col() + + concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray())) + + print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F')) + print(Brow.as_array()) + + print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F')) + print(Bcol[1].as_array()) + + +# print(numpy.concatene(G_sp[0].toarray()+ )) +# print(G_sp[1].toarray()) +# +# d1 = G.sum_abs_row() +# print(d1.as_array()) +# +# d2 = G_neum.sum_abs_col() +## print(d2) +# +# +# ########################################################### + a = BlockDataContainer( BlockDataContainer(arr, arr), arr) + b = BlockDataContainer( BlockDataContainer(arr+5, arr+3), arr+2) + c = a/b + + print(c[0][0].as_array(), (arr/(arr+5)).as_array()) + print(c[0][1].as_array(), (arr/(arr+3)).as_array()) + print(c[1].as_array(), (arr/(arr+2)).as_array()) + + + a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) +# +# c1 = arr + a +# c2 = arr + a +# c2 = a1 + arr +# diff --git a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py index 52c7c3b..a58a296 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/IdentityOperator.py @@ -50,11 +50,11 @@ class Identity(LinearOperator): def sum_abs_row(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) def sum_abs_col(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), 
self.gm_domain.shape, 'F'))) + return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) if __name__ == '__main__': diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py index 0fb5efb..0b5e85f 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -68,7 +68,7 @@ class SparseFiniteDiff(): def sum_abs_col(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C'))) if __name__ == '__main__': -- cgit v1.2.3 From 2ee7afd4cb57a51071ba454e79880e78ce24c03b Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:37:11 +0100 Subject: change proxima, proximal conjugate for tau BlockDataContainer --- .../Python/ccpi/optimisation/functions/BlockFunction.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 70216a9..81c16cd 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -10,6 +10,7 @@ import numpy as np #from ccpi.optimisation.funcs import Function from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer +from numbers import Number class BlockFunction(Function): '''A Block vector of Functions @@ -52,16 +53,24 @@ class BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): '''proximal_conjugate does not take into account the BlockOperator''' out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) return BlockDataContainer(*out) def proximal(self, x, tau, out = None): '''proximal does not take into account the BlockOperator''' out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) return BlockDataContainer(*out) -- cgit v1.2.3 From 3fbc6020ac3eecf228133d69bd7683b946cba9bf Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:37:35 +0100 Subject: add precond example --- Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 48 ++++++++++++++++-------- 1 file changed, 33 insertions(+), 15 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py index 518ead2..6792f43 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -24,7 +24,7 @@ from skimage.util import random_noise # ############################################################################ # Create phantom for TV denoising -N = 100 +N = 500 data = 
np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -74,34 +74,52 @@ else: ########################################################################### #%% -diag_precon = True +diag_precon = False if diag_precon: - tmp_tau = operator.sum_abs_row() - tmp_sigma = operator.sum_abs_col() - tmp_sigma[0][0].as_array()[tmp_sigma[0][0].as_array()==0]=1 - tmp_sigma[0][1].as_array()[tmp_sigma[0][1].as_array()==0]=1 - tmp_sigma[1].as_array()[tmp_sigma[1].as_array()==0]=1 + tmp_tau = 1/operator.sum_abs_row() + tmp_sigma = 1/operator.sum_abs_col() - tau = 1/tmp_tau - sigma = 1/tmp_sigma + tmp_sigma[0][0].as_array()[tmp_sigma[0][0].as_array()==np.inf]=0 + tmp_sigma[0][1].as_array()[tmp_sigma[0][1].as_array()==np.inf]=0 + tmp_sigma[1].as_array()[tmp_sigma[1].as_array()==np.inf]=0 + tau = tmp_tau + sigma = tmp_sigma + else: # Compute operator Norm normK = operator.norm() print ("normK", normK) # Primal & dual stepsizes - sigma = 1 - tau = 1/(sigma*normK**2) + sigma = 1/normK + tau = 1/normK +# tau = 1/(sigma*normK**2) #%% -#opt = {'niter':2000} +opt = {'niter':2000} +# +res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +aaa = res[0].as_array() +# +plt.imshow(aaa) +plt.colorbar() +plt.show() +#c2 = aaa +#del aaa +#%% -#res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) - - +#c2 = aaa +##%% +#%% +z = c1 - c2 +plt.imshow(np.abs(z[0:95,0:95])) +plt.colorbar() + +#%% #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) #pdhg.max_iteration = 2000 #pdhg.update_objective_interval = 10 -- cgit v1.2.3 From 8d11cade9c2dcf30d4a92ad9be4bc39b05c83b7e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:38:57 +0100 Subject: is compatible edit for NestedBlock and image Data --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 8e55b67..da6ee5b 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -52,6 +52,12 @@ class BlockDataContainer(object): def is_compatible(self, other): '''basic check if the size of the 2 objects fit''' + + for i in range(len(self.containers)): + if type(self.containers[i])==type(self): + self = self.containers[i] + + if isinstance(other, Number): return True elif isinstance(other, list): -- cgit v1.2.3 From ea7113b7d86453077dc45674ab8506aac5f2b8e0 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:39:30 +0100 Subject: add get_item --- Wrappers/Python/ccpi/framework/BlockGeometry.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py index d336305..0f43155 100755 --- a/Wrappers/Python/ccpi/framework/BlockGeometry.py +++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py @@ -27,6 +27,10 @@ class BlockGeometry(object): raise ValueError( 'Dimension and size do not match: expected {} got {}' .format(n_elements, len(args))) + + def get_item(self, index): + '''returns the Geometry in the BlockGeometry located at position index''' + return self.geometries[index] def allocate(self, value=0, dimension_labels=None): containers = [geom.allocate(value) for geom in self.geometries] -- cgit v1.2.3 From 2a1607f35aebc1938f30ba66f700a8f893ed5be4 Mon Sep 17 
00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:40:28 +0100 Subject: to work with precond --- Wrappers/Python/ccpi/framework/__init__.py | 1 + Wrappers/Python/ccpi/framework/framework.py | 3 +++ Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 13 +++++++------ 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py index 66e2f56..229edb5 100755 --- a/Wrappers/Python/ccpi/framework/__init__.py +++ b/Wrappers/Python/ccpi/framework/__init__.py @@ -15,6 +15,7 @@ from datetime import timedelta, datetime import warnings from functools import reduce + from .framework import DataContainer from .framework import ImageData, AcquisitionData from .framework import ImageGeometry, AcquisitionGeometry diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index ae9faf7..07c2ead 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -29,6 +29,7 @@ import warnings from functools import reduce from numbers import Number + def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] @@ -496,6 +497,7 @@ class DataContainer(object): ## algebra def __add__(self, other, *args, **kwargs): out = kwargs.get('out', None) + if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -601,6 +603,7 @@ class DataContainer(object): deep_copy=True, dimension_labels=self.dimension_labels, geometry=self.geometry) + else: raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , type(other))) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 1229c4e..084818c 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -8,7 +8,7 @@ Created on Mon Feb 4 16:18:06 2019 from ccpi.optimisation.algorithms import Algorithm from ccpi.framework import ImageData import numpy as np -#import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer @@ -120,12 +120,13 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): objective = [] + for i in range(niter): # Gradient descent, Dual problem solution y_tmp = y_old + sigma * operator.direct(xbar) y = f.proximal_conjugate(y_tmp, sigma) - + # Gradient ascent, Primal problem solution x_tmp = x_old - tau * operator.adjoint(y) x = g.proximal(x_tmp, tau) @@ -135,15 +136,15 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y - + if i%100==0: primal = f(operator.direct(x)) + g(x) dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) - print( i, primal, dual) + print( i, primal, dual, primal-dual) - plt.imshow(x.as_array()) - plt.show() +# plt.imshow(x.as_array()) +# plt.show() # print(f(operator.direct(x)) + g(x), i) t_end = time.time() -- cgit v1.2.3 From 2109545a9ff802fb8797694c1443c0858c13960e Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 17:56:09 +0100 Subject: working example for predon tv --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 1 - Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 2 ++ .../ccpi/optimisation/operators/SparseFiniteDiff.py | 8 
++++++-- Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 18 ++++++++---------- 4 files changed, 16 insertions(+), 13 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index da6ee5b..21ef3f0 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -57,7 +57,6 @@ class BlockDataContainer(object): if type(self.containers[i])==type(self): self = self.containers[i] - if isinstance(other, Number): return True elif isinstance(other, list): diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 084818c..d0e27ae 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -151,3 +151,5 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): return x, t_end - t, objective + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py index 0b5e85f..1b88cba 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -64,11 +64,15 @@ class SparseFiniteDiff(): def sum_abs_row(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')) + res[res==0]=1 + return ImageData(res) def sum_abs_col(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C'))) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C')) + res[res==0]=1 + return ImageData(res) if __name__ == '__main__': diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py index 6792f43..2e0b9f4 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -24,7 +24,7 @@ from skimage.util import random_noise # ############################################################################ # Create phantom for TV denoising -N = 500 +N = 100 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -78,16 +78,14 @@ diag_precon = False if diag_precon: - tmp_tau = 1/operator.sum_abs_row() - tmp_sigma = 1/operator.sum_abs_col() + def tau_sigma_precond(operator): + tau = 1/operator.sum_abs_row() + sigma = 1/ operator.sum_abs_col() - tmp_sigma[0][0].as_array()[tmp_sigma[0][0].as_array()==np.inf]=0 - tmp_sigma[0][1].as_array()[tmp_sigma[0][1].as_array()==np.inf]=0 - tmp_sigma[1].as_array()[tmp_sigma[1].as_array()==np.inf]=0 - - tau = tmp_tau - sigma = tmp_sigma - + return tau, sigma + + tau, sigma = tau_sigma_precond(operator) + else: # Compute operator Norm normK = operator.norm() -- cgit v1.2.3 From 2b53e85e3a6c750ac7241671662e58c9752fd686 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 4 Apr 2019 22:31:45 +0100 Subject: precond with Tomo --- .../build/lib/ccpi/framework/BlockDataContainer.py | 5 + .../build/lib/ccpi/framework/BlockGeometry.py | 4 + .../Python/build/lib/ccpi/framework/__init__.py | 1 + .../Python/build/lib/ccpi/framework/framework.py | 3 + .../build/lib/ccpi/optimisation/algorithms/PDHG.py | 81 +++++++++++++- 
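A note on the zero-sum guard that these commits carry over into the build/lib copies: with Neumann boundary conditions the sparse finite-difference matrix has its last row zeroed (mat[-1,:] = 0), so the corresponding absolute sum vanishes and sigma = 1/operator.sum_abs_col() would contain infinities, which the earlier denoising script had to patch by hand (the tmp_sigma == np.inf assignments). Replacing zero sums by one inside SparseFiniteDiff keeps both reciprocals finite; a sketch of the intended effect, assuming the BlockOperator(Gradient, Identity) setup from the denoising example:

    tau   = 1/operator.sum_abs_row()   # finite everywhere once zero sums map to 1
    sigma = 1/operator.sum_abs_col()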
.../lib/ccpi/optimisation/algorithms/__init__.py | 2 + .../ccpi/optimisation/functions/BlockFunction.py | 17 ++- .../ccpi/optimisation/functions/L2NormSquared.py | 31 ++++-- .../optimisation/operators/GradientOperator.py | 116 ++++++++++++++++++++- .../optimisation/operators/IdentityOperator.py | 4 +- .../optimisation/operators/SparseFiniteDiff.py | 8 +- .../ccpi/optimisation/algorithms/__init__.py | 1 + Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 3 +- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 83 +++++++++------ 14 files changed, 302 insertions(+), 57 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py index 8e55b67..21ef3f0 100644 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py @@ -52,6 +52,11 @@ class BlockDataContainer(object): def is_compatible(self, other): '''basic check if the size of the 2 objects fit''' + + for i in range(len(self.containers)): + if type(self.containers[i])==type(self): + self = self.containers[i] + if isinstance(other, Number): return True elif isinstance(other, list): diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py index d336305..0f43155 100644 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py +++ b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py @@ -27,6 +27,10 @@ class BlockGeometry(object): raise ValueError( 'Dimension and size do not match: expected {} got {}' .format(n_elements, len(args))) + + def get_item(self, index): + '''returns the Geometry in the BlockGeometry located at position index''' + return self.geometries[index] def allocate(self, value=0, dimension_labels=None): containers = [geom.allocate(value) for geom in self.geometries] diff --git a/Wrappers/Python/build/lib/ccpi/framework/__init__.py b/Wrappers/Python/build/lib/ccpi/framework/__init__.py index 66e2f56..229edb5 100644 --- a/Wrappers/Python/build/lib/ccpi/framework/__init__.py +++ b/Wrappers/Python/build/lib/ccpi/framework/__init__.py @@ -15,6 +15,7 @@ from datetime import timedelta, datetime import warnings from functools import reduce + from .framework import DataContainer from .framework import ImageData, AcquisitionData from .framework import ImageGeometry, AcquisitionGeometry diff --git a/Wrappers/Python/build/lib/ccpi/framework/framework.py b/Wrappers/Python/build/lib/ccpi/framework/framework.py index ae9faf7..07c2ead 100644 --- a/Wrappers/Python/build/lib/ccpi/framework/framework.py +++ b/Wrappers/Python/build/lib/ccpi/framework/framework.py @@ -29,6 +29,7 @@ import warnings from functools import reduce from numbers import Number + def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] @@ -496,6 +497,7 @@ class DataContainer(object): ## algebra def __add__(self, other, *args, **kwargs): out = kwargs.get('out', None) + if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -601,6 +603,7 @@ class DataContainer(object): deep_copy=True, dimension_labels=self.dimension_labels, geometry=self.geometry) + else: raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , type(other))) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py 
b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py index 043fe38..d0e27ae 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py @@ -8,11 +8,14 @@ Created on Mon Feb 4 16:18:06 2019 from ccpi.optimisation.algorithms import Algorithm from ccpi.framework import ImageData import numpy as np -#import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer + +import matplotlib.pyplot as plt + class PDHG(Algorithm): '''Primal Dual Hybrid Gradient''' @@ -69,9 +72,10 @@ class PDHG(Algorithm): self.xbar *= self.theta self.xbar += self.x - self.x_old.fill(self.x) - self.y_old.fill(self.y) - #self.y_old = y.copy() +# self.x_old.fill(self.x) +# self.y_old.fill(self.y) + self.y_old = self.y.copy() + self.x_old = self.x.copy() #self.y = self.y_old def update_objective(self): @@ -80,3 +84,72 @@ class PDHG(Algorithm): ]) + +def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): + + # algorithmic parameters + if opt is None: + opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ + 'memopt': False} + + if sigma is None and tau is None: + raise ValueError('Need sigma*tau||K||^2<1') + + niter = opt['niter'] if 'niter' in opt.keys() else 1000 + tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False + stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False + + + x_old = operator.domain_geometry().allocate() + y_old = operator.range_geometry().allocate() + + + xbar = x_old + x_tmp = x_old + x = x_old + + y_tmp = y_old + y = y_tmp + + # relaxation parameter + theta = 1 + + t = time.time() + + objective = [] + + + for i in range(niter): + + # Gradient descent, Dual problem solution + y_tmp = y_old + sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_tmp, sigma) + + # Gradient ascent, Primal problem solution + x_tmp = x_old - tau * operator.adjoint(y) + x = g.proximal(x_tmp, tau) + + #Update + xbar = x + theta * (x - x_old) + + x_old = x + y_old = y + + if i%100==0: + + primal = f(operator.direct(x)) + g(x) + dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) + print( i, primal, dual, primal-dual) + +# plt.imshow(x.as_array()) +# plt.show() +# print(f(operator.direct(x)) + g(x), i) + + t_end = time.time() + + return x, t_end - t, objective + + + diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py index 443bc78..f562973 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py @@ -28,3 +28,5 @@ from .GradientDescent import GradientDescent from .FISTA import FISTA from .FBPD import FBPD from .PDHG import PDHG +from .PDHG import PDHG_old + diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py index 70216a9..81c16cd 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py @@ -10,6 +10,7 @@ import numpy as np #from ccpi.optimisation.funcs import Function from ccpi.optimisation.functions import Function from ccpi.framework import 
BlockDataContainer +from numbers import Number class BlockFunction(Function): '''A Block vector of Functions @@ -52,16 +53,24 @@ class BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): '''proximal_conjugate does not take into account the BlockOperator''' out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) return BlockDataContainer(*out) def proximal(self, x, tau, out = None): '''proximal does not take into account the BlockOperator''' out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) return BlockDataContainer(*out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py index 5489d92..889d703 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py @@ -1,12 +1,21 @@ # -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 -@author: evangelos -""" +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import numpy from ccpi.optimisation.functions import Function @@ -71,14 +80,15 @@ class L2NormSquared(Function): tmp = 0 if self.b is not None: - tmp = (self.b * x).sum() +# tmp = (self.b * x).sum() + tmp = (x * self.b).sum() if out is None: # FIXME: this is a number - return (1/4) * x.squared_norm() + tmp + return (1./4.) * x.squared_norm() + tmp else: # FIXME: this is a DataContainer - out.fill((1/4) * x.squared_norm() + tmp) + out.fill((1./4.) 
* x.squared_norm() + tmp) def proximal(self, x, tau, out = None): @@ -108,7 +118,8 @@ class L2NormSquared(Function): if out is None: if self.b is not None: - return (x - tau*self.b)/(1 + tau/2) + # change the order cannot add ImageData + NestedBlock + return (-1* tau*self.b + x)/(1 + tau/2) else: return x/(1 + tau/2 ) else: diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py index ec14b8f..e00de0c 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py @@ -8,9 +8,9 @@ Created on Fri Mar 1 22:50:04 2019 from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry +from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer import numpy -from ccpi.optimisation.operators import FiniteDiff +from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff #%% @@ -45,7 +45,6 @@ class Gradient(LinearOperator): tmp = self.gm_range.allocate() - for i in range(tmp.shape[0]): tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) return tmp @@ -73,6 +72,115 @@ class Gradient(LinearOperator): def __rmul__(self, scalar): return ScaledOperator(self, scalar) + + def matrix(self): + + tmp = self.gm_range.allocate() + + mat = [] + for i in range(tmp.shape[0]): + + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + mat.append(spMat.matrix()) + + return BlockDataContainer(*mat) + + + def sum_abs_row(self): + + tmp = self.gm_range.allocate() + res = self.gm_domain.allocate() + for i in range(tmp.shape[0]): + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + res += spMat.sum_abs_row() + return res + + def sum_abs_col(self): + + tmp = self.gm_range.allocate() + res = [] + for i in range(tmp.shape[0]): + spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) + res.append(spMat.sum_abs_col()) + return BlockDataContainer(*res) + + if __name__ == '__main__': - pass + + from ccpi.optimisation.operators import Identity, BlockOperator + + M, N = 2, 3 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int' ) + + # check direct of Gradient and sparse matrix + G = Gradient(ig) + G_sp = G.matrix() + + res1 = G.direct(arr) + res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') + + print(res1[0].as_array()) + print(res1y) + + res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') + + print(res1[1].as_array()) + print(res1x) + + #check sum abs row + conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))) + print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F')) + print(G.sum_abs_row().as_array()) + + print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F')) + + print(G.sum_abs_col()[0].as_array()) + print(G.sum_abs_col()[1].as_array()) + + # Check Blockoperator sum abs col and row + + op1 = Gradient(ig) + op2 = Identity(ig) + + B = BlockOperator( op1, op2) + + Brow = B.sum_abs_row() + Bcol = B.sum_abs_col() + + concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray())) + + 
print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F')) + print(Brow.as_array()) + + print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F')) + print(Bcol[1].as_array()) + + +# print(numpy.concatene(G_sp[0].toarray()+ )) +# print(G_sp[1].toarray()) +# +# d1 = G.sum_abs_row() +# print(d1.as_array()) +# +# d2 = G_neum.sum_abs_col() +## print(d2) +# +# +# ########################################################### + a = BlockDataContainer( BlockDataContainer(arr, arr), arr) + b = BlockDataContainer( BlockDataContainer(arr+5, arr+3), arr+2) + c = a/b + + print(c[0][0].as_array(), (arr/(arr+5)).as_array()) + print(c[0][1].as_array(), (arr/(arr+3)).as_array()) + print(c[1].as_array(), (arr/(arr+2)).as_array()) + + + a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) +# +# c1 = arr + a +# c2 = arr + a +# c2 = a1 + arr +# diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py index 52c7c3b..a58a296 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py @@ -50,11 +50,11 @@ class Identity(LinearOperator): def sum_abs_row(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) def sum_abs_col(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) if __name__ == '__main__': diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py index 0fb5efb..1b88cba 100644 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py +++ b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -64,11 +64,15 @@ class SparseFiniteDiff(): def sum_abs_row(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')) + res[res==0]=1 + return ImageData(res) def sum_abs_col(self): - return ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C')) + res[res==0]=1 + return ImageData(res) if __name__ == '__main__': diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py index a28c0bf..f562973 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/__init__.py @@ -29,3 +29,4 @@ from .FISTA import FISTA from .FBPD import FBPD from .PDHG import PDHG from .PDHG import PDHG_old + diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py index 2e0b9f4..d5c021d 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -74,11 +74,12 @@ else: ########################################################################### #%% -diag_precon = False +diag_precon = True if diag_precon: def 
tau_sigma_precond(operator): + tau = 1/operator.sum_abs_row() sigma = 1/ operator.sum_abs_col() diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index 8bd63c2..9feb05b 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -26,10 +26,10 @@ from skimage.util import random_noise #%%############################################################################### # Create phantom for TV tomography -import os -import tomophantom -from tomophantom import TomoP2D -from tomophantom.supp.qualitymetrics import QualityTools +#import os +#import tomophantom +#from tomophantom import TomoP2D +#from tomophantom.supp.qualitymetrics import QualityTools #model = 1 # select a model number from the library #N = 150 # set dimension of the phantom @@ -55,7 +55,7 @@ detectors = 150 angles = np.linspace(0,np.pi,100) ag = AcquisitionGeometry('parallel','2D',angles, detectors) -Aop = AstraProjectorSimple(ig, ag, 'gpu') +Aop = AstraProjectorSimple(ig, ag, 'cpu') sin = Aop.direct(data) plt.imshow(sin.as_array()) @@ -95,32 +95,55 @@ g = ZeroFun() normK = operator.norm() ## Primal & dual stepsizes - -sigma = 10 -tau = 1/(sigma*normK**2) - -pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -pdhg.max_iteration = 5000 -pdhg.update_objective_interval = 250 - -pdhg.run(5000) - -#%% -sol = pdhg.get_output().as_array() -fig = plt.figure() -plt.subplot(1,2,1) -plt.imshow(noisy_data.as_array()) -#plt.colorbar() -plt.subplot(1,2,2) -plt.imshow(sol) -#plt.colorbar() -plt.show() - +diag_precon = False + +if diag_precon: + + def tau_sigma_precond(operator): + + tau = 1/operator.sum_abs_row() + sigma = 1/ operator.sum_abs_col() + + return tau, sigma + + tau, sigma = tau_sigma_precond(operator) + +else: + sigma = 10 + tau = 1/(sigma*normK**2) + +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 5000 +#pdhg.update_objective_interval = 250 +# +#pdhg.run(5000) + +opt = {'niter':2000} +# +res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +aaa = res[0].as_array() +# +plt.imshow(aaa) +plt.colorbar() +plt.show() #%% -plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') -plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') -plt.legend() -plt.show() +#sol = pdhg.get_output().as_array() +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +# +# +##%% +#plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() -- cgit v1.2.3 From ec6dabadd80b712beba834732042ba589038314d Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 08:35:20 +0100 Subject: check shape BDC --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 21ef3f0..9664037 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + # -*- coding: utf-8 -*- """ Created on Tue Mar 5 16:04:45 2019 @@ -23,12 +23,12 @@ class BlockDataContainer(object): '''''' self.containers = args self.index = 0 - #shape = kwargs.get('shape', None) 
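# A small illustration of the kwarg introduced in this hunk (a sketch; a and b stand for
# any two compatible DataContainers): BlockDataContainer(a, b) keeps the previous default
# shape of (len(args), 1), while BlockDataContainer(a, b, shape=(1, 2)) records the same
# containers as a 1x2 block, mirroring the shape argument BlockOperator already accepts.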
- #if shape is None: - # shape = (len(args),1) - shape = (len(args),1) + shape = kwargs.get('shape', None) + if shape is None: + shape = (len(args),1) +# shape = (len(args),1) self.shape = shape - #print (self.shape) + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) if len(args) != n_elements: raise ValueError( -- cgit v1.2.3 From c5673f753915b88308a32ec8734380c7a5468bb6 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 08:38:39 +0100 Subject: changes for Symmetrized Gradient --- Wrappers/Python/ccpi/framework/BlockGeometry.py | 7 +- .../Python/ccpi/optimisation/algorithms/PDHG.py | 24 ++-- .../operators/FiniteDifferenceOperator.py | 4 +- .../optimisation/operators/GradientOperator.py | 3 +- .../optimisation/operators/SparseFiniteDiff.py | 2 +- .../operators/SymmetrizedGradientOperator.py | 159 ++++++++++++++------- Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 25 ++-- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 13 +- 8 files changed, 155 insertions(+), 82 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockGeometry.py b/Wrappers/Python/ccpi/framework/BlockGeometry.py index 0f43155..5dd6750 100755 --- a/Wrappers/Python/ccpi/framework/BlockGeometry.py +++ b/Wrappers/Python/ccpi/framework/BlockGeometry.py @@ -16,17 +16,16 @@ class BlockGeometry(object): '''''' self.geometries = args self.index = 0 - #shape = kwargs.get('shape', None) - #if shape is None: - # shape = (len(args),1) + shape = (len(args),1) self.shape = shape - #print (self.shape) + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) if len(args) != n_elements: raise ValueError( 'Dimension and size do not match: expected {} got {}' .format(n_elements, len(args))) + def get_item(self, index): '''returns the Geometry in the BlockGeometry located at position index''' diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index d0e27ae..7c6bc8a 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -104,8 +104,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = operator.domain_geometry().allocate() y_old = operator.range_geometry().allocate() - - + xbar = x_old x_tmp = x_old x = x_old @@ -118,7 +117,9 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): t = time.time() - objective = [] + primal = [] + dual = [] + pdgap = [] for i in range(niter): @@ -137,11 +138,18 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y - if i%100==0: +# if i%100==0: + + p1 = f(operator.direct(x)) + g(x) + d1 = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) + pd1 = p1 - d1 + + primal.append(p1) + dual.append(d1) + pdgap.append(pd1) + - primal = f(operator.direct(x)) + g(x) - dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) - print( i, primal, dual, primal-dual) +# print( i, primal, dual, primal-dual) # plt.imshow(x.as_array()) # plt.show() @@ -149,7 +157,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): t_end = time.time() - return x, t_end - t, objective + return x, t_end - t, primal, dual, pdgap diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 24c4e4b..0faba22 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ 
b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -6,12 +6,12 @@ Created on Fri Mar 1 22:51:17 2019 @author: evangelos """ -from ccpi.optimisation.operators import Operator +from ccpi.optimisation.operators import LinearOperator from ccpi.optimisation.ops import PowerMethodNonsquare from ccpi.framework import ImageData, BlockDataContainer import numpy as np -class FiniteDiff(Operator): +class FiniteDiff(LinearOperator): # Works for Neum/Symmetric & periodic boundary conditions # TODO add central differences??? diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index e00de0c..0c267fc 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -43,8 +43,7 @@ class Gradient(LinearOperator): def direct(self, x, out=None): - tmp = self.gm_range.allocate() - + tmp = self.gm_range.allocate() for i in range(tmp.shape[0]): tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) return tmp diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py index 1b88cba..d54db9b 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -70,7 +70,7 @@ class SparseFiniteDiff(): def sum_abs_col(self): - res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C')) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F')) res[res==0]=1 return ImageData(res) diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py index d908e49..ea3ba8f 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py @@ -6,59 +6,93 @@ Created on Fri Mar 1 22:53:55 2019 @author: evangelos """ -from ccpi.optimisation.operators import Operator -from ccpi.optimisation.operators import FiniteDiff +from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, DataContainer -import numpy as np +from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer +import numpy +from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff -class SymmetrizedGradient(Operator): +class SymmetrizedGradient(Gradient): - def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs): + def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): - super(SymmetrizedGradient, self).__init__() + super(SymmetrizedGradient, self).__init__(gm_domain, bnd_cond, **kwargs) - self.gm_domain = gm_domain # Domain of Grad Operator - self.gm_range = gm_range # Range of Grad Operator - self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences - - # Kwargs Default options - self.memopt = kwargs.get('memopt',False) - self.correlation = kwargs.get('correlation','Space') + ''' + Domain of SymGrad is the Range of Gradient + ''' + self.gm_domain = self.gm_range + self.bnd_cond = bnd_cond + + self.channels = self.gm_range.get_item(0).channels - #TODO not tested yet, operator norm??? 
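# Background for this rework (a sketch): the symmetrised gradient acts on a vector field
# v = (v1, v2) and returns the symmetric part of its Jacobian, E(v) = (Dv + Dv^T)/2; in 2D
# the distinct components are dx(v1), dy(v2) and (dy(v1) + dx(v2))/2. This is the
# second-order term used in Total Generalised Variation (TGV) regularisation, which is why
# the domain is taken to be the range of Gradient, as the docstring added above states.
# Typical construction, as exercised in the __main__ test further down:
#     E1 = SymmetrizedGradient(ImageGeometry(N, M), correlation='Space', bnd_cond='Neumann')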
- self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) - + if self.correlation=='Space': + if self.channels>1: + pass + else: +# # 2D image ---> Dx v1, Dyv2, Dx + tmp = self.gm_domain.geometries + (self.gm_domain.get_item(0),) + self.gm_range = BlockGeometry(*tmp ) + self.ind1 = range(self.gm_domain.get_item(0).length) + self.ind2 = range(self.gm_domain.get_item(0).length-1, -1, -1) +# self.order = myorder = [0,1,2 3] + + elif self.correlation=='SpaceChannels': + if self.channels>1: + pass + else: + raise ValueError('No channels to correlate') + def direct(self, x, out=None): - tmp = np.zeros(self.gm_range) - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) - tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + - FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) +# tmp = numpy.zeros(self.gm_range) +# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) +# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) +# tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + +# FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) +# +# return type(x)(tmp) + + tmp = [[None]*2]*2 + for i in range(2): + for j in range(2): + tmp[i][j]=FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).adjoint(x.get_item(j)) + tmp = numpy.array(tmp) + z = 0.5 * (tmp.T + tmp) - return type(x)(tmp) - + return BlockDataContainer(z.tolist()) + def adjoint(self, x, out=None): + pass - tmp = np.zeros(self.gm_domain) + res = [] + for i in range(2): + for j in range(2): + + restmpFiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j)) + + - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) - - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) - - return type(x)(tmp) +# for + +# tmp = numpy.zeros(self.gm_domain) +# +# tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ +# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) +# +# tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ +# FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) +# +# return type(x)(tmp) def alloc_domain_dim(self): - return ImageData(np.zeros(self.gm_domain)) + return ImageData(numpy.zeros(self.gm_domain)) def alloc_range_dim(self): - return ImageData(np.zeros(self.range_dim)) + return ImageData(numpy.zeros(self.range_dim)) def domain_dim(self): return self.gm_domain @@ -80,29 +114,58 @@ if __name__ == '__main__': ########################################################################### ## Symmetrized Gradient + from ccpi.framework import DataContainer + from ccpi.optimisation.operators 
import Gradient, BlockOperator, FiniteDiff + import numpy as np N, M = 2, 3 - ig = (N,M) - ig2 = (2,) + ig - ig3 = (3,) + ig - u1 = DataContainer(np.random.randint(10, size=ig2)) - w1 = DataContainer(np.random.randint(10, size=ig3)) + K = 2 + + ig1 = ImageGeometry(N, M) + ig2 = ImageGeometry(N, M, channels=K) + + E1 = SymmetrizedGradient(ig1, correlation = 'Space', bnd_cond='Neumann') + E2 = SymmetrizedGradient(ig2, correlation = 'SpaceChannels', bnd_cond='Periodic') - E = SymmetrizedGradient(ig2,ig3) + print(E1.domain_geometry().shape) + print(E2.domain_geometry().shape) - d1 = E.direct(u1) - d2 = E.adjoint(w1) + u1 = E1.gm_domain.allocate('random_int') + u2 = E2.gm_domain.allocate('random_int') + + + res = E1.direct(u1) + + Dx = FiniteDiff(ig1, direction = 1, bnd_cond = 'Neumann') + Dy = FiniteDiff(ig1, direction = 0, bnd_cond = 'Neumann') + + B = BlockOperator(Dy, Dx) + V = BlockDataContainer(u1,u2) + + res = B.adjoint(V) - LHS = (d1.as_array()[0]*w1.as_array()[0] + \ - d1.as_array()[1]*w1.as_array()[1] + \ - 2*d1.as_array()[2]*w1.as_array()[2]).sum() +# ig = (N,M) +# ig2 = (2,) + ig +# ig3 = (3,) + ig +# u1 = ig.allocate('random_int') +# w1 = E.gm_range.allocate('random_int') +# DataContainer(np.random.randint(10, size=ig3)) - RHS = (u1.as_array()[0]*d2.as_array()[0] + \ - u1.as_array()[1]*d2.as_array()[1]).sum() - print(LHS, RHS, E.norm()) +# d1 = E.direct(u1) +# d2 = E.adjoint(w1) +# LHS = (d1.as_array()[0]*w1.as_array()[0] + \ +# d1.as_array()[1]*w1.as_array()[1] + \ +# 2*d1.as_array()[2]*w1.as_array()[2]).sum() +# +# RHS = (u1.as_array()[0]*d2.as_array()[0] + \ +# u1.as_array()[1]*d2.as_array()[1]).sum() +# +# +# print(LHS, RHS, E.norm()) +# # diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py index d5c021d..426ce8b 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -74,7 +74,7 @@ else: ########################################################################### #%% -diag_precon = True +diag_precon = False if diag_precon: @@ -82,7 +82,7 @@ if diag_precon: tau = 1/operator.sum_abs_row() sigma = 1/ operator.sum_abs_col() - + return tau, sigma tau, sigma = tau_sigma_precond(operator) @@ -99,14 +99,19 @@ else: #%% opt = {'niter':2000} -# -res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -aaa = res[0].as_array() -# -plt.imshow(aaa) +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) plt.colorbar() plt.show() + +#aaa = res[0].as_array() +# +#plt.imshow(aaa) +#plt.colorbar() +#plt.show() #c2 = aaa #del aaa #%% @@ -114,9 +119,9 @@ plt.show() #c2 = aaa ##%% #%% -z = c1 - c2 -plt.imshow(np.abs(z[0:95,0:95])) -plt.colorbar() +#z = c1 - c2 +#plt.imshow(np.abs(z[0:95,0:95])) +#plt.colorbar() #%% #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index 9feb05b..f06f166 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -13,7 +13,7 @@ from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, Acquisi import numpy as np import matplotlib.pyplot as plt -from ccpi.optimisation.algorithms import PDHG +from ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient from ccpi.optimisation.functions import 
ZeroFun, L2NormSquared, \ @@ -109,7 +109,7 @@ if diag_precon: tau, sigma = tau_sigma_precond(operator) else: - sigma = 10 + sigma = 1 tau = 1/(sigma*normK**2) #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) @@ -120,13 +120,12 @@ else: opt = {'niter':2000} # -res = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -aaa = res[0].as_array() -# -plt.imshow(aaa) +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) plt.colorbar() -plt.show() +plt.show() #%% #sol = pdhg.get_output().as_array() -- cgit v1.2.3 From 7114e1b3575e57d7f1f6b2a45edee473d01787a6 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 17:57:21 +0100 Subject: update L1Norm add tests --- .../Python/ccpi/optimisation/functions/L1Norm.py | 229 ++++++++++++++++----- .../Python/ccpi/optimisation/functions/__init__.py | 5 +- .../Python/wip/pdhg_TV_denoising_salt_pepper.py | 126 ++++++++++++ 3 files changed, 308 insertions(+), 52 deletions(-) create mode 100644 Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py index 5a47edd..163eefa 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py @@ -22,71 +22,202 @@ Created on Wed Mar 6 19:42:34 2019 @author: evangelos """ -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry +#import numpy as np +##from ccpi.optimisation.funcs import Function +#from ccpi.optimisation.functions import Function +#from ccpi.framework import DataContainer, ImageData +# +# +############################# L1NORM FUNCTIONS ############################# +#class SimpleL1Norm(Function): +# +# def __init__(self, alpha=1): +# +# super(SimpleL1Norm, self).__init__() +# self.alpha = alpha +# +# def __call__(self, x): +# return self.alpha * x.abs().sum() +# +# def gradient(self,x): +# return ValueError('Not Differentiable') +# +# def convex_conjugate(self,x): +# return 0 +# +# def proximal(self, x, tau): +# ''' Soft Threshold''' +# return x.sign() * (x.abs() - tau * self.alpha).maximum(0) +# +# def proximal_conjugate(self, x, tau): +# return x.divide((x.abs()/self.alpha).maximum(1.0)) + +#class L1Norm(SimpleL1Norm): +# +# def __init__(self, alpha=1, **kwargs): +# +# super(L1Norm, self).__init__() +# self.alpha = alpha +# self.b = kwargs.get('b',None) +# +# def __call__(self, x): +# +# if self.b is None: +# return SimpleL1Norm.__call__(self, x) +# else: +# return SimpleL1Norm.__call__(self, x - self.b) +# +# def gradient(self, x): +# return ValueError('Not Differentiable') +# +# def convex_conjugate(self,x): +# if self.b is None: +# return SimpleL1Norm.convex_conjugate(self, x) +# else: +# return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() +# +# def proximal(self, x, tau): +# +# if self.b is None: +# return SimpleL1Norm.proximal(self, x, tau) +# else: +# return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) +# +# def proximal_conjugate(self, x, tau): +# +# if self.b is None: +# return SimpleL1Norm.proximal_conjugate(self, x, tau) +# else: +# return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) +# +############################################################################### +from 
ccpi.optimisation.functions import Function +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.optimisation.operators import ShrinkageOperator + -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): +class L1Norm(Function): - def __init__(self, alpha=1): + def __init__(self, **kwargs): - super(SimpleL1Norm, self).__init__() - self.alpha = alpha + super(L1Norm, self).__init__() + self.b = kwargs.get('b',None) def __call__(self, x): - return self.alpha * x.abs().sum() + + y = x + if self.b is not None: + y = x - self.b + return y.abs().sum() def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 + #TODO implement subgradient??? + return ValueError('Not Differentiable') - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): + def convex_conjugate(self,x): + #TODO implement Indicator infty??? + + y = 0 + if self.b is not None: + y = 0 + (self.b * x).sum() + return y - def __init__(self, alpha=1, **kwargs): + def proximal(self, x, tau, out=None): - super(L1Norm, self).__init__() - self.alpha = alpha - self.b = kwargs.get('b',None) + # TODO implement shrinkage operator, we will need it later e.g SplitBregman - def __call__(self, x): + if out is None: + if self.b is not None: + return self.b + ShrinkageOperator.__call__(self, x - self.b, tau) + else: + return ShrinkageOperator.__call__(self, x, tau) + else: + if self.b is not None: + out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau)) + else: + out.fill(ShrinkageOperator.__call__(self, x, tau)) + + def proximal_conjugate(self, x, tau, out=None): - if self.b is None: - return SimpleL1Norm.__call__(self, x) + if out is None: + if self.b is not None: + return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)) + else: + return x.divide(x.abs().maximum(1.0)) else: - return SimpleL1Norm.__call__(self, x - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') + if self.b is not None: + out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))) + else: + out.fill(x.divide(x.abs().maximum(1.0)) ) - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() + def __rmul__(self, scalar): + return ScaledFunction(self, scalar) + + + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + import numpy + N, M = 40,40 + ig = ImageGeometry(N, M) + scalar = 10 + b = ig.allocate('random_int') + u = ig.allocate('random_int') + + f = L1Norm() + f_scaled = scalar * L1Norm() + + f_b = L1Norm(b=b) + f_scaled_b = scalar * L1Norm(b=b) + + # call - def proximal(self, x, tau): + a1 = f(u) + a2 = f_scaled(u) + numpy.testing.assert_equal(scalar * a1, a2) + + a3 = f_b(u) + a4 = f_scaled_b(u) + numpy.testing.assert_equal(scalar * a3, a4) + + # proximal + tau = 0.4 + b1 = f.proximal(u, tau*scalar) + b2 = f_scaled.proximal(u, tau) - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + + b3 = f_b.proximal(u, tau*scalar) + b4 = f_scaled_b.proximal(u, tau) + + z1 = b + (u-b).sign() * ((u-b).abs() - tau * 
scalar).maximum(0) - def proximal_conjugate(self, x, tau): + numpy.testing.assert_array_almost_equal(b3.as_array(), b4.as_array(), decimal=4) +# +# #proximal conjugate +# + c1 = f_scaled.proximal_conjugate(u, tau) + c2 = u.divide((u.abs()/scalar).maximum(1.0)) + + numpy.testing.assert_array_almost_equal(c1.as_array(), c2.as_array(), decimal=4) + + c3 = f_scaled_b.proximal_conjugate(u, tau) + c4 = (u - tau*b).divide( ((u-tau*b).abs()/scalar).maximum(1.0) ) + + numpy.testing.assert_array_almost_equal(c3.as_array(), c4.as_array(), decimal=4) + + + + + + + + - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - \ No newline at end of file + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 2ed36f5..9dbb505 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -2,11 +2,10 @@ from .Function import Function from .ZeroFun import ZeroFun -from .L1Norm import SimpleL1Norm, L1Norm -#from .L2NormSquared import L2NormSq, SimpleL2NormSq +from .L1Norm import L1Norm from .L2NormSquared import L2NormSquared -from .BlockFunction import BlockFunction from .ScaledFunction import ScaledFunction +from .BlockFunction import BlockFunction from .FunctionOperatorComposition import FunctionOperatorComposition from .MixedL21Norm import MixedL21Norm from .IndicatorBox import IndicatorBox diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py new file mode 100644 index 0000000..06df622 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, L1Norm, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction + + +from skimage.util import random_noise + + + +# ############################################################################ +# Create phantom for TV denoising + +N = 200 +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode = 's&p', seed=10) +noisy_data = ImageData(n1) + +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.show() + +#%% + +# Regularisation Parameter +alpha = 1000 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '1' +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions +# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ +# L2NormSq(0.5, b = noisy_data) ) + + f1 = alpha * MixedL21Norm() + f2 = L1Norm(b = noisy_data) + + f = BlockFunction(f1, f2 ) + g = ZeroFun() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = 0.5 * L1Norm(b = noisy_data) + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() +print ("normK", normK) +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) + +opt = {'niter':2000} + +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) +plt.colorbar() +plt.show() + +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 10 +# +#pdhg.run(2000) + + + +#sol = pdhg.get_output().as_array() +##sol = result.as_array() +## +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +## + +## +#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() + + +#%% +# -- cgit v1.2.3 From 213ddd3c62975184dfda95320c89db217e503170 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 17:57:57 +0100 Subject: update L1Norm add tests --- .../optimisation/operators/ShrinkageOperator.py | 19 +++++++ Wrappers/Python/wip/pdhg_TV_denoising.py | 60 +++++++++++++--------- 2 files changed, 54 insertions(+), 25 deletions(-) create mode 100644 Wrappers/Python/ccpi/optimisation/operators/ShrinkageOperator.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/ShrinkageOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ShrinkageOperator.py new file mode 100644 index 0000000..f47c655 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/ShrinkageOperator.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Mar 6 19:30:51 2019 + +@author: evangelos +""" + +from ccpi.framework import DataContainer + +class ShrinkageOperator(): + + def __init__(self): + pass + + def __call__(self, x, tau, out=None): + + return x.sign() * (x.abs() - tau).maximum(0) + \ No newline at end of file diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index a8e721f..e9787ac 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -11,7 +11,7 @@ from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer import numpy as np import matplotlib.pyplot as plt -from ccpi.optimisation.algorithms import PDHG +from 
ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ @@ -33,7 +33,7 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) ag = ig # Create noisy data. Add Gaussian noise -n1 = random_noise(data, mode='gaussian', seed=10) +n1 = random_noise(data, mode = 'gaussian', seed=10) noisy_data = ImageData(n1) @@ -43,7 +43,8 @@ noisy_data = ImageData(n1) alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '0' +method = '1' + if method == '0': # Create operators @@ -70,7 +71,7 @@ else: ########################################################################### operator = Gradient(ig) f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) - g = 0.5 * L2NormSquared(b = noisy_data) + g = L2NormSquared(b = noisy_data) ########################################################################### #%% @@ -81,32 +82,41 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -pdhg.max_iteration = 2000 -pdhg.update_objective_interval = 10 - -pdhg.run(2000) +opt = {'niter':2000} - +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) +plt.colorbar() +plt.show() -sol = pdhg.get_output().as_array() -#sol = result.as_array() +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 10 # -fig = plt.figure() -plt.subplot(1,2,1) -plt.imshow(noisy_data.as_array()) -#plt.colorbar() -plt.subplot(1,2,2) -plt.imshow(sol) -#plt.colorbar() -plt.show() +#pdhg.run(2000) # - +# +# +#sol = pdhg.get_output().as_array() +##sol = result.as_array() ## -plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') -plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') -plt.legend() -plt.show() +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +## +# +### +#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() #%% -- cgit v1.2.3 From 535241a9dfb6ad29a1d41d2aad737478705d5866 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:17:08 +0100 Subject: fix pdgap for function composition --- Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 7c6bc8a..94b0bde 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -12,6 +12,7 @@ import matplotlib.pyplot as plt import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer +from ccpi.optimisation.functions import FunctionOperatorComposition import matplotlib.pyplot as plt @@ -138,9 +139,10 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y -# if i%100==0: - - p1 = f(operator.direct(x)) + g(x) +# if isinstance(f, FunctionOperatorComposition): + p1 = f(x) + g(x) +# else: +# p1 = f(operator.direct(x)) + g(x) d1 = -(f.convex_conjugate(y) + 
g(-1*operator.adjoint(y))) pd1 = p1 - d1 @@ -148,12 +150,12 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): dual.append(d1) pdgap.append(pd1) - -# print( i, primal, dual, primal-dual) - -# plt.imshow(x.as_array()) -# plt.show() -# print(f(operator.direct(x)) + g(x), i) + if i%100==0: + print(p1, d1, pd1) +# if isinstance(f, FunctionOperatorComposition): +# p1 = f(x) + g(x) +# else: + t_end = time.time() -- cgit v1.2.3 From 02b5c1521cf810321a7e93714648dd955272043f Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:40:10 +0100 Subject: fix pdgap for function composition --- Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 94b0bde..df53e57 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -140,18 +140,18 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): y_old = y # if isinstance(f, FunctionOperatorComposition): - p1 = f(x) + g(x) +# p1 = f(x) + g(x) # else: -# p1 = f(operator.direct(x)) + g(x) - d1 = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) - pd1 = p1 - d1 +# p1 = f(operator.direct(x)) + g(x) +# d1 = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) +# pd1 = p1 - d1 - primal.append(p1) - dual.append(d1) - pdgap.append(pd1) +# primal.append(p1) +# dual.append(d1) +# pdgap.append(pd1) - if i%100==0: - print(p1, d1, pd1) +# if i%100==0: +# print(p1, d1, pd1) # if isinstance(f, FunctionOperatorComposition): # p1 = f(x) + g(x) # else: -- cgit v1.2.3 From ac82594858c278685ff7b99a4bfe42385966fba2 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:41:10 +0100 Subject: fix prox conjugate --- Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 889d703..f96c7a1 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -119,7 +119,7 @@ class L2NormSquared(Function): if out is None: if self.b is not None: # change the order cannot add ImageData + NestedBlock - return (-1* tau*self.b + x)/(1 + tau/2) + return (x - tau*self.b)/(1 + tau/2) else: return x/(1 + tau/2 ) else: -- cgit v1.2.3 From 01b0a84c552c3b0ca75789f913f8a3c48b60e7f4 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:42:02 +0100 Subject: add examples --- Wrappers/Python/wip/pdhg_TV_denoising.py | 11 ++-- Wrappers/Python/wip/pdhg_TV_denoising_precond.py | 2 +- .../Python/wip/pdhg_TV_denoising_salt_pepper.py | 60 ++++++++++++++++++---- 3 files changed, 58 insertions(+), 15 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index e9787ac..d871ba0 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -24,7 +24,7 @@ from skimage.util import random_noise # ############################################################################ # Create phantom for TV denoising -N = 600 +N = 200 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 
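# The corrected conjugate proximal above, prox_{s f*}(y) = (y - s*b)/(1 + s/2) for
# f(u) = ||u - b||^2, can be sanity-checked against the Moreau decomposition
#     prox_{s f*}(y) = y - s * prox_{f/s}(y/s),
# which holds for any convex f. A self-contained numpy check; the closed form for
# prox_{lam f} below is standard and written out only for this test:
import numpy as np

np.random.seed(0)
b = np.random.randn(10)
y = np.random.randn(10)
sigma = 0.7

def prox_f(z, lam):
    # argmin_u 0.5*|u - z|^2 + lam*|u - b|^2
    return (z + 2.0 * lam * b) / (1.0 + 2.0 * lam)

direct = (y - sigma * b) / (1.0 + sigma / 2.0)              # formula from the patch
via_moreau = y - sigma * prox_f(y / sigma, 1.0 / sigma)     # Moreau identity
assert np.allclose(direct, via_moreau)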
data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -33,9 +33,11 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) ag = ig # Create noisy data. Add Gaussian noise -n1 = random_noise(data, mode = 'gaussian', seed=10) +n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) noisy_data = ImageData(n1) +plt.imshow(noisy_data.as_array()) +plt.show() #%% @@ -55,9 +57,7 @@ if method == '0': operator = BlockOperator(op1, op2, shape=(2,1) ) #### Create functions -# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ -# L2NormSq(0.5, b = noisy_data) ) - + f1 = alpha * MixedL21Norm() f2 = 0.5 * L2NormSquared(b = noisy_data) @@ -72,6 +72,7 @@ else: operator = Gradient(ig) f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) g = L2NormSquared(b = noisy_data) + ########################################################################### #%% diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py index 426ce8b..3fc9320 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_precond.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_precond.py @@ -74,7 +74,7 @@ else: ########################################################################### #%% -diag_precon = False +diag_precon = True if diag_precon: diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py index 06df622..eb7eef4 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py @@ -25,7 +25,7 @@ from skimage.util import random_noise # ############################################################################ # Create phantom for TV denoising -N = 200 +N = 100 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -34,7 +34,7 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) ag = ig # Create noisy data. 
Add Gaussian noise -n1 = random_noise(data, mode = 's&p', seed=10) +n1 = random_noise(data, mode = 's&p', salt_vs_pepper = 0.9) noisy_data = ImageData(n1) plt.imshow(noisy_data.as_array()) @@ -44,7 +44,7 @@ plt.show() #%% # Regularisation Parameter -alpha = 1000 +alpha = 10 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") method = '1' @@ -73,8 +73,8 @@ else: # No Composite # ########################################################################### operator = Gradient(ig) - f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) - g = 0.5 * L1Norm(b = noisy_data) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = L1Norm(b = noisy_data) ########################################################################### #%% @@ -82,8 +82,11 @@ else: normK = operator.norm() print ("normK", normK) # Primal & dual stepsizes -sigma = 1 -tau = 1/(sigma*normK**2) +#sigma = 1 +#tau = 1/(sigma*normK**2) + +sigma = 1/normK +tau = 1/normK opt = {'niter':2000} @@ -122,5 +125,44 @@ plt.show() #plt.show() -#%% -# +#%% Compare with cvx + +try_cvx = input("Do you want CVX comparison (0/1)") + +if try_cvx=='0': + + from cvxpy import * + import sys + sys.path.insert(0,'/Users/evangelos/Desktop/Projects/CCPi/CCPi-Framework/Wrappers/Python/ccpi/optimisation/cvx_scripts') + from cvx_functions import TV_cvx + + u = Variable((N, N)) + fidelity = pnorm( u - noisy_data.as_array(),1) + regulariser = alpha * TV_cvx(u) + solver = MOSEK + obj = Minimize( regulariser + fidelity) + constraints = [] + prob = Problem(obj, constraints) + + # Choose solver (SCS is fast but less accurate than MOSEK) + result = prob.solve(verbose = True, solver = solver) + + print('Objective value is {} '.format(obj.value)) + + diff_pdhg_cvx = np.abs(u.value - res.as_array()) + plt.imshow(diff_pdhg_cvx) + plt.colorbar() + plt.title('|CVX-PDHG|') + plt.show() + + plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX') + plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'PDHG') + plt.legend() + plt.show() + +else: + print('No CVX solution available') + + + + -- cgit v1.2.3 From 11179f54365488d6cdad5d265001ee36ec6c6e42 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:43:04 +0100 Subject: fix for symmetrized gradient --- .../ccpi/optimisation/functions/MixedL21Norm.py | 7 +++--- .../optimisation/operators/GradientOperator.py | 2 +- .../operators/SymmetrizedGradientOperator.py | 25 +++++++++++++--------- 3 files changed, 19 insertions(+), 15 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 1c51236..4266e51 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -126,11 +126,10 @@ if __name__ == '__main__': a1 = f_no_scaled(U) a2 = f_scaled(U) - z = f_no_scaled.proximal_conjugate(U, 1) + z1 = f_no_scaled.proximal_conjugate(U, 1) + z2 = f_scaled.proximal_conjugate(U, 1) - f_no_scaled = MixedL21Norm() - - tmp = [el*el for el in U] + diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 0c267fc..52923af 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -179,7 +179,7 @@ if __name__ == '__main__': a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) 
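# For f = alpha * ||.||_{2,1}, the conjugate proximal used throughout these denoising
# scripts is a pointwise projection onto the l2 ball of radius alpha, independent of
# the step size. A standalone numpy sketch for a two-component field, illustrative
# only — the MixedL21Norm class touched above operates on BlockDataContainers rather
# than plain arrays:
import numpy as np

def project_l2_ball(p1, p2, alpha):
    # prox of the convex conjugate of alpha * ||(p1, p2)||_{2,1}
    scale = np.maximum(np.sqrt(p1**2 + p2**2) / alpha, 1.0)
    return p1 / scale, p2 / scale

p1, p2 = np.random.randn(4, 5), np.random.randn(4, 5)
q1, q2 = project_l2_ball(p1, p2, alpha=0.5)
assert np.all(np.sqrt(q1**2 + q2**2) <= 0.5 + 1e-12)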
# -# c1 = arr + a + c1 = arr + a # c2 = arr + a # c2 = a1 + arr # diff --git a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py index ea3ba8f..c38458d 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SymmetrizedGradientOperator.py @@ -61,8 +61,9 @@ class SymmetrizedGradient(Gradient): tmp[i][j]=FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).adjoint(x.get_item(j)) tmp = numpy.array(tmp) z = 0.5 * (tmp.T + tmp) + z = z.to - return BlockDataContainer(z.tolist()) + return BlockDataContainer(*z.tolist()) def adjoint(self, x, out=None): @@ -70,9 +71,11 @@ class SymmetrizedGradient(Gradient): res = [] for i in range(2): - for j in range(2): - - restmpFiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j)) + tmp = ImageData(np.zeros(x.get_item(0))) + for j in range(2): + tmp += FiniteDiff(self.gm_domain.get_item(0), direction = i, bnd_cond = self.bnd_cond).direct(x.get_item(j)) + res.append(tmp) + return res @@ -136,13 +139,15 @@ if __name__ == '__main__': res = E1.direct(u1) - Dx = FiniteDiff(ig1, direction = 1, bnd_cond = 'Neumann') - Dy = FiniteDiff(ig1, direction = 0, bnd_cond = 'Neumann') - - B = BlockOperator(Dy, Dx) - V = BlockDataContainer(u1,u2) + res1 = E1.adjoint(res) - res = B.adjoint(V) +# Dx = FiniteDiff(ig1, direction = 1, bnd_cond = 'Neumann') +# Dy = FiniteDiff(ig1, direction = 0, bnd_cond = 'Neumann') +# +# B = BlockOperator(Dy, Dx) +# V = BlockDataContainer(u1,u2) +# +# res = B.adjoint(V) # ig = (N,M) # ig2 = (2,) + ig -- cgit v1.2.3 From b4230da4f6699c06eebae32e444507458f4500f6 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 7 Apr 2019 22:43:35 +0100 Subject: add shrinkage operator --- Wrappers/Python/ccpi/optimisation/operators/__init__.py | 1 + 1 file changed, 1 insertion(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/ccpi/optimisation/operators/__init__.py index 63c1320..7040d3a 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/operators/__init__.py @@ -12,6 +12,7 @@ from .BlockOperator import BlockOperator from .BlockScaledOperator import BlockScaledOperator from .SparseFiniteDiff import SparseFiniteDiff +from .ShrinkageOperator import ShrinkageOperator from .FiniteDifferenceOperator import FiniteDiff from .GradientOperator import Gradient -- cgit v1.2.3 From 22f5155892a4de8db0a89d3ee56092a4477cf04a Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Apr 2019 11:44:47 +0100 Subject: Kullback leibler example --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 2 + .../ccpi/optimisation/functions/KullbackLeibler.py | 82 ++++++++++ .../Python/ccpi/optimisation/functions/__init__.py | 1 + .../ccpi/optimisation/operators/BlockOperator.py | 1 + .../optimisation/operators/SparseFiniteDiff.py | 2 +- Wrappers/Python/wip/pdhg_TV_tomography2D_time.py | 45 ++++-- Wrappers/Python/wip/pdhg_tv_denoising_poisson.py | 168 +++++++++++++++++++++ 7 files changed, 285 insertions(+), 16 deletions(-) create mode 100644 Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py create mode 100644 Wrappers/Python/wip/pdhg_tv_denoising_poisson.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py 
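# The ShrinkageOperator exported above is plain soft-thresholding, which is the
# proximal map of the l1 norm; L1Norm(b=...) applies it to x - b and shifts back.
# A numpy-only sketch of both, for illustration (the library version acts on
# DataContainers and uses their sign/abs/maximum methods):
import numpy as np

def soft_threshold(x, tau):
    # prox_{tau * ||.||_1}(x)
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

def prox_l1_translated(x, tau, b):
    # prox of tau * ||. - b||_1
    return b + soft_threshold(x - b, tau)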
index df53e57..f25cdbf 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -139,6 +139,8 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = x y_old = y + + # if isinstance(f, FunctionOperatorComposition): # p1 = f(x) + g(x) # else: diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py new file mode 100644 index 0000000..18af154 --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy +from ccpi.optimisation.functions import Function +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.framework import DataContainer, ImageData, ImageGeometry + +class KullbackLeibler(Function): + + def __init__(self,data,**kwargs): + + super(KullbackLeibler, self).__init__() + + self.b = data + self.bnoise = kwargs.get('bnoise', 0) + + self.sum_value = self.b + self.bnoise + if (self.sum_value.as_array()<0).any(): + self.sum_value = numpy.inf + + def __call__(self, x): + + if self.sum_value==numpy.inf: + return numpy.inf + else: + return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array())) + + + def gradient(self, x): + + #TODO Division check + return 1 - self.b/(x + self.bnoise) + + def convex_conjugate(self, x, out=None): + pass + + def proximal(self, x, tau, out=None): + + z = x + tau * self.bnoise + return (z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt() + + + def proximal_conjugate(self, x, tau, out=None): + pass + + + + +if __name__ == '__main__': + + N, M = 2,3 + ig = ImageGeometry(N, M) + data = ImageData(numpy.random.randint(-10, 100, size=(M, N))) + x = ImageData(numpy.random.randint(-10, 100, size=(M, N))) + + bnoise = ImageData(numpy.random.randint(-100, 100, size=(M, N))) + + f = KullbackLeibler(data, bnoise=bnoise) + print(f.sum_value) + + print(f(x)) + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 9dbb505..65e8848 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -9,4 +9,5 @@ from .BlockFunction import BlockFunction from .FunctionOperatorComposition import FunctionOperatorComposition from .MixedL21Norm import MixedL21Norm from .IndicatorBox import IndicatorBox +from .KullbackLeibler import KullbackLeibler from .Norm2Sq import Norm2sq diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 484dc61..6c080bb 
100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -235,6 +235,7 @@ class BlockOperator(Operator): tmp = sum(res) return ImageData(tmp) else: + return BlockDataContainer(*res) def sum_abs_col(self): diff --git a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py index d54db9b..5e318ff 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py +++ b/Wrappers/Python/ccpi/optimisation/operators/SparseFiniteDiff.py @@ -70,7 +70,7 @@ class SparseFiniteDiff(): def sum_abs_col(self): - res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F')) + res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F') ) res[res==0]=1 return ImageData(res) diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py index e9a85cc..dea8e5c 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D_time.py @@ -13,7 +13,7 @@ from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, Acquisi import numpy as np import matplotlib.pyplot as plt -from ccpi.optimisation.algorithms import PDHG +from ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ @@ -107,26 +107,41 @@ normK = operator.norm() ## Primal & dual stepsizes -sigma = 10 +sigma = 1 tau = 1/(sigma*normK**2) -pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -pdhg.max_iteration = 5000 -pdhg.update_objective_interval = 20 +#sigma = 1/normK +#tau = 1/normK -pdhg.run(5000) +opt = {'niter':2000} -#%% -sol = pdhg.get_output().as_array() -fig = plt.figure() -plt.subplot(1,2,1) -plt.imshow(noisy_data.as_array()) -#plt.colorbar() -plt.subplot(1,2,2) -plt.imshow(sol) -#plt.colorbar() +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) +plt.colorbar() plt.show() +#sigma = 10 +#tau = 1/(sigma*normK**2) +# +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 5000 +#pdhg.update_objective_interval = 20 +# +#pdhg.run(5000) +# +##%% +#sol = pdhg.get_output().as_array() +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() + #%% plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth') diff --git a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py new file mode 100644 index 0000000..9fad6f8 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFun, KullbackLeibler, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction + + +from skimage.util import random_noise + + + +# 
############################################################################ +# Create phantom for TV denoising + +N = 100 +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. Add Gaussian noise +n1 = random_noise(data, mode = 'poisson') +noisy_data = ImageData(n1) + +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.show() + +#%% + +# Regularisation Parameter +alpha = 10 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '1' +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions +# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ +# L2NormSq(0.5, b = noisy_data) ) + + f1 = alpha * MixedL21Norm() + f2 = KullbackLeibler(b = noisy_data) + + f = BlockFunction(f1, f2 ) + g = ZeroFun() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = KullbackLeibler(noisy_data) + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() +print ("normK", normK) +# Primal & dual stepsizes +#sigma = 1 +#tau = 1/(sigma*normK**2) + +sigma = 1/normK +tau = 1/normK + +opt = {'niter':2000} + +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +plt.figure(figsize=(5,5)) +plt.imshow(res.as_array()) +plt.colorbar() +plt.show() + +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 10 +# +#pdhg.run(2000) + + + +#sol = pdhg.get_output().as_array() +##sol = result.as_array() +## +#fig = plt.figure() +#plt.subplot(1,2,1) +#plt.imshow(noisy_data.as_array()) +##plt.colorbar() +#plt.subplot(1,2,2) +#plt.imshow(sol) +##plt.colorbar() +#plt.show() +## + +## +#plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +#plt.legend() +#plt.show() + + +#%% Compare with cvx + +#try_cvx = input("Do you want CVX comparison (0/1)") +# +#if try_cvx=='0': +# +# from cvxpy import * +# import sys +# sys.path.insert(0,'/Users/evangelos/Desktop/Projects/CCPi/CCPi-Framework/Wrappers/Python/ccpi/optimisation/cvx_scripts') +# from cvx_functions import TV_cvx +# +# u = Variable((N, N)) +# fidelity = pnorm( u - noisy_data.as_array(),1) +# regulariser = alpha * TV_cvx(u) +# solver = MOSEK +# obj = Minimize( regulariser + fidelity) +# constraints = [] +# prob = Problem(obj, constraints) +# +# # Choose solver (SCS is fast but less accurate than MOSEK) +# result = prob.solve(verbose = True, solver = solver) +# +# print('Objective value is {} '.format(obj.value)) +# +# diff_pdhg_cvx = np.abs(u.value - res.as_array()) +# plt.imshow(diff_pdhg_cvx) +# plt.colorbar() +# plt.title('|CVX-PDHG|') +# plt.show() +# +# plt.plot(np.linspace(0,N,N), u.value[int(N/2),:], label = 'CVX') +# plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'PDHG') +# plt.legend() +# plt.show() +# +#else: +# print('No CVX solution available') + + + + -- cgit v1.2.3 From 
dd7a2438bdfafc5a2bbbb34a5e80336d12b5e86d Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 8 Apr 2019 14:12:14 +0100 Subject: profile gradient --- Wrappers/Python/wip/test_profile.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 Wrappers/Python/wip/test_profile.py (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/test_profile.py b/Wrappers/Python/wip/test_profile.py new file mode 100644 index 0000000..7be19f9 --- /dev/null +++ b/Wrappers/Python/wip/test_profile.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Mon Apr 8 13:57:46 2019 + +@author: evangelos +""" + +# profile direct, adjoint, gradient + +from ccpi.framework import ImageGeometry +from ccpi.optimisation.operators import Gradient + +N, M = 500, 500 + +ig = ImageGeometry(N, M) + +G = Gradient(ig) + +u = G.domain_geometry().allocate('random_int') +w = G.range_geometry().allocate('random_int') + +for i in range(500): + + res = G.adjoint(w) + \ No newline at end of file -- cgit v1.2.3 From e7a0152d17e03df79db6cd3cd50d07202f9d185d Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 9 Apr 2019 09:45:07 +0100 Subject: profile out --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 30 +++++++-- .../ccpi/optimisation/operators/BlockOperator.py | 53 +++++++++++----- .../operators/FiniteDifferenceOperator.py | 56 ++++++++++++++--- .../optimisation/operators/GradientOperator.py | 71 +++++++++++++++++++--- 4 files changed, 171 insertions(+), 39 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index f25cdbf..e9bd801 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -125,19 +125,39 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): for i in range(niter): - # Gradient descent, Dual problem solution - y_tmp = y_old + sigma * operator.direct(xbar) +# # Gradient descent, Dual problem solution +# y_tmp = y_old + sigma * operator.direct(xbar) + y_tmp = operator.direct(xbar) + y_tmp *= sigma + y_tmp +=y_old + y = f.proximal_conjugate(y_tmp, sigma) # Gradient ascent, Primal problem solution - x_tmp = x_old - tau * operator.adjoint(y) +# x_tmp = x_old - tau * operator.adjoint(y) + + x_tmp = operator.adjoint(y) + x_tmp *=-tau + x_tmp +=x_old + x = g.proximal(x_tmp, tau) #Update - xbar = x + theta * (x - x_old) +# xbar = x + theta * (x - x_old) + xbar = x - x_old + xbar *= theta + xbar += x x_old = x - y_old = y + y_old = y + +# operator.direct(xbar, out = y_tmp) +# y_tmp *= sigma +# y_tmp +=y_old + + + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index 6c080bb..c6a7f95 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -114,20 +114,35 @@ class BlockOperator(Operator): BlockOperator work on BlockDataContainer, but they will work on DataContainers and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) ''' + if not isinstance (x, BlockDataContainer): x_b = BlockDataContainer(x) else: x_b = x shape = self.get_output_shape(x_b.shape) res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): - if col == 0: - prod = self.get_item(row,col).direct(x_b.get_item(col)) - else: - prod += 
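# A rough timing sketch for what this "profile out" patch is after: the out= variants
# added below avoid allocating a new container on every call. It assumes the same
# Gradient API exercised in test_profile.py above, including the out= overloads
# introduced by this patch, so it is only meaningful once the patch is applied;
# the numbers are indicative, not a benchmark.
import timeit
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient

ig = ImageGeometry(500, 500)
G = Gradient(ig)
w = G.range_geometry().allocate('random_int')
res = G.domain_geometry().allocate()

t_alloc = timeit.timeit(lambda: G.adjoint(w), number=100)              # allocates each call
t_inplace = timeit.timeit(lambda: G.adjoint(w, out=res), number=100)   # reuses res
print('adjoint, fresh allocation:', t_alloc)
print('adjoint, out= reuse      :', t_inplace)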
self.get_item(row,col).direct(x_b.get_item(col)) - res.append(prod) - return BlockDataContainer(*res, shape=shape) + + if out is None: + + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + prod = self.get_item(row,col).direct(x_b.get_item(col)) + else: + prod += self.get_item(row,col).direct(x_b.get_item(col)) + res.append(prod) + return BlockDataContainer(*res, shape=shape) + + else: + + tmp = self.range_geometry().allocate() + for row in range(self.shape[0]): + for col in range(self.shape[1]): + if col == 0: + self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col)) + else: + self.get_item(row,col).direct(x_b.get_item(col), out=out) + out+=tmp def adjoint(self, x, out=None): '''Adjoint operation for the BlockOperator @@ -258,13 +273,11 @@ if __name__ == '__main__': from ccpi.framework import ImageGeometry from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff - from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer - from ccpi.optimisation.operators import Operator, LinearOperator - - + M, N = 4, 3 ig = ImageGeometry(M, N) - arr = ig.allocate('random_int') + arr = ig.allocate('random_int') + G = Gradient(ig) Id = Identity(ig) @@ -294,11 +307,19 @@ if __name__ == '__main__': # ttt = B.sum_abs_col() # - numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4) - numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4) - numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4) + #TODO this is not working +# numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4) +# numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4) +# numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4) + + u = ig.allocate('random_int') + + z1 = B.direct(u) + res = B.range_geometry().allocate() + B.direct(u, out = res) + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 0faba22..3d2a96b 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -29,6 +29,7 @@ class FiniteDiff(LinearOperator): '''FIXME: domain and range should be geometries''' self.gm_domain = gm_domain self.gm_range = gm_range + self.direction = direction self.bnd_cond = bnd_cond @@ -50,9 +51,16 @@ class FiniteDiff(LinearOperator): x_sz = len(x.shape) if out is None: - out = np.zeros(x.shape) + out = np.zeros_like(x_asarr) + fd_arr = out + else: + fd_arr = out.as_array() - fd_arr = out +# if out is None: +# out = self.gm_domain.allocate().as_array() +# +# fd_arr = out.as_array() +# fd_arr = self.gm_domain.allocate().as_array() ######################## Direct for 2D ############################### if x_sz == 2: @@ -162,8 +170,9 @@ class FiniteDiff(LinearOperator): else: raise NotImplementedError - res = out/self.voxel_size - return type(x)(res) +# res = out #/self.voxel_size + return type(x)(out) + def adjoint(self, x, out=None): @@ -172,9 +181,17 @@ class FiniteDiff(LinearOperator): x_sz = len(x.shape) if out is None: - out = np.zeros(x.shape) + out = np.zeros_like(x_asarr) + fd_arr = out + else: + fd_arr = out.as_array() - fd_arr = out +# if out is None: +# out = 
self.gm_domain.allocate().as_array() +# fd_arr = out +# else: +# fd_arr = out.as_array() +## fd_arr = self.gm_domain.allocate().as_array() ######################## Adjoint for 2D ############################### if x_sz == 2: @@ -299,8 +316,8 @@ class FiniteDiff(LinearOperator): else: raise NotImplementedError - res = out/self.voxel_size - return type(x)(-res) + out *= -1 #/self.voxel_size + return type(x)(out) def range_geometry(self): '''Returns the range geometry''' @@ -317,6 +334,29 @@ class FiniteDiff(LinearOperator): return self.s1 +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + + N, M = 2, 3 + + ig = ImageGeometry(N, M) + + + FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') + u = FD.domain_geometry().allocate('random_int') + + + res = FD.domain_geometry().allocate() + FD.direct(u, out=res) + print(res.as_array()) +# z = FD.direct(u) + +# print(z.as_array(), res.as_array()) + + +# w = G.range_geometry().allocate('random_int') + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 52923af..54456cc 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -38,22 +38,47 @@ class Gradient(LinearOperator): else: raise ValueError('No channels to correlate') - self.bnd_cond = bnd_cond + self.bnd_cond = bnd_cond + + # Call FiniteDiff operator + + self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond) def direct(self, x, out=None): - tmp = self.gm_range.allocate() - for i in range(tmp.shape[0]): - tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) - return tmp + + if out is not None: + for i in range(self.gm_range.shape[0]): + self.FD.direction=self.ind[i] + self.FD.direct(x, out = out[i]) +# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x, out=out[i]) + return out + else: + tmp = self.gm_range.allocate() + for i in range(tmp.shape[0]): + self.FD.direction=self.ind[i] + tmp.get_item(i).fill(self.FD.direct(x)) +# tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) + return tmp def adjoint(self, x, out=None): - tmp = self.gm_domain.allocate() - for i in range(x.shape[0]): - tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) - return tmp + if out is not None: + + tmp = self.gm_domain.allocate() + for i in range(x.shape[0]): + self.FD.direction=self.ind[i] + self.FD.adjoint(x.get_item(i), out = tmp) +# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) + out-=tmp + else: + tmp = self.gm_domain.allocate() + for i in range(x.shape[0]): + self.FD.direction=self.ind[i] + tmp+=self.FD.adjoint(x.get_item(i)) +# tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) + return tmp def domain_geometry(self): @@ -109,6 +134,7 @@ if __name__ == '__main__': from ccpi.optimisation.operators import Identity, BlockOperator + M, N = 2, 3 ig = ImageGeometry(M, N) arr = ig.allocate('random_int' ) @@ -179,7 +205,32 @@ if __name__ == '__main__': a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) # - c1 = arr + a +# c1 = arr + a # c2 = arr + a # c2 = a1 + arr + + from ccpi.framework import ImageGeometry +# from 
ccpi.optimisation.operators import Gradient +# + N, M = 2, 3 +# + ig = ImageGeometry(N, M) +# + G = Gradient(ig) +# + u = G.domain_geometry().allocate('random_int') + w = G.range_geometry().allocate('random_int') +# +# + res = G.range_geometry().allocate() +# + G.direct(u, out=res) + z = G.direct(u) +# + print(res[0].as_array()) + print(z[0].as_array()) +# +## LHS = (G.direct(u)*w).sum() +## RHS = (u * G.adjoint(w)).sum() + # -- cgit v1.2.3 From bc7b43bfab120134ff761de707202aad10883fbe Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 10 Apr 2019 11:09:47 +0100 Subject: wip for with and without operators, functions --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 101 ++++++++++++----- .../ccpi/optimisation/functions/BlockFunction.py | 119 +++++++++++++++++++-- .../ccpi/optimisation/functions/L2NormSquared.py | 30 +++++- .../ccpi/optimisation/functions/MixedL21Norm.py | 73 ++++++++++--- .../ccpi/optimisation/functions/ScaledFunction.py | 59 +++++++++- .../ccpi/optimisation/operators/BlockOperator.py | 91 +++++++++++----- .../optimisation/operators/GradientOperator.py | 38 +++++-- .../ccpi/optimisation/operators/ScaledOperator.py | 16 ++- Wrappers/Python/wip/pdhg_TV_denoising.py | 21 +++- .../Python/wip/pdhg_TV_denoising_salt_pepper.py | 36 ++++--- Wrappers/Python/wip/test_profile.py | 51 ++++++++- 11 files changed, 521 insertions(+), 114 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index e9bd801..3b81d98 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -111,7 +111,7 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x = x_old y_tmp = y_old - y = y_tmp + y = y_old # relaxation parameter theta = 1 @@ -125,35 +125,82 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): for i in range(niter): -# # Gradient descent, Dual problem solution -# y_tmp = y_old + sigma * operator.direct(xbar) - y_tmp = operator.direct(xbar) - y_tmp *= sigma - y_tmp +=y_old - - y = f.proximal_conjugate(y_tmp, sigma) - - # Gradient ascent, Primal problem solution -# x_tmp = x_old - tau * operator.adjoint(y) - - x_tmp = operator.adjoint(y) - x_tmp *=-tau - x_tmp +=x_old + if not memopt: + + y_tmp = y_old + sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_tmp, sigma) + + x_tmp = x_old - tau * operator.adjoint(y) + x = g.proximal(x_tmp, tau) + + xbar = x + theta * (x - x_old) + + x_old = x + y_old = y - x = g.proximal(x_tmp, tau) - #Update -# xbar = x + theta * (x - x_old) - xbar = x - x_old - xbar *= theta - xbar += x - - x_old = x - y_old = y - -# operator.direct(xbar, out = y_tmp) + else: + +# operator.direct(xbar, out = y_tmp) +# y_tmp.__imul__(sigma) +# y_tmp.__iadd__(y_old) + +# y_tmp *= sigma +# y_tmp += y_old + + y_tmp = y_old + sigma * operator.direct(xbar) + f.proximal_conjugate(y_tmp, sigma, out=y) + + x_tmp = x_old - tau * operator.adjoint(y) + +# operator.adjoint(y, out = x_tmp) +# z = x_tmp +# x_tmp = x_old - tau * z + +# x_tmp *= -tau +# x_tmp += x_old + + g.proximal(x_tmp, tau, out = x) + + xbar = x - x_old + xbar *= theta + xbar += x + + + +# pass +# +## # Gradient descent, Dual problem solution +## y_tmp = y_old + sigma * operator.direct(xbar) +# y_tmp = operator.direct(xbar) # y_tmp *= sigma -# y_tmp +=y_old +# y_tmp +=y_old +# +# y = f.proximal_conjugate(y_tmp, sigma) +## f.proximal_conjugate(y_tmp, sigma, out = y) +# +# # Gradient 
ascent, Primal problem solution +## x_tmp = x_old - tau * operator.adjoint(y) +# +# x_tmp = operator.adjoint(y) +# x_tmp *=-tau +# x_tmp +=x_old +# +# x = g.proximal(x_tmp, tau) +## g.proximal(x_tmp, tau, out = x) +# +# #Update +## xbar = x + theta * (x - x_old) +# xbar = x - x_old +# xbar *= theta +# xbar += x +# +# x_old = x +# y_old = y +# +## operator.direct(xbar, out = y_tmp) +## y_tmp *= sigma +## y_tmp +=y_old diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 81c16cd..a74a215 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -13,6 +13,7 @@ from ccpi.framework import BlockDataContainer from numbers import Number class BlockFunction(Function): + '''A Block vector of Functions .. math:: @@ -52,15 +53,29 @@ class BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): '''proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + + if out is None: + + out = [None]*self.length + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) + + return BlockDataContainer(*out) + else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) + + if isinstance(tau, Number): + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau, out = out.get_item(i)) + else: + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i), out = out) + + def proximal(self, x, tau, out = None): '''proximal does not take into account the BlockOperator''' @@ -76,4 +91,90 @@ class BlockFunction(Function): def gradient(self,x, out=None): '''FIXME: gradient returns pass''' - pass \ No newline at end of file + pass + + +if __name__ == '__main__': + + M, N, K = 2,3,5 + + from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm + from ccpi.framework import ImageGeometry, BlockGeometry + from ccpi.optimisation.operators import Gradient, Identity, BlockOperator + import numpy + + + ig = ImageGeometry(M, N) + BG = BlockGeometry(ig, ig) + + u = ig.allocate('random_int') + B = BlockOperator( Gradient(ig), Identity(ig) ) + + U = B.direct(u) + b = ig.allocate('random_int') + + f1 = 10 * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b=b) + + f = BlockFunction(f1, f2) + tau = 0.3 + + print( " without out " ) + res_no_out = f.proximal_conjugate( U, tau) + res_out = B.range_geometry().allocate() + f.proximal_conjugate( U, tau, out = res_out) + + numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \ + res_out[0].as_array(), decimal=4) + + + + + + + + ########################################################################## + + + + + + + +# zzz = B.range_geometry().allocate('random_int') +# www = B.range_geometry().allocate() +# www.fill(zzz) + +# res[0].fill(z) + + + + +# f.proximal_conjugate(z, sigma, out = res) + +# print(z1[0][0].as_array()) +# print(res[0][0].as_array()) + + + + +# U = BG.allocate('random_int') +# RES = BG.allocate() +# f = BlockFunction(f1, f2) +# +# z = f.proximal_conjugate(U, 0.2) 
+# f.proximal_conjugate(U, 0.2, out = RES) +# +# print(z[0].as_array()) +# print(RES[0].as_array()) +# +# print(z[1].as_array()) +# print(RES[1].as_array()) + + + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index f96c7a1..d5e527a 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -121,12 +121,12 @@ class L2NormSquared(Function): # change the order cannot add ImageData + NestedBlock return (x - tau*self.b)/(1 + tau/2) else: - return x/(1 + tau/2 ) + return x/(1 + tau/2) else: if self.b is not None: - out.fill((x - tau*self.b)/(1 + tau/2)) + out.fill( (x - tau*self.b)/(1 + tau/2) ) else: - out.fill(x/(1 + tau/2 )) + out.fill( x/(1 + tau/2) ) def __rmul__(self, scalar): return ScaledFunction(self, scalar) @@ -227,7 +227,29 @@ if __name__ == '__main__': (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ - ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + + + + print( " ####### check without out ######### " ) + + + u_out_no_out = ig.allocate('random_int') + res_no_out = f_scaled_data.proximal_conjugate(u_out_no_out, 0.5) + print(res_no_out.as_array()) + + print( " ####### check with out ######### " ) + + res_out = ig.allocate() + f_scaled_data.proximal_conjugate(u_out_no_out, 0.5, out = res_out) + + print(res_out.as_array()) + + numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \ + res_out.as_array(), decimal=4) + + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 4266e51..dd463c0 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -78,26 +78,44 @@ class MixedL21Norm(Function): def proximal_conjugate(self, x, tau, out=None): + + if self.SymTensor: - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res + if out is None: + param = [1]*x.shape[0] + param[-1] = 2 + tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] + frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] + res = BlockDataContainer(*frac) + return res + else: + pass + # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha # res = x.divide(ImageData(tmp2).maximum(1.0)) else: + if out is None: + tmp = [ el*el for el in x] res = (sum(tmp).sqrt()).maximum(1.0) frac = [x[i]/res for i in range(x.shape[0])] res = BlockDataContainer(*frac) - - return res + + return res + + else: + + tmp = [ el*el for el in x] + res = (sum(tmp).sqrt()).maximum(1.0) + frac = [x[i]/res for i in range(x.shape[0])] +# res = (sum(x**2).sqrt()).maximum(1.0) +# return x/res + out.fill(frac) + + def __rmul__(self, scalar): return ScaledFunction(self, scalar) @@ -111,11 +129,14 @@ class MixedL21Norm(Function): if __name__ == '__main__': M, N, K = 2,3,5 - ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) - u1 = ig.allocate('random_int') - u2 = ig.allocate('random_int') + from ccpi.framework import BlockGeometry + import numpy - U = BlockDataContainer(u1, u2, 
shape=(2,1)) + ig = ImageGeometry(M, N) + + BG = BlockGeometry(ig, ig) + + U = BG.allocate('random_int') # Define no scale and scaled f_no_scaled = MixedL21Norm() @@ -125,9 +146,31 @@ if __name__ == '__main__': a1 = f_no_scaled(U) a2 = f_scaled(U) + print(a1, 2*a2) + + + print( " ####### check without out ######### " ) + + + u_out_no_out = BG.allocate('random_int') + res_no_out = f_scaled.proximal_conjugate(u_out_no_out, 0.5) + print(res_no_out[0].as_array()) + + print( " ####### check with out ######### " ) +# + res_out = BG.allocate() + f_scaled.proximal_conjugate(u_out_no_out, 0.5, out = res_out) +# + print(res_out[0].as_array()) +# + numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \ + res_out[0].as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \ + res_out[1].as_array(), decimal=4) +# + - z1 = f_no_scaled.proximal_conjugate(U, 1) - z2 = f_scaled.proximal_conjugate(U, 1) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 046a4a6..9fcd4fc 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -61,7 +61,8 @@ class ScaledFunction(object): if out is None: return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) else: - out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out = out) + out *= self.scalar def grad(self, x): '''Alias of gradient(x,None)''' @@ -89,3 +90,59 @@ class ScaledFunction(object): return self.function.proximal(x, tau*self.scalar) else: out.fill( self.function.proximal(x, tau*self.scalar) ) + +if __name__ == '__main__': + + from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm + from ccpi.framework import ImageGeometry, BlockGeometry + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + + u = ig.allocate('random_int') + b = ig.allocate('random_int') + + BG = BlockGeometry(ig, ig) + U = BG.allocate('random_int') + + f2 = 0.5 * L2NormSquared(b=b) + f1 = 30 * MixedL21Norm() + tau = 0.355 + + res_no_out1 = f1.proximal_conjugate(U, tau) + res_no_out2 = f2.proximal_conjugate(u, tau) + + +# print( " ######## with out ######## ") + res_out1 = BG.allocate() + res_out2 = ig.allocate() + + f1.proximal_conjugate(U, tau, out = res_out1) + f2.proximal_conjugate(u, tau, out = res_out2) + + + numpy.testing.assert_array_almost_equal(res_no_out1[0].as_array(), \ + res_out1[0].as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(res_no_out2.as_array(), \ + res_out2.as_array(), decimal=4) + + + + + + + + + + + + + + + + + + + + diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index c6a7f95..1d77510 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -139,11 +139,16 @@ class BlockOperator(Operator): for row in range(self.shape[0]): for col in range(self.shape[1]): if col == 0: - self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col)) + self.get_item(row,col).direct( + x_b.get_item(col), + out=out.get_item(row)) else: - self.get_item(row,col).direct(x_b.get_item(col), out=out) - out+=tmp - + a = out.get_item(row) + self.get_item(row,col).direct( + x_b.get_item(col), 
+ out=tmp.get_item(row)) + a += tmp.get_item(row) + def adjoint(self, x, out=None): '''Adjoint operation for the BlockOperator @@ -156,36 +161,72 @@ class BlockOperator(Operator): Raises: ValueError if the contained Operators are not linear ''' - if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): + if not self.is_linear(): raise ValueError('Not all operators in Block are linear.') if not isinstance (x, BlockDataContainer): x_b = BlockDataContainer(x) else: x_b = x shape = self.get_output_shape(x_b.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row, col).adjoint(x_b.get_item(col)) - else: - prod += self.get_item(row, col).adjoint(x_b.get_item(col)) - res.append(prod) - if self.shape[1]==1: - return ImageData(*res) + if out is None: + res = [] + for col in range(self.shape[1]): + for row in range(self.shape[0]): + if row == 0: + prod = self.get_item(row, col).adjoint(x_b.get_item(row)) + else: + prod += self.get_item(row, col).adjoint(x_b.get_item(row)) + res.append(prod) + if self.shape[1]==1: + return ImageData(*res) + else: + return BlockDataContainer(*res, shape=shape) else: - return BlockDataContainer(*res, shape=shape) - + #tmp = self.domain_geometry().allocate() + + for col in range(self.shape[1]): + for row in range(self.shape[0]): + if row == 0: + if issubclass(out.__class__, DataContainer): + self.get_item(row, col).adjoint( + x_b.get_item(row), + out=out) + else: + op = self.get_item(row,col) + self.get_item(row, col).adjoint( + x_b.get_item(row), + out=out.get_item(col)) + else: + if issubclass(out.__class__, DataContainer): + out += self.get_item(row,col).adjoint( + x_b.get_item(row)) + else: + a = out.get_item(col) + a += self.get_item(row,col).adjoint( + x_b.get_item(row), + ) + def is_linear(self): + '''returns whether all the elements of the BlockOperator are linear''' + return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True) + def get_output_shape(self, xshape, adjoint=False): - sshape = self.shape[1] - oshape = self.shape[0] + '''returns the shape of the output BlockDataContainer + + A(N,M) direct u(M,1) -> N,1 + A(N,M)^T adjoint u(N,1) -> M,1 + ''' + rows , cols = self.shape + xrows, xcols = xshape + if xcols != 1: + raise ValueError('BlockDataContainer cannot have more than 1 column') if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - return (oshape, xshape[-1]) - + if rows != xrows: + raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) + return (cols,xcols) + if cols != xrows: + raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape)) + return (rows,xcols) + def __rmul__(self, scalar): '''Defines the left multiplication with a scalar diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 54456cc..9c573cb 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -71,7 +71,7 @@ class Gradient(LinearOperator): self.FD.direction=self.ind[i] self.FD.adjoint(x.get_item(i), out = tmp) # FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) - out-=tmp + out+=tmp else: tmp = self.gm_domain.allocate() for i in range(x.shape[0]): @@ -220,16 +220,38 
@@ if __name__ == '__main__': # u = G.domain_geometry().allocate('random_int') w = G.range_geometry().allocate('random_int') + + + print( "################ without out #############") + + print( (G.direct(u)*w).sum(), (u*G.adjoint(w)).sum() ) + + + print( "################ with out #############") + + res = G.range_geometry().allocate() + res1 = G.domain_geometry().allocate() + G.direct(u, out = res) + G.adjoint(w, out = res1) + + print( (res*w).sum(), (u*res1).sum() ) + + + # # - res = G.range_geometry().allocate() -# - G.direct(u, out=res) - z = G.direct(u) -# - print(res[0].as_array()) - print(z[0].as_array()) +# res = G.range_geometry().allocate() +## +# G.direct(u, out=res) +# z = G.direct(u) +## +# print(res[0].as_array()) +# print(z[0].as_array()) # + + + + ## LHS = (G.direct(u)*w).sum() ## RHS = (u * G.adjoint(w)).sum() diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py index adcc6d9..0d5030c 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -3,12 +3,10 @@ import numpy class ScaledOperator(object): '''ScaledOperator - A class to represent the scalar multiplication of an Operator with a scalar. It holds an operator and a scalar. Basically it returns the multiplication of the result of direct and adjoint of the operator with the scalar. For the rest it behaves like the operator it holds. - Args: operator (Operator): a Operator or LinearOperator scalar (Number): a scalar multiplier @@ -28,10 +26,18 @@ class ScaledOperator(object): self.scalar = scalar self.operator = operator def direct(self, x, out=None): - return self.scalar * self.operator.direct(x, out=out) + if out is None: + return self.scalar * self.operator.direct(x, out=out) + else: + self.operator.direct(x, out=out) + out *= self.scalar def adjoint(self, x, out=None): if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) + if out is None: + return self.scalar * self.operator.adjoint(x, out=out) + else: + self.operator.adjoint(x, out=out) + out *= self.scalar else: raise TypeError('Operator is not linear') def norm(self): @@ -40,3 +46,5 @@ class ScaledOperator(object): return self.operator.range_geometry() def domain_geometry(self): return self.operator.domain_geometry() + def is_linear(self): + return self.operator.is_linear() \ No newline at end of file diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index d871ba0..feb09ee 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -24,7 +24,7 @@ from skimage.util import random_noise # ############################################################################ # Create phantom for TV denoising -N = 200 +N = 500 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -45,7 +45,7 @@ plt.show() alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' +method = '0' if method == '0': @@ -83,14 +83,27 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -opt = {'niter':2000} +opt = {'niter':100} +opt1 = {'niter':100, 'memopt': True} +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) - + 
plt.figure(figsize=(5,5)) plt.imshow(res.as_array()) plt.colorbar() plt.show() + +plt.figure(figsize=(5,5)) +plt.imshow(res1.as_array()) +plt.colorbar() +plt.show() + + +plt.figure(figsize=(5,5)) +plt.imshow(np.abs(res1.as_array()-res.as_array())) +plt.colorbar() +plt.show() #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) #pdhg.max_iteration = 2000 diff --git a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py index eb7eef4..cec9770 100644 --- a/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising_salt_pepper.py @@ -34,7 +34,7 @@ ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) ag = ig # Create noisy data. Add Gaussian noise -n1 = random_noise(data, mode = 's&p', salt_vs_pepper = 0.9) +n1 = random_noise(data, mode = 's&p', salt_vs_pepper = 0.9, amount=0.2) noisy_data = ImageData(n1) plt.imshow(noisy_data.as_array()) @@ -44,10 +44,10 @@ plt.show() #%% # Regularisation Parameter -alpha = 10 +alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' +method = '0' if method == '0': # Create operators @@ -78,15 +78,27 @@ else: ########################################################################### #%% -# Compute operator Norm -normK = operator.norm() -print ("normK", normK) -# Primal & dual stepsizes -#sigma = 1 -#tau = 1/(sigma*normK**2) - -sigma = 1/normK -tau = 1/normK +diag_precon = True + +if diag_precon: + + def tau_sigma_precond(operator): + + tau = 1/operator.sum_abs_row() + sigma = 1/ operator.sum_abs_col() + + return tau, sigma + + tau, sigma = tau_sigma_precond(operator) + +else: + # Compute operator Norm + normK = operator.norm() + print ("normK", normK) + # Primal & dual stepsizes + sigma = 1/normK + tau = 1/normK +# tau = 1/(sigma*normK**2) opt = {'niter':2000} diff --git a/Wrappers/Python/wip/test_profile.py b/Wrappers/Python/wip/test_profile.py index 7be19f9..a97ad8d 100644 --- a/Wrappers/Python/wip/test_profile.py +++ b/Wrappers/Python/wip/test_profile.py @@ -9,18 +9,59 @@ Created on Mon Apr 8 13:57:46 2019 # profile direct, adjoint, gradient from ccpi.framework import ImageGeometry -from ccpi.optimisation.operators import Gradient +from ccpi.optimisation.operators import Gradient, BlockOperator, Identity -N, M = 500, 500 +N, M, K = 200, 300, 100 -ig = ImageGeometry(N, M) +ig = ImageGeometry(N, M, K) G = Gradient(ig) +Id = Identity(ig) u = G.domain_geometry().allocate('random_int') w = G.range_geometry().allocate('random_int') -for i in range(500): + +res = G.range_geometry().allocate() +res1 = G.domain_geometry().allocate() +# +# +#LHS = (G.direct(u)*w).sum() +#RHS = (u * G.adjoint(w)).sum() +# +#print(G.norm()) +#print(LHS, RHS) +# +##%%%re +## +#G.direct(u, out=res) +#G.adjoint(w, out=res1) +## +#LHS1 = (res * w).sum() +#RHS1 = (u * res1).sum() +## +#print(LHS1, RHS1) + +B = BlockOperator(2*G, 3*Id) +uB = B.domain_geometry().allocate('random_int') +resB = B.range_geometry().allocate() + +#z2 = B.direct(uB) +#B.direct(uB, out = resB) + +#%% + +for i in range(100): +# +# z2 = B.direct(uB) +# + B.direct(uB, out = resB) + +# z1 = G.adjoint(w) +# z = G.direct(u) + +# G.adjoint(w, out=res1) + +# G.direct(u, out=res) - res = G.adjoint(w) \ No newline at end of file -- cgit v1.2.3 From ebff46e78ce4ecc442c43fe44d4049d69bc8cc0a Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 10 Apr 2019 11:11:39 +0100 Subject: wip for with and without operators, functions --- 
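A minimal sketch of the convention these wip commits are converging on — each proximal_conjugate/direct/adjoint should give the same result whether it allocates its output or fills a preallocated `out` — mirroring the __main__ checks added in the diffs below (the geometry size and tau value are illustrative, not taken from the patch):

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.functions import L2NormSquared
    import numpy

    ig = ImageGeometry(4, 3)
    u = ig.allocate('random_int')
    b = ig.allocate('random_int')

    f = 0.5 * L2NormSquared(b=b)   # ScaledFunction wrapping L2NormSquared
    tau = 0.4                      # illustrative step size

    # without out: the function allocates and returns its result
    z = f.proximal_conjugate(u, tau)

    # with out: the result is written into a preallocated container
    res = ig.allocate()
    f.proximal_conjugate(u, tau, out=res)

    # both code paths are expected to agree
    numpy.testing.assert_array_almost_equal(z.as_array(), res.as_array(), decimal=4)
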
Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index a74a215..14105b5 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -124,8 +124,14 @@ if __name__ == '__main__': res_out = B.range_geometry().allocate() f.proximal_conjugate( U, tau, out = res_out) - numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \ - res_out[0].as_array(), decimal=4) + numpy.testing.assert_array_almost_equal(res_no_out[0][0].as_array(), \ + res_out[0][0].as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(res_no_out[0][1].as_array(), \ + res_out[0][1].as_array(), decimal=4) + + numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \ + res_out[1].as_array(), decimal=4) -- cgit v1.2.3 From 58229d793fc2a24ff65d3128435e4351e3b7e73d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 11:30:28 +0100 Subject: Direct adjoint out (#236) * fix direct out * test blockoperator adjoint and direct with out * add missing method and code for memory optimisation * untrack build directory * fix imports * fixed adjoint without out parameter * fix adjoint operator of Gradient, added test --- Wrappers/Python/build/lib/ccpi/__init__.py | 18 - .../build/lib/ccpi/framework/BlockDataContainer.py | 337 ----- .../build/lib/ccpi/framework/BlockGeometry.py | 38 - .../Python/build/lib/ccpi/framework/__init__.py | 26 - .../Python/build/lib/ccpi/framework/framework.py | 1496 -------------------- Wrappers/Python/build/lib/ccpi/io/__init__.py | 18 - Wrappers/Python/build/lib/ccpi/io/reader.py | 500 ------- .../Python/build/lib/ccpi/optimisation/__init__.py | 18 - .../lib/ccpi/optimisation/algorithms/Algorithm.py | 158 --- .../build/lib/ccpi/optimisation/algorithms/CGLS.py | 87 -- .../build/lib/ccpi/optimisation/algorithms/FBPD.py | 86 -- .../lib/ccpi/optimisation/algorithms/FISTA.py | 121 -- .../optimisation/algorithms/GradientDescent.py | 76 - .../build/lib/ccpi/optimisation/algorithms/PDHG.py | 155 -- .../lib/ccpi/optimisation/algorithms/__init__.py | 32 - .../Python/build/lib/ccpi/optimisation/algs.py | 319 ----- .../Python/build/lib/ccpi/optimisation/funcs.py | 272 ---- .../ccpi/optimisation/functions/BlockFunction.py | 79 -- .../lib/ccpi/optimisation/functions/Function.py | 69 - .../functions/FunctionOperatorComposition.py | 65 - .../ccpi/optimisation/functions/IndicatorBox.py | 65 - .../lib/ccpi/optimisation/functions/L1Norm.py | 92 -- .../ccpi/optimisation/functions/L2NormSquared.py | 233 --- .../ccpi/optimisation/functions/MixedL21Norm.py | 136 -- .../lib/ccpi/optimisation/functions/Norm2Sq.py | 98 -- .../ccpi/optimisation/functions/ScaledFunction.py | 91 -- .../lib/ccpi/optimisation/functions/ZeroFun.py | 60 - .../lib/ccpi/optimisation/functions/__init__.py | 13 - .../lib/ccpi/optimisation/functions/functions.py | 312 ---- .../ccpi/optimisation/functions/mixed_L12Norm.py | 56 - .../ccpi/optimisation/operators/BlockOperator.py | 223 --- .../optimisation/operators/BlockScaledOperator.py | 67 - .../operators/FiniteDifferenceOperator.py | 322 ----- .../optimisation/operators/GradientOperator.py | 186 --- .../optimisation/operators/IdentityOperator.py | 79 -- .../ccpi/optimisation/operators/LinearOperator.py | 22 - .../lib/ccpi/optimisation/operators/Operator.py | 
30 - .../ccpi/optimisation/operators/ScaledOperator.py | 42 - .../optimisation/operators/SparseFiniteDiff.py | 144 -- .../operators/SymmetrizedGradientOperator.py | 118 -- .../ccpi/optimisation/operators/ZeroOperator.py | 39 - .../lib/ccpi/optimisation/operators/__init__.py | 19 - Wrappers/Python/build/lib/ccpi/optimisation/ops.py | 294 ---- .../Python/build/lib/ccpi/optimisation/spdhg.py | 338 ----- Wrappers/Python/build/lib/ccpi/processors.py | 514 ------- .../Python/ccpi/optimisation/algorithms/PDHG.py | 4 - .../ccpi/optimisation/operators/BlockOperator.py | 91 +- .../optimisation/operators/GradientOperator.py | 2 +- .../ccpi/optimisation/operators/ScaledOperator.py | 14 +- Wrappers/Python/test/test_Operator.py | 318 ++++- Wrappers/Python/test/test_functions.py | 2 +- 51 files changed, 397 insertions(+), 7597 deletions(-) delete mode 100644 Wrappers/Python/build/lib/ccpi/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/framework.py delete mode 100644 Wrappers/Python/build/lib/ccpi/io/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/io/reader.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algs.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/funcs.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py delete mode 100644 
Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/ops.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py delete mode 100644 Wrappers/Python/build/lib/ccpi/processors.py (limited to 'Wrappers') diff --git a/Wrappers/Python/build/lib/ccpi/__init__.py b/Wrappers/Python/build/lib/ccpi/__init__.py deleted file mode 100644 index cf2d93d..0000000 --- a/Wrappers/Python/build/lib/ccpi/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py deleted file mode 100644 index 21ef3f0..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 16:04:45 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import DataContainer -#from ccpi.framework import AcquisitionData, ImageData -#from ccpi.optimisation.operators import Operator, LinearOperator - -class BlockDataContainer(object): - '''Class to hold DataContainers as column vector''' - __array_priority__ = 1 - def __init__(self, *args, **kwargs): - '''''' - self.containers = args - self.index = 0 - #shape = kwargs.get('shape', None) - #if shape is None: - # shape = (len(args),1) - shape = (len(args),1) - self.shape = shape - #print (self.shape) - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements, len(args))) - - - def __iter__(self): - '''BlockDataContainer is Iterable''' - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 2 objects fit''' - - for i in range(len(self.containers)): - if type(self.containers[i])==type(self): - self = self.containers[i] - - if isinstance(other, Number): - return True - elif isinstance(other, list): - for ot in other: - if not isinstance(ot, (Number,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - raise ValueError('List/ numpy array can only contain numbers {}'\ - .format(type(ot))) - return len(self.containers) == len(other) - elif isinstance(other, numpy.ndarray): - return len(self.containers) == len(other) - elif issubclass(other.__class__, DataContainer): - return self.get_item(0).shape == other.shape - return len(self.containers) == len(other.containers) - - def get_item(self, row): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - return self.containers[row] - - def __getitem__(self, row): - return self.get_item(row) - - def add(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for add') - out = kwargs.get('out', None) - #print ("args" , *args) - if isinstance(other, Number): - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - - return type(self)( - *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def subtract(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for subtract') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def multiply(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('{} Incompatible for multiply'.format(other)) - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def divide(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for divide') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def power(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for power') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - def maximum(self,other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for maximum') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - ## unary operations - def abs(self, *args, **kwargs): - return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) - def sign(self, *args, **kwargs): - return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) - def sqrt(self, *args, **kwargs): - return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) - def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) - - ## reductions - def sum(self, *args, **kwargs): - return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) - def squared_norm(self): - y = numpy.asarray([el.squared_norm() for el in self.containers]) - return y.sum() - def norm(self): - return numpy.sqrt(self.squared_norm()) - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers], shape=self.shape) - def fill(self, x): - for el,ot in zip(self.containers, x): - el.fill(ot) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def __div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, other): - '''Reverse addition - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self + other - # __radd__ - - def __rsub__(self, other): - '''Reverse subtraction - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - 
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - '''Reverse multiplication - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self * other - # __rmul__ - - def __rdiv__(self, other): - '''Reverse division - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - '''Reverse truedivision - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self.__rdiv__(other) - - def __rpow__(self, other): - '''Reverse power - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return other.power(self) - - def __iadd__(self, other): - '''Inline addition''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __iadd__') - for el,ot in zip(self.containers, other): - el += ot - return self - # __iadd__ - - def __isub__(self, other): - '''Inline subtraction''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __isub__') - for el,ot in zip(self.containers, other): - el -= ot - return self - # __isub__ - - def __imul__(self, other): - '''Inline multiplication''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __imul__') - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - '''Inline division''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __idiv__') - for el,ot in zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - '''Inline truedivision''' - return self.__idiv__(other) - diff --git 
a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py deleted file mode 100644 index 0f43155..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import BlockDataContainer -#from ccpi.optimisation.operators import Operator, LinearOperator - -class BlockGeometry(object): - '''Class to hold Geometry as column vector''' - #__array_priority__ = 1 - def __init__(self, *args, **kwargs): - '''''' - self.geometries = args - self.index = 0 - #shape = kwargs.get('shape', None) - #if shape is None: - # shape = (len(args),1) - shape = (len(args),1) - self.shape = shape - #print (self.shape) - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements, len(args))) - - def get_item(self, index): - '''returns the Geometry in the BlockGeometry located at position index''' - return self.geometries[index] - - def allocate(self, value=0, dimension_labels=None): - containers = [geom.allocate(value) for geom in self.geometries] - return BlockDataContainer(*containers) - diff --git a/Wrappers/Python/build/lib/ccpi/framework/__init__.py b/Wrappers/Python/build/lib/ccpi/framework/__init__.py deleted file mode 100644 index 229edb5..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 16:00:18 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce - - -from .framework import DataContainer -from .framework import ImageData, AcquisitionData -from .framework import ImageGeometry, AcquisitionGeometry -from .framework import find_key, message -from .framework import DataProcessor -from .framework import AX, PixelByPixelDataProcessor, CastDataContainer -from .BlockDataContainer import BlockDataContainer -from .BlockGeometry import BlockGeometry diff --git a/Wrappers/Python/build/lib/ccpi/framework/framework.py b/Wrappers/Python/build/lib/ccpi/framework/framework.py deleted file mode 100644 index 07c2ead..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/framework.py +++ /dev/null @@ -1,1496 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce -from numbers import Number - - -def find_key(dic, val): - """return the key of dictionary dic given the value""" - return [k for k, v in dic.items() if v == val][0] - -def message(cls, msg, *args): - msg = "{0}: " + msg - for i in range(len(args)): - msg += " {%d}" %(i+1) - args = list(args) - args.insert(0, cls.__name__ ) - - return msg.format(*args ) - - -class ImageGeometry(object): - RANDOM = 'random' - RANDOM_INT = 'random_int' - CHANNEL = 'channel' - ANGLE = 'angle' - VERTICAL = 'vertical' - HORIZONTAL_X = 'horizontal_x' - HORIZONTAL_Y = 'horizontal_y' - - def __init__(self, - voxel_num_x=0, - voxel_num_y=0, - voxel_num_z=0, - voxel_size_x=1, - voxel_size_y=1, - voxel_size_z=1, - center_x=0, - center_y=0, - center_z=0, - channels=1): - - self.voxel_num_x = voxel_num_x - self.voxel_num_y = voxel_num_y - self.voxel_num_z = voxel_num_z - self.voxel_size_x = voxel_size_x - self.voxel_size_y = voxel_size_y - self.voxel_size_z = voxel_size_z - self.center_x = center_x - self.center_y = center_y - self.center_z = center_z - self.channels = channels - - # this is some code repetition - if self.channels > 1: - if self.voxel_num_z>1: - self.length = 4 - self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - else: - self.length = 3 - self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - else: - if self.voxel_num_z>1: - self.length = 3 - self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - self.length = 2 - self.shape = (self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - - self.dimension_labels = dim_labels - - def get_min_x(self): - return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x - - def get_max_x(self): - return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x - - def get_min_y(self): - return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y - - def get_max_y(self): - return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y - - def get_min_z(self): - if not self.voxel_num_z == 0: - return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def get_max_z(self): - if not self.voxel_num_z == 0: - return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def clone(self): - '''returns a copy of ImageGeometry''' - return ImageGeometry( - self.voxel_num_x, - self.voxel_num_y, - self.voxel_num_z, - self.voxel_size_x, - self.voxel_size_y, - self.voxel_size_z, - self.center_x, - self.center_y, - self.center_z, - self.channels) - def __str__ (self): - repres = "" - repres += "Number of channels: {0}\n".format(self.channels) - repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z) - repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) - repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) 
- return repres - def allocate(self, value=0, dimension_labels=None, **kwargs): - '''allocates an ImageData according to the size expressed in the instance''' - out = ImageData(geometry=self) - if isinstance(value, Number): - if value != 0: - out += value - else: - if value == ImageGeometry.RANDOM: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - out.fill(numpy.random.random_sample(self.shape)) - elif value == ImageGeometry.RANDOM_INT: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - max_value = kwargs.get('max_value', 100) - out.fill(numpy.random.randint(max_value,size=self.shape)) - else: - raise ValueError('Value {} unknown'.format(value)) - if dimension_labels is not None: - if dimension_labels != self.dimension_labels: - return out.subset(dimensions=dimension_labels) - return out - # The following methods return 2 members of the class, therefore I - # don't think we need to implement them. - # Additionally using __len__ is confusing as one would think this is - # an iterable. - #def __len__(self): - # '''returns the length of the geometry''' - # return self.length - #def shape(self): - # '''Returns the shape of the array of the ImageData it describes''' - # return self.shape - -class AcquisitionGeometry(object): - RANDOM = 'random' - RANDOM_INT = 'random_int' - ANGLE_UNIT = 'angle_unit' - DEGREE = 'degree' - RADIAN = 'radian' - CHANNEL = 'channel' - ANGLE = 'angle' - VERTICAL = 'vertical' - HORIZONTAL = 'horizontal' - def __init__(self, - geom_type, - dimension, - angles, - pixel_num_h=0, - pixel_size_h=1, - pixel_num_v=0, - pixel_size_v=1, - dist_source_center=None, - dist_center_detector=None, - channels=1, - **kwargs - ): - """ - General inputs for standard type projection geometries - detectorDomain or detectorpixelSize: - If 2D - If scalar: Width of detector or single detector pixel - If 2-vec: Error - If 3D - If scalar: Width in both dimensions - If 2-vec: Vertical then horizontal size - grid - If 2D - If scalar: number of detectors - If 2-vec: error - If 3D - If scalar: Square grid that size - If 2-vec vertical then horizontal size - cone or parallel - 2D or 3D - parallel_parameters: ? 
- cone_parameters: - source_to_center_dist (if parallel: NaN) - center_to_detector_dist (if parallel: NaN) - standard or nonstandard (vec) geometry - angles - angles_format radians or degrees - """ - self.geom_type = geom_type # 'parallel' or 'cone' - self.dimension = dimension # 2D or 3D - self.angles = angles - num_of_angles = len (angles) - - self.dist_source_center = dist_source_center - self.dist_center_detector = dist_center_detector - - self.pixel_num_h = pixel_num_h - self.pixel_size_h = pixel_size_h - self.pixel_num_v = pixel_num_v - self.pixel_size_v = pixel_size_v - - self.channels = channels - self.angle_unit=kwargs.get(AcquisitionGeometry.ANGLE_UNIT, - AcquisitionGeometry.DEGREE) - if channels > 1: - if pixel_num_v > 1: - shape = (channels, num_of_angles , pixel_num_v, pixel_num_h) - dim_labels = [AcquisitionGeometry.CHANNEL , - AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , - AcquisitionGeometry.HORIZONTAL] - else: - shape = (channels , num_of_angles, pixel_num_h) - dim_labels = [AcquisitionGeometry.CHANNEL , - AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] - else: - if pixel_num_v > 1: - shape = (num_of_angles, pixel_num_v, pixel_num_h) - dim_labels = [AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , - AcquisitionGeometry.HORIZONTAL] - else: - shape = (num_of_angles, pixel_num_h) - dim_labels = [AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] - self.shape = shape - - self.dimension_labels = dim_labels - - def clone(self): - '''returns a copy of the AcquisitionGeometry''' - return AcquisitionGeometry(self.geom_type, - self.dimension, - self.angles, - self.pixel_num_h, - self.pixel_size_h, - self.pixel_num_v, - self.pixel_size_v, - self.dist_source_center, - self.dist_center_detector, - self.channels) - - def __str__ (self): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.dimension) - repres += "angles: {0}\n".format(self.angles) - repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v) - repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v) - repres += "geometry type: {0}\n".format(self.geom_type) - repres += "distance source-detector: {0}\n".format(self.dist_source_center) - repres += "distance center-detector: {0}\n".format(self.dist_source_center) - repres += "number of channels: {0}\n".format(self.channels) - return repres - def allocate(self, value=0, dimension_labels=None): - '''allocates an AcquisitionData according to the size expressed in the instance''' - out = AcquisitionData(geometry=self) - if isinstance(value, Number): - if value != 0: - out += value - else: - if value == AcquisitionData.RANDOM: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - out.fill(numpy.random.random_sample(self.shape)) - elif value == AcquisitionData.RANDOM_INT: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - max_value = kwargs.get('max_value', 100) - out.fill(numpy.random.randint(max_value,size=self.shape)) - else: - raise ValueError('Value {} unknown'.format(value)) - if dimension_labels is not None: - if dimension_labels != self.dimension_labels: - return out.subset(dimensions=dimension_labels) - return out - -class DataContainer(object): - '''Generic class to hold data - - Data is currently held in a numpy arrays''' - - def __init__ (self, array, deep_copy=True, dimension_labels=None, - **kwargs): - '''Holds the data''' - - self.shape = numpy.shape(array) - self.number_of_dimensions = len (self.shape) 
- self.dimension_labels = {} - self.geometry = None # Only relevant for AcquisitionData and ImageData - - if dimension_labels is not None and \ - len (dimension_labels) == self.number_of_dimensions: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = dimension_labels[i] - else: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = 'dimension_{0:02}'.format(i) - - if type(array) == numpy.ndarray: - if deep_copy: - self.array = array.copy() - else: - self.array = array - else: - raise TypeError('Array must be NumpyArray, passed {0}'\ - .format(type(array))) - - # finally copy the geometry - if 'geometry' in kwargs.keys(): - self.geometry = kwargs['geometry'] - else: - # assume it is parallel beam - pass - - def get_dimension_size(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - acq_size = -1 - for k,v in self.dimension_labels.items(): - if v == dimension_label: - acq_size = self.shape[k] - return acq_size - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels)) - def get_dimension_axis(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - for k,v in self.dimension_labels.items(): - if v == dimension_label: - return k - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels.values())) - - - def as_array(self, dimensions=None): - '''Returns the DataContainer as Numpy Array - - Returns the pointer to the array if dimensions is not set. - If dimensions is set, it first creates a new DataContainer with the subset - and then it returns the pointer to the array''' - if dimensions is not None: - return self.subset(dimensions).as_array() - return self.array - - - def subset(self, dimensions=None, **kw): - '''Creates a DataContainer containing a subset of self according to the - labels in dimensions''' - if dimensions is None: - if kw == {}: - return self.array.copy() - else: - reduced_dims = [v for k,v in self.dimension_labels.items()] - for dim_l, dim_v in kw.items(): - for k,v in self.dimension_labels.items(): - if v == dim_l: - reduced_dims.pop(k) - return self.subset(dimensions=reduced_dims, **kw) - else: - # check that all the requested dimensions are in the array - # this is done by checking the dimension_labels - proceed = True - unknown_key = '' - # axis_order contains the order of the axis that the user wants - # in the output DataContainer - axis_order = [] - if type(dimensions) == list: - for dl in dimensions: - if dl not in self.dimension_labels.values(): - proceed = False - unknown_key = dl - break - else: - axis_order.append(find_key(self.dimension_labels, dl)) - if not proceed: - raise KeyError('Subset error: Unknown key specified {0}'.format(dl)) - - # slice away the unwanted data from the array - unwanted_dimensions = self.dimension_labels.copy() - left_dimensions = [] - for ax in sorted(axis_order): - this_dimension = unwanted_dimensions.pop(ax) - left_dimensions.append(this_dimension) - #print ("unwanted_dimensions {0}".format(unwanted_dimensions)) - #print ("left_dimensions {0}".format(left_dimensions)) - #new_shape = [self.shape[ax] for ax in axis_order] - #print ("new_shape {0}".format(new_shape)) - command = "self.array[" - for i in range(self.number_of_dimensions): - if self.dimension_labels[i] in unwanted_dimensions.values(): - value = 0 - for k,v in kw.items(): - if k == self.dimension_labels[i]: - value = v - - command = command + str(value) - else: - 
command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - command = command + ']' - - cleaned = eval(command) - # cleaned has collapsed dimensions in the same order of - # self.array, but we want it in the order stated in the - # "dimensions". - # create axes order for numpy.transpose - axes = [] - for key in dimensions: - #print ("key {0}".format( key)) - for i in range(len( left_dimensions )): - ld = left_dimensions[i] - #print ("ld {0}".format( ld)) - if ld == key: - axes.append(i) - #print ("axes {0}".format(axes)) - - cleaned = numpy.transpose(cleaned, axes).copy() - - return type(self)(cleaned , True, dimensions) - - def fill(self, array, **dimension): - '''fills the internal numpy array with the one provided''' - if dimension == {}: - if issubclass(type(array), DataContainer) or\ - issubclass(type(array), numpy.ndarray): - if array.shape != self.shape: - raise ValueError('Cannot fill with the provided array.' + \ - 'Expecting {0} got {1}'.format( - self.shape,array.shape)) - if issubclass(type(array), DataContainer): - numpy.copyto(self.array, array.array) - else: - #self.array[:] = array - numpy.copyto(self.array, array) - else: - - command = 'self.array[' - i = 0 - for k,v in self.dimension_labels.items(): - for dim_label, dim_value in dimension.items(): - if dim_label == v: - command = command + str(dim_value) - else: - command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - i += 1 - command = command + "] = array[:]" - exec(command) - - - def check_dimensions(self, other): - return self.shape == other.shape - - ## algebra - def __add__(self, other, *args, **kwargs): - out = kwargs.get('out', None) - - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() + other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)( - self.as_array() + other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , - type(other))) - # __add__ - - def __sub__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() - other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() - other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , - type(other))) - # __sub__ - def __truediv__(self,other): - return self.__div__(other) - - def __div__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() / other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() / other, - deep_copy=True, - dimension_labels=self.dimension_labels, - 
geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , - type(other))) - # __div__ - - def __pow__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() ** other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() ** other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , - type(other))) - # __pow__ - - def __mul__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() * other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - return type(self)(self.as_array() * other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , - type(other))) - # __mul__ - - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - return self * other - # __rmul__ - - def __rdiv__(self, other): - print ("call __rdiv__") - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - if isinstance(other, (int, float)) : - fother = numpy.ones(numpy.shape(self.array)) * other - return type(self)(fother ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - return type(self)(other.as_array() ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Dimensions do not match') - # __rpow__ - - # in-place arithmetic operators: - # (+=, -=, *=, /= , //=, - # must return self - - - - def __iadd__(self, other): - if isinstance(other, (int, float)) : - numpy.add(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.add(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __iadd__ - - def __imul__(self, other): - if isinstance(other, (int, float)) : - arr = self.as_array() - numpy.multiply(arr, other, out=arr) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.multiply(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __imul__ - - def __isub__(self, other): - if isinstance(other, (int, float)) : - numpy.subtract(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.subtract(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do 
not match') - return self - # __isub__ - - def __idiv__(self, other): - return self.__itruediv__(other) - def __itruediv__(self, other): - if isinstance(other, (int, float)) : - numpy.divide(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.divide(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __idiv__ - - def __str__ (self, representation=False): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions) - repres += "Shape: {0}\n".format(self.shape) - repres += "Axis labels: {0}\n".format(self.dimension_labels) - if representation: - repres += "Representation: \n{0}\n".format(self.array) - return repres - - def clone(self): - '''returns a copy of itself''' - - return type(self)(self.array, - dimension_labels=self.dimension_labels, - deep_copy=True, - geometry=self.geometry ) - - def get_data_axes_order(self,new_order=None): - '''returns the axes label of self as a list - - if new_order is None returns the labels of the axes as a sorted-by-key list - if new_order is a list of length number_of_dimensions, returns a list - with the indices of the axes in new_order with respect to those in - self.dimension_labels: i.e. - self.dimension_labels = {0:'horizontal',1:'vertical'} - new_order = ['vertical','horizontal'] - returns [1,0] - ''' - if new_order is None: - - axes_order = [i for i in range(len(self.shape))] - for k,v in self.dimension_labels.items(): - axes_order[k] = v - return axes_order - else: - if len(new_order) == self.number_of_dimensions: - axes_order = [i for i in range(self.number_of_dimensions)] - - for i in range(len(self.shape)): - found = False - for k,v in self.dimension_labels.items(): - if new_order[i] == v: - axes_order[i] = k - found = True - if not found: - raise ValueError('Axis label {0} not found.'.format(new_order[i])) - return axes_order - else: - raise ValueError('Expecting {0} axes, got {2}'\ - .format(len(self.shape),len(new_order))) - - - def copy(self): - '''alias of clone''' - return self.clone() - - ## binary operations - - def pixel_wise_binary(self, pwop, x2, *args, **kwargs): - out = kwargs.get('out', None) - if out is None: - if isinstance(x2, (int, float, complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif issubclass(type(x2) , DataContainer): - out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - - elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): - if self.check_dimensions(out) and self.check_dimensions(x2): - kwargs['out'] = out.as_array() - pwop(self.as_array(), x2.as_array(), *args, **kwargs ) - #return type(self)(out.as_array(), - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - return out - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): - if self.check_dimensions(out): - kwargs['out']=out.as_array() - pwop(self.as_array(), x2, *args, **kwargs ) - return out - else: - raise 
ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - kwargs['out'] = out - pwop(self.as_array(), x2, *args, **kwargs) - #return type(self)(out, - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def add(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.add, other, *args, **kwargs) - - def subtract(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs) - - def multiply(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs) - - def divide(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) - - def power(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) - - def maximum(self, x2, *args, **kwargs): - return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs) - - ## unary operations - def pixel_wise_unary(self, pwop, *args, **kwargs): - out = kwargs.get('out', None) - if out is None: - out = pwop(self.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(out), DataContainer): - if self.check_dimensions(out): - kwargs['out'] = out.as_array() - pwop(self.as_array(), *args, **kwargs ) - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - kwargs['out'] = out - pwop(self.as_array(), *args, **kwargs) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def abs(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.abs, *args, **kwargs) - - def sign(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.sign, *args, **kwargs) - - def sqrt(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs) - - def conjugate(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs) - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - - ## reductions - def sum(self, *args, **kwargs): - return self.as_array().sum(*args, **kwargs) - def squared_norm(self): - '''return the squared euclidean norm of the DataContainer viewed as a vector''' - #shape = self.shape - #size = reduce(lambda x,y:x*y, shape, 1) - #y = numpy.reshape(self.as_array(), (size, )) - return self.dot(self.conjugate()) - #return self.dot(self) - def norm(self): - '''return the euclidean norm of the DataContainer viewed as a vector''' - return numpy.sqrt(self.squared_norm()) - def dot(self, other, *args, **kwargs): - '''return the inner product of 2 DataContainers viewed as vectors''' - if self.shape == other.shape: - return numpy.dot(self.as_array().ravel(), other.as_array().ravel()) - else: - raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape)) - - - - - -class ImageData(DataContainer): - '''DataContainer for holding 2D or 3D DataContainer''' - - def __init__(self, - array = None, - deep_copy=False, - dimension_labels=None, - **kwargs): - 
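# Quick plain-numpy check (illustrative only) of the reductions defined above:
# dot() flattens both operands, squared_norm() is dot(x, x.conjugate()), which
# for real data is just dot(x, x), and norm() is its square root.
import numpy
a = numpy.arange(24, dtype=numpy.float32).reshape(2, 3, 4)
b = 2.0 * a
assert numpy.isclose(numpy.dot(a.ravel(), b.ravel()), (a * b).sum())
assert numpy.isclose(numpy.sqrt(numpy.dot(a.ravel(), a.ravel())),
                     numpy.linalg.norm(a))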
- self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz_x = geometry.voxel_num_x - horiz_y = geometry.voxel_num_y - vert = 1 if geometry.voxel_num_z is None\ - else geometry.voxel_num_z # this should be 1 for 2D - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (channels , horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - if vert > 1: - shape = (vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (horiz_y, horiz_x) - dim_labels = [ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == ImageGeometry.CHANNEL: - shape.append(channels) - elif dim == ImageGeometry.HORIZONTAL_Y: - shape.append(horiz_y) - elif dim == ImageGeometry.VERTICAL: - shape.append(vert) - elif dim == ImageGeometry.HORIZONTAL_X: - shape.append(horiz_x) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes'.format( - len(dimension_labels) - len(shape))) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(ImageData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - else: - raise ValueError('Please pass either a DataContainer, ' +\ - 'a numpy array or a geometry') - else: - if issubclass(type(array) , DataContainer): - # if the array is a DataContainer get the info from there - if not ( array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(ImageData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) , numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = [ImageGeometry.CHANNEL, - ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - elif array.ndim == 3: - dimension_labels = [ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - dimension_labels = [ ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(ImageData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - # load metadata from kwargs if present - for key, value in kwargs.items(): - if (type(value) == list or type(value) == tuple) and \ - ( len (value) == 3 and len (value) == 2) : - if key == 'origin' : - self.origin = value - if key == 'spacing' : - self.spacing = value - - def subset(self, dimensions=None, **kw): - # FIXME: this is clearly not rigth - # it should be something like - # out = DataContainer.subset(self, dimensions, **kw) - # followed by regeneration of the proper geometry. 
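# Minimal usage sketch of the geometry-driven constructor above; it mirrors the
# small example at the bottom of this file and assumes ccpi.framework exposes
# ImageGeometry and ImageData as in this patch.
from ccpi.framework import ImageGeometry, ImageData
vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2)
vol = ImageData(geometry=vgeometry)   # zero-filled float32 array
# channels > 1 and voxel_num_z in (None, 1), so the branch above yields
# shape (channels, voxel_num_y, voxel_num_x) == (2, 3, 2)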
- out = super(ImageData, self).subset(dimensions, **kw) - #out.geometry = self.recalculate_geometry(dimensions , **kw) - out.geometry = self.geometry - return out - - -class AcquisitionData(DataContainer): - '''DataContainer for holding 2D or 3D sinogram''' - - def __init__(self, - array = None, - deep_copy=True, - dimension_labels=None, - **kwargs): - self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz = geometry.pixel_num_h - vert = geometry.pixel_num_v - angles = geometry.angles - num_of_angles = numpy.shape(angles)[0] - - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, num_of_angles , vert, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL] - else: - shape = (channels , num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL] - else: - if vert > 1: - shape = (num_of_angles, vert, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL - ] - else: - shape = (num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL - ] - - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == AcquisitionGeometry.CHANNEL: - shape.append(channels) - elif dim == AcquisitionGeometry.ANGLE: - shape.append(num_of_angles) - elif dim == AcquisitionGeometry.VERTICAL: - shape.append(vert) - elif dim == AcquisitionGeometry.HORIZONTAL: - shape.append(horiz) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ - .format( - len(dimension_labels) - len(shape), - dimension_labels, shape) - ) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - else: - - if issubclass(type(array) ,DataContainer): - # if the array is a DataContainer get the info from there - if not ( array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) ,numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = ['channel' ,'angle' , 'vertical' , - 'horizontal'] - elif array.ndim == 3: - dimension_labels = ['angle' , 'vertical' , - 'horizontal'] - else: - dimension_labels = ['angle' , - 'horizontal'] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - -class DataProcessor(object): - '''Defines a generic DataContainer processor - - accepts DataContainer as inputs and - outputs DataContainer - additional attributes can be defined with __setattr__ - ''' - - def __init__(self, **attributes): - if not 'store_output' in attributes.keys(): - 
attributes['store_output'] = True - attributes['output'] = False - attributes['runTime'] = -1 - attributes['mTime'] = datetime.now() - attributes['input'] = None - for key, value in attributes.items(): - self.__dict__[key] = value - - - def __setattr__(self, name, value): - if name == 'input': - self.set_input(value) - elif name in self.__dict__.keys(): - self.__dict__[name] = value - self.__dict__['mTime'] = datetime.now() - else: - raise KeyError('Attribute {0} not found'.format(name)) - #pass - - def set_input(self, dataset): - if issubclass(type(dataset), DataContainer): - if self.check_input(dataset): - self.__dict__['input'] = dataset - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(dataset), DataContainer)) - - def check_input(self, dataset): - '''Checks parameters of the input DataContainer - - Should raise an Error if the DataContainer does not match expectation, e.g. - if the expected input DataContainer is 3D and the Processor expects 2D. - ''' - raise NotImplementedError('Implement basic checks for input DataContainer') - - def get_output(self, out=None): - for k,v in self.__dict__.items(): - if v is None and k != 'output': - raise ValueError('Key {0} is None'.format(k)) - shouldRun = False - if self.runTime == -1: - shouldRun = True - elif self.mTime > self.runTime: - shouldRun = True - - # CHECK this - if self.store_output and shouldRun: - self.runTime = datetime.now() - try: - self.output = self.process(out=out) - return self.output - except TypeError as te: - self.output = self.process() - return self.output - self.runTime = datetime.now() - try: - return self.process(out=out) - except TypeError as te: - return self.process() - - - def set_input_processor(self, processor): - if issubclass(type(processor), DataProcessor): - self.__dict__['input'] = processor - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(processor), DataProcessor)) - - def get_input(self): - '''returns the input DataContainer - - It is useful in the case the user has provided a DataProcessor as - input - ''' - if issubclass(type(self.input), DataProcessor): - dsi = self.input.get_output() - else: - dsi = self.input - return dsi - - def process(self, out=None): - raise NotImplementedError('process must be implemented') - - - - -class DataProcessor23D(DataProcessor): - '''Regularizers DataProcessor - ''' - - def check_input(self, dataset): - '''Checks number of dimensions input DataContainer - - Expected input is 2D or 3D - ''' - if dataset.number_of_dimensions == 2 or \ - dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - -###### Example of DataProcessors - -class AX(DataProcessor): - '''Example DataProcessor - The AXPY routines perform a vector multiplication operation defined as - - y := a*x - where: - - a is a scalar - - x a DataContainer. 
- ''' - - def __init__(self): - kwargs = {'scalar':None, - 'input':None, - } - - #DataProcessor.__init__(self, **kwargs) - super(AX, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - dsi = self.get_input() - a = self.scalar - if out is None: - y = DataContainer( a * dsi.as_array() , True, - dimension_labels=dsi.dimension_labels ) - #self.setParameter(output_dataset=y) - return y - else: - out.fill(a * dsi.as_array()) - - -###### Example of DataProcessors - -class CastDataContainer(DataProcessor): - '''Example DataProcessor - Cast a DataContainer array to a different type. - - y := a*x - where: - - a is a scalar - - x a DataContainer. - ''' - - def __init__(self, dtype=None): - kwargs = {'dtype':dtype, - 'input':None, - } - - #DataProcessor.__init__(self, **kwargs) - super(CastDataContainer, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - dsi = self.get_input() - dtype = self.dtype - if out is None: - y = numpy.asarray(dsi.as_array(), dtype=dtype) - - return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype), - dimension_labels=dsi.dimension_labels ) - else: - out.fill(numpy.asarray(dsi.as_array(), dtype=dtype)) - - - - - -class PixelByPixelDataProcessor(DataProcessor): - '''Example DataProcessor - - This processor applies a python function to each pixel of the DataContainer - - f is a python function - - x a DataSet. - ''' - - def __init__(self): - kwargs = {'pyfunc':None, - 'input':None, - } - #DataProcessor.__init__(self, **kwargs) - super(PixelByPixelDataProcessor, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - pyfunc = self.pyfunc - dsi = self.get_input() - - eval_func = numpy.frompyfunc(pyfunc,1,1) - - - y = DataContainer( eval_func( dsi.as_array() ) , True, - dimension_labels=dsi.dimension_labels ) - return y - - - - -if __name__ == '__main__': - shape = (2,3,4,5) - size = shape[0] - for i in range(1, len(shape)): - size = size * shape[i] - #print("a refcount " , sys.getrefcount(a)) - a = numpy.asarray([i for i in range( size )]) - print("a refcount " , sys.getrefcount(a)) - a = numpy.reshape(a, shape) - print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) - print("a refcount " , sys.getrefcount(a)) - print ("ds label {0}".format(ds.dimension_labels)) - subset = ['W' ,'X'] - b = ds.subset( subset ) - print("a refcount " , sys.getrefcount(a)) - print ("b label {0} shape {1}".format(b.dimension_labels, - numpy.shape(b.as_array()))) - c = ds.subset(['Z','W','X']) - print("a refcount " , sys.getrefcount(a)) - - # Create a ImageData sharing the array with c - volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels) - volume1 = ImageData(c, False) - - print ("volume0 {0} volume1 {1}".format(id(volume0.array), - id(volume1.array))) - - # Create a ImageData copying the array from c - volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels) - volume3 = ImageData(c) - - print ("volume2 {0} volume3 {1}".format(id(volume2.array), - id(volume3.array))) - - # single number DataSet - sn = DataContainer(numpy.asarray([1])) - - ax = AX() - ax.scalar = 2 - ax.set_input(c) - #ax.apply() - print ("ax in {0} out {1}".format(c.as_array().flatten(), - ax.get_output().as_array().flatten())) - - cast = CastDataContainer(dtype=numpy.float32) - cast.set_input(c) - out = cast.get_output() - out *= 0 - axm = AX() - axm.scalar = 0.5 - 
axm.set_input_processor(cast) - axm.get_output(out) - #axm.apply() - print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array())) - - # check out in DataSetProcessor - #a = numpy.asarray([i for i in range( size )]) - - - # create a PixelByPixelDataProcessor - - #define a python function which will take only one input (the pixel value) - pyfunc = lambda x: -x if x > 20 else x - clip = PixelByPixelDataProcessor() - clip.pyfunc = pyfunc - clip.set_input(c) - #clip.apply() - - print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array())) - - #dsp = DataProcessor() - #dsp.set_input(ds) - #dsp.input = a - # pipeline - - chain = AX() - chain.scalar = 0.5 - chain.set_input_processor(ax) - print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array())) - - # testing arithmetic operations - - print (b) - print ((b+1)) - print ((1+b)) - - print (b) - print ((b*2)) - - print (b) - print ((2*b)) - - print (b) - print ((b/2)) - - print (b) - print ((2/b)) - - print (b) - print ((b**2)) - - print (b) - print ((2**b)) - - print (type(volume3 + 2)) - - s = [i for i in range(3 * 4 * 4)] - s = numpy.reshape(numpy.asarray(s), (3,4,4)) - sino = AcquisitionData( s ) - - shape = (4,3,2) - a = [i for i in range(2*3*4)] - a = numpy.asarray(a) - a = numpy.reshape(a, shape) - print (numpy.shape(a)) - ds = DataContainer(a, True, ['X', 'Y','Z']) - # this means that I expect the X to be of length 2 , - # y of length 3 and z of length 4 - subset = ['Y' ,'Z'] - b0 = ds.subset( subset ) - print ("shape b 3,2? {0}".format(numpy.shape(b0.as_array()))) - # expectation on b is that it is - # 3x2 cut at z = 0 - - subset = ['X' ,'Y'] - b1 = ds.subset( subset , Z=1) - print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array()))) - - - - # create VolumeData from geometry - vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2) - vol = ImageData(geometry=vgeometry) - - sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20), - geom_type='parallel', pixel_num_v=3, - pixel_num_h=5 , channels=2) - sino = AcquisitionData(geometry=sgeometry) - sino2 = sino.clone() - - a0 = numpy.asarray([i for i in range(2*3*4)]) - a1 = numpy.asarray([2*i for i in range(2*3*4)]) - - - ds0 = DataContainer(numpy.reshape(a0,(2,3,4))) - ds1 = DataContainer(numpy.reshape(a1,(2,3,4))) - - numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1)) - - a2 = numpy.asarray([2*i for i in range(2*3*5)]) - ds2 = DataContainer(numpy.reshape(a2,(2,3,5))) - -# # it should fail if the shape is wrong -# try: -# ds2.dot(ds0) -# self.assertTrue(False) -# except ValueError as ve: -# self.assertTrue(True) - diff --git a/Wrappers/Python/build/lib/ccpi/io/__init__.py b/Wrappers/Python/build/lib/ccpi/io/__init__.py deleted file mode 100644 index 9233d7a..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/io/reader.py b/Wrappers/Python/build/lib/ccpi/io/reader.py deleted file mode 100644 index 856f5e0..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/reader.py +++ /dev/null @@ -1,500 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev, Edoardo Pasca and Srikanth Nagella - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' -This is a reader module with classes for loading 3D datasets. - -@author: Mr. Srikanth Nagella -''' -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from ccpi.framework import AcquisitionGeometry -from ccpi.framework import AcquisitionData -import numpy as np -import os - -h5pyAvailable = True -try: - from h5py import File as NexusFile -except: - h5pyAvailable = False - -pilAvailable = True -try: - from PIL import Image -except: - pilAvailable = False - -class NexusReader(object): - ''' - Reader class for loading Nexus files. - ''' - - def __init__(self, nexus_filename=None): - ''' - This takes in input as filename and loads the data dataset. - ''' - self.flat = None - self.dark = None - self.angles = None - self.geometry = None - self.filename = nexus_filename - self.key_path = 'entry1/tomo_entry/instrument/detector/image_key' - self.data_path = 'entry1/tomo_entry/data/data' - self.angle_path = 'entry1/tomo_entry/data/rotation_angle' - - def get_image_keys(self): - try: - with NexusFile(self.filename,'r') as file: - return np.array(file[self.key_path]) - except KeyError as ke: - raise KeyError("get_image_keys: " , ke.args[0] , self.key_path) - - - def load(self, dimensions=None, image_key_id=0): - ''' - This is generic loading function of flat field, dark field and projection data. 
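# Plain-numpy sketch (illustrative data) of the image-key selection that
# load() performs: in the NXtomo layout read here, image_key 0 marks
# projections, 1 flat fields and 2 dark fields, and frames are picked with a
# boolean mask over the first axis.
import numpy as np
image_keys = np.array([1, 1, 0, 0, 0, 2, 2])
frames = np.arange(7 * 2 * 2).reshape(7, 2, 2)   # stand-in for file[self.data_path]
projections = frames[image_keys == 0]            # what load(..., image_key_id=0) selects
assert projections.shape == (3, 2, 2)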
- ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - image_keys = np.array(file[self.key_path]) - projections = None - if dimensions == None: - projections = np.array(file[self.data_path]) - result = projections[image_keys==image_key_id] - return result - else: - #When dimensions are specified they need to be mapped to image_keys - index_array = np.where(image_keys==image_key_id) - projection_indexes = index_array[0][dimensions[0]] - new_dimensions = list(dimensions) - new_dimensions[0]= projection_indexes - new_dimensions = tuple(new_dimensions) - result = np.array(file[self.data_path][new_dimensions]) - return result - except: - print("Error reading nexus file") - raise - - def load_projection(self, dimensions=None): - ''' - Loads the projection data from the nexus file. - returns: numpy array with projection data - ''' - try: - if 0 not in self.get_image_keys(): - raise ValueError("Projections are not in the data. Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 0) - - def load_flat(self, dimensions=None): - ''' - Loads the flat field data from the nexus file. - returns: numpy array with flat field data - ''' - try: - if 1 not in self.get_image_keys(): - raise ValueError("Flats are not in the data. Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 1) - - def load_dark(self, dimensions=None): - ''' - Loads the Dark field data from the nexus file. - returns: numpy array with dark field data - ''' - try: - if 2 not in self.get_image_keys(): - raise ValueError("Darks are not in the data. 
Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 2) - - def get_projection_angles(self): - ''' - This function returns the projection angles - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - angles = np.array(file[self.angle_path],np.float32) - image_keys = np.array(file[self.key_path]) - return angles[image_keys==0] - except: - print("get_projection_angles Error reading nexus file") - raise - - - def get_sinogram_dimensions(self): - ''' - Return the dimensions of the dataset - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - projections = file[self.data_path] - image_keys = np.array(file[self.key_path]) - dims = list(projections.shape) - dims[0] = dims[1] - dims[1] = np.sum(image_keys==0) - return tuple(dims) - except: - print("Error reading nexus file") - raise - - def get_projection_dimensions(self): - ''' - Return the dimensions of the dataset - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - try: - projections = file[self.data_path] - except KeyError as ke: - raise KeyError('Error: data path {0} not found\n{1}'\ - .format(self.data_path, - ke.args[0])) - #image_keys = np.array(file[self.key_path]) - image_keys = self.get_image_keys() - dims = list(projections.shape) - dims[0] = np.sum(image_keys==0) - return tuple(dims) - except: - print("Warning: Error reading image_keys trying accessing data on " , self.data_path) - with NexusFile(self.filename,'r') as file: - dims = file[self.data_path].shape - return tuple(dims) - - - - def get_acquisition_data(self, dimensions=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - data = self.load_projection(dimensions) - dims = self.get_projection_dimensions() - geometry = AcquisitionGeometry('parallel', '3D', - self.get_projection_angles(), - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = dims[1], - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - - def get_acquisition_data_subset(self, ymin=None, ymax=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - - - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - pass - dims = file[self.data_path].shape - if ymin is None and ymax is None: - data = np.array(file[self.data_path]) - else: - if ymin is None: - ymin = 0 - if ymax > dims[1]: - raise ValueError('ymax out of range') - data = np.array(file[self.data_path][:,:ymax,:]) - elif ymax is None: - ymax = dims[1] - if ymin < 0: - raise ValueError('ymin out of range') - data = np.array(file[self.data_path][:,ymin:,:]) - else: - if ymax > dims[1]: - raise ValueError('ymax out of range') - if ymin < 0: - raise ValueError('ymin out of range') - - data = np.array(file[self.data_path] - [: , ymin:ymax , :] ) - - except: - print("Error reading nexus 
file") - raise - - - try: - angles = self.get_projection_angles() - except KeyError as ke: - n = data.shape[0] - angles = np.linspace(0, n, n+1, dtype=np.float32) - - if ymax-ymin > 1: - - geometry = AcquisitionGeometry('parallel', '3D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = ymax-ymin, - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, False, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - elif ymax-ymin == 1: - geometry = AcquisitionGeometry('parallel', '2D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data.squeeze(), False, geometry=geometry, - dimension_labels=['angle','horizontal']) - def get_acquisition_data_slice(self, y_slice=0): - return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1) - def get_acquisition_data_whole(self): - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - print ("Warning: ") - dims = file[self.data_path].shape - - ymin = 0 - ymax = dims[1] - 1 - - return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax) - - - - def list_file_content(self): - try: - with NexusFile(self.filename,'r') as file: - file.visit(print) - except: - print("Error reading nexus file") - raise - def get_acquisition_data_batch(self, bmin=None, bmax=None): - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - - - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - dims = file[self.data_path].shape - if bmin is None or bmax is None: - raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits') - - if bmin >= 0 and bmin < bmax and bmax <= dims[0]: - data = np.array(file[self.data_path][bmin:bmax]) - else: - raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0])) - - except: - print("Error reading nexus file") - raise - - - try: - angles = self.get_projection_angles()[bmin:bmax] - except KeyError as ke: - n = data.shape[0] - angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax] - - if bmax-bmin > 1: - - geometry = AcquisitionGeometry('parallel', '3D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = bmax-bmin, - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, False, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - elif bmax-bmin == 1: - geometry = AcquisitionGeometry('parallel', '2D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data.squeeze(), False, geometry=geometry, - dimension_labels=['angle','horizontal']) - - - -class XTEKReader(object): - ''' - Reader class for loading XTEK files - ''' - - def __init__(self, xtek_config_filename=None): - ''' - This takes in the xtek config filename and loads the dataset and the - required geometry parameters - ''' - self.projections = None - self.geometry = {} - self.filename = xtek_config_filename - self.load() - - def load(self): - pixel_num_h = 0 - pixel_num_v = 0 - xpixel_size = 0 - ypixel_size = 0 - source_x = 0 - detector_x = 0 - with open(self.filename) as f: - content = f.readlines() 
- content = [x.strip() for x in content] - for line in content: - if line.startswith("SrcToObject"): - source_x = float(line.split('=')[1]) - elif line.startswith("SrcToDetector"): - detector_x = float(line.split('=')[1]) - elif line.startswith("DetectorPixelsY"): - pixel_num_v = int(line.split('=')[1]) - #self.num_of_vertical_pixels = self.calc_v_alighment(self.num_of_vertical_pixels, self.pixels_per_voxel) - elif line.startswith("DetectorPixelsX"): - pixel_num_h = int(line.split('=')[1]) - elif line.startswith("DetectorPixelSizeX"): - xpixel_size = float(line.split('=')[1]) - elif line.startswith("DetectorPixelSizeY"): - ypixel_size = float(line.split('=')[1]) - elif line.startswith("Projections"): - self.num_projections = int(line.split('=')[1]) - elif line.startswith("InitialAngle"): - self.initial_angle = float(line.split('=')[1]) - elif line.startswith("Name"): - self.experiment_name = line.split('=')[1] - elif line.startswith("Scattering"): - self.scattering = float(line.split('=')[1]) - elif line.startswith("WhiteLevel"): - self.white_level = float(line.split('=')[1]) - elif line.startswith("MaskRadius"): - self.mask_radius = float(line.split('=')[1]) - - #Read Angles - angles = self.read_angles() - self.geometry = AcquisitionGeometry('cone', '3D', angles, pixel_num_h, xpixel_size, pixel_num_v, ypixel_size, -1 * source_x, - detector_x - source_x, - ) - - def read_angles(self): - """ - Read the angles file .ang or _ctdata.txt file and returns the angles - as an numpy array. - """ - input_path = os.path.dirname(self.filename) - angles_ctdata_file = os.path.join(input_path, '_ctdata.txt') - angles_named_file = os.path.join(input_path, self.experiment_name+'.ang') - angles = np.zeros(self.num_projections,dtype='f') - #look for _ctdata.txt - if os.path.exists(angles_ctdata_file): - #read txt file with angles - with open(angles_ctdata_file) as f: - content = f.readlines() - #skip firt three lines - #read the middle value of 3 values in each line as angles in degrees - index = 0 - for line in content[3:]: - self.angles[index]=float(line.split(' ')[1]) - index+=1 - angles = np.deg2rad(self.angles+self.initial_angle); - elif os.path.exists(angles_named_file): - #read the angles file which is text with first line as header - with open(angles_named_file) as f: - content = f.readlines() - #skip first line - index = 0 - for line in content[1:]: - angles[index] = float(line.split(':')[1]) - index+=1 - angles = np.flipud(angles+self.initial_angle) #angles are in the reverse order - else: - raise RuntimeError("Can't find angles file") - return angles - - def load_projection(self, dimensions=None): - ''' - This method reads the projection images from the directory and returns a numpy array - ''' - if not pilAvailable: - raise('Image library pillow is not installed') - if dimensions != None: - raise('Extracting subset of data is not implemented') - input_path = os.path.dirname(self.filename) - pixels = np.zeros((self.num_projections, self.geometry.pixel_num_h, self.geometry.pixel_num_v), dtype='float32') - for i in range(1, self.num_projections+1): - im = Image.open(os.path.join(input_path,self.experiment_name+"_%04d"%i+".tif")) - pixels[i-1,:,:] = np.fliplr(np.transpose(np.array(im))) ##Not sure this is the correct way to populate the image - - #normalising the data - #TODO: Move this to a processor - pixels = pixels - (self.white_level*self.scattering)/100.0 - pixels[pixels < 0.0] = 0.000001 # all negative values to approximately 0 as the std log of zero and non negative number is not defined - 
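# Worked example of the correction above (numbers are illustrative, not from
# any dataset): with white_level = 60000 and scattering = 0.02 the offset is
# 60000 * 0.02 / 100 = 12.0 counts per pixel, and any pixel pushed below zero
# is reset to 1e-6 so that a later logarithm of the data stays finite.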
return pixels - - def get_acquisition_data(self, dimensions=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - data = self.load_projection(dimensions) - return AcquisitionData(data, geometry=self.geometry) - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py deleted file mode 100644 index cf2d93d..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py deleted file mode 100644 index ed95c3f..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import time -from numbers import Integral - -class Algorithm(object): - '''Base class for iterative algorithms - - provides the minimal infrastructure. - Algorithms are iterables so can be easily run in a for loop. They will - stop as soon as the stop cryterion is met. - The user is required to implement the set_up, __init__, update and - and update_objective methods - - A courtesy method run is available to run n iterations. The method accepts - a callback function that receives the current iteration number and the actual objective - value and can be used to trigger print to screens and other user interactions. The run - method will stop when the stopping cryterion is met. - ''' - - def __init__(self): - '''Constructor - - Set the minimal number of parameters: - iteration: current iteration number - max_iteration: maximum number of iterations - memopt: whether to use memory optimisation () - timing: list to hold the times it took to run each iteration - update_objectice_interval: the interval every which we would save the current - objective. 1 means every iteration, 2 every 2 iteration - and so forth. 
This is by default 1 and should be increased - when evaluating the objective is computationally expensive. - ''' - self.iteration = 0 - self.__max_iteration = 0 - self.__loss = [] - self.memopt = False - self.timing = [] - self.update_objective_interval = 1 - def set_up(self, *args, **kwargs): - '''Set up the algorithm''' - raise NotImplementedError() - def update(self): - '''A single iteration of the algorithm''' - raise NotImplementedError() - - def should_stop(self): - '''default stopping cryterion: number of iterations - - The user can change this in concrete implementatition of iterative algorithms.''' - return self.max_iteration_stop_cryterion() - - def max_iteration_stop_cryterion(self): - '''default stop cryterion for iterative algorithm: max_iteration reached''' - return self.iteration >= self.max_iteration - def __iter__(self): - '''Algorithm is an iterable''' - return self - def next(self): - '''Algorithm is an iterable - - python2 backwards compatibility''' - return self.__next__() - def __next__(self): - '''Algorithm is an iterable - - calling this method triggers update and update_objective - ''' - if self.should_stop(): - raise StopIteration() - else: - time0 = time.time() - self.update() - self.timing.append( time.time() - time0 ) - if self.iteration % self.update_objective_interval == 0: - self.update_objective() - self.iteration += 1 - def get_output(self): - '''Returns the solution found''' - return self.x - def get_last_loss(self): - '''Returns the last stored value of the loss function - - if update_objective_interval is 1 it is the value of the objective at the current - iteration. If update_objective_interval > 1 it is the last stored value. - ''' - return self.__loss[-1] - def get_last_objective(self): - '''alias to get_last_loss''' - return self.get_last_loss() - def update_objective(self): - '''calculates the objective with the current solution''' - raise NotImplementedError() - @property - def loss(self): - '''returns the list of the values of the objective during the iteration - - The length of this list may be shorter than the number of iterations run when - the update_objective_interval > 1 - ''' - return self.__loss - @property - def objective(self): - '''alias of loss''' - return self.loss - @property - def max_iteration(self): - '''gets the maximum number of iterations''' - return self.__max_iteration - @max_iteration.setter - def max_iteration(self, value): - '''sets the maximum number of iterations''' - assert isinstance(value, int) - self.__max_iteration = value - @property - def update_objective_interval(self): - return self.__update_objective_interval - @update_objective_interval.setter - def update_objective_interval(self, value): - if isinstance(value, Integral): - if value >= 1: - self.__update_objective_interval = value - else: - raise ValueError('Update objective interval must be an integer >= 1') - else: - raise ValueError('Update objective interval must be an integer >= 1') - def run(self, iterations, verbose=True, callback=None): - '''run n iterations and update the user with the callback if specified''' - if self.should_stop(): - print ("Stop cryterion has been reached.") - i = 0 - for _ in self: - if verbose and self.iteration % self.update_objective_interval == 0: - print ("Iteration {}/{}, objective {}".format(self.iteration, - self.max_iteration, self.get_last_objective()) ) - else: - if callback is not None: - callback(self.iteration, self.get_last_objective()) - i += 1 - if i == iterations: - break - diff --git 
a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py deleted file mode 100644 index 7194eb8..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on Thu Feb 21 11:11:23 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -#from collections.abc import Iterable -class CGLS(Algorithm): - - '''Conjugate Gradient Least Squares algorithm - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - ''' - def __init__(self, **kwargs): - super(CGLS, self).__init__() - self.x = kwargs.get('x_init', None) - self.operator = kwargs.get('operator', None) - self.data = kwargs.get('data', None) - if self.x is not None and self.operator is not None and \ - self.data is not None: - print ("Calling from creator") - self.set_up(x_init =kwargs['x_init'], - operator=kwargs['operator'], - data =kwargs['data']) - - def set_up(self, x_init, operator , data ): - - self.r = data.copy() - self.x = x_init.copy() - - self.operator = operator - self.d = operator.adjoint(self.r) - - - self.normr2 = self.d.squared_norm() - #if isinstance(self.normr2, Iterable): - # self.normr2 = sum(self.normr2) - #self.normr2 = numpy.sqrt(self.normr2) - #print ("set_up" , self.normr2) - - def update(self): - - Ad = self.operator.direct(self.d) - #norm = (Ad*Ad).sum() - #if isinstance(norm, Iterable): - # norm = sum(norm) - norm = Ad.squared_norm() - - alpha = self.normr2/norm - self.x += (self.d * alpha) - self.r -= (Ad * alpha) - s = self.operator.adjoint(self.r) - - normr2_new = s.squared_norm() - #if isinstance(normr2_new, Iterable): - # normr2_new = sum(normr2_new) - #normr2_new = numpy.sqrt(normr2_new) - #print (normr2_new) - - beta = normr2_new/self.normr2 - self.normr2 = normr2_new - self.d = s + beta*self.d - - def update_objective(self): - self.loss.append(self.r.squared_norm()) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py deleted file mode 100644 index 445ba7a..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
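The CGLS class just removed implements the classic CGLS recurrence for min_x ||A*x - b||_2^2 on CIL operators. The same recurrence written out with a small dense matrix, so the steps in update() can be checked line by line (a sketch for illustration only, plain NumPy in place of CIL operators):

import numpy as np

A = np.array([[2., 0.], [1., 3.]])
b = np.array([1., 2.])
x = np.zeros(2)
r = b.copy()                      # residual b - A x, with x = 0
d = A.T @ r
normr2 = d @ d
for _ in range(10):
    Ad = A @ d
    alpha = normr2 / (Ad @ Ad)
    x += alpha * d
    r -= alpha * Ad
    s = A.T @ r
    normr2_new = s @ s
    beta = normr2_new / normr2
    normr2 = normr2_new
    d = s + beta * d
# x now approximates the least-squares solution of A x = b (here [0.5, 0.5])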
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on Thu Feb 21 11:09:03 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun - -class FBPD(Algorithm): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - constraint = None - data_fidelity = None - regulariser = None - def __init__(self, **kwargs): - pass - def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - - # default inputs - if constraint is None: - self.constraint = ZeroFun() - else: - self.constraint = constraint - if data_fidelity is None: - data_fidelity = ZeroFun() - else: - self.data_fidelity = data_fidelity - if regulariser is None: - self.regulariser = ZeroFun() - else: - self.regulariser = regulariser - - # algorithmic parameters - - - # step-sizes - self.tau = 2 / (self.data_fidelity.L + 2) - self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L - - self.inv_sigma = 1/self.sigma - - # initialization - self.x = x_init - self.y = operator.direct(self.x) - - - def update(self): - - # primal forward-backward step - x_old = self.x - self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) - self.x = self.constraint.prox(self.x, self.tau); - - # dual forward-backward step - self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); - self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); - - # time and criterion - self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py deleted file mode 100644 index 93ba178..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 21 11:07:30 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun -import numpy - -class FISTA(Algorithm): - '''Fast Iterative Shrinkage-Thresholding Algorithm - - Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding - algorithm for linear inverse problems. - SIAM journal on imaging sciences,2(1), pp.183-202. 
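Reading the FBPD update above in operator notation (f the constraint, g the data fidelity, h the regulariser, K the operator), each iteration performs a proximal-gradient step on the primal and, via the Moreau identity, a proximal step on h^* for the dual:

    x_{k+1} = prox_{tau*f}( x_k - tau*( grad g(x_k) + K^T y_k ) )
    y_half  = y_k + sigma*K*( 2*x_{k+1} - x_k )
    y_{k+1} = y_half - sigma*prox_{h/sigma}( y_half/sigma )  =  prox_{sigma*h^*}( y_half )

with the step sizes chosen in set_up: tau = 2/(L_g + 2) and sigma = (1/tau - L_g/2)/L_h.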
- - Parameters: - x_init: initial guess - f: data fidelity - g: regularizer - h: - opt: additional algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(FISTA, self).__init__() - self.f = None - self.g = None - self.invL = None - self.t_old = 1 - args = ['x_init', 'f', 'g', 'opt'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(kwargs['x_init'], - f=kwargs['f'], - g=kwargs['g'], - opt=kwargs['opt']) - - def set_up(self, x_init, f=None, g=None, opt=None): - - # default inputs - if f is None: - self.f = ZeroFun() - else: - self.f = f - if g is None: - g = ZeroFun() - self.g = g - else: - self.g = g - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'memopt':False} - - self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - self.memopt = memopt - - # initialization - if memopt: - self.y = x_init.clone() - self.x_old = x_init.clone() - self.x = x_init.clone() - self.u = x_init.clone() - else: - self.x_old = x_init.copy() - self.y = x_init.copy() - - #timing = numpy.zeros(max_iter) - #criter = numpy.zeros(max_iter) - - - self.invL = 1/f.L - - self.t_old = 1 - - def update(self): - # algorithm loop - #for it in range(0, max_iter): - - if self.memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - self.f.gradient(self.y, out=self.u) - self.u.__imul__( -self.invL ) - self.u.__iadd__( self.y ) - # x = g.prox(u,invL) - self.g.proximal(self.u, self.invL, out=self.x) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - self.x.subtract(self.x_old, out=self.y) - self.y.__imul__ ((self.t_old-1)/self.t) - self.y.__iadd__( self.x ) - - self.x_old.fill(self.x) - self.t_old = self.t - - - else: - u = self.y - self.invL*self.f.grad(self.y) - - self.x = self.g.prox(u,self.invL) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) - - self.x_old = self.x.copy() - self.t_old = self.t - - def update_objective(self): - self.loss.append( self.f(self.x) + self.g(self.x) ) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py deleted file mode 100644 index f1e4132..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
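Both branches of the FISTA update above (with and without memopt) implement the same accelerated proximal-gradient iteration, with L = f.L the Lipschitz constant of grad f (invL = 1/L in the code):

    x_k     = prox_{(1/L)*g}( y_k - (1/L)*grad f(y_k) )
    t_{k+1} = ( 1 + sqrt(1 + 4*t_k^2) ) / 2
    y_{k+1} = x_k + ((t_k - 1)/t_{k+1}) * (x_k - x_{k-1})

which is the scheme of Beck and Teboulle cited in the docstring.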
-""" -Created on Thu Feb 21 11:05:09 2019 - -@author: ofn77899 -""" -from ccpi.optimisation.algorithms import Algorithm - -class GradientDescent(Algorithm): - '''Implementation of Gradient Descent algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(GradientDescent, self).__init__() - self.x = None - self.rate = 0 - self.objective_function = None - self.regulariser = None - args = ['x_init', 'objective_function', 'rate'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(x_init=kwargs['x_init'], - objective_function=kwargs['objective_function'], - rate=kwargs['rate']) - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def set_up(self, x_init, objective_function, rate): - '''initialisation of the algorithm''' - self.x = x_init.copy() - self.objective_function = objective_function - self.rate = rate - self.loss.append(objective_function(x_init)) - self.iteration = 0 - try: - self.memopt = self.objective_function.memopt - except AttributeError as ae: - self.memopt = False - if self.memopt: - self.x_update = x_init.copy() - - def update(self): - '''Single iteration''' - if self.memopt: - self.objective_function.gradient(self.x, out=self.x_update) - self.x_update *= -self.rate - self.x += self.x_update - else: - self.x += -self.rate * self.objective_function.gradient(self.x) - - def update_objective(self): - self.loss.append(self.objective_function(self.x)) - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py deleted file mode 100644 index d0e27ae..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 4 16:18:06 2019 - -@author: evangelos -""" -from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData -import numpy as np -import matplotlib.pyplot as plt -import time -from ccpi.optimisation.operators import BlockOperator -from ccpi.framework import BlockDataContainer - - -import matplotlib.pyplot as plt - -class PDHG(Algorithm): - '''Primal Dual Hybrid Gradient''' - - def __init__(self, **kwargs): - super(PDHG, self).__init__() - self.f = kwargs.get('f', None) - self.operator = kwargs.get('operator', None) - self.g = kwargs.get('g', None) - self.tau = kwargs.get('tau', None) - self.sigma = kwargs.get('sigma', None) - - if self.f is not None and self.operator is not None and \ - self.g is not None: - print ("Calling from creator") - self.set_up(self.f, - self.operator, - self.g, - self.tau, - self.sigma) - - def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs): - # algorithmic parameters - - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - - - self.x_old = self.operator.domain_geometry().allocate() - self.y_old = self.operator.range_geometry().allocate() - - self.xbar = self.x_old.copy() - #x_tmp = x_old - self.x = self.x_old.copy() - self.y = self.y_old.copy() - #y_tmp = y_old - #y = y_tmp - - # relaxation parameter - self.theta = 1 - - def update(self): - # Gradient descent, Dual problem solution - self.y_old += self.sigma * self.operator.direct(self.xbar) - self.y = 
self.f.proximal_conjugate(self.y_old, self.sigma) - - # Gradient ascent, Primal problem solution - self.x_old -= self.tau * self.operator.adjoint(self.y) - self.x = self.g.proximal(self.x_old, self.tau) - - #Update - #xbar = x + theta * (x - x_old) - self.xbar.fill(self.x) - self.xbar -= self.x_old - self.xbar *= self.theta - self.xbar += self.x - -# self.x_old.fill(self.x) -# self.y_old.fill(self.y) - self.y_old = self.y.copy() - self.x_old = self.x.copy() - #self.y = self.y_old - - def update_objective(self): - self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x), - -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) - ]) - - - -def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ - 'memopt': False} - - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - - niter = opt['niter'] if 'niter' in opt.keys() else 1000 - tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False - stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - - - x_old = operator.domain_geometry().allocate() - y_old = operator.range_geometry().allocate() - - - xbar = x_old - x_tmp = x_old - x = x_old - - y_tmp = y_old - y = y_tmp - - # relaxation parameter - theta = 1 - - t = time.time() - - objective = [] - - - for i in range(niter): - - # Gradient descent, Dual problem solution - y_tmp = y_old + sigma * operator.direct(xbar) - y = f.proximal_conjugate(y_tmp, sigma) - - # Gradient ascent, Primal problem solution - x_tmp = x_old - tau * operator.adjoint(y) - x = g.proximal(x_tmp, tau) - - #Update - xbar = x + theta * (x - x_old) - - x_old = x - y_old = y - - if i%100==0: - - primal = f(operator.direct(x)) + g(x) - dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) - print( i, primal, dual, primal-dual) - -# plt.imshow(x.as_array()) -# plt.show() -# print(f(operator.direct(x)) + g(x), i) - - t_end = time.time() - - return x, t_end - t, objective - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py deleted file mode 100644 index f562973..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
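The PDHG iteration deleted here (written out most explicitly in PDHG_old) is, in operator notation, with relaxation theta = 1 and the step-size condition sigma*tau*||K||^2 < 1 checked in set_up:

    y_{n+1}    = prox_{sigma*f^*}( y_n + sigma*K*xbar_n )
    x_{n+1}    = prox_{tau*g}( x_n - tau*K^T y_{n+1} )
    xbar_{n+1} = x_{n+1} + theta*( x_{n+1} - x_n )

update_objective records the primal value f(K*x) + g(x) and the dual value -( f^*(y) + g^*(-K^T y) ), so the duality gap can be monitored.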
-""" -Created on Thu Feb 21 11:03:13 2019 - -@author: ofn77899 -""" - -from .Algorithm import Algorithm -from .CGLS import CGLS -from .GradientDescent import GradientDescent -from .FISTA import FISTA -from .FBPD import FBPD -from .PDHG import PDHG -from .PDHG import PDHG_old - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py b/Wrappers/Python/build/lib/ccpi/optimisation/algs.py deleted file mode 100644 index 6b6ae2c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy -import time - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ZeroFun -from ccpi.framework import ImageData -from ccpi.framework import AcquisitionData -from ccpi.optimisation.spdhg import spdhg -from ccpi.optimisation.spdhg import KullbackLeibler -from ccpi.optimisation.spdhg import KullbackLeiblerConvexConjugate - -def FISTA(x_init, f=None, g=None, opt=None): - '''Fast Iterative Shrinkage-Thresholding Algorithm - - Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding - algorithm for linear inverse problems. - SIAM journal on imaging sciences,2(1), pp.183-202. 
- - Parameters: - x_init: initial guess - f: data fidelity - g: regularizer - h: - opt: additional algorithm - ''' - # default inputs - if f is None: f = ZeroFun() - if g is None: g = ZeroFun() - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} - - max_iter = opt['iter'] if 'iter' in opt.keys() else 1000 - tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - - - # initialization - if memopt: - y = x_init.clone() - x_old = x_init.clone() - x = x_init.clone() - u = x_init.clone() - else: - x_old = x_init - y = x_init; - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - invL = 1/f.L - - t_old = 1 - - c = f(x_init) + g(x_init) - - # algorithm loop - for it in range(0, max_iter): - - time0 = time.time() - if memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - f.gradient(y, out=u) - u.__imul__( -invL ) - u.__iadd__( y ) - # x = g.prox(u,invL) - g.proximal(u, invL, out=x) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - x.subtract(x_old, out=y) - y.__imul__ ((t_old-1)/t) - y.__iadd__( x ) - - x_old.fill(x) - t_old = t - - - else: - u = y - invL*f.grad(y) - - x = g.prox(u,invL) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - y = x + (t_old-1)/t*(x-x_old) - - x_old = x.copy() - t_old = t - - # time and criterion - timing[it] = time.time() - time0 - criter[it] = f(x) + g(x); - - # stopping rule - #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: - # break - - #print(it, 'out of', 10, 'iterations', end='\r'); - - #criter = criter[0:it+1]; - timing = numpy.cumsum(timing[0:it+1]); - - return x, it, timing, criter - -def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - # default inputs - if constraint is None: constraint = ZeroFun() - if data_fidelity is None: data_fidelity = ZeroFun() - if regulariser is None: regulariser = ZeroFun() - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - memopt = opt['memopts'] if 'memopts' in opt.keys() else False - - # step-sizes - tau = 2 / (data_fidelity.L + 2) - sigma = (1/tau - data_fidelity.L/2) / regulariser.L - inv_sigma = 1/sigma - - # initialization - x = x_init - y = operator.direct(x); - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - - - - # algorithm loop - for it in range(0, max_iter): - - t = time.time() - - # primal forward-backward step - x_old = x; - x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) ); - x = constraint.prox(x, tau); - - # dual forward-backward step - y = y + sigma * operator.direct(2*x - x_old); - y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma); - - # time and criterion - timing[it] = time.time() - t - criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x)) - - # stopping rule - #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: - # break - - criter = criter[0:it+1] - timing = numpy.cumsum(timing[0:it+1]) - - return x, it, timing, criter - -def CGLS(x_init, operator , data , opt=None): - '''Conjugate Gradient Least Squares algorithm 
- - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - opt: additional algorithm - ''' - - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - - r = data.copy() - x = x_init.copy() - - d = operator.adjoint(r) - - normr2 = (d**2).sum() - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - # algorithm loop - for it in range(0, max_iter): - - t = time.time() - - Ad = operator.direct(d) - alpha = normr2/( (Ad**2).sum() ) - x = x + alpha*d - r = r - alpha*Ad - s = operator.adjoint(r) - - normr2_new = (s**2).sum() - beta = normr2_new/normr2 - normr2 = normr2_new - d = s + beta*d - - # time and criterion - timing[it] = time.time() - t - criter[it] = (r**2).sum() - - return x, it, timing, criter - -def SIRT(x_init, operator , data , opt=None, constraint=None): - '''Simultaneous Iterative Reconstruction Technique - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - opt: additional algorithm - constraint: func of Indicator type specifying convex constraint. - ''' - - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - - # Set default constraint to unconstrained - if constraint==None: - constraint = Function() - - x = x_init.clone() - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0 - relax_par = 1.0 - - # Set up scaling matrices D and M. - im1 = ImageData(geometry=x_init.geometry) - im1.array[:] = 1.0 - M = 1/operator.direct(im1) - del im1 - aq1 = AcquisitionData(geometry=M.geometry) - aq1.array[:] = 1.0 - D = 1/operator.adjoint(aq1) - del aq1 - - # algorithm loop - for it in range(0, max_iter): - t = time.time() - r = data - operator.direct(x) - - x = constraint.prox(x + relax_par * (D*operator.adjoint(M*r)),None) - - timing[it] = time.time() - t - if it > 0: - criter[it-1] = (r**2).sum() - - r = data - operator.direct(x) - criter[it] = (r**2).sum() - return x, it, timing, criter - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py deleted file mode 100644 index efc465c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py +++ /dev/null @@ -1,272 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
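The SIRT routine above is a diagonally preconditioned, projected Landweber-type iteration: M and D hold the reciprocals of the row and column sums of A (M = 1/(A*1), D = 1/(A^T*1)), relax_par = 1, and prox_C is the optional constraint, giving

    x_{k+1} = prox_C( x_k + relax_par * D * A^T( M * (b - A*x_k) ) )

which matches the update inside the algorithm loop.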
- -from ccpi.optimisation.ops import Identity, FiniteDiff2D -import numpy -from ccpi.framework import DataContainer -import warnings -from ccpi.optimisation.functions import Function -def isSizeCorrect(data1 ,data2): - if issubclass(type(data1), DataContainer) and \ - issubclass(type(data2), DataContainer): - # check dimensionality - if data1.check_dimensions(data2): - return True - elif issubclass(type(data1) , numpy.ndarray) and \ - issubclass(type(data2) , numpy.ndarray): - return data1.shape == data2.shape - else: - raise ValueError("{0}: getting two incompatible types: {1} {2}"\ - .format('Function', type(data1), type(data2))) - return False -class Norm2(Function): - - def __init__(self, - gamma=1.0, - direction=None): - super(Norm2, self).__init__() - self.gamma = gamma; - self.direction = direction; - - def __call__(self, x, out=None): - - if out is None: - xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, - keepdims=True)) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - arr = out.as_array() - numpy.square(x.as_array(), out=arr) - xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - p = numpy.sum(self.gamma*xx) - - return p - - def prox(self, x, tau): - - xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - p = x.as_array() * xx - - return type(x)(p,geometry=x.geometry) - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x,tau) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - numpy.square(x.as_array(), out = out.as_array()) - xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out.as_array()) - - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - -class TV2D(Norm2): - - def __init__(self, gamma): - super(TV2D,self).__init__(gamma, 0) - self.op = FiniteDiff2D() - self.L = self.op.get_max_sing_val() - - -# Define a class for squared 2-norm -class Norm2sq(Function): - ''' - f(x) = c*||A*x-b||_2^2 - - which has - - grad[f](x) = 2*c*A^T*(A*x-b) - - and Lipschitz constant - - L = 2*c*||A||_2^2 = 2*s1(A)^2 - - where s1(A) is the largest singular value of A. - - ''' - - def __init__(self,A,b,c=1.0,memopt=False): - super(Norm2sq, self).__init__() - - self.A = A # Should be an operator, default identity - self.b = b # Default zero DataSet? - self.c = c # Default 1. 
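Norm2 above is the building block used by TV2D: it sums, with weight gamma, the pointwise 2-norm taken along the axis given by direction. Its prox is therefore block (group) soft-thresholding, which is exactly the shrinkage factor computed in prox:

    f(x)              = gamma * sum_j ||x_j||_2                      (2-norm over `direction`, sum over the rest)
    prox_{tau*f}(x)_j = x_j * max(0, 1 - tau*gamma/||x_j||_2)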
- if memopt: - try: - self.range_tmp = A.range_geometry().allocate() - self.domain_tmp = A.domain_geometry().allocate() - self.memopt = True - except NameError as ne: - warnings.warn(str(ne)) - self.memopt = False - except NotImplementedError as nie: - print (nie) - warnings.warn(str(nie)) - self.memopt = False - else: - self.memopt = False - - # Compute the Lipschitz parameter from the operator if possible - # Leave it initialised to None otherwise - try: - self.L = 2.0*self.c*(self.A.norm()**2) - except AttributeError as ae: - pass - except NotImplementedError as noe: - pass - - #def grad(self,x): - # return self.gradient(x, out=None) - - def __call__(self,x): - #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - #if out is None: - # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) - #else: - y = self.A.direct(x) - y.__isub__(self.b) - #y.__imul__(y) - #return y.sum() * self.c - try: - return y.squared_norm() * self.c - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) * self.c - - def gradient(self, x, out = None): - if self.memopt: - #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - - self.A.direct(x, out=self.range_tmp) - self.range_tmp -= self.b - self.A.adjoint(self.range_tmp, out=out) - #self.direct_placehold.multiply(2.0*self.c, out=out) - out *= (self.c * 2.0) - else: - return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - - -# Box constraints indicator function. Calling returns 0 if argument is within -# the box. The prox operator is projection onto the box. Only implements one -# scalar lower and one upper as constraint on all elements. Should generalise -# to vectors to allow different constraints one elements. -class IndicatorBox(Function): - - def __init__(self,lower=-numpy.inf,upper=numpy.inf): - # Do nothing - super(IndicatorBox, self).__init__() - self.lower = lower - self.upper = upper - - - def __call__(self,x): - - if (numpy.all(x.array>=self.lower) and - numpy.all(x.array <= self.upper) ): - val = 0 - else: - val = numpy.inf - return val - - def prox(self,x,tau=None): - return (x.maximum(self.lower)).minimum(self.upper) - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - #(x.abs() - tau*self.gamma).maximum(0) * x.sign() - x.abs(out = out) - out.__isub__(tau*self.gamma) - out.maximum(0, out=out) - if self.sign_x is None or not x.shape == self.sign_x.shape: - self.sign_x = x.sign() - else: - x.sign(out=self.sign_x) - - out.__imul__( self.sign_x ) - -# A more interesting example, least squares plus 1-norm minimization. 
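As the comment above says, the prox of the box indicator is the projection onto [lower, upper], i.e. elementwise clipping. A tiny NumPy sketch of what IndicatorBox.prox computes:

import numpy as np

lower, upper = 0.0, 1.0
x = np.array([-0.5, 0.3, 2.0])
projected = np.minimum(np.maximum(x, lower), upper)            # -> [0.0, 0.3, 1.0]
# the indicator itself is 0 for feasible points and +inf otherwise
value = 0 if np.all((x >= lower) & (x <= upper)) else np.inf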
-# Define class to represent 1-norm including prox function -class Norm1(Function): - - def __init__(self,gamma): - super(Norm1, self).__init__() - self.gamma = gamma - self.L = 1 - self.sign_x = None - - def __call__(self,x,out=None): - if out is None: - return self.gamma*(x.abs().sum()) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - x.abs(out=out) - return out.sum() * self.gamma - - def prox(self,x,tau): - return (x.abs() - tau*self.gamma).maximum(0) * x.sign() - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if isSizeCorrect(x,out): - # check dimensionality - if issubclass(type(out), DataContainer): - v = (x.abs() - tau*self.gamma).maximum(0) - x.sign(out=out) - out *= v - #out.fill(self.prox(x,tau)) - elif issubclass(type(out) , numpy.ndarray): - v = (x.abs() - tau*self.gamma).maximum(0) - out[:] = x.sign() - out *= v - #out[:] = self.prox(x,tau) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py deleted file mode 100644 index 81c16cd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 8 10:01:31 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import BlockDataContainer -from numbers import Number - -class BlockFunction(Function): - '''A Block vector of Functions - - .. math:: - - f = [f_1,f_2,f_3] - f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3) - - ''' - def __init__(self, *functions): - '''Creator''' - self.functions = functions - self.length = len(self.functions) - - super(BlockFunction, self).__init__() - - def __call__(self, x): - '''evaluates the BlockFunction on the BlockDataContainer - - :param: x (BlockDataContainer): must have as many rows as self.length - - returns sum(f_i(x_i)) - ''' - if self.length != x.shape[0]: - raise ValueError('BlockFunction and BlockDataContainer have incompatible size') - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - def convex_conjugate(self, x): - '''Convex_conjugate does not take into account the BlockOperator''' - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - - def proximal_conjugate(self, x, tau, out = None): - '''proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) - - def proximal(self, x, tau, out = None): - '''proximal does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) - else: - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) - - def gradient(self,x, out=None): - '''FIXME: gradient returns pass''' - pass \ No newline at end of file diff 
--git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py deleted file mode 100644 index 82f24a6..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction - -class Function(object): - '''Abstract class representing a function - - Members: - L is the Lipschitz constant of the gradient of the Function - ''' - def __init__(self): - self.L = None - - def __call__(self,x, out=None): - '''Evaluates the function at x ''' - raise NotImplementedError - - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - raise NotImplementedError - - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau''' - raise NotImplementedError - - def convex_conjugate(self, x, out=None): - '''This evaluates the convex conjugate of the function at x''' - raise NotImplementedError - - def proximal_conjugate(self, x, tau, out = None): - '''This returns the proximal operator for the convex conjugate of the function at x, tau''' - raise NotImplementedError - - def grad(self, x): - '''Alias of gradient(x,None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use gradient instead''', DeprecationWarning) - return self.gradient(x, out=None) - - def prox(self, x, tau): - '''Alias of proximal(x, tau, None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. 
Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) - - def __rmul__(self, scalar): - '''Defines the multiplication by a scalar on the left - - returns a ScaledFunction''' - return ScaledFunction(self, scalar) - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py deleted file mode 100644 index 34b7e35..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 8 09:55:36 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ScaledFunction - - -class FunctionOperatorComposition(Function): - - def __init__(self, operator, function): - super(FunctionOperatorComposition, self).__init__() - self.function = function - self.operator = operator - alpha = 1 - if isinstance (function, ScaledFunction): - alpha = function.scalar - self.L = 2 * alpha * operator.norm()**2 - - - def __call__(self, x): - - return self.function(self.operator.direct(x)) - - def call_adjoint(self, x): - - return self.function(self.operator.adjoint(x)) - - def convex_conjugate(self, x): - - ''' convex_conjugate does not take into account the Operator''' - return self.function.convex_conjugate(x) - - def proximal(self, x, tau, out=None): - - '''proximal does not take into account the Operator''' - - return self.function.proximal(x, tau, out=out) - - def proximal_conjugate(self, x, tau, out=None): - - ''' proximal conjugate does not take into account the Operator''' - - return self.function.proximal_conjugate(x, tau, out=out) - - def gradient(self, x, out=None): - - ''' Gradient takes into account the Operator''' - if out is None: - return self.operator.adjoint( - self.function.gradient(self.operator.direct(x)) - ) - else: - self.operator.adjoint( - self.function.gradient(self.operator.direct(x), - out=out) - ) - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py deleted file mode 100644 index df8dc89..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ccpi.optimisation.functions import Function -import numpy - -class IndicatorBox(Function): - '''Box constraints indicator function. - - Calling returns 0 if argument is within the box. The prox operator is projection onto the box. 
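FunctionOperatorComposition wraps F(x) = f(K*x), and its gradient method is the chain rule

    grad F(x) = K^T * grad f(K*x)

For an L_f-smooth f the general bound is L_F <= L_f * ||K||^2; the value stored by the class, L = 2*alpha*||K||^2, corresponds to L_f = 2 (as for L2NormSquared), scaled by alpha when the wrapped function is a ScaledFunction.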
- Only implements one scalar lower and one upper as constraint on all elements. Should generalise - to vectors to allow different constraints one elements. -''' - - def __init__(self,lower=-numpy.inf,upper=numpy.inf): - # Do nothing - super(IndicatorBox, self).__init__() - self.lower = lower - self.upper = upper - - - def __call__(self,x): - - if (numpy.all(x.array>=self.lower) and - numpy.all(x.array <= self.upper) ): - val = 0 - else: - val = numpy.inf - return val - - def prox(self,x,tau=None): - return (x.maximum(self.lower)).minimum(self.upper) - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - #(x.abs() - tau*self.gamma).maximum(0) * x.sign() - x.abs(out = out) - out.__isub__(tau*self.gamma) - out.maximum(0, out=out) - if self.sign_x is None or not x.shape == self.sign_x.shape: - self.sign_x = x.sign() - else: - x.sign(out=self.sign_x) - - out.__imul__( self.sign_x ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py deleted file mode 100644 index 5a47edd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -Created on Wed Mar 6 19:42:34 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - self.b = kwargs.get('b',None) - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, x) - else: - return SimpleL1Norm.__call__(self, x - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py deleted file mode 100644 index 889d703..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py +++ /dev/null @@ -1,233 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.framework import DataContainer, ImageData, ImageGeometry - -############################ L2NORM FUNCTION ############################# -class L2NormSquared(Function): - - def __init__(self, **kwargs): - - ''' L2NormSquared class - f : ImageGeometry --> R - - Cases: f(x) = ||x||^{2}_{2} - f(x) = || x - b ||^{2}_{2} - - ''' - - #TODO need x, b to live in the same geometry if b is not None - - super(L2NormSquared, self).__init__() - self.b = kwargs.get('b',None) - - def __call__(self, x): - ''' Evaluates L2NormSq at point x''' - - y = x - if self.b is not None: -# x.subtract(self.b, out = x) - y = x - self.b -# else: -# y -# if out is None: -# return x.squared_norm() -# else: - try: - return y.squared_norm() - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) - - - - def gradient(self, x, out=None): - ''' Evaluates gradient of L2NormSq at point x''' - if out is not None: - out.fill(x) - if self.b is not None: - out -= self.b - out *= 2 - else: - y = x - if self.b is not None: -# x.subtract(self.b, out=x) - y = x - self.b - return 2*y - - - def convex_conjugate(self, x, out=None): - ''' Evaluate convex conjugate of L2NormSq''' - - tmp = 0 - if self.b is not None: -# tmp = (self.b * x).sum() - tmp = (x * self.b).sum() - - if out is None: - # FIXME: this is a number - return (1./4.) * x.squared_norm() + tmp - else: - # FIXME: this is a DataContainer - out.fill((1./4.) * x.squared_norm() + tmp) - - - def proximal(self, x, tau, out = None): - - ''' The proximal operator ( prox_\{tau * f\}(x) ) evaluates i.e., - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if out is None: - if self.b is not None: - return (x - self.b)/(1+2*tau) + self.b - else: - return x/(1+2*tau) - else: - out.fill(x) - if self.b is not None: - out -= self.b - out /= (1+2*tau) - if self.b is not None: - out += self.b - #out.fill((x - self.b)/(1+2*tau) + self.b) - #else: - # out.fill(x/(1+2*tau)) - - - def proximal_conjugate(self, x, tau, out=None): - - if out is None: - if self.b is not None: - # change the order cannot add ImageData + NestedBlock - return (-1* tau*self.b + x)/(1 + tau/2) - else: - return x/(1 + tau/2 ) - else: - if self.b is not None: - out.fill((x - tau*self.b)/(1 + tau/2)) - else: - out.fill(x/(1 + tau/2 )) - - def __rmul__(self, scalar): - return ScaledFunction(self, scalar) - - -if __name__ == '__main__': - - - # TESTS for L2 and scalar * L2 - - M, N, K = 2,3,5 - ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) - u = ig.allocate('random_int') - b = ig.allocate('random_int') - - # check grad/call no data - f = L2NormSquared() - a1 = f.gradient(u) - a2 = 2 * u - numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) - numpy.testing.assert_equal(f(u), u.squared_norm()) - - # check grad/call with data - f1 = L2NormSquared(b=b) - b1 = f1.gradient(u) - b2 = 2 * (u-b) - - numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) - numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) - - #check convex conjuagate no data - c1 = f.convex_conjugate(u) - c2 = 1/4 * u.squared_norm() - numpy.testing.assert_equal(c1, c2) - - #check convex conjuagate with data - d1 = f1.convex_conjugate(u) - d2 = (1/4) * u.squared_norm() + (u*b).sum() - numpy.testing.assert_equal(d1, d2) - - # check proximal no data - tau = 5 - e1 = f.proximal(u, tau) - e2 = u/(1+2*tau) - 
numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) - - # check proximal with data - tau = 5 - h1 = f1.proximal(u, tau) - h2 = (u-b)/(1+2*tau) + b - numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) - - # check proximal conjugate no data - tau = 0.2 - k1 = f.proximal_conjugate(u, tau) - k2 = u/(1 + tau/2 ) - numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) - - # check proximal conjugate with data - l1 = f1.proximal_conjugate(u, tau) - l2 = (u - tau * b)/(1 + tau/2 ) - numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) - - - # check scaled function properties - - # scalar - scalar = 100 - f_scaled_no_data = scalar * L2NormSquared() - f_scaled_data = scalar * L2NormSquared(b=b) - - # call - numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) - numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) - - # grad - numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) - numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) - - # conj - numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ - f.convex_conjugate(u/scalar) * scalar, decimal=4) - - numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ - scalar * f1.convex_conjugate(u/scalar), decimal=4) - - # proximal - numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ - f.proximal(u, tau*scalar).as_array()) - - - numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ - f1.proximal(u, tau*scalar).as_array()) - - - # proximal conjugate - numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \ - (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) - - numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ - ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py deleted file mode 100644 index 1c51236..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
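Collecting the formulas implemented (and unit-tested in the __main__ block) by L2NormSquared, with f(x) = ||x - b||_2^2 and b = 0 when absent:

    grad f(x)         = 2*(x - b)
    f^*(y)            = (1/4)*||y||_2^2 + <y, b>
    prox_{tau*f}(x)   = (x - b)/(1 + 2*tau) + b
    prox_{tau*f^*}(y) = (y - tau*b)/(1 + tau/2)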
- -import numpy as np -from ccpi.optimisation.functions import Function, ScaledFunction -from ccpi.framework import DataContainer, ImageData, \ - ImageGeometry, BlockDataContainer - -############################ mixed_L1,2NORM FUNCTIONS ##################### -class MixedL21Norm(Function): - - def __init__(self, **kwargs): - - super(MixedL21Norm, self).__init__() - self.SymTensor = kwargs.get('SymTensor',False) - - def __call__(self, x, out=None): - - ''' Evaluates L1,2Norm at point x - - :param: x is a BlockDataContainer - - ''' - if self.SymTensor: - - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - res = sum(tmp).sqrt().sum() - else: - -# tmp = [ x[i]**2 for i in range(x.shape[0])] - tmp = [ el**2 for el in x.containers ] - -# print(x.containers) -# print(tmp) -# print(type(sum(tmp))) -# print(type(tmp)) - res = sum(tmp).sqrt().sum() -# print(res) - return res - - def gradient(self, x, out=None): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - - ''' This is the Indicator function of ||\cdot||_{2, \infty} - which is either 0 if ||x||_{2, \infty} or \infty - ''' - return 0.0 - - def proximal(self, x, tau, out=None): - - ''' - For this we need to define a MixedL2,2 norm acting on BDC, - different form L2NormSquared which acts on DC - - ''' - - pass - - def proximal_conjugate(self, x, tau, out=None): - - if self.SymTensor: - - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - -# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - - def __rmul__(self, scalar): - return ScaledFunction(self, scalar) - -#class MixedL21Norm_tensor(Function): -# -# def __init__(self): -# print("feerf") -# -# -if __name__ == '__main__': - - M, N, K = 2,3,5 - ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) - u1 = ig.allocate('random_int') - u2 = ig.allocate('random_int') - - U = BlockDataContainer(u1, u2, shape=(2,1)) - - # Define no scale and scaled - f_no_scaled = MixedL21Norm() - f_scaled = 0.5 * MixedL21Norm() - - # call - - a1 = f_no_scaled(U) - a2 = f_scaled(U) - - z = f_no_scaled.proximal_conjugate(U, 1) - - f_no_scaled = MixedL21Norm() - - tmp = [el*el for el in U] - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py deleted file mode 100644 index b553d7c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
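For the (non-SymTensor) MixedL21Norm above, F(u) = sum over pixels of sqrt(sum_i u_i^2) evaluated on a BlockDataContainer; its convex conjugate is the indicator of the dual unit ball {||u||_{2,infty} <= 1}, so proximal_conjugate reduces to a pointwise projection that does not depend on tau:

    prox_{tau*F^*}(u)_i = u_i / max(1, sqrt(sum_j u_j^2))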
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ccpi.optimisation.functions import Function -import numpy -import warnings - -# Define a class for squared 2-norm -class Norm2sq(Function): - ''' - f(x) = c*||A*x-b||_2^2 - - which has - - grad[f](x) = 2*c*A^T*(A*x-b) - - and Lipschitz constant - - L = 2*c*||A||_2^2 = 2*s1(A)^2 - - where s1(A) is the largest singular value of A. - - ''' - - def __init__(self,A,b,c=1.0,memopt=False): - super(Norm2sq, self).__init__() - - self.A = A # Should be an operator, default identity - self.b = b # Default zero DataSet? - self.c = c # Default 1. - if memopt: - try: - self.range_tmp = A.range_geometry().allocate() - self.domain_tmp = A.domain_geometry().allocate() - self.memopt = True - except NameError as ne: - warnings.warn(str(ne)) - self.memopt = False - except NotImplementedError as nie: - print (nie) - warnings.warn(str(nie)) - self.memopt = False - else: - self.memopt = False - - # Compute the Lipschitz parameter from the operator if possible - # Leave it initialised to None otherwise - try: - self.L = 2.0*self.c*(self.A.norm()**2) - except AttributeError as ae: - pass - except NotImplementedError as noe: - pass - - #def grad(self,x): - # return self.gradient(x, out=None) - - def __call__(self,x): - #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - #if out is None: - # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) - #else: - y = self.A.direct(x) - y.__isub__(self.b) - #y.__imul__(y) - #return y.sum() * self.c - try: - return y.squared_norm() * self.c - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) * self.c - - def gradient(self, x, out = None): - if self.memopt: - #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - - self.A.direct(x, out=self.range_tmp) - self.range_tmp -= self.b - self.A.adjoint(self.range_tmp, out=out) - #self.direct_placehold.multiply(2.0*self.c, out=out) - out *= (self.c * 2.0) - else: - return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py deleted file mode 100644 index 046a4a6..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from numbers import Number -import numpy - -class ScaledFunction(object): - '''ScaledFunction - - A class to represent the scalar multiplication of an Function with a scalar. - It holds a function and a scalar. Basically it returns the multiplication - of the product of the function __call__, convex_conjugate and gradient with the scalar. - For the rest it behaves like the function it holds. - - Args: - function (Function): a Function or BlockOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - - ''' - def __init__(self, function, scalar): - super(ScaledFunction, self).__init__() - self.L = None - if not isinstance (scalar, Number): - raise TypeError('expected scalar: got {}'.format(type(scalar))) - self.scalar = scalar - self.function = function - - def __call__(self,x, out=None): - '''Evaluates the function at x ''' - return self.scalar * self.function(x) - - def convex_conjugate(self, x): - '''returns the convex_conjugate of the scaled function ''' - # if out is None: - # return self.scalar * self.function.convex_conjugate(x/self.scalar) - # else: - # out.fill(self.function.convex_conjugate(x/self.scalar)) - # out *= self.scalar - return self.scalar * self.function.convex_conjugate(x/self.scalar) - - def proximal_conjugate(self, x, tau, out = None): - '''This returns the proximal operator for the function at x, tau - ''' - if out is None: - return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) - else: - out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) - - def grad(self, x): - '''Alias of gradient(x,None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use gradient instead''', DeprecationWarning) - return self.gradient(x, out=None) - - def prox(self, x, tau): - '''Alias of proximal(x, tau, None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) - - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - if out is None: - return self.scalar * self.function.gradient(x) - else: - out.fill( self.scalar * self.function.gradient(x) ) - - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau - ''' - if out is None: - return self.function.proximal(x, tau*self.scalar) - else: - out.fill( self.function.proximal(x, tau*self.scalar) ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py deleted file mode 100644 index 88d9b64..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
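# Illustration only (not part of the patch): a 1-D numerical check of the rule
# used by ScaledFunction.proximal above, prox_{tau*(c*f)}(x) = f.proximal(x, tau*c),
# for f = |.| whose prox is soft-thresholding.  Pure NumPy; every name below is
# invented for this sketch.
import numpy as np

def soft_threshold(v, t):
    # closed-form prox of t*|.| at a scalar v
    return np.sign(v) * max(abs(v) - t, 0.0)

x, tau, c = 1.3, 0.4, 2.0
grid = np.linspace(-3.0, 3.0, 200001)
# prox objective of the scaled function c*|.| with step tau
objective = 0.5 * (grid - x) ** 2 + tau * c * np.abs(grid)
numerical = grid[np.argmin(objective)]

assert abs(numerical - soft_threshold(x, tau * c)) < 1e-3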
-# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData -from ccpi.framework import BlockDataContainer - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - - if x.shape[0]==1: - return x.maximum(0).sum() - else: - if isinstance(x, BlockDataContainer): - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - else: - return x.maximum(0).sum() + x.maximum(0).sum() - - def proximal(self,x,tau, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def proximal_conjugate(self, x, tau): - return 0 - - def domain_geometry(self): - pass - def range_geometry(self): - pass \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py deleted file mode 100644 index 2ed36f5..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -from .Function import Function -from .ZeroFun import ZeroFun -from .L1Norm import SimpleL1Norm, L1Norm -#from .L2NormSquared import L2NormSq, SimpleL2NormSq -from .L2NormSquared import L2NormSquared -from .BlockFunction import BlockFunction -from .ScaledFunction import ScaledFunction -from .FunctionOperatorComposition import FunctionOperatorComposition -from .MixedL21Norm import MixedL21Norm -from .IndicatorBox import IndicatorBox -from .Norm2Sq import Norm2sq diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py deleted file mode 100644 index 8632920..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py +++ /dev/null @@ -1,312 +0,0 @@ -# -*- coding: utf-8 -*- - -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry -from operators import CompositeDataContainer, Identity, CompositeOperator -from numbers import Number - - -############################ L2NORM FUNCTIONS ############################# -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): - - super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.power(2).sum() - - def gradient(self,x): - return 2 * self.alpha * x - - def convex_conjugate(self,x): - return (1/4*self.alpha) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) - - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/2*self.alpha ) - - -class L2NormSq(SimpleL2NormSq): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L2NormSq, self).__init__(alpha=alpha) - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL2NormSq.__call__(self, self.A.direct(x)) - else: - return SimpleL2NormSq.__call__(self, self.A.direct(x) - 
self.b) - - def convex_conjugate(self, x): - - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def gradient(self, x): - - if self.b is None: - return 2*self.alpha * self.A.adjoint(self.A.direct(x)) - else: - return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b) - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - - - def proximal_conjugate(self, x, tau): - - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, self.A.direct(x)) - else: - return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, A, b=None, alpha=1, **kwargs): - - super(mixed_L12Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - self.sym_grad = kwargs.get('sym_grad',False) - - - - def gradient(self,x): - return ValueError('Not Differentiable') - - - def __call__(self,x): - - y = self.A.direct(x) - eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt() - eucl_norm.__isub__(self.b) - return eucl_norm.sum() * self.alpha - - def convex_conjugate(self,x): - return 0 - - def proximal_conjugate(self, x, tau): - - if self.b is None: - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = 
x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - else: - res = (x - tau*self.b)/ ((x - tau*self.b)).abs().maximum(1.0) - - return res - - -#%% - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - return x.maximum(0).sum() - - def proximal(self,x,tau): - return x.copy() - - def proximal_conjugate(self, x, tau): - return 0 - - -class CompositeFunction(Function): - - def __init__(self, *args): - self.functions = args - self.length = len(self.functions) - - def get_item(self, ind): - return self.functions[ind] - - def __call__(self,x): - - t = 0 - for i in range(self.length): - for j in range(x.shape[0]): - t +=self.functions[i](x.get_item(j)) - return t - - def convex_conjugate(self, x): - - z = 0 - t = 0 - for i in range(x.shape[0]): - t += self.functions[z].convex_conjugate(x.get_item(i)) - z += 1 - - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - - -if __name__ == '__main__': - - N = 3 - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - - # Form Composite Operator - operator = CompositeOperator((2,1), op1, op2 ) - - # Create functions - alpha = 1 - noisy_data = ImageData(np.random.randint(10, size=ag)) - f = CompositeFunction(L1Norm(op1,alpha), \ - L2NormSq(op2, noisy_data, c = 0.5, memopt = False) ) - - u = ImageData(np.random.randint(10, size=ig)) - uComp = CompositeDataContainer(u) - - print(f(uComp)) # This is f(Kx) = f1(K1*u) + f2(K2*u) - - f1 = L1Norm(op1,alpha) - f2 = L2NormSq(op2, noisy_data, c = 0.5, memopt = False) - - print(f1(u) + f2(u)) - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py deleted file mode 100644 index ffeb32e..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:43:12 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, alpha, **kwargs): - - super(mixed_L12Norm, self).__init__() - - self.alpha = alpha - self.b = kwargs.get('b',None) - self.sym_grad = kwargs.get('sym_grad',False) - - def __call__(self,x): - - if self.b is None: - tmp1 = x - else: - tmp1 = x - self.b -# - if self.sym_grad: - tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) - else: - tmp = 
ImageData(tmp1.power(2).sum(axis=0)).sqrt() - - return self.alpha*tmp.sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - pass - - def proximal_conjugate(self, x, tau): - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - return res diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py deleted file mode 100644 index ee8f609..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" -#from ccpi.optimisation.ops import Operator -import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer -from ccpi.optimisation.operators import Operator, LinearOperator -from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator -from ccpi.framework import BlockGeometry - -class BlockOperator(Operator): - '''A Block matrix containing Operators - - The Block Framework is a generic strategy to treat variational problems in the - following form: - - .. math:: - - min Regulariser + Fidelity - - - BlockOperators have a generic shape M x N, and when applied on an - Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. - Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with - N rows and 1 column. - - User may specify the shape of the block, by default is a row vector - - Operators in a Block are required to have the same domain column-wise and the - same range row-wise. - ''' - __array_priority__ = 1 - def __init__(self, *args, **kwargs): - ''' - Class creator - - Note: - Do not include the `self` parameter in the ``Args`` section. - - Args: - :param: vararg (Operator): Operators in the block. - :param: shape (:obj:`tuple`, optional): If shape is passed the Operators in - vararg are considered input in a row-by-row fashion. - Shape and number of Operators must match. 
- - Example: - BlockOperator(op0,op1) results in a row block - BlockOperator(op0,op1,shape=(1,2)) results in a column block - ''' - self.operators = args - shape = kwargs.get('shape', None) - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) - # test if operators are compatible - if not self.column_wise_compatible(): - raise ValueError('Operators in each column must have the same domain') - if not self.row_wise_compatible(): - raise ValueError('Operators in each row must have the same range') - - def column_wise_compatible(self): - '''Operators in a Block should have the same domain per column''' - rows, cols = self.shape - compatible = True - for col in range(cols): - column_compatible = True - for row in range(1,rows): - dg0 = self.get_item(row-1,col).domain_geometry() - dg1 = self.get_item(row,col).domain_geometry() - column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible - compatible = compatible and column_compatible - return compatible - - def row_wise_compatible(self): - '''Operators in a Block should have the same range per row''' - rows, cols = self.shape - compatible = True - for row in range(rows): - row_compatible = True - for col in range(1,cols): - dg0 = self.get_item(row,col-1).range_geometry() - dg1 = self.get_item(row,col).range_geometry() - row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible - compatible = compatible and row_compatible - return compatible - - def get_item(self, row, col): - '''returns the Operator at specified row and col''' - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.operators[index] - - def norm(self): - norm = [op.norm()**2 for op in self.operators] - return numpy.sqrt(sum(norm)) - - def direct(self, x, out=None): - '''Direct operation for the BlockOperator - - BlockOperator work on BlockDataContainer, but they will work on DataContainers - and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) - ''' - if not isinstance (x, BlockDataContainer): - x_b = BlockDataContainer(x) - else: - x_b = x - shape = self.get_output_shape(x_b.shape) - res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): - if col == 0: - prod = self.get_item(row,col).direct(x_b.get_item(col)) - else: - prod += self.get_item(row,col).direct(x_b.get_item(col)) - res.append(prod) - return BlockDataContainer(*res, shape=shape) - - def adjoint(self, x, out=None): - '''Adjoint operation for the BlockOperator - - BlockOperator may contain both LinearOperator and Operator - This method exists in BlockOperator as it is not known what type of - Operator it will contain. 
- - BlockOperator work on BlockDataContainer, but they will work on DataContainers - and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) - - Raises: ValueError if the contained Operators are not linear - ''' - if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): - raise ValueError('Not all operators in Block are linear.') - if not isinstance (x, BlockDataContainer): - x_b = BlockDataContainer(x) - else: - x_b = x - shape = self.get_output_shape(x_b.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row, col).adjoint(x_b.get_item(col)) - else: - prod += self.get_item(row, col).adjoint(x_b.get_item(col)) - res.append(prod) - if self.shape[1]==1: - return ImageData(*res) - else: - return BlockDataContainer(*res, shape=shape) - - def get_output_shape(self, xshape, adjoint=False): - sshape = self.shape[1] - oshape = self.shape[0] - if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - return (oshape, xshape[-1]) - - def __rmul__(self, scalar): - '''Defines the left multiplication with a scalar - - Args: scalar (number or iterable containing numbers): - - Returns: a block operator with Scaled Operators inside''' - if isinstance (scalar, list) or isinstance(scalar, tuple) or \ - isinstance(scalar, numpy.ndarray): - if len(scalar) != len(self.operators): - raise ValueError('dimensions of scalars and operators do not match') - scalars = scalar - else: - scalars = [scalar for _ in self.operators] - # create a list of ScaledOperator-s - ops = [ v * op for v,op in zip(scalars, self.operators)] - #return BlockScaledOperator(self, scalars ,shape=self.shape) - return type(self)(*ops, shape=self.shape) - @property - def T(self): - '''Return the transposed of self - - input in a row-by-row''' - newshape = (self.shape[1], self.shape[0]) - oplist = [] - for col in range(newshape[1]): - for row in range(newshape[0]): - oplist.append(self.get_item(col,row)) - return type(self)(*oplist, shape=newshape) - - def domain_geometry(self): - '''returns the domain of the BlockOperator - - If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry. - Otherwise it is a BlockGeometry. - ''' - if self.shape[1] == 1: - # column BlockOperator - return self.get_item(0,0).domain_geometry() - else: - shape = (self.shape[0], 1) - return BlockGeometry(*[el.domain_geometry() for el in self.operators], - shape=shape) - - def range_geometry(self): - '''returns the range of the BlockOperator''' - shape = (self.shape[1], 1) - return BlockGeometry(*[el.range_geometry() for el in self.operators], - shape=shape) -if __name__ == '__main__': - pass diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py deleted file mode 100644 index aeb6c53..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py +++ /dev/null @@ -1,67 +0,0 @@ -from numbers import Number -import numpy -from ccpi.optimisation.operators import ScaledOperator -import functools - -class BlockScaledOperator(ScaledOperator): - '''ScaledOperator - - A class to represent the scalar multiplication of an Operator with a scalar. - It holds an operator and a scalar. 
Basically it returns the multiplication - of the result of direct and adjoint of the operator with the scalar. - For the rest it behaves like the operator it holds. - - Args: - operator (Operator): a Operator or LinearOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - sop = ScaledOperator(operator, scalar) - sop.direct(x) = scalar * operator.direct(x) - sop.adjoint(x) = scalar * operator.adjoint(x) - sop.norm() = operator.norm() - sop.range_geometry() = operator.range_geometry() - sop.domain_geometry() = operator.domain_geometry() - ''' - def __init__(self, operator, scalar, shape=None): - if shape is None: - shape = operator.shape - - if isinstance(scalar, (list, tuple, numpy.ndarray)): - size = functools.reduce(lambda x,y:x*y, shape, 1) - if len(scalar) != size: - raise ValueError('Scalar and operators size do not match: {}!={}' - .format(len(scalar), len(operator))) - self.scalar = scalar[:] - print ("BlockScaledOperator ", self.scalar) - elif isinstance (scalar, Number): - self.scalar = scalar - else: - raise TypeError('expected scalar to be a number of an iterable: got {}'.format(type(scalar))) - self.operator = operator - self.shape = shape - def direct(self, x, out=None): - print ("BlockScaledOperator self.scalar", self.scalar) - #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array()) - return self.scalar * (self.operator.direct(x, out=out)) - def adjoint(self, x, out=None): - if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) - else: - raise TypeError('Operator is not linear') - def norm(self): - return numpy.abs(self.scalar) * self.operator.norm() - def range_geometry(self): - return self.operator.range_geometry() - def domain_geometry(self): - return self.operator.domain_geometry() - @property - def T(self): - '''Return the transposed of self''' - #print ("transpose before" , self.shape) - #shape = (self.shape[1], self.shape[0]) - ##self.shape = shape - ##self.operator.shape = shape - #print ("transpose" , shape) - #return self - return type(self)(self.operator.T, self.scalar) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py deleted file mode 100644 index 24c4e4b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ /dev/null @@ -1,322 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:51:17 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np - -class FiniteDiff(Operator): - - # Works for Neum/Symmetric & periodic boundary conditions - # TODO add central differences??? 
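# Illustration only (not part of the patch): the 1-D slicing pattern that
# FiniteDiff.direct below applies along a chosen axis, spelled out in plain
# NumPy for both boundary conditions.  Array names are invented for this sketch.
import numpy as np

x = np.array([1.0, 4.0, 2.0, 7.0])
fd = np.zeros_like(x)

np.subtract(x[1:], x[:-1], out=fd[:-1])   # interior forward differences
fd_neumann = fd.copy()                    # Neumann: last entry stays 0
fd[-1] = x[0] - x[-1]                     # Periodic: wrap around
fd_periodic = fd

print(fd_neumann)    # [ 3. -2.  5.  0.]
print(fd_periodic)   # [ 3. -2.  5. -6.]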
- # TODO not very well optimised, too many conditions - # TODO add discretisation step, should get that from imageGeometry - - # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] - # Grad_order = ['channels', 'direction_y', 'direction_x'] - # Grad_order = ['direction_z', 'direction_y', 'direction_x'] - # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] - - def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): - '''''' - super(FiniteDiff, self).__init__() - '''FIXME: domain and range should be geometries''' - self.gm_domain = gm_domain - self.gm_range = gm_range - self.direction = direction - self.bnd_cond = bnd_cond - - # Domain Geometry = Range Geometry if not stated - if self.gm_range is None: - self.gm_range = self.gm_domain - # check direction and "length" of geometry - if self.direction + 1 > len(self.gm_domain.shape): - raise ValueError('Gradient directions more than geometry domain') - - #self.voxel_size = kwargs.get('voxel_size',1) - # this wrongly assumes a homogeneous voxel size - self.voxel_size = self.gm_domain.voxel_size_x - - - def direct(self, x, out=None): - - x_asarr = x.as_array() - x_sz = len(x.shape) - - if out is None: - out = np.zeros(x.shape) - - fd_arr = out - - ######################## Direct for 2D ############################### - if x_sz == 2: - - if self.direction == 1: - - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 0: - - np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Direct for 3D ############################### - elif x_sz == 3: - - if self.direction == 0: - - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - - if self.direction == 2: - - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Direct for 4D ############################### - elif x_sz == 4: - - if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] ) - - if self.bnd_cond == 
'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - else: - raise NotImplementedError - - res = out/self.voxel_size - return type(x)(res) - - def adjoint(self, x, out=None): - - x_asarr = x.as_array() - #x_asarr = x - x_sz = len(x.shape) - - if out is None: - out = np.zeros(x.shape) - - fd_arr = out - - ######################## Adjoint for 2D ############################### - if x_sz == 2: - - if self.direction == 1: - - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) - np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] ) - - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 0: - - np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) - np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] ) - - else: - raise ValueError('No valid boundary conditions') - - ######################## Adjoint for 3D ############################### - elif x_sz == 3: - - if self.direction == 0: - - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) - np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) - np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] ) - np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Adjoint for 4D ############################### - elif x_sz == 4: - - if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) - - if 
self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) - np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) - np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] ) - np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] ) - np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) - else: - raise ValueError('No valid boundary conditions') - - else: - raise NotImplementedError - - res = out/self.voxel_size - return type(x)(-res) - - def range_geometry(self): - '''Returns the range geometry''' - return self.gm_range - - def domain_geometry(self): - '''Returns the domain geometry''' - return self.gm_domain - - def norm(self): - x0 = self.gm_domain.allocate() - x0.fill( np.random.random_sample(x0.shape) ) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) - return self.s1 - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py deleted file mode 100644 index e00de0c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:50:04 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer -import numpy -from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff - -#%% - -class Gradient(LinearOperator): - - def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): - - super(Gradient, self).__init__() - - self.gm_domain = gm_domain # Domain of Grad Operator - - self.correlation = kwargs.get('correlation','Space') - - if self.correlation=='Space': - if self.gm_domain.channels>1: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] ) - self.ind = numpy.arange(1,self.gm_domain.length) - else: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] ) - self.ind = numpy.arange(self.gm_domain.length) - elif self.correlation=='SpaceChannels': - if 
self.gm_domain.channels>1: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)]) - self.ind = range(self.gm_domain.length) - else: - raise ValueError('No channels to correlate') - - self.bnd_cond = bnd_cond - - - def direct(self, x, out=None): - - tmp = self.gm_range.allocate() - - for i in range(tmp.shape[0]): - tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) - return tmp - - def adjoint(self, x, out=None): - - tmp = self.gm_domain.allocate() - for i in range(x.shape[0]): - tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) - return tmp - - - def domain_geometry(self): - return self.gm_domain - - def range_geometry(self): - return self.gm_range - - def norm(self): - - x0 = self.gm_domain.allocate('random') - self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0) - return self.s1 - - def __rmul__(self, scalar): - return ScaledOperator(self, scalar) - - - def matrix(self): - - tmp = self.gm_range.allocate() - - mat = [] - for i in range(tmp.shape[0]): - - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - mat.append(spMat.matrix()) - - return BlockDataContainer(*mat) - - - def sum_abs_row(self): - - tmp = self.gm_range.allocate() - res = self.gm_domain.allocate() - for i in range(tmp.shape[0]): - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - res += spMat.sum_abs_row() - return res - - def sum_abs_col(self): - - tmp = self.gm_range.allocate() - res = [] - for i in range(tmp.shape[0]): - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - res.append(spMat.sum_abs_col()) - return BlockDataContainer(*res) - - -if __name__ == '__main__': - - - from ccpi.optimisation.operators import Identity, BlockOperator - - M, N = 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int' ) - - # check direct of Gradient and sparse matrix - G = Gradient(ig) - G_sp = G.matrix() - - res1 = G.direct(arr) - res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') - - print(res1[0].as_array()) - print(res1y) - - res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') - - print(res1[1].as_array()) - print(res1x) - - #check sum abs row - conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))) - print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F')) - print(G.sum_abs_row().as_array()) - - print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F')) - - print(G.sum_abs_col()[0].as_array()) - print(G.sum_abs_col()[1].as_array()) - - # Check Blockoperator sum abs col and row - - op1 = Gradient(ig) - op2 = Identity(ig) - - B = BlockOperator( op1, op2) - - Brow = B.sum_abs_row() - Bcol = B.sum_abs_col() - - concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray())) - - print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F')) - print(Brow.as_array()) - - print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F')) - print(Bcol[1].as_array()) - - -# print(numpy.concatene(G_sp[0].toarray()+ )) -# print(G_sp[1].toarray()) -# -# d1 = G.sum_abs_row() -# print(d1.as_array()) -# -# d2 = G_neum.sum_abs_col() -## print(d2) -# -# -# ########################################################### - a = BlockDataContainer( BlockDataContainer(arr, arr), arr) - b = BlockDataContainer( 
BlockDataContainer(arr+5, arr+3), arr+2) - c = a/b - - print(c[0][0].as_array(), (arr/(arr+5)).as_array()) - print(c[0][1].as_array(), (arr/(arr+3)).as_array()) - print(c[1].as_array(), (arr/(arr+2)).as_array()) - - - a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) -# -# c1 = arr + a -# c2 = arr + a -# c2 = a1 + arr -# diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py deleted file mode 100644 index a58a296..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:30:51 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import LinearOperator -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - - -class Identity(LinearOperator): - - def __init__(self, gm_domain, gm_range=None): - - self.gm_domain = gm_domain - self.gm_range = gm_range - if self.gm_range is None: - self.gm_range = self.gm_domain - - super(Identity, self).__init__() - - def direct(self,x,out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def adjoint(self,x, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def norm(self): - return 1.0 - - def domain_geometry(self): - return self.gm_domain - - def range_geometry(self): - return self.gm_range - - def matrix(self): - - return sp.eye(np.prod(self.gm_domain.shape)) - - def sum_abs_row(self): - - return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) - - def sum_abs_col(self): - - return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) - - -if __name__ == '__main__': - - from ccpi.framework import ImageGeometry - - M, N = 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int') - - Id = Identity(ig) - d = Id.matrix() - print(d.toarray()) - - d1 = Id.sum_abs_col() - print(d1.as_array()) - - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py deleted file mode 100644 index e19304f..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:57:52 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.operators import Operator - - -class LinearOperator(Operator): - '''A Linear Operator that maps from a space X <-> Y''' - def __init__(self): - super(LinearOperator, self).__init__() - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - '''returns the adjoint/inverse operation - - only available to linear operators''' - raise NotImplementedError diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py deleted file mode 100644 index 2d2089b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:55:56 2019 - -@author: ofn77899 -""" -from ccpi.optimisation.operators.ScaledOperator import ScaledOperator - -class Operator(object): - '''Operator that maps from a space X -> Y''' - def 
is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - '''Returns the application of the Operator on x''' - raise NotImplementedError - def norm(self): - '''Returns the norm of the Operator''' - raise NotImplementedError - def range_geometry(self): - '''Returns the range of the Operator: Y space''' - raise NotImplementedError - def domain_geometry(self): - '''Returns the domain of the Operator: X space''' - raise NotImplementedError - def __rmul__(self, scalar): - '''Defines the multiplication by a scalar on the left - - returns a ScaledOperator''' - return ScaledOperator(self, scalar) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py deleted file mode 100644 index adcc6d9..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py +++ /dev/null @@ -1,42 +0,0 @@ -from numbers import Number -import numpy - -class ScaledOperator(object): - '''ScaledOperator - - A class to represent the scalar multiplication of an Operator with a scalar. - It holds an operator and a scalar. Basically it returns the multiplication - of the result of direct and adjoint of the operator with the scalar. - For the rest it behaves like the operator it holds. - - Args: - operator (Operator): a Operator or LinearOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - sop = ScaledOperator(operator, scalar) - sop.direct(x) = scalar * operator.direct(x) - sop.adjoint(x) = scalar * operator.adjoint(x) - sop.norm() = operator.norm() - sop.range_geometry() = operator.range_geometry() - sop.domain_geometry() = operator.domain_geometry() - ''' - def __init__(self, operator, scalar): - super(ScaledOperator, self).__init__() - if not isinstance (scalar, Number): - raise TypeError('expected scalar: got {}'.format(type(scalar))) - self.scalar = scalar - self.operator = operator - def direct(self, x, out=None): - return self.scalar * self.operator.direct(x, out=out) - def adjoint(self, x, out=None): - if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) - else: - raise TypeError('Operator is not linear') - def norm(self): - return numpy.abs(self.scalar) * self.operator.norm() - def range_geometry(self): - return self.operator.range_geometry() - def domain_geometry(self): - return self.operator.domain_geometry() diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py deleted file mode 100644 index 1b88cba..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Apr 2 14:06:15 2019 - -@author: vaggelis -""" - -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - -class SparseFiniteDiff(): - - def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): - - super(SparseFiniteDiff, self).__init__() - self.gm_domain = gm_domain - self.gm_range = gm_range - self.direction = direction - self.bnd_cond = bnd_cond - - if self.gm_range is None: - self.gm_range = self.gm_domain - - self.get_dims = [i for i in gm_domain.shape] - - if self.direction + 1 > len(self.gm_domain.shape): - raise ValueError('Gradient directions more than geometry domain') - - def matrix(self): - - i = 
self.direction - - mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil') - - if self.bnd_cond == 'Neumann': - mat[-1,:] = 0 - elif self.bnd_cond == 'Periodic': - mat[-1,0] = 1 - - tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0]) - - for j in range(1, self.gm_domain.length): - - tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad ) - - return tmpGrad - - def T(self): - return self.matrix().T - - def direct(self, x): - - x_asarr = x.as_array() - res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F') - return type(x)(res) - - def adjoint(self, x): - - x_asarr = x.as_array() - res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F') - return type(x)(res) - - def sum_abs_row(self): - - res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')) - res[res==0]=1 - return ImageData(res) - - def sum_abs_col(self): - - res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C')) - res[res==0]=1 - return ImageData(res) - -if __name__ == '__main__': - - from ccpi.framework import ImageGeometry - from ccpi.optimisation.operators import FiniteDiff - - # 2D - M, N= 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int') - - for i in [0,1]: - - # Neumann - sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann') - G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann') - - # Periodic - sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic') - G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic') - - u_neum_direct = G_neum.direct(arr) - u_neum_sp_direct = sFD_neum.direct(arr) - np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4) - - u_neum_adjoint = G_neum.adjoint(arr) - u_neum_sp_adjoint = sFD_neum.adjoint(arr) - np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4) - - u_per_direct = G_neum.direct(arr) - u_per_sp_direct = sFD_neum.direct(arr) - np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4) - - u_per_adjoint = G_per.adjoint(arr) - u_per_sp_adjoint = sFD_per.adjoint(arr) - np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4) - - # 3D - M, N, K = 2, 3, 4 - ig3D = ImageGeometry(M, N, K) - arr3D = ig3D.allocate('random_int') - - for i in [0,1,2]: - - # Neumann - sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann') - G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann') - - # Periodic - sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic') - G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic') - - u_neum_direct3D = G_neum3D.direct(arr3D) - u_neum_sp_direct3D = sFD_neum3D.direct(arr3D) - np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4) - - u_neum_adjoint3D = G_neum3D.adjoint(arr3D) - u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D) - np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4) - - u_per_direct3D = G_neum3D.direct(arr3D) - u_per_sp_direct3D = sFD_neum3D.direct(arr3D) - np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4) - - u_per_adjoint3D = G_per3D.adjoint(arr3D) - u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D) - 
np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4) - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py deleted file mode 100644 index d908e49..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:53:55 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator -from ccpi.optimisation.operators import FiniteDiff -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, DataContainer -import numpy as np - - -class SymmetrizedGradient(Operator): - - def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs): - - super(SymmetrizedGradient, self).__init__() - - self.gm_domain = gm_domain # Domain of Grad Operator - self.gm_range = gm_range # Range of Grad Operator - self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences - - # Kwargs Default options - self.memopt = kwargs.get('memopt',False) - self.correlation = kwargs.get('correlation','Space') - - #TODO not tested yet, operator norm??? - self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) - - - def direct(self, x, out=None): - - tmp = np.zeros(self.gm_range) - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) - tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + - FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) - - return type(x)(tmp) - - - def adjoint(self, x, out=None): - - tmp = np.zeros(self.gm_domain) - - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) - - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) - - return type(x)(tmp) - - def alloc_domain_dim(self): - return ImageData(np.zeros(self.gm_domain)) - - def alloc_range_dim(self): - return ImageData(np.zeros(self.range_dim)) - - def domain_dim(self): - return self.gm_domain - - def range_dim(self): - return self.gm_range - - def norm(self): -# return np.sqrt(4*len(self.domainDim())) - #TODO this takes time for big ImageData - # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) - x0 = ImageData(np.random.random_sample(self.domain_dim())) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) - return self.s1 - - - -if __name__ == '__main__': - - ########################################################################### - ## Symmetrized Gradient - - N, M = 2, 3 - ig = (N,M) - ig2 = (2,) + ig - ig3 = (3,) + ig - u1 = DataContainer(np.random.randint(10, size=ig2)) - w1 = DataContainer(np.random.randint(10, size=ig3)) - - E = SymmetrizedGradient(ig2,ig3) - - d1 = E.direct(u1) - d2 = E.adjoint(w1) - - LHS = (d1.as_array()[0]*w1.as_array()[0] + \ - d1.as_array()[1]*w1.as_array()[1] + \ - 2*d1.as_array()[2]*w1.as_array()[2]).sum() - - 
RHS = (u1.as_array()[0]*d2.as_array()[0] + \ - u1.as_array()[1]*d2.as_array()[1]).sum() - - - print(LHS, RHS, E.norm()) - - -# - - - - - - - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py deleted file mode 100644 index a7c5f09..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:25:53 2019 - -@author: evangelos -""" - -import numpy as np -from ccpi.framework import ImageData -from ccpi.optimisation.operators import Operator - -class ZeroOp(Operator): - - def __init__(self, gm_domain, gm_range): - self.gm_domain = gm_domain - self.gm_range = gm_range - super(ZeroOp, self).__init__() - - def direct(self,x,out=None): - if out is None: - return ImageData(np.zeros(self.gm_range)) - else: - return ImageData(np.zeros(self.gm_range)) - - def adjoint(self,x, out=None): - if out is None: - return ImageData(np.zeros(self.gm_domain)) - else: - return ImageData(np.zeros(self.gm_domain)) - - def norm(self): - return 0 - - def domain_dim(self): - return self.gm_domain - - def range_dim(self): - return self.gm_range \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py deleted file mode 100644 index 1c09faf..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:56:27 2019 - -@author: ofn77899 -""" - -from .Operator import Operator -from .LinearOperator import LinearOperator -from .ScaledOperator import ScaledOperator -from .BlockOperator import BlockOperator -from .BlockScaledOperator import BlockScaledOperator - - -from .FiniteDifferenceOperator import FiniteDiff -from .GradientOperator import Gradient -from .SymmetrizedGradientOperator import SymmetrizedGradient -from .IdentityOperator import Identity -from .ZeroOperator import ZeroOp diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py b/Wrappers/Python/build/lib/ccpi/optimisation/ops.py deleted file mode 100644 index 6afb97a..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py +++ /dev/null @@ -1,294 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
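# Illustration only (not part of the patch): the dot-product test used in the
# SymmetrizedGradient __main__ block above, <K u, w> == <u, K^T w>, checked
# here for a plain forward-difference matrix K so the pattern is easy to see.
# Pure NumPy; matrix and vector names are invented for this sketch.
import numpy as np

n = 5
K = np.zeros((n, n))
for i in range(n - 1):                    # forward differences, Neumann boundary
    K[i, i], K[i, i + 1] = -1.0, 1.0

rng = np.random.default_rng(2)
u = rng.standard_normal(n)
w = rng.standard_normal(n)

lhs = np.dot(K @ u, w)                    # <K u, w>
rhs = np.dot(u, K.T @ w)                  # <u, K^T w>
np.testing.assert_allclose(lhs, rhs)      # equal up to round-off iff K.T is the adjoint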
- -import numpy -from scipy.sparse.linalg import svds -from ccpi.framework import DataContainer -from ccpi.framework import AcquisitionData -from ccpi.framework import ImageData -from ccpi.framework import ImageGeometry -from ccpi.framework import AcquisitionGeometry -from numbers import Number -# Maybe operators need to know what types they take as inputs/outputs -# to not just use generic DataContainer - - -class Operator(object): - '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 - def is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError - def norm(self): - raise NotImplementedError - def allocate_direct(self): - '''Allocates memory on the Y space''' - raise NotImplementedError - def allocate_adjoint(self): - '''Allocates memory on the X space''' - raise NotImplementedError - def range_geometry(self): - raise NotImplementedError - def domain_geometry(self): - raise NotImplementedError - def __rmul__(self, other): - '''reverse multiplication of Operator with number sets the variable scalar in the Operator''' - assert isinstance(other, Number) - self.scalar = other - return self - -class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - raise NotImplementedError - -class Identity(Operator): - def __init__(self): - self.s1 = 1.0 - self.L = 1 - super(Identity, self).__init__() - - def direct(self,x,out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def adjoint(self,x, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - -class TomoIdentity(Operator): - def __init__(self, geometry, **kwargs): - super(TomoIdentity, self).__init__() - self.s1 = 1.0 - self.geometry = geometry - - def is_linear(self): - return True - def direct(self,x,out=None): - - if out is None: - if self.scalar != 1: - return x * self.scalar - return x.copy() - else: - if self.scalar != 1: - out.fill(x * self.scalar) - return - out.fill(x) - return - - def adjoint(self,x, out=None): - return self.direct(x, out) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - def allocate_direct(self): - if issubclass(type(self.geometry), ImageGeometry): - return ImageData(geometry=self.geometry) - elif issubclass(type(self.geometry), AcquisitionGeometry): - return AcquisitionData(geometry=self.geometry) - else: - raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry)) - def allocate_adjoint(self): - return self.allocate_direct() - def range_geometry(self): - return self.geometry - def domain_geometry(self): - return self.geometry - - - -class FiniteDiff2D(Operator): - def __init__(self): - self.s1 = 8.0 - super(FiniteDiff2D, self).__init__() - - def direct(self,x, out=None): - '''Forward differences with Neumann BC.''' - # FIXME this seems to be working only with numpy arrays - - d1 = numpy.zeros_like(x.as_array()) - d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1] - d2 = numpy.zeros_like(x.as_array()) - d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:] - d = numpy.stack((d1,d2),0) - #x.geometry.voxel_num_z = 2 - return type(x)(d,False,geometry=x.geometry) - - 
def adjoint(self,x, out=None): - '''Backward differences, Neumann BC.''' - Nrows = x.get_dimension_size('horizontal_x') - Ncols = x.get_dimension_size('horizontal_y') - Nchannels = 1 - if len(x.shape) == 4: - Nchannels = x.get_dimension_size('channel') - zer = numpy.zeros((Nrows,1)) - xxx = x.as_array()[0,:,:-1] - # - h = numpy.concatenate((zer,xxx), 1) - h -= numpy.concatenate((xxx,zer), 1) - - zer = numpy.zeros((1,Ncols)) - xxx = x.as_array()[1,:-1,:] - # - v = numpy.concatenate((zer,xxx), 0) - v -= numpy.concatenate((xxx,zer), 0) - return type(x)(h + v, False, geometry=x.geometry) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - -def PowerMethodNonsquareOld(op,numiters): - # Initialise random - # Jakob's - #inputsize = op.size()[1] - #x0 = ImageContainer(numpy.random.randn(*inputsize) - # Edo's - #vg = ImageGeometry(voxel_num_x=inputsize[0], - # voxel_num_y=inputsize[1], - # voxel_num_z=inputsize[2]) - # - #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x']) - #print (x0) - #x0.fill(numpy.random.randn(*x0.shape)) - - x0 = op.create_image_data() - - s = numpy.zeros(numiters) - # Loop - for it in numpy.arange(numiters): - x1 = op.adjoint(op.direct(x0)) - x1norm = numpy.sqrt((x1**2).sum()) - #print ("x0 **********" ,x0) - #print ("x1 **********" ,x1) - s[it] = (x1*x0).sum() / (x0*x0).sum() - x0 = (1.0/x1norm)*x1 - return numpy.sqrt(s[-1]), numpy.sqrt(s), x0 - -#def PowerMethod(op,numiters): -# # Initialise random -# x0 = np.random.randn(400) -# s = np.zeros(numiters) -# # Loop -# for it in np.arange(numiters): -# x1 = np.dot(op.transpose(),np.dot(op,x0)) -# x1norm = np.sqrt(np.sum(np.dot(x1,x1))) -# s[it] = np.dot(x1,x0) / np.dot(x1,x0) -# x0 = (1.0/x1norm)*x1 -# return s, x0 - - -def PowerMethodNonsquare(op,numiters , x0=None): - # Initialise random - # Jakob's - # inputsize , outputsize = op.size() - #x0 = ImageContainer(numpy.random.randn(*inputsize) - # Edo's - #vg = ImageGeometry(voxel_num_x=inputsize[0], - # voxel_num_y=inputsize[1], - # voxel_num_z=inputsize[2]) - # - #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x']) - #print (x0) - #x0.fill(numpy.random.randn(*x0.shape)) - - if x0 is None: - #x0 = op.create_image_data() - x0 = op.allocate_direct() - x0.fill(numpy.random.randn(*x0.shape)) - - s = numpy.zeros(numiters) - # Loop - for it in numpy.arange(numiters): - x1 = op.adjoint(op.direct(x0)) - #x1norm = numpy.sqrt((x1*x1).sum()) - x1norm = x1.norm() - #print ("x0 **********" ,x0) - #print ("x1 **********" ,x1) - s[it] = (x1*x0).sum() / (x0.squared_norm()) - x0 = (1.0/x1norm)*x1 - return numpy.sqrt(s[-1]), numpy.sqrt(s), x0 - -class LinearOperatorMatrix(Operator): - def __init__(self,A): - self.A = A - self.s1 = None # Largest singular value, initially unknown - super(LinearOperatorMatrix, self).__init__() - - def direct(self,x, out=None): - if out is None: - return type(x)(numpy.dot(self.A,x.as_array())) - else: - numpy.dot(self.A, x.as_array(), out=out.as_array()) - - - def adjoint(self,x, out=None): - if out is None: - return type(x)(numpy.dot(self.A.transpose(),x.as_array())) - else: - numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) - - - def size(self): - return self.A.shape - - def get_max_sing_val(self): - # If unknown, compute and store. If known, simply return it. 
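Illustrative note (not part of the patch): PowerMethodNonsquare above estimates an operator's largest singular value by power iteration on A^T A, and it is what the norm() method of the deleted SymmetrizedGradient calls. A minimal stand-alone version of the same idea for a dense matrix follows; the function name and test matrix are invented for this note and do not exist in the library.

# --- editorial example: power iteration for the largest singular value ---
import numpy

def largest_singular_value(A, numiters=25, seed=0):
    numpy.random.seed(seed)
    x = numpy.random.randn(A.shape[1])
    s = 0.0
    for _ in range(numiters):
        y = A.T.dot(A.dot(x))           # adjoint(direct(x)), i.e. A^T A x
        s = x.dot(y) / x.dot(x)         # Rayleigh quotient -> sigma_max**2
        x = y / numpy.sqrt(y.dot(y))    # renormalise the iterate
    return numpy.sqrt(s)

A = numpy.random.rand(5, 3)
print(largest_singular_value(A), numpy.linalg.svd(A, compute_uv=False)[0])
# --- end editorial example ---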
- if self.s1 is None: - self.s1 = svds(self.A,1,return_singular_vectors=False)[0] - return self.s1 - else: - return self.s1 - def allocate_direct(self): - '''allocates the memory to hold the result of adjoint''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((N_A,1)) - return DataContainer(out) - def allocate_adjoint(self): - '''allocate the memory to hold the result of direct''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((M_A,1)) - return DataContainer(out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py b/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py deleted file mode 100644 index 263a7cd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy - -from ccpi.optimisation.funcs import Function -from ccpi.framework import ImageData -from ccpi.framework import AcquisitionData - - -class spdhg(): - """Computes a saddle point with a stochastic PDHG. - - This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that - - (x*, y*) in arg min_x max_y sum_i=1^n - f*[i](y_i) + g(x) - - where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and - proper functionals. For this algorithm, they all may be non-smooth and no - strong convexity is assumed. - - Parameters - ---------- - f : list of functions - Functionals Y[i] -> IR_infty that all have a convex conjugate with a - proximal operator, i.e. - f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i]. - g : function - Functional X -> IR_infty that has a proximal operator, i.e. - g.prox(tau) : X -> X. - A : list of functions - Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint - x : primal variable, optional - By default equals 0. - y : dual variable, optional - Part of a product space. By default equals 0. - z : variable, optional - Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0. - tau : scalar / vector / matrix, optional - Step size for primal variable. Note that the proximal operator of g - has to be well-defined for this input. - sigma : scalar, optional - Scalar / vector / matrix used as step size for dual variable. Note that - the proximal operator related to f (see above) has to be well-defined - for this input. - prob : list of scalars, optional - Probabilities prob[i] that a subset i is selected in each iteration. - If fun_select is not given, then the sum of all probabilities must - equal 1. - A_norms : list of scalars, optional - Norms of the operators in A. Can be used to determine the step sizes - tau and sigma and the probabilities prob. - fun_select : function, optional - Function that selects blocks at every iteration IN -> {1,...,n}. 
By - default this is serial sampling, fun_select(k) selects an index - i \in {1,...,n} with probability prob[i]. - - References - ---------- - [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, - *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling - and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808 - (2018) http://doi.org/10.1007/s10851-010-0251-1 - - [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott, - A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a - stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII, - 58 (2017) http://doi.org/10.1117/12.2272946. - - [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster - PET Reconstruction with Non-Smooth Priors by Randomization and - Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150 - """ - - def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None, - prob=None, A_norms=None, fun_select=None): - # fun_select is optional and by default performs serial sampling - - if x is None: - x = A[0].allocate_direct(0) - - if y is None: - if z is not None: - raise ValueError('y and z have to be defaulted together') - - y = [Ai.allocate_adjoint(0) for Ai in A] - z = 0 * x.copy() - - else: - if z is None: - raise ValueError('y and z have to be defaulted together') - - if A_norms is not None: - if tau is not None or sigma is not None or prob is not None: - raise ValueError('Either A_norms or (tau, sigma, prob) must ' - 'be given') - - tau = 1 / sum(A_norms) - sigma = [1 / nA for nA in A_norms] - prob = [nA / sum(A_norms) for nA in A_norms] - - #uniform prob, needs different sigma and tau - #n = len(A) - #prob = [1./n] * n - - if fun_select is None: - if prob is None: - raise ValueError('prob was not determined') - - def fun_select(k): - return [int(numpy.random.choice(len(A), 1, p=prob))] - - self.iter = 0 - self.x = x - - self.y = y - self.z = z - - self.f = f - self.g = g - self.A = A - self.tau = tau - self.sigma = sigma - self.prob = prob - self.fun_select = fun_select - - # Initialize variables - self.z_relax = z.copy() - self.tmp = self.x.copy() - - def update(self): - # select block - selected = self.fun_select(self.iter) - - # update primal variable - #tmp = (self.x - self.tau * self.z_relax).as_array() - #self.x.fill(self.g.prox(tmp, self.tau)) - self.tmp = - self.tau * self.z_relax - self.tmp += self.x - self.x = self.g.prox(self.tmp, self.tau) - - # update dual variable and z, z_relax - self.z_relax = self.z.copy() - for i in selected: - # save old yi - y_old = self.y[i].copy() - - # y[i]= prox(tmp) - tmp = y_old + self.sigma[i] * self.A[i].direct(self.x) - self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i]) - - # update adjoint of dual variable - dz = self.A[i].adjoint(self.y[i] - y_old) - self.z += dz - - # compute extrapolation - self.z_relax += (1 + 1 / self.prob[i]) * dz - - self.iter += 1 - - -## Functions - -class KullbackLeibler(Function): - def __init__(self, data, background): - self.data = data - self.background = background - self.__offset = None - - def __call__(self, x): - """Return the KL-diveregnce in the point ``x``. - - If any components of ``x`` is non-positive, the value is positive - infinity. - - Needs one extra array of memory of the size of `prior`. 
- """ - - # define short variable names - y = self.data - r = self.background - - # Compute - # sum(x + r - y + y * log(y / (x + r))) - # = sum(x - y * log(x + r)) + self.offset - # Assume that - # x + r > 0 - - # sum the result up - obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset() - - if numpy.isnan(obj): - # In this case, some element was less than or equal to zero - return numpy.inf - else: - return obj - - @property - def convex_conj(self): - """The convex conjugate functional of the KL-functional.""" - return KullbackLeiblerConvexConjugate(self.data, self.background) - - def offset(self): - """The offset which is independent of the unknown.""" - - if self.__offset is None: - tmp = self.domain.element() - - # define short variable names - y = self.data - r = self.background - - tmp = self.domain.element(numpy.maximum(y, 1)) - tmp = r - y + y * numpy.log(tmp) - - # sum the result up - self.__offset = numpy.sum(tmp) - - return self.__offset - -# def __repr__(self): -# """to be added???""" -# """Return ``repr(self)``.""" - # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__, - ## self.domain, self.data, - # self.background) - - -class KullbackLeiblerConvexConjugate(Function): - """The convex conjugate of Kullback-Leibler divergence functional. - - Notes - ----- - The functional :math:`F^*` with prior :math:`g>0` is given by: - - .. math:: - F^*(x) - = - \\begin{cases} - \\sum_{i} \left( -g_i \ln(1 - x_i) \\right) - & \\text{if } x_i < 1 \\forall i - \\\\ - +\\infty & \\text{else} - \\end{cases} - - See Also - -------- - KullbackLeibler : convex conjugate functional - """ - - def __init__(self, data, background): - self.data = data - self.background = background - - def __call__(self, x): - y = self.data - r = self.background - - tmp = numpy.sum(- x * r - y * numpy.log(1 - x)) - - if numpy.isnan(tmp): - # In this case, some element was larger than or equal to one - return numpy.inf - else: - return tmp - - - def prox(self, x, tau, out=None): - # Let y = data, r = background, z = x + tau * r - # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y)) - # Currently it needs 3 extra copies of memory. - - if out is None: - out = x.copy() - - # define short variable names - try: # this should be standard SIRF/CIL mode - y = self.data.as_array() - r = self.background.as_array() - x = x.as_array() - - try: - taua = tau.as_array() - except: - taua = tau - - z = x + tau * r - - out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))) - - return out - - except: # e.g. 
for NumPy - y = self.data - r = self.background - - try: - taua = tau.as_array() - except: - taua = tau - - z = x + tau * r - - out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)) - - return out - - @property - def convex_conj(self): - return KullbackLeibler(self.data, self.background) - - -def mult(x, y): - try: - xa = x.as_array() - except: - xa = x - - out = y.clone() - out.fill(xa * y.as_array()) - - return out diff --git a/Wrappers/Python/build/lib/ccpi/processors.py b/Wrappers/Python/build/lib/ccpi/processors.py deleted file mode 100644 index ccef410..0000000 --- a/Wrappers/Python/build/lib/ccpi/processors.py +++ /dev/null @@ -1,514 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ - AcquisitionGeometry, ImageGeometry, ImageData -from ccpi.reconstruction.parallelbeam import alg as pbalg -import numpy -from scipy import ndimage - -import matplotlib.pyplot as plt - - -class Normalizer(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): - kwargs = { - 'flat_field' : flat_field, - 'dark_field' : dark_field, - # very small number. 
Used when there is a division by zero - 'tolerance' : tolerance - } - - #DataProcessor.__init__(self, **kwargs) - super(Normalizer, self).__init__(**kwargs) - if not flat_field is None: - self.set_flat_field(flat_field) - if not dark_field is None: - self.set_dark_field(dark_field) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3 or\ - dataset.number_of_dimensions == 2: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def set_dark_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Dark Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.dark_field = df - elif issubclass(type(df), DataContainer): - self.dark_field = self.set_dark_field(df.as_array()) - - def set_flat_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Flat Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.flat_field = df - elif issubclass(type(df), DataContainer): - self.flat_field = self.set_flat_field(df.as_array()) - - @staticmethod - def normalize_projection(projection, flat, dark, tolerance): - a = (projection - dark) - b = (flat-dark) - with numpy.errstate(divide='ignore', invalid='ignore'): - c = numpy.true_divide( a, b ) - c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0 - return c - - @staticmethod - def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): - '''returns the estimated relative error of the normalised projection - - n = (projection - dark) / (flat - dark) - Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) - ''' - a = (projection - dark) - b = (flat-dark) - df = delta_flat / flat - dd = delta_dark / dark - rel_norm_error = (b + a) / (b * a) * (df + dd) - return rel_norm_error - - def process(self, out=None): - - projections = self.get_input() - dark = self.dark_field - flat = self.flat_field - - if projections.number_of_dimensions == 3: - if not (projections.shape[1:] == dark.shape and \ - projections.shape[1:] == flat.shape): - raise ValueError('Flats/Dark and projections size do not match.') - - - a = numpy.asarray( - [ Normalizer.normalize_projection( - projection, flat, dark, self.tolerance) \ - for projection in projections.as_array() ] - ) - elif projections.number_of_dimensions == 2: - a = Normalizer.normalize_projection(projections.as_array(), - flat, dark, self.tolerance) - y = type(projections)( a , True, - dimension_labels=projections.dimension_labels, - geometry=projections.geometry) - return y - - -class CenterOfRotationFinder(DataProcessor): - '''Processor to find the center of rotation in a parallel beam experiment - - This processor read in a AcquisitionDataSet and finds the center of rotation - based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078 - - Input: AcquisitionDataSet - - Output: float. 
center of rotation in pixel coordinate - ''' - - def __init__(self): - kwargs = { - - } - - #DataProcessor.__init__(self, **kwargs) - super(CenterOfRotationFinder, self).__init__(**kwargs) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3: - if dataset.geometry.geom_type == 'parallel': - return True - else: - raise ValueError('{0} is suitable only for parallel beam geometry'\ - .format(self.__class__.__name__)) - else: - raise ValueError("Expected input dimensions is 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - - # ######################################################################### - # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. # - # # - # Copyright 2015. UChicago Argonne, LLC. This software was produced # - # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # - # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # - # U.S. Department of Energy. The U.S. Government has rights to use, # - # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # - # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # - # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is # - # modified to produce derivative works, such modified software should # - # be clearly marked, so as not to confuse it with the version available # - # from ANL. # - # # - # Additionally, redistribution and use in source and binary forms, with # - # or without modification, are permitted provided that the following # - # conditions are met: # - # # - # * Redistributions of source code must retain the above copyright # - # notice, this list of conditions and the following disclaimer. # - # # - # * Redistributions in binary form must reproduce the above copyright # - # notice, this list of conditions and the following disclaimer in # - # the documentation and/or other materials provided with the # - # distribution. # - # # - # * Neither the name of UChicago Argonne, LLC, Argonne National # - # Laboratory, ANL, the U.S. Government, nor the names of its # - # contributors may be used to endorse or promote products derived # - # from this software without specific prior written permission. # - # # - # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # - # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # - # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # - # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # - # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # - # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # - # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # - # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # - # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # - # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # - # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # - # POSSIBILITY OF SUCH DAMAGE. 
# - # ######################################################################### - - @staticmethod - def as_ndarray(arr, dtype=None, copy=False): - if not isinstance(arr, numpy.ndarray): - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_dtype(arr, dtype, copy=False): - if not arr.dtype == dtype: - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_float32(arr): - arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32) - return CenterOfRotationFinder.as_dtype(arr, numpy.float32) - - - - - @staticmethod - def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5, - ratio=2., drop=20): - """ - Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. - - Parameters - ---------- - tomo : ndarray - 3D tomographic data. - ind : int, optional - Index of the slice to be used for reconstruction. - smin, smax : int, optional - Reference to the horizontal center of the sinogram. - srad : float, optional - Fine search radius. - step : float, optional - Step of fine searching. - ratio : float, optional - The ratio between the FOV of the camera and the size of object. - It's used to generate the mask. - drop : int, optional - Drop lines around vertical center of the mask. - - Returns - ------- - float - Rotation axis location. - - Notes - ----- - The function may not yield a correct estimate, if: - - - the sample size is bigger than the field of view of the camera. - In this case the ``ratio`` argument need to be set larger - than the default of 2.0. - - - there is distortion in the imaging hardware. If there's - no correction applied, the center of the projection image may - yield a better estimate. - - - the sample contrast is weak. Paganin's filter need to be applied - to overcome this. - - - the sample was changed during the scan. - """ - tomo = CenterOfRotationFinder.as_float32(tomo) - - if ind is None: - ind = tomo.shape[1] // 2 - _tomo = tomo[:, ind, :] - - - - # Reduce noise by smooth filters. Use different filters for coarse and fine search - _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) - _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) - - # Coarse and fine searches for finding the rotation center. - if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k) - #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :] - #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop) - #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin, - smax, ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - else: - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, - smin, smax, - ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - - #logger.debug('Rotation center search finished: %i', fine_cen) - return fine_cen - - - @staticmethod - def _search_coarse(sino, smin, smax, ratio, drop): - """ - Coarse search for finding the rotation center. 
- """ - (Nrow, Ncol) = sino.shape - centerfliplr = (Ncol - 1.0) / 2.0 - - # Copy the sinogram and flip left right, the purpose is to - # make a full [0;2Pi] sinogram - _copy_sino = numpy.fliplr(sino[1:]) - - # This image is used for compensating the shift of sinogram 2 - temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32') - temp_img[:] = sino[-1] - - # Start coarse search in which the shift step is 1 - listshift = numpy.arange(smin, smax + 1) - listmetric = numpy.zeros(len(listshift), dtype='float32') - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol, - 0.5 * ratio * Ncol, drop) - for i in listshift: - _sino = numpy.roll(_copy_sino, i, axis=1) - if i >= 0: - _sino[:, 0:i] = temp_img[:, 0:i] - else: - _sino[:, i:] = temp_img[:, i:] - listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # numpy.vstack((sino, _sino))) - numpy.fft.fft2(numpy.vstack((sino, _sino))) - )) * mask) - minpos = numpy.argmin(listmetric) - return centerfliplr + listshift[minpos] / 2.0 - - @staticmethod - def _search_fine(sino, srad, step, init_cen, ratio, drop): - """ - Fine search for finding the rotation center. - """ - Nrow, Ncol = sino.shape - centerfliplr = (Ncol + 1.0) / 2.0 - 1.0 - # Use to shift the sinogram 2 to the raw CoR. - shiftsino = numpy.int16(2 * (init_cen - centerfliplr)) - _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1) - if init_cen <= centerfliplr: - lefttake = numpy.int16(numpy.ceil(srad + 1)) - righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1)) - else: - lefttake = numpy.int16(numpy.ceil( - init_cen - (Ncol - 1 - init_cen) + srad + 1)) - righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1)) - Ncol1 = righttake - lefttake + 1 - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1, - 0.5 * ratio * Ncol, drop) - numshift = numpy.int16((2 * srad) / step) + 1 - listshift = numpy.linspace(-srad, srad, num=numshift) - listmetric = numpy.zeros(len(listshift), dtype='float32') - factor1 = numpy.mean(sino[-1, lefttake:righttake]) - num1 = 0 - for i in listshift: - _sino = ndimage.interpolation.shift( - _copy_sino, (0, i), prefilter=False) - factor2 = numpy.mean(_sino[0,lefttake:righttake]) - _sino = _sino * factor1 / factor2 - sinojoin = numpy.vstack((sino, _sino)) - listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # sinojoin[:, lefttake:righttake + 1]) - numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1]) - )) * mask) - num1 = num1 + 1 - minpos = numpy.argmin(listmetric) - return init_cen + listshift[minpos] / 2.0 - - @staticmethod - def _create_mask(nrow, ncol, radius, drop): - du = 1.0 / ncol - dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi) - centerrow = numpy.ceil(nrow / 2) - 1 - centercol = numpy.ceil(ncol / 2) - 1 - # added by Edoardo Pasca - centerrow = int(centerrow) - centercol = int(centercol) - mask = numpy.zeros((nrow, ncol), dtype='float32') - for i in range(nrow): - num1 = numpy.round(((i - centerrow) * dv / radius) / du) - (p1, p2) = numpy.int16(numpy.clip(numpy.sort( - (-num1 + centercol, num1 + centercol)), 0, ncol - 1)) - mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32') - if drop < centerrow: - mask[centerrow - drop:centerrow + drop + 1, - :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32') - mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32') - return mask - - def process(self, out=None): - - projections = self.get_input() - - cor = CenterOfRotationFinder.find_center_vo(projections.as_array()) 
- - return cor - - -class AcquisitionDataPadder(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, - center_of_rotation = None, - acquisition_geometry = None, - pad_value = 1e-5): - kwargs = { - 'acquisition_geometry' : acquisition_geometry, - 'center_of_rotation' : center_of_rotation, - 'pad_value' : pad_value - } - - super(AcquisitionDataPadder, self).__init__(**kwargs) - - def check_input(self, dataset): - if self.acquisition_geometry is None: - self.acquisition_geometry = dataset.geometry - if dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def process(self, out=None): - projections = self.get_input() - w = projections.get_dimension_size('horizontal') - delta = w - 2 * self.center_of_rotation - - padded_width = int ( - numpy.ceil(abs(delta)) + w - ) - delta_pix = padded_width - w - - voxel_per_pixel = 1 - geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), - self.acquisition_geometry.angles, - self.center_of_rotation, - voxel_per_pixel ) - - padded_geometry = self.acquisition_geometry.clone() - - padded_geometry.pixel_num_h = geom['n_h'] - padded_geometry.pixel_num_v = geom['n_v'] - - delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h - delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v - - if delta_pix_h == 0: - delta_pix_h = delta_pix - padded_geometry.pixel_num_h = padded_width - #initialize a new AcquisitionData with values close to 0 - out = AcquisitionData(geometry=padded_geometry) - out = out + self.pad_value - - - #pad in the horizontal-vertical plane -> slice on angles - if delta > 0: - #pad left of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - else: - #pad right of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(0, w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - #cleaned = eval(command) - exec(command) - return out \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index e9bd801..5bf96cc 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -8,15 +8,11 @@ Created on Mon Feb 4 16:18:06 2019 from ccpi.optimisation.algorithms import Algorithm from ccpi.framework import 
ImageData import numpy as np -import matplotlib.pyplot as plt import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer from ccpi.optimisation.functions import FunctionOperatorComposition - -import matplotlib.pyplot as plt - class PDHG(Algorithm): '''Primal Dual Hybrid Gradient''' diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py index c6a7f95..1d77510 100755 --- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py @@ -139,11 +139,16 @@ class BlockOperator(Operator): for row in range(self.shape[0]): for col in range(self.shape[1]): if col == 0: - self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col)) + self.get_item(row,col).direct( + x_b.get_item(col), + out=out.get_item(row)) else: - self.get_item(row,col).direct(x_b.get_item(col), out=out) - out+=tmp - + a = out.get_item(row) + self.get_item(row,col).direct( + x_b.get_item(col), + out=tmp.get_item(row)) + a += tmp.get_item(row) + def adjoint(self, x, out=None): '''Adjoint operation for the BlockOperator @@ -156,36 +161,72 @@ class BlockOperator(Operator): Raises: ValueError if the contained Operators are not linear ''' - if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): + if not self.is_linear(): raise ValueError('Not all operators in Block are linear.') if not isinstance (x, BlockDataContainer): x_b = BlockDataContainer(x) else: x_b = x shape = self.get_output_shape(x_b.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row, col).adjoint(x_b.get_item(col)) - else: - prod += self.get_item(row, col).adjoint(x_b.get_item(col)) - res.append(prod) - if self.shape[1]==1: - return ImageData(*res) + if out is None: + res = [] + for col in range(self.shape[1]): + for row in range(self.shape[0]): + if row == 0: + prod = self.get_item(row, col).adjoint(x_b.get_item(row)) + else: + prod += self.get_item(row, col).adjoint(x_b.get_item(row)) + res.append(prod) + if self.shape[1]==1: + return ImageData(*res) + else: + return BlockDataContainer(*res, shape=shape) else: - return BlockDataContainer(*res, shape=shape) - + #tmp = self.domain_geometry().allocate() + + for col in range(self.shape[1]): + for row in range(self.shape[0]): + if row == 0: + if issubclass(out.__class__, DataContainer): + self.get_item(row, col).adjoint( + x_b.get_item(row), + out=out) + else: + op = self.get_item(row,col) + self.get_item(row, col).adjoint( + x_b.get_item(row), + out=out.get_item(col)) + else: + if issubclass(out.__class__, DataContainer): + out += self.get_item(row,col).adjoint( + x_b.get_item(row)) + else: + a = out.get_item(col) + a += self.get_item(row,col).adjoint( + x_b.get_item(row), + ) + def is_linear(self): + '''returns whether all the elements of the BlockOperator are linear''' + return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True) + def get_output_shape(self, xshape, adjoint=False): - sshape = self.shape[1] - oshape = self.shape[0] + '''returns the shape of the output BlockDataContainer + + A(N,M) direct u(M,1) -> N,1 + A(N,M)^T adjoint u(N,1) -> M,1 + ''' + rows , cols = self.shape + xrows, xcols = xshape + if xcols != 1: + raise ValueError('BlockDataContainer cannot have more than 1 column') if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != 
xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - return (oshape, xshape[-1]) - + if rows != xrows: + raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) + return (cols,xcols) + if cols != xrows: + raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape)) + return (rows,xcols) + def __rmul__(self, scalar): '''Defines the left multiplication with a scalar diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 54456cc..9c639df 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -71,7 +71,7 @@ class Gradient(LinearOperator): self.FD.direction=self.ind[i] self.FD.adjoint(x.get_item(i), out = tmp) # FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) - out-=tmp + out+=tmp else: tmp = self.gm_domain.allocate() for i in range(x.shape[0]): diff --git a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py index adcc6d9..3203dde 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/ScaledOperator.py @@ -28,10 +28,18 @@ class ScaledOperator(object): self.scalar = scalar self.operator = operator def direct(self, x, out=None): - return self.scalar * self.operator.direct(x, out=out) + if out is None: + return self.scalar * self.operator.direct(x, out=out) + else: + self.operator.direct(x, out=out) + out *= self.scalar def adjoint(self, x, out=None): if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) + if out is None: + return self.scalar * self.operator.adjoint(x, out=out) + else: + self.operator.adjoint(x, out=out) + out *= self.scalar else: raise TypeError('Operator is not linear') def norm(self): @@ -40,3 +48,5 @@ class ScaledOperator(object): return self.operator.range_geometry() def domain_geometry(self): return self.operator.domain_geometry() + def is_linear(self): + return self.operator.is_linear() diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py index 46e8c7c..6656d34 100644 --- a/Wrappers/Python/test/test_Operator.py +++ b/Wrappers/Python/test/test_Operator.py @@ -1,8 +1,15 @@ import unittest #from ccpi.optimisation.operators import Operator from ccpi.optimisation.ops import TomoIdentity -from ccpi.framework import ImageGeometry, ImageData +from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer +from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator import numpy +from timeit import default_timer as timer +from ccpi.framework import ImageGeometry +from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff + +def dt(steps): + return steps[-1] - steps[-2] class TestOperator(unittest.TestCase): def test_ScaledOperator(self): @@ -22,3 +29,312 @@ class TestOperator(unittest.TestCase): y = Id.direct(img) numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + + +class TestBlockOperator(unittest.TestCase): + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, 
DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print(err) + print("expected " , second) + print("actual " , first) + + self.assertTrue(res) + + def test_BlockOperator(self): + + + M, N = 3, 4 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + G = Gradient(ig) + Id = Identity(ig) + + B = BlockOperator(G, Id) + # Nx1 case + u = ig.allocate('random_int') + z1 = B.direct(u) + + res = B.range_geometry().allocate() + #res = z1.copy() + B.direct(u, out=res) + + + print (type(z1), type(res)) + print (z1.shape) + print(z1[0][0].as_array()) + print(res[0][0].as_array()) + + for col in range(z1.shape[0]): + a = z1.get_item(col) + b = res.get_item(col) + if isinstance(a, BlockDataContainer): + for col2 in range(a.shape[0]): + self.assertNumpyArrayEqual( + a.get_item(col2).as_array(), + b.get_item(col2).as_array() + ) + else: + self.assertNumpyArrayEqual( + a.as_array(), + b.as_array() + ) + z1 = B.direct(u) + res1 = B.adjoint(z1) + res2 = B.domain_geometry().allocate() + B.adjoint(z1, out=res2) + + self.assertNumpyArrayEqual(res1.as_array(), res2.as_array()) + + BB = BlockOperator( Id, 2 * Id) + B = BlockOperator( BB, Id ) + v = B.domain_geometry().allocate() + B.adjoint(res,out=v) + vv = B.adjoint(res) + el1 = B.get_item(0,0).adjoint(z1.get_item(0)) +\ + B.get_item(1,0).adjoint(z1.get_item(1)) + print ("el1" , el1.as_array()) + print ("vv" , vv.as_array()) + print ("v" , v.as_array()) + + self.assertNumpyArrayEqual(v.as_array(),vv.as_array()) + # test adjoint + print ("############ 2x1 #############") + + BB = BlockOperator( Id, 2 * Id) + u = ig.allocate(1) + z1 = BB.direct(u) + print ("z1 shape {} one\n{} two\n{}".format(z1.shape, + z1.get_item(0).as_array(), + z1.get_item(1).as_array())) + res = BB.range_geometry().allocate(0) + BB.direct(u, out=res) + print ("res shape {} one\n{} two\n{}".format(res.shape, + res.get_item(0).as_array(), + res.get_item(1).as_array())) + + + self.assertNumpyArrayEqual(z1.get_item(0).as_array(), + u.as_array()) + self.assertNumpyArrayEqual(z1.get_item(1).as_array(), + 2 * u.as_array()) + self.assertNumpyArrayEqual(res.get_item(0).as_array(), + u.as_array()) + self.assertNumpyArrayEqual(res.get_item(1).as_array(), + 2 * u.as_array()) + + x1 = BB.adjoint(z1) + print("adjoint x1\n",x1.as_array()) + + res1 = BB.domain_geometry().allocate() + BB.adjoint(z1, out=res1) + print("res1\n",res1.as_array()) + self.assertNumpyArrayEqual(x1.as_array(), + res1.as_array()) + + self.assertNumpyArrayEqual(x1.as_array(), + 5 * u.as_array()) + self.assertNumpyArrayEqual(res1.as_array(), + 5 * u.as_array()) + ################################################# + + print ("############ 2x2 #############") + BB = BlockOperator( Id, 2 * Id, 3 * Id, Id, shape=(2,2)) + B = BB + u = ig.allocate(1) + U = BlockDataContainer(u,u) + z1 = B.direct(U) + + + print ("z1 shape {} one\n{} two\n{}".format(z1.shape, + z1.get_item(0).as_array(), + z1.get_item(1).as_array())) + 
self.assertNumpyArrayEqual(z1.get_item(0).as_array(), + 3 * u.as_array()) + self.assertNumpyArrayEqual(z1.get_item(1).as_array(), + 4 * u.as_array()) + res = B.range_geometry().allocate() + B.direct(U, out=res) + self.assertNumpyArrayEqual(res.get_item(0).as_array(), + 3 * u.as_array()) + self.assertNumpyArrayEqual(res.get_item(1).as_array(), + 4 * u.as_array()) + + + x1 = B.adjoint(z1) + # this should be [15 u, 10 u] + el1 = B.get_item(0,0).adjoint(z1.get_item(0)) + B.get_item(1,0).adjoint(z1.get_item(1)) + el2 = B.get_item(0,1).adjoint(z1.get_item(0)) + B.get_item(1,1).adjoint(z1.get_item(1)) + + shape = B.get_output_shape(z1.shape, adjoint=True) + print ("shape ", shape) + out = B.domain_geometry().allocate() + + for col in range(B.shape[1]): + for row in range(B.shape[0]): + if row == 0: + el = B.get_item(row,col).adjoint(z1.get_item(row)) + else: + el += B.get_item(row,col).adjoint(z1.get_item(row)) + out.get_item(col).fill(el) + + print ("el1 " , el1.as_array()) + print ("el2 " , el2.as_array()) + print ("out shape {} one\n{} two\n{}".format(out.shape, + out.get_item(0).as_array(), + out.get_item(1).as_array())) + + self.assertNumpyArrayEqual(out.get_item(0).as_array(), + 15 * u.as_array()) + self.assertNumpyArrayEqual(out.get_item(1).as_array(), + 10 * u.as_array()) + + res2 = B.domain_geometry().allocate() + #print (res2, res2.as_array()) + B.adjoint(z1, out = res2) + + #print ("adjoint",x1.as_array(),"\n",res2.as_array()) + self.assertNumpyArrayEqual( + out.get_item(0).as_array(), + res2.get_item(0).as_array() + ) + self.assertNumpyArrayEqual( + out.get_item(1).as_array(), + res2.get_item(1).as_array() + ) + + if True: + #B1 = BlockOperator(Id, Id, Id, Id, shape=(2,2)) + B1 = BlockOperator(G, Id) + U = ig.allocate(ImageGeometry.RANDOM_INT) + #U = BlockDataContainer(u,u) + RES1 = B1.range_geometry().allocate() + + Z1 = B1.direct(U) + B1.direct(U, out = RES1) + + self.assertBlockDataContainerEqual(Z1,RES1) + + + + print("U", U.as_array()) + print("Z1", Z1[0][0].as_array()) + print("RES1", RES1[0][0].as_array()) + print("Z1", Z1[0][1].as_array()) + print("RES1", RES1[0][1].as_array()) + def test_timedifference(self): + + M, N ,W = 100, 512, 512 + ig = ImageGeometry(M, N, W) + arr = ig.allocate('random_int') + + G = Gradient(ig) + Id = Identity(ig) + + B = BlockOperator(G, Id) + + + # Nx1 case + u = ig.allocate('random_int') + steps = [timer()] + i = 0 + n = 25. + t1 = t2 = 0 + res = B.range_geometry().allocate() + + while (i < n): + print ("i ", i) + steps.append(timer()) + z1 = B.direct(u) + steps.append(timer()) + t = dt(steps) + #print ("B.direct(u) " ,t) + t1 += t/n + + steps.append(timer()) + B.direct(u, out = res) + steps.append(timer()) + t = dt(steps) + #print ("B.direct(u, out=res) " ,t) + t2 += t/n + i += 1 + + print ("Time difference ", t1,t2) + self.assertGreater(t1,t2) + + steps = [timer()] + i = 0 + #n = 50. 
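Illustrative note (not part of the patch): test_timedifference above asserts that the out= versions of BlockOperator.direct and adjoint run faster than the allocating versions, which is the motivation for the in-place refactoring in this commit. The snippet below shows the same effect in isolation with plain NumPy buffers; the sizes and loop counts are arbitrary choices for this note.

# --- editorial example: allocating versus in-place arithmetic ---
import numpy
from timeit import default_timer as timer

x = numpy.random.rand(512, 512)
buf = numpy.empty_like(x)

t0 = timer()
for _ in range(200):
    y = 2.0 * x                          # allocates a fresh array every pass
t1 = timer()
for _ in range(200):
    numpy.multiply(x, 2.0, out=buf)      # writes into the preallocated buffer
t2 = timer()
print('allocating %.4fs, in-place %.4fs' % (t1 - t0, t2 - t1))
# --- end editorial example ---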
+ t1 = t2 = 0 + resd = B.domain_geometry().allocate() + z1 = B.direct(u) + #B.adjoint(z1, out=resd) + + print (type(res)) + while (i < n): + print ("i ", i) + steps.append(timer()) + w1 = B.adjoint(z1) + steps.append(timer()) + t = dt(steps) + #print ("B.adjoint(z1) " ,t) + t1 += t/n + + steps.append(timer()) + B.adjoint(z1, out=resd) + steps.append(timer()) + t = dt(steps) + #print ("B.adjoint(z1, out=res) " ,t) + t2 += t/n + i += 1 + + print ("Time difference ", t1,t2) + self.assertGreater(t1,t2) + + def test_BlockOperatorLinearValidity(self): + + + M, N = 3, 4 + ig = ImageGeometry(M, N) + arr = ig.allocate('random_int') + + G = Gradient(ig) + Id = Identity(ig) + + B = BlockOperator(G, Id) + # Nx1 case + u = ig.allocate('random_int') + w = B.range_geometry().allocate(ImageGeometry.RANDOM_INT) + w1 = B.direct(u) + u1 = B.adjoint(w) + self.assertEqual((w * w1).sum() , (u1*u).sum()) + + + + diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 54dfa57..19cb65f 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -19,7 +19,7 @@ from ccpi.optimisation.operators import Gradient #from ccpi.optimisation.functions import SimpleL2NormSq from ccpi.optimisation.functions import L2NormSquared -from ccpi.optimisation.functions import SimpleL1Norm +#from ccpi.optimisation.functions import SimpleL1Norm from ccpi.optimisation.functions import L1Norm from ccpi.optimisation.funcs import Norm2sq -- cgit v1.2.3 From 76f88d25572ce80057552fe05fc78ec621e98adf Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 10 Apr 2019 11:33:31 +0100 Subject: wip for with and without operators, functions --- Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 3b81d98..0d479ee 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -141,21 +141,25 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): else: -# operator.direct(xbar, out = y_tmp) + operator.direct(xbar, out = y_tmp) + + y_tmp.multiply(sigma, out = y_tmp) + y_tmp.add(y_old, out = y_tmp) # y_tmp.__imul__(sigma) # y_tmp.__iadd__(y_old) # y_tmp *= sigma # y_tmp += y_old - y_tmp = y_old + sigma * operator.direct(xbar) +# y_tmp = y_old + sigma * operator.direct(xbar) f.proximal_conjugate(y_tmp, sigma, out=y) - x_tmp = x_old - tau * operator.adjoint(y) +# x_tmp = x_old - tau * operator.adjoint(y) + + operator.adjoint(y, out = x_tmp) + x_tmp.multiply(-tau, out = x_tmp) + x_tmp.add(x_old, out = x_tmp) -# operator.adjoint(y, out = x_tmp) -# z = x_tmp -# x_tmp = x_old - tau * z # x_tmp *= -tau # x_tmp += x_old @@ -166,7 +170,8 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): xbar *= theta xbar += x - + x_old.fill(x) + y_old.fill(y) # pass # -- cgit v1.2.3 From 44de36ba886e8eacd5df96642140d594bd95eec9 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Wed, 10 Apr 2019 11:34:50 +0100 Subject: remove build --- Wrappers/Python/build/lib/ccpi/__init__.py | 18 - .../build/lib/ccpi/framework/BlockDataContainer.py | 337 ----- .../build/lib/ccpi/framework/BlockGeometry.py | 38 - .../Python/build/lib/ccpi/framework/__init__.py | 26 - .../Python/build/lib/ccpi/framework/framework.py | 1496 -------------------- 
Wrappers/Python/build/lib/ccpi/io/__init__.py | 18 - Wrappers/Python/build/lib/ccpi/io/reader.py | 500 ------- .../Python/build/lib/ccpi/optimisation/__init__.py | 18 - .../lib/ccpi/optimisation/algorithms/Algorithm.py | 158 --- .../build/lib/ccpi/optimisation/algorithms/CGLS.py | 87 -- .../build/lib/ccpi/optimisation/algorithms/FBPD.py | 86 -- .../lib/ccpi/optimisation/algorithms/FISTA.py | 121 -- .../optimisation/algorithms/GradientDescent.py | 76 - .../build/lib/ccpi/optimisation/algorithms/PDHG.py | 155 -- .../lib/ccpi/optimisation/algorithms/__init__.py | 32 - .../Python/build/lib/ccpi/optimisation/algs.py | 319 ----- .../Python/build/lib/ccpi/optimisation/funcs.py | 272 ---- .../ccpi/optimisation/functions/BlockFunction.py | 79 -- .../lib/ccpi/optimisation/functions/Function.py | 69 - .../functions/FunctionOperatorComposition.py | 65 - .../ccpi/optimisation/functions/IndicatorBox.py | 65 - .../lib/ccpi/optimisation/functions/L1Norm.py | 92 -- .../ccpi/optimisation/functions/L2NormSquared.py | 233 --- .../ccpi/optimisation/functions/MixedL21Norm.py | 136 -- .../lib/ccpi/optimisation/functions/Norm2Sq.py | 98 -- .../ccpi/optimisation/functions/ScaledFunction.py | 91 -- .../lib/ccpi/optimisation/functions/ZeroFun.py | 60 - .../lib/ccpi/optimisation/functions/__init__.py | 13 - .../lib/ccpi/optimisation/functions/functions.py | 312 ---- .../ccpi/optimisation/functions/mixed_L12Norm.py | 56 - .../ccpi/optimisation/operators/BlockOperator.py | 223 --- .../optimisation/operators/BlockScaledOperator.py | 67 - .../operators/FiniteDifferenceOperator.py | 322 ----- .../optimisation/operators/GradientOperator.py | 186 --- .../optimisation/operators/IdentityOperator.py | 79 -- .../ccpi/optimisation/operators/LinearOperator.py | 22 - .../lib/ccpi/optimisation/operators/Operator.py | 30 - .../ccpi/optimisation/operators/ScaledOperator.py | 42 - .../optimisation/operators/SparseFiniteDiff.py | 144 -- .../operators/SymmetrizedGradientOperator.py | 118 -- .../ccpi/optimisation/operators/ZeroOperator.py | 39 - .../lib/ccpi/optimisation/operators/__init__.py | 19 - Wrappers/Python/build/lib/ccpi/optimisation/ops.py | 294 ---- .../Python/build/lib/ccpi/optimisation/spdhg.py | 338 ----- Wrappers/Python/build/lib/ccpi/processors.py | 514 ------- 45 files changed, 7563 deletions(-) delete mode 100644 Wrappers/Python/build/lib/ccpi/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/framework/framework.py delete mode 100644 Wrappers/Python/build/lib/ccpi/io/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/io/reader.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/algs.py delete mode 
100644 Wrappers/Python/build/lib/ccpi/optimisation/funcs.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/ops.py delete mode 100644 Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py delete mode 100644 Wrappers/Python/build/lib/ccpi/processors.py (limited to 'Wrappers') diff --git a/Wrappers/Python/build/lib/ccpi/__init__.py b/Wrappers/Python/build/lib/ccpi/__init__.py deleted file mode 100644 index cf2d93d..0000000 --- a/Wrappers/Python/build/lib/ccpi/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py deleted file mode 100644 index 21ef3f0..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 16:04:45 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import DataContainer -#from ccpi.framework import AcquisitionData, ImageData -#from ccpi.optimisation.operators import Operator, LinearOperator - -class BlockDataContainer(object): - '''Class to hold DataContainers as column vector''' - __array_priority__ = 1 - def __init__(self, *args, **kwargs): - '''''' - self.containers = args - self.index = 0 - #shape = kwargs.get('shape', None) - #if shape is None: - # shape = (len(args),1) - shape = (len(args),1) - self.shape = shape - #print (self.shape) - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements, len(args))) - - - def __iter__(self): - '''BlockDataContainer is Iterable''' - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 2 objects fit''' - - for i in range(len(self.containers)): - if type(self.containers[i])==type(self): - self = self.containers[i] - - if isinstance(other, Number): - return True - elif isinstance(other, list): - for ot in other: - if not isinstance(ot, (Number,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - raise ValueError('List/ numpy array can only contain numbers {}'\ - .format(type(ot))) - return len(self.containers) == len(other) - elif isinstance(other, numpy.ndarray): - return len(self.containers) == len(other) - elif issubclass(other.__class__, DataContainer): - return self.get_item(0).shape == other.shape - return len(self.containers) == len(other.containers) - - def get_item(self, row): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - return self.containers[row] - - def __getitem__(self, row): - return self.get_item(row) - - def add(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for add') - out = kwargs.get('out', None) - #print ("args" , *args) - if isinstance(other, Number): - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - - return type(self)( - *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def subtract(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for subtract') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def multiply(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('{} Incompatible for multiply'.format(other)) - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def divide(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for divide') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def power(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for power') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - def maximum(self,other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for maximum') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - ## unary operations - def abs(self, *args, **kwargs): - return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) - def sign(self, *args, **kwargs): - return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) - def sqrt(self, *args, **kwargs): - return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) - def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) - - ## reductions - def sum(self, *args, **kwargs): - return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) - def squared_norm(self): - y = numpy.asarray([el.squared_norm() for el in self.containers]) - return y.sum() - def norm(self): - return numpy.sqrt(self.squared_norm()) - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers], shape=self.shape) - def fill(self, x): - for el,ot in zip(self.containers, x): - el.fill(ot) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def __div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, other): - '''Reverse addition - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self + other - # __radd__ - - def __rsub__(self, other): - '''Reverse subtraction - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - 
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - '''Reverse multiplication - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self * other - # __rmul__ - - def __rdiv__(self, other): - '''Reverse division - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - '''Reverse truedivision - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self.__rdiv__(other) - - def __rpow__(self, other): - '''Reverse power - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return other.power(self) - - def __iadd__(self, other): - '''Inline addition''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __iadd__') - for el,ot in zip(self.containers, other): - el += ot - return self - # __iadd__ - - def __isub__(self, other): - '''Inline subtraction''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __isub__') - for el,ot in zip(self.containers, other): - el -= ot - return self - # __isub__ - - def __imul__(self, other): - '''Inline multiplication''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __imul__') - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - '''Inline division''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __idiv__') - for el,ot in zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - '''Inline truedivision''' - return self.__idiv__(other) - diff --git 
a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py deleted file mode 100644 index 0f43155..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import BlockDataContainer -#from ccpi.optimisation.operators import Operator, LinearOperator - -class BlockGeometry(object): - '''Class to hold Geometry as column vector''' - #__array_priority__ = 1 - def __init__(self, *args, **kwargs): - '''''' - self.geometries = args - self.index = 0 - #shape = kwargs.get('shape', None) - #if shape is None: - # shape = (len(args),1) - shape = (len(args),1) - self.shape = shape - #print (self.shape) - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements, len(args))) - - def get_item(self, index): - '''returns the Geometry in the BlockGeometry located at position index''' - return self.geometries[index] - - def allocate(self, value=0, dimension_labels=None): - containers = [geom.allocate(value) for geom in self.geometries] - return BlockDataContainer(*containers) - diff --git a/Wrappers/Python/build/lib/ccpi/framework/__init__.py b/Wrappers/Python/build/lib/ccpi/framework/__init__.py deleted file mode 100644 index 229edb5..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 16:00:18 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce - - -from .framework import DataContainer -from .framework import ImageData, AcquisitionData -from .framework import ImageGeometry, AcquisitionGeometry -from .framework import find_key, message -from .framework import DataProcessor -from .framework import AX, PixelByPixelDataProcessor, CastDataContainer -from .BlockDataContainer import BlockDataContainer -from .BlockGeometry import BlockGeometry diff --git a/Wrappers/Python/build/lib/ccpi/framework/framework.py b/Wrappers/Python/build/lib/ccpi/framework/framework.py deleted file mode 100644 index 07c2ead..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/framework.py +++ /dev/null @@ -1,1496 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce -from numbers import Number - - -def find_key(dic, val): - """return the key of dictionary dic given the value""" - return [k for k, v in dic.items() if v == val][0] - -def message(cls, msg, *args): - msg = "{0}: " + msg - for i in range(len(args)): - msg += " {%d}" %(i+1) - args = list(args) - args.insert(0, cls.__name__ ) - - return msg.format(*args ) - - -class ImageGeometry(object): - RANDOM = 'random' - RANDOM_INT = 'random_int' - CHANNEL = 'channel' - ANGLE = 'angle' - VERTICAL = 'vertical' - HORIZONTAL_X = 'horizontal_x' - HORIZONTAL_Y = 'horizontal_y' - - def __init__(self, - voxel_num_x=0, - voxel_num_y=0, - voxel_num_z=0, - voxel_size_x=1, - voxel_size_y=1, - voxel_size_z=1, - center_x=0, - center_y=0, - center_z=0, - channels=1): - - self.voxel_num_x = voxel_num_x - self.voxel_num_y = voxel_num_y - self.voxel_num_z = voxel_num_z - self.voxel_size_x = voxel_size_x - self.voxel_size_y = voxel_size_y - self.voxel_size_z = voxel_size_z - self.center_x = center_x - self.center_y = center_y - self.center_z = center_z - self.channels = channels - - # this is some code repetition - if self.channels > 1: - if self.voxel_num_z>1: - self.length = 4 - self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - else: - self.length = 3 - self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - else: - if self.voxel_num_z>1: - self.length = 3 - self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - self.length = 2 - self.shape = (self.voxel_num_y, self.voxel_num_x) - dim_labels = [ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] - - self.dimension_labels = dim_labels - - def get_min_x(self): - return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x - - def get_max_x(self): - return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x - - def get_min_y(self): - return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y - - def get_max_y(self): - return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y - - def get_min_z(self): - if not self.voxel_num_z == 0: - return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def get_max_z(self): - if not self.voxel_num_z == 0: - return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z - else: - return 0 - - def clone(self): - '''returns a copy of ImageGeometry''' - return ImageGeometry( - self.voxel_num_x, - self.voxel_num_y, - self.voxel_num_z, - self.voxel_size_x, - self.voxel_size_y, - self.voxel_size_z, - self.center_x, - self.center_y, - self.center_z, - self.channels) - def __str__ (self): - repres = "" - repres += "Number of channels: {0}\n".format(self.channels) - repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z) - repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) - repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) 
- return repres - def allocate(self, value=0, dimension_labels=None, **kwargs): - '''allocates an ImageData according to the size expressed in the instance''' - out = ImageData(geometry=self) - if isinstance(value, Number): - if value != 0: - out += value - else: - if value == ImageGeometry.RANDOM: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - out.fill(numpy.random.random_sample(self.shape)) - elif value == ImageGeometry.RANDOM_INT: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - max_value = kwargs.get('max_value', 100) - out.fill(numpy.random.randint(max_value,size=self.shape)) - else: - raise ValueError('Value {} unknown'.format(value)) - if dimension_labels is not None: - if dimension_labels != self.dimension_labels: - return out.subset(dimensions=dimension_labels) - return out - # The following methods return 2 members of the class, therefore I - # don't think we need to implement them. - # Additionally using __len__ is confusing as one would think this is - # an iterable. - #def __len__(self): - # '''returns the length of the geometry''' - # return self.length - #def shape(self): - # '''Returns the shape of the array of the ImageData it describes''' - # return self.shape - -class AcquisitionGeometry(object): - RANDOM = 'random' - RANDOM_INT = 'random_int' - ANGLE_UNIT = 'angle_unit' - DEGREE = 'degree' - RADIAN = 'radian' - CHANNEL = 'channel' - ANGLE = 'angle' - VERTICAL = 'vertical' - HORIZONTAL = 'horizontal' - def __init__(self, - geom_type, - dimension, - angles, - pixel_num_h=0, - pixel_size_h=1, - pixel_num_v=0, - pixel_size_v=1, - dist_source_center=None, - dist_center_detector=None, - channels=1, - **kwargs - ): - """ - General inputs for standard type projection geometries - detectorDomain or detectorpixelSize: - If 2D - If scalar: Width of detector or single detector pixel - If 2-vec: Error - If 3D - If scalar: Width in both dimensions - If 2-vec: Vertical then horizontal size - grid - If 2D - If scalar: number of detectors - If 2-vec: error - If 3D - If scalar: Square grid that size - If 2-vec vertical then horizontal size - cone or parallel - 2D or 3D - parallel_parameters: ? 
- cone_parameters: - source_to_center_dist (if parallel: NaN) - center_to_detector_dist (if parallel: NaN) - standard or nonstandard (vec) geometry - angles - angles_format radians or degrees - """ - self.geom_type = geom_type # 'parallel' or 'cone' - self.dimension = dimension # 2D or 3D - self.angles = angles - num_of_angles = len (angles) - - self.dist_source_center = dist_source_center - self.dist_center_detector = dist_center_detector - - self.pixel_num_h = pixel_num_h - self.pixel_size_h = pixel_size_h - self.pixel_num_v = pixel_num_v - self.pixel_size_v = pixel_size_v - - self.channels = channels - self.angle_unit=kwargs.get(AcquisitionGeometry.ANGLE_UNIT, - AcquisitionGeometry.DEGREE) - if channels > 1: - if pixel_num_v > 1: - shape = (channels, num_of_angles , pixel_num_v, pixel_num_h) - dim_labels = [AcquisitionGeometry.CHANNEL , - AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , - AcquisitionGeometry.HORIZONTAL] - else: - shape = (channels , num_of_angles, pixel_num_h) - dim_labels = [AcquisitionGeometry.CHANNEL , - AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] - else: - if pixel_num_v > 1: - shape = (num_of_angles, pixel_num_v, pixel_num_h) - dim_labels = [AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , - AcquisitionGeometry.HORIZONTAL] - else: - shape = (num_of_angles, pixel_num_h) - dim_labels = [AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] - self.shape = shape - - self.dimension_labels = dim_labels - - def clone(self): - '''returns a copy of the AcquisitionGeometry''' - return AcquisitionGeometry(self.geom_type, - self.dimension, - self.angles, - self.pixel_num_h, - self.pixel_size_h, - self.pixel_num_v, - self.pixel_size_v, - self.dist_source_center, - self.dist_center_detector, - self.channels) - - def __str__ (self): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.dimension) - repres += "angles: {0}\n".format(self.angles) - repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v) - repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v) - repres += "geometry type: {0}\n".format(self.geom_type) - repres += "distance source-detector: {0}\n".format(self.dist_source_center) - repres += "distance center-detector: {0}\n".format(self.dist_source_center) - repres += "number of channels: {0}\n".format(self.channels) - return repres - def allocate(self, value=0, dimension_labels=None): - '''allocates an AcquisitionData according to the size expressed in the instance''' - out = AcquisitionData(geometry=self) - if isinstance(value, Number): - if value != 0: - out += value - else: - if value == AcquisitionData.RANDOM: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - out.fill(numpy.random.random_sample(self.shape)) - elif value == AcquisitionData.RANDOM_INT: - seed = kwargs.get('seed', None) - if seed is not None: - numpy.random.seed(seed) - max_value = kwargs.get('max_value', 100) - out.fill(numpy.random.randint(max_value,size=self.shape)) - else: - raise ValueError('Value {} unknown'.format(value)) - if dimension_labels is not None: - if dimension_labels != self.dimension_labels: - return out.subset(dimensions=dimension_labels) - return out - -class DataContainer(object): - '''Generic class to hold data - - Data is currently held in a numpy arrays''' - - def __init__ (self, array, deep_copy=True, dimension_labels=None, - **kwargs): - '''Holds the data''' - - self.shape = numpy.shape(array) - self.number_of_dimensions = len (self.shape) 
- self.dimension_labels = {} - self.geometry = None # Only relevant for AcquisitionData and ImageData - - if dimension_labels is not None and \ - len (dimension_labels) == self.number_of_dimensions: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = dimension_labels[i] - else: - for i in range(self.number_of_dimensions): - self.dimension_labels[i] = 'dimension_{0:02}'.format(i) - - if type(array) == numpy.ndarray: - if deep_copy: - self.array = array.copy() - else: - self.array = array - else: - raise TypeError('Array must be NumpyArray, passed {0}'\ - .format(type(array))) - - # finally copy the geometry - if 'geometry' in kwargs.keys(): - self.geometry = kwargs['geometry'] - else: - # assume it is parallel beam - pass - - def get_dimension_size(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - acq_size = -1 - for k,v in self.dimension_labels.items(): - if v == dimension_label: - acq_size = self.shape[k] - return acq_size - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels)) - def get_dimension_axis(self, dimension_label): - if dimension_label in self.dimension_labels.values(): - for k,v in self.dimension_labels.items(): - if v == dimension_label: - return k - else: - raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, - self.dimension_labels.values())) - - - def as_array(self, dimensions=None): - '''Returns the DataContainer as Numpy Array - - Returns the pointer to the array if dimensions is not set. - If dimensions is set, it first creates a new DataContainer with the subset - and then it returns the pointer to the array''' - if dimensions is not None: - return self.subset(dimensions).as_array() - return self.array - - - def subset(self, dimensions=None, **kw): - '''Creates a DataContainer containing a subset of self according to the - labels in dimensions''' - if dimensions is None: - if kw == {}: - return self.array.copy() - else: - reduced_dims = [v for k,v in self.dimension_labels.items()] - for dim_l, dim_v in kw.items(): - for k,v in self.dimension_labels.items(): - if v == dim_l: - reduced_dims.pop(k) - return self.subset(dimensions=reduced_dims, **kw) - else: - # check that all the requested dimensions are in the array - # this is done by checking the dimension_labels - proceed = True - unknown_key = '' - # axis_order contains the order of the axis that the user wants - # in the output DataContainer - axis_order = [] - if type(dimensions) == list: - for dl in dimensions: - if dl not in self.dimension_labels.values(): - proceed = False - unknown_key = dl - break - else: - axis_order.append(find_key(self.dimension_labels, dl)) - if not proceed: - raise KeyError('Subset error: Unknown key specified {0}'.format(dl)) - - # slice away the unwanted data from the array - unwanted_dimensions = self.dimension_labels.copy() - left_dimensions = [] - for ax in sorted(axis_order): - this_dimension = unwanted_dimensions.pop(ax) - left_dimensions.append(this_dimension) - #print ("unwanted_dimensions {0}".format(unwanted_dimensions)) - #print ("left_dimensions {0}".format(left_dimensions)) - #new_shape = [self.shape[ax] for ax in axis_order] - #print ("new_shape {0}".format(new_shape)) - command = "self.array[" - for i in range(self.number_of_dimensions): - if self.dimension_labels[i] in unwanted_dimensions.values(): - value = 0 - for k,v in kw.items(): - if k == self.dimension_labels[i]: - value = v - - command = command + str(value) - else: - 
command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - command = command + ']' - - cleaned = eval(command) - # cleaned has collapsed dimensions in the same order of - # self.array, but we want it in the order stated in the - # "dimensions". - # create axes order for numpy.transpose - axes = [] - for key in dimensions: - #print ("key {0}".format( key)) - for i in range(len( left_dimensions )): - ld = left_dimensions[i] - #print ("ld {0}".format( ld)) - if ld == key: - axes.append(i) - #print ("axes {0}".format(axes)) - - cleaned = numpy.transpose(cleaned, axes).copy() - - return type(self)(cleaned , True, dimensions) - - def fill(self, array, **dimension): - '''fills the internal numpy array with the one provided''' - if dimension == {}: - if issubclass(type(array), DataContainer) or\ - issubclass(type(array), numpy.ndarray): - if array.shape != self.shape: - raise ValueError('Cannot fill with the provided array.' + \ - 'Expecting {0} got {1}'.format( - self.shape,array.shape)) - if issubclass(type(array), DataContainer): - numpy.copyto(self.array, array.array) - else: - #self.array[:] = array - numpy.copyto(self.array, array) - else: - - command = 'self.array[' - i = 0 - for k,v in self.dimension_labels.items(): - for dim_label, dim_value in dimension.items(): - if dim_label == v: - command = command + str(dim_value) - else: - command = command + ":" - if i < self.number_of_dimensions -1: - command = command + ',' - i += 1 - command = command + "] = array[:]" - exec(command) - - - def check_dimensions(self, other): - return self.shape == other.shape - - ## algebra - def __add__(self, other, *args, **kwargs): - out = kwargs.get('out', None) - - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() + other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)( - self.as_array() + other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , - type(other))) - # __add__ - - def __sub__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() - other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() - other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , - type(other))) - # __sub__ - def __truediv__(self,other): - return self.__div__(other) - - def __div__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() / other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() / other, - deep_copy=True, - dimension_labels=self.dimension_labels, - 
geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , - type(other))) - # __div__ - - def __pow__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() ** other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() ** other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , - type(other))) - # __pow__ - - def __mul__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() * other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - return type(self)(self.as_array() * other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , - type(other))) - # __mul__ - - # reverse operand - def __radd__(self, other): - return self + other - # __radd__ - - def __rsub__(self, other): - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - return self * other - # __rmul__ - - def __rdiv__(self, other): - print ("call __rdiv__") - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - return self.__rdiv__(other) - - def __rpow__(self, other): - if isinstance(other, (int, float)) : - fother = numpy.ones(numpy.shape(self.array)) * other - return type(self)(fother ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - return type(self)(other.as_array() ** self.array , - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Dimensions do not match') - # __rpow__ - - # in-place arithmetic operators: - # (+=, -=, *=, /= , //=, - # must return self - - - - def __iadd__(self, other): - if isinstance(other, (int, float)) : - numpy.add(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.add(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __iadd__ - - def __imul__(self, other): - if isinstance(other, (int, float)) : - arr = self.as_array() - numpy.multiply(arr, other, out=arr) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.multiply(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __imul__ - - def __isub__(self, other): - if isinstance(other, (int, float)) : - numpy.subtract(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.subtract(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do 
not match') - return self - # __isub__ - - def __idiv__(self, other): - return self.__itruediv__(other) - def __itruediv__(self, other): - if isinstance(other, (int, float)) : - numpy.divide(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.divide(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __idiv__ - - def __str__ (self, representation=False): - repres = "" - repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions) - repres += "Shape: {0}\n".format(self.shape) - repres += "Axis labels: {0}\n".format(self.dimension_labels) - if representation: - repres += "Representation: \n{0}\n".format(self.array) - return repres - - def clone(self): - '''returns a copy of itself''' - - return type(self)(self.array, - dimension_labels=self.dimension_labels, - deep_copy=True, - geometry=self.geometry ) - - def get_data_axes_order(self,new_order=None): - '''returns the axes label of self as a list - - if new_order is None returns the labels of the axes as a sorted-by-key list - if new_order is a list of length number_of_dimensions, returns a list - with the indices of the axes in new_order with respect to those in - self.dimension_labels: i.e. - self.dimension_labels = {0:'horizontal',1:'vertical'} - new_order = ['vertical','horizontal'] - returns [1,0] - ''' - if new_order is None: - - axes_order = [i for i in range(len(self.shape))] - for k,v in self.dimension_labels.items(): - axes_order[k] = v - return axes_order - else: - if len(new_order) == self.number_of_dimensions: - axes_order = [i for i in range(self.number_of_dimensions)] - - for i in range(len(self.shape)): - found = False - for k,v in self.dimension_labels.items(): - if new_order[i] == v: - axes_order[i] = k - found = True - if not found: - raise ValueError('Axis label {0} not found.'.format(new_order[i])) - return axes_order - else: - raise ValueError('Expecting {0} axes, got {2}'\ - .format(len(self.shape),len(new_order))) - - - def copy(self): - '''alias of clone''' - return self.clone() - - ## binary operations - - def pixel_wise_binary(self, pwop, x2, *args, **kwargs): - out = kwargs.get('out', None) - if out is None: - if isinstance(x2, (int, float, complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - out = pwop(self.as_array() , x2 , *args, **kwargs ) - elif issubclass(type(x2) , DataContainer): - out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - - elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): - if self.check_dimensions(out) and self.check_dimensions(x2): - kwargs['out'] = out.as_array() - pwop(self.as_array(), x2.as_array(), *args, **kwargs ) - #return type(self)(out.as_array(), - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - return out - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): - if self.check_dimensions(out): - kwargs['out']=out.as_array() - pwop(self.as_array(), x2, *args, **kwargs ) - return out - else: - raise 
ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - kwargs['out'] = out - pwop(self.as_array(), x2, *args, **kwargs) - #return type(self)(out, - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def add(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.add, other, *args, **kwargs) - - def subtract(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs) - - def multiply(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs) - - def divide(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) - - def power(self, other, *args, **kwargs): - return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) - - def maximum(self, x2, *args, **kwargs): - return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs) - - ## unary operations - def pixel_wise_unary(self, pwop, *args, **kwargs): - out = kwargs.get('out', None) - if out is None: - out = pwop(self.as_array() , *args, **kwargs ) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - elif issubclass(type(out), DataContainer): - if self.check_dimensions(out): - kwargs['out'] = out.as_array() - pwop(self.as_array(), *args, **kwargs ) - else: - raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) - elif issubclass(type(out), numpy.ndarray): - if self.array.shape == out.shape and self.array.dtype == out.dtype: - kwargs['out'] = out - pwop(self.as_array(), *args, **kwargs) - else: - raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - - def abs(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.abs, *args, **kwargs) - - def sign(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.sign, *args, **kwargs) - - def sqrt(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs) - - def conjugate(self, *args, **kwargs): - return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs) - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - - ## reductions - def sum(self, *args, **kwargs): - return self.as_array().sum(*args, **kwargs) - def squared_norm(self): - '''return the squared euclidean norm of the DataContainer viewed as a vector''' - #shape = self.shape - #size = reduce(lambda x,y:x*y, shape, 1) - #y = numpy.reshape(self.as_array(), (size, )) - return self.dot(self.conjugate()) - #return self.dot(self) - def norm(self): - '''return the euclidean norm of the DataContainer viewed as a vector''' - return numpy.sqrt(self.squared_norm()) - def dot(self, other, *args, **kwargs): - '''return the inner product of 2 DataContainers viewed as vectors''' - if self.shape == other.shape: - return numpy.dot(self.as_array().ravel(), other.as_array().ravel()) - else: - raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape)) - - - - - -class ImageData(DataContainer): - '''DataContainer for holding 2D or 3D DataContainer''' - - def __init__(self, - array = None, - deep_copy=False, - dimension_labels=None, - **kwargs): - 
- self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz_x = geometry.voxel_num_x - horiz_y = geometry.voxel_num_y - vert = 1 if geometry.voxel_num_z is None\ - else geometry.voxel_num_z # this should be 1 for 2D - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (channels , horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - if vert > 1: - shape = (vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (horiz_y, horiz_x) - dim_labels = [ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == ImageGeometry.CHANNEL: - shape.append(channels) - elif dim == ImageGeometry.HORIZONTAL_Y: - shape.append(horiz_y) - elif dim == ImageGeometry.VERTICAL: - shape.append(vert) - elif dim == ImageGeometry.HORIZONTAL_X: - shape.append(horiz_x) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes'.format( - len(dimension_labels) - len(shape))) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(ImageData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - else: - raise ValueError('Please pass either a DataContainer, ' +\ - 'a numpy array or a geometry') - else: - if issubclass(type(array) , DataContainer): - # if the array is a DataContainer get the info from there - if not ( array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(ImageData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) , numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = [ImageGeometry.CHANNEL, - ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - elif array.ndim == 3: - dimension_labels = [ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - dimension_labels = [ ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(ImageData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - # load metadata from kwargs if present - for key, value in kwargs.items(): - if (type(value) == list or type(value) == tuple) and \ - ( len (value) == 3 and len (value) == 2) : - if key == 'origin' : - self.origin = value - if key == 'spacing' : - self.spacing = value - - def subset(self, dimensions=None, **kw): - # FIXME: this is clearly not rigth - # it should be something like - # out = DataContainer.subset(self, dimensions, **kw) - # followed by regeneration of the proper geometry. 
- out = super(ImageData, self).subset(dimensions, **kw) - #out.geometry = self.recalculate_geometry(dimensions , **kw) - out.geometry = self.geometry - return out - - -class AcquisitionData(DataContainer): - '''DataContainer for holding 2D or 3D sinogram''' - - def __init__(self, - array = None, - deep_copy=True, - dimension_labels=None, - **kwargs): - self.geometry = None - if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz = geometry.pixel_num_h - vert = geometry.pixel_num_v - angles = geometry.angles - num_of_angles = numpy.shape(angles)[0] - - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, num_of_angles , vert, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL] - else: - shape = (channels , num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL] - else: - if vert > 1: - shape = (num_of_angles, vert, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL - ] - else: - shape = (num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL - ] - - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == AcquisitionGeometry.CHANNEL: - shape.append(channels) - elif dim == AcquisitionGeometry.ANGLE: - shape.append(num_of_angles) - elif dim == AcquisitionGeometry.VERTICAL: - shape.append(vert) - elif dim == AcquisitionGeometry.HORIZONTAL: - shape.append(horiz) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ - .format( - len(dimension_labels) - len(shape), - dimension_labels, shape) - ) - shape = tuple(shape) - - array = numpy.zeros( shape , dtype=numpy.float32) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - else: - - if issubclass(type(array) ,DataContainer): - # if the array is a DataContainer get the info from there - if not ( array.number_of_dimensions == 2 or \ - array.number_of_dimensions == 3 or \ - array.number_of_dimensions == 4): - raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ - .format(array.number_of_dimensions)) - - #DataContainer.__init__(self, array.as_array(), deep_copy, - # array.dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array.as_array(), deep_copy, - array.dimension_labels, **kwargs) - elif issubclass(type(array) ,numpy.ndarray): - if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): - raise ValueError( - 'Number of dimensions are not 2 or 3 or 4 : {0}'\ - .format(array.ndim)) - - if dimension_labels is None: - if array.ndim == 4: - dimension_labels = ['channel' ,'angle' , 'vertical' , - 'horizontal'] - elif array.ndim == 3: - dimension_labels = ['angle' , 'vertical' , - 'horizontal'] - else: - dimension_labels = ['angle' , - 'horizontal'] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) - super(AcquisitionData, self).__init__(array, deep_copy, - dimension_labels, **kwargs) - - -class DataProcessor(object): - '''Defines a generic DataContainer processor - - accepts DataContainer as inputs and - outputs DataContainer - additional attributes can be defined with __setattr__ - ''' - - def __init__(self, **attributes): - if not 'store_output' in attributes.keys(): - 
attributes['store_output'] = True - attributes['output'] = False - attributes['runTime'] = -1 - attributes['mTime'] = datetime.now() - attributes['input'] = None - for key, value in attributes.items(): - self.__dict__[key] = value - - - def __setattr__(self, name, value): - if name == 'input': - self.set_input(value) - elif name in self.__dict__.keys(): - self.__dict__[name] = value - self.__dict__['mTime'] = datetime.now() - else: - raise KeyError('Attribute {0} not found'.format(name)) - #pass - - def set_input(self, dataset): - if issubclass(type(dataset), DataContainer): - if self.check_input(dataset): - self.__dict__['input'] = dataset - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(dataset), DataContainer)) - - def check_input(self, dataset): - '''Checks parameters of the input DataContainer - - Should raise an Error if the DataContainer does not match expectation, e.g. - if the expected input DataContainer is 3D and the Processor expects 2D. - ''' - raise NotImplementedError('Implement basic checks for input DataContainer') - - def get_output(self, out=None): - for k,v in self.__dict__.items(): - if v is None and k != 'output': - raise ValueError('Key {0} is None'.format(k)) - shouldRun = False - if self.runTime == -1: - shouldRun = True - elif self.mTime > self.runTime: - shouldRun = True - - # CHECK this - if self.store_output and shouldRun: - self.runTime = datetime.now() - try: - self.output = self.process(out=out) - return self.output - except TypeError as te: - self.output = self.process() - return self.output - self.runTime = datetime.now() - try: - return self.process(out=out) - except TypeError as te: - return self.process() - - - def set_input_processor(self, processor): - if issubclass(type(processor), DataProcessor): - self.__dict__['input'] = processor - else: - raise TypeError("Input type mismatch: got {0} expecting {1}"\ - .format(type(processor), DataProcessor)) - - def get_input(self): - '''returns the input DataContainer - - It is useful in the case the user has provided a DataProcessor as - input - ''' - if issubclass(type(self.input), DataProcessor): - dsi = self.input.get_output() - else: - dsi = self.input - return dsi - - def process(self, out=None): - raise NotImplementedError('process must be implemented') - - - - -class DataProcessor23D(DataProcessor): - '''Regularizers DataProcessor - ''' - - def check_input(self, dataset): - '''Checks number of dimensions input DataContainer - - Expected input is 2D or 3D - ''' - if dataset.number_of_dimensions == 2 or \ - dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - -###### Example of DataProcessors - -class AX(DataProcessor): - '''Example DataProcessor - The AXPY routines perform a vector multiplication operation defined as - - y := a*x - where: - - a is a scalar - - x a DataContainer. 
- ''' - - def __init__(self): - kwargs = {'scalar':None, - 'input':None, - } - - #DataProcessor.__init__(self, **kwargs) - super(AX, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - dsi = self.get_input() - a = self.scalar - if out is None: - y = DataContainer( a * dsi.as_array() , True, - dimension_labels=dsi.dimension_labels ) - #self.setParameter(output_dataset=y) - return y - else: - out.fill(a * dsi.as_array()) - - -###### Example of DataProcessors - -class CastDataContainer(DataProcessor): - '''Example DataProcessor - Cast a DataContainer array to a different type. - - y := a*x - where: - - a is a scalar - - x a DataContainer. - ''' - - def __init__(self, dtype=None): - kwargs = {'dtype':dtype, - 'input':None, - } - - #DataProcessor.__init__(self, **kwargs) - super(CastDataContainer, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - dsi = self.get_input() - dtype = self.dtype - if out is None: - y = numpy.asarray(dsi.as_array(), dtype=dtype) - - return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype), - dimension_labels=dsi.dimension_labels ) - else: - out.fill(numpy.asarray(dsi.as_array(), dtype=dtype)) - - - - - -class PixelByPixelDataProcessor(DataProcessor): - '''Example DataProcessor - - This processor applies a python function to each pixel of the DataContainer - - f is a python function - - x a DataSet. - ''' - - def __init__(self): - kwargs = {'pyfunc':None, - 'input':None, - } - #DataProcessor.__init__(self, **kwargs) - super(PixelByPixelDataProcessor, self).__init__(**kwargs) - - def check_input(self, dataset): - return True - - def process(self, out=None): - - pyfunc = self.pyfunc - dsi = self.get_input() - - eval_func = numpy.frompyfunc(pyfunc,1,1) - - - y = DataContainer( eval_func( dsi.as_array() ) , True, - dimension_labels=dsi.dimension_labels ) - return y - - - - -if __name__ == '__main__': - shape = (2,3,4,5) - size = shape[0] - for i in range(1, len(shape)): - size = size * shape[i] - #print("a refcount " , sys.getrefcount(a)) - a = numpy.asarray([i for i in range( size )]) - print("a refcount " , sys.getrefcount(a)) - a = numpy.reshape(a, shape) - print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) - print("a refcount " , sys.getrefcount(a)) - print ("ds label {0}".format(ds.dimension_labels)) - subset = ['W' ,'X'] - b = ds.subset( subset ) - print("a refcount " , sys.getrefcount(a)) - print ("b label {0} shape {1}".format(b.dimension_labels, - numpy.shape(b.as_array()))) - c = ds.subset(['Z','W','X']) - print("a refcount " , sys.getrefcount(a)) - - # Create a ImageData sharing the array with c - volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels) - volume1 = ImageData(c, False) - - print ("volume0 {0} volume1 {1}".format(id(volume0.array), - id(volume1.array))) - - # Create a ImageData copying the array from c - volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels) - volume3 = ImageData(c) - - print ("volume2 {0} volume3 {1}".format(id(volume2.array), - id(volume3.array))) - - # single number DataSet - sn = DataContainer(numpy.asarray([1])) - - ax = AX() - ax.scalar = 2 - ax.set_input(c) - #ax.apply() - print ("ax in {0} out {1}".format(c.as_array().flatten(), - ax.get_output().as_array().flatten())) - - cast = CastDataContainer(dtype=numpy.float32) - cast.set_input(c) - out = cast.get_output() - out *= 0 - axm = AX() - axm.scalar = 0.5 - 
axm.set_input_processor(cast) - axm.get_output(out) - #axm.apply() - print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array())) - - # check out in DataSetProcessor - #a = numpy.asarray([i for i in range( size )]) - - - # create a PixelByPixelDataProcessor - - #define a python function which will take only one input (the pixel value) - pyfunc = lambda x: -x if x > 20 else x - clip = PixelByPixelDataProcessor() - clip.pyfunc = pyfunc - clip.set_input(c) - #clip.apply() - - print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array())) - - #dsp = DataProcessor() - #dsp.set_input(ds) - #dsp.input = a - # pipeline - - chain = AX() - chain.scalar = 0.5 - chain.set_input_processor(ax) - print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array())) - - # testing arithmetic operations - - print (b) - print ((b+1)) - print ((1+b)) - - print (b) - print ((b*2)) - - print (b) - print ((2*b)) - - print (b) - print ((b/2)) - - print (b) - print ((2/b)) - - print (b) - print ((b**2)) - - print (b) - print ((2**b)) - - print (type(volume3 + 2)) - - s = [i for i in range(3 * 4 * 4)] - s = numpy.reshape(numpy.asarray(s), (3,4,4)) - sino = AcquisitionData( s ) - - shape = (4,3,2) - a = [i for i in range(2*3*4)] - a = numpy.asarray(a) - a = numpy.reshape(a, shape) - print (numpy.shape(a)) - ds = DataContainer(a, True, ['X', 'Y','Z']) - # this means that I expect the X to be of length 2 , - # y of length 3 and z of length 4 - subset = ['Y' ,'Z'] - b0 = ds.subset( subset ) - print ("shape b 3,2? {0}".format(numpy.shape(b0.as_array()))) - # expectation on b is that it is - # 3x2 cut at z = 0 - - subset = ['X' ,'Y'] - b1 = ds.subset( subset , Z=1) - print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array()))) - - - - # create VolumeData from geometry - vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2) - vol = ImageData(geometry=vgeometry) - - sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20), - geom_type='parallel', pixel_num_v=3, - pixel_num_h=5 , channels=2) - sino = AcquisitionData(geometry=sgeometry) - sino2 = sino.clone() - - a0 = numpy.asarray([i for i in range(2*3*4)]) - a1 = numpy.asarray([2*i for i in range(2*3*4)]) - - - ds0 = DataContainer(numpy.reshape(a0,(2,3,4))) - ds1 = DataContainer(numpy.reshape(a1,(2,3,4))) - - numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1)) - - a2 = numpy.asarray([2*i for i in range(2*3*5)]) - ds2 = DataContainer(numpy.reshape(a2,(2,3,5))) - -# # it should fail if the shape is wrong -# try: -# ds2.dot(ds0) -# self.assertTrue(False) -# except ValueError as ve: -# self.assertTrue(True) - diff --git a/Wrappers/Python/build/lib/ccpi/io/__init__.py b/Wrappers/Python/build/lib/ccpi/io/__init__.py deleted file mode 100644 index 9233d7a..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/io/reader.py b/Wrappers/Python/build/lib/ccpi/io/reader.py deleted file mode 100644 index 856f5e0..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/reader.py +++ /dev/null @@ -1,500 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev, Edoardo Pasca and Srikanth Nagella - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -''' -This is a reader module with classes for loading 3D datasets. - -@author: Mr. Srikanth Nagella -''' -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -from ccpi.framework import AcquisitionGeometry -from ccpi.framework import AcquisitionData -import numpy as np -import os - -h5pyAvailable = True -try: - from h5py import File as NexusFile -except: - h5pyAvailable = False - -pilAvailable = True -try: - from PIL import Image -except: - pilAvailable = False - -class NexusReader(object): - ''' - Reader class for loading Nexus files. - ''' - - def __init__(self, nexus_filename=None): - ''' - This takes in input as filename and loads the data dataset. - ''' - self.flat = None - self.dark = None - self.angles = None - self.geometry = None - self.filename = nexus_filename - self.key_path = 'entry1/tomo_entry/instrument/detector/image_key' - self.data_path = 'entry1/tomo_entry/data/data' - self.angle_path = 'entry1/tomo_entry/data/rotation_angle' - - def get_image_keys(self): - try: - with NexusFile(self.filename,'r') as file: - return np.array(file[self.key_path]) - except KeyError as ke: - raise KeyError("get_image_keys: " , ke.args[0] , self.key_path) - - - def load(self, dimensions=None, image_key_id=0): - ''' - This is generic loading function of flat field, dark field and projection data. 
- ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - image_keys = np.array(file[self.key_path]) - projections = None - if dimensions == None: - projections = np.array(file[self.data_path]) - result = projections[image_keys==image_key_id] - return result - else: - #When dimensions are specified they need to be mapped to image_keys - index_array = np.where(image_keys==image_key_id) - projection_indexes = index_array[0][dimensions[0]] - new_dimensions = list(dimensions) - new_dimensions[0]= projection_indexes - new_dimensions = tuple(new_dimensions) - result = np.array(file[self.data_path][new_dimensions]) - return result - except: - print("Error reading nexus file") - raise - - def load_projection(self, dimensions=None): - ''' - Loads the projection data from the nexus file. - returns: numpy array with projection data - ''' - try: - if 0 not in self.get_image_keys(): - raise ValueError("Projections are not in the data. Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 0) - - def load_flat(self, dimensions=None): - ''' - Loads the flat field data from the nexus file. - returns: numpy array with flat field data - ''' - try: - if 1 not in self.get_image_keys(): - raise ValueError("Flats are not in the data. Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 1) - - def load_dark(self, dimensions=None): - ''' - Loads the Dark field data from the nexus file. - returns: numpy array with dark field data - ''' - try: - if 2 not in self.get_image_keys(): - raise ValueError("Darks are not in the data. 
Data Path " , - self.data_path) - except KeyError as ke: - raise KeyError(ke.args[0] , self.data_path) - return self.load(dimensions, 2) - - def get_projection_angles(self): - ''' - This function returns the projection angles - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - angles = np.array(file[self.angle_path],np.float32) - image_keys = np.array(file[self.key_path]) - return angles[image_keys==0] - except: - print("get_projection_angles Error reading nexus file") - raise - - - def get_sinogram_dimensions(self): - ''' - Return the dimensions of the dataset - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - projections = file[self.data_path] - image_keys = np.array(file[self.key_path]) - dims = list(projections.shape) - dims[0] = dims[1] - dims[1] = np.sum(image_keys==0) - return tuple(dims) - except: - print("Error reading nexus file") - raise - - def get_projection_dimensions(self): - ''' - Return the dimensions of the dataset - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - with NexusFile(self.filename,'r') as file: - try: - projections = file[self.data_path] - except KeyError as ke: - raise KeyError('Error: data path {0} not found\n{1}'\ - .format(self.data_path, - ke.args[0])) - #image_keys = np.array(file[self.key_path]) - image_keys = self.get_image_keys() - dims = list(projections.shape) - dims[0] = np.sum(image_keys==0) - return tuple(dims) - except: - print("Warning: Error reading image_keys trying accessing data on " , self.data_path) - with NexusFile(self.filename,'r') as file: - dims = file[self.data_path].shape - return tuple(dims) - - - - def get_acquisition_data(self, dimensions=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - data = self.load_projection(dimensions) - dims = self.get_projection_dimensions() - geometry = AcquisitionGeometry('parallel', '3D', - self.get_projection_angles(), - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = dims[1], - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - - def get_acquisition_data_subset(self, ymin=None, ymax=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - - - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - pass - dims = file[self.data_path].shape - if ymin is None and ymax is None: - data = np.array(file[self.data_path]) - else: - if ymin is None: - ymin = 0 - if ymax > dims[1]: - raise ValueError('ymax out of range') - data = np.array(file[self.data_path][:,:ymax,:]) - elif ymax is None: - ymax = dims[1] - if ymin < 0: - raise ValueError('ymin out of range') - data = np.array(file[self.data_path][:,ymin:,:]) - else: - if ymax > dims[1]: - raise ValueError('ymax out of range') - if ymin < 0: - raise ValueError('ymin out of range') - - data = np.array(file[self.data_path] - [: , ymin:ymax , :] ) - - except: - print("Error reading nexus 
file") - raise - - - try: - angles = self.get_projection_angles() - except KeyError as ke: - n = data.shape[0] - angles = np.linspace(0, n, n+1, dtype=np.float32) - - if ymax-ymin > 1: - - geometry = AcquisitionGeometry('parallel', '3D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = ymax-ymin, - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, False, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - elif ymax-ymin == 1: - geometry = AcquisitionGeometry('parallel', '2D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data.squeeze(), False, geometry=geometry, - dimension_labels=['angle','horizontal']) - def get_acquisition_data_slice(self, y_slice=0): - return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1) - def get_acquisition_data_whole(self): - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - print ("Warning: ") - dims = file[self.data_path].shape - - ymin = 0 - ymax = dims[1] - 1 - - return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax) - - - - def list_file_content(self): - try: - with NexusFile(self.filename,'r') as file: - file.visit(print) - except: - print("Error reading nexus file") - raise - def get_acquisition_data_batch(self, bmin=None, bmax=None): - if not h5pyAvailable: - raise Exception("Error: h5py is not installed") - if self.filename is None: - return - try: - - - with NexusFile(self.filename,'r') as file: - try: - dims = self.get_projection_dimensions() - except KeyError: - dims = file[self.data_path].shape - if bmin is None or bmax is None: - raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits') - - if bmin >= 0 and bmin < bmax and bmax <= dims[0]: - data = np.array(file[self.data_path][bmin:bmax]) - else: - raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0])) - - except: - print("Error reading nexus file") - raise - - - try: - angles = self.get_projection_angles()[bmin:bmax] - except KeyError as ke: - n = data.shape[0] - angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax] - - if bmax-bmin > 1: - - geometry = AcquisitionGeometry('parallel', '3D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - pixel_num_v = bmax-bmin, - pixel_size_v = 1, - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data, False, geometry=geometry, - dimension_labels=['angle','vertical','horizontal']) - elif bmax-bmin == 1: - geometry = AcquisitionGeometry('parallel', '2D', - angles, - pixel_num_h = dims[2], - pixel_size_h = 1 , - dist_source_center = None, - dist_center_detector = None, - channels = 1) - return AcquisitionData(data.squeeze(), False, geometry=geometry, - dimension_labels=['angle','horizontal']) - - - -class XTEKReader(object): - ''' - Reader class for loading XTEK files - ''' - - def __init__(self, xtek_config_filename=None): - ''' - This takes in the xtek config filename and loads the dataset and the - required geometry parameters - ''' - self.projections = None - self.geometry = {} - self.filename = xtek_config_filename - self.load() - - def load(self): - pixel_num_h = 0 - pixel_num_v = 0 - xpixel_size = 0 - ypixel_size = 0 - source_x = 0 - detector_x = 0 - with open(self.filename) as f: - content = f.readlines() 
- content = [x.strip() for x in content] - for line in content: - if line.startswith("SrcToObject"): - source_x = float(line.split('=')[1]) - elif line.startswith("SrcToDetector"): - detector_x = float(line.split('=')[1]) - elif line.startswith("DetectorPixelsY"): - pixel_num_v = int(line.split('=')[1]) - #self.num_of_vertical_pixels = self.calc_v_alighment(self.num_of_vertical_pixels, self.pixels_per_voxel) - elif line.startswith("DetectorPixelsX"): - pixel_num_h = int(line.split('=')[1]) - elif line.startswith("DetectorPixelSizeX"): - xpixel_size = float(line.split('=')[1]) - elif line.startswith("DetectorPixelSizeY"): - ypixel_size = float(line.split('=')[1]) - elif line.startswith("Projections"): - self.num_projections = int(line.split('=')[1]) - elif line.startswith("InitialAngle"): - self.initial_angle = float(line.split('=')[1]) - elif line.startswith("Name"): - self.experiment_name = line.split('=')[1] - elif line.startswith("Scattering"): - self.scattering = float(line.split('=')[1]) - elif line.startswith("WhiteLevel"): - self.white_level = float(line.split('=')[1]) - elif line.startswith("MaskRadius"): - self.mask_radius = float(line.split('=')[1]) - - #Read Angles - angles = self.read_angles() - self.geometry = AcquisitionGeometry('cone', '3D', angles, pixel_num_h, xpixel_size, pixel_num_v, ypixel_size, -1 * source_x, - detector_x - source_x, - ) - - def read_angles(self): - """ - Read the angles file .ang or _ctdata.txt file and returns the angles - as an numpy array. - """ - input_path = os.path.dirname(self.filename) - angles_ctdata_file = os.path.join(input_path, '_ctdata.txt') - angles_named_file = os.path.join(input_path, self.experiment_name+'.ang') - angles = np.zeros(self.num_projections,dtype='f') - #look for _ctdata.txt - if os.path.exists(angles_ctdata_file): - #read txt file with angles - with open(angles_ctdata_file) as f: - content = f.readlines() - #skip firt three lines - #read the middle value of 3 values in each line as angles in degrees - index = 0 - for line in content[3:]: - self.angles[index]=float(line.split(' ')[1]) - index+=1 - angles = np.deg2rad(self.angles+self.initial_angle); - elif os.path.exists(angles_named_file): - #read the angles file which is text with first line as header - with open(angles_named_file) as f: - content = f.readlines() - #skip first line - index = 0 - for line in content[1:]: - angles[index] = float(line.split(':')[1]) - index+=1 - angles = np.flipud(angles+self.initial_angle) #angles are in the reverse order - else: - raise RuntimeError("Can't find angles file") - return angles - - def load_projection(self, dimensions=None): - ''' - This method reads the projection images from the directory and returns a numpy array - ''' - if not pilAvailable: - raise('Image library pillow is not installed') - if dimensions != None: - raise('Extracting subset of data is not implemented') - input_path = os.path.dirname(self.filename) - pixels = np.zeros((self.num_projections, self.geometry.pixel_num_h, self.geometry.pixel_num_v), dtype='float32') - for i in range(1, self.num_projections+1): - im = Image.open(os.path.join(input_path,self.experiment_name+"_%04d"%i+".tif")) - pixels[i-1,:,:] = np.fliplr(np.transpose(np.array(im))) ##Not sure this is the correct way to populate the image - - #normalising the data - #TODO: Move this to a processor - pixels = pixels - (self.white_level*self.scattering)/100.0 - pixels[pixels < 0.0] = 0.000001 # all negative values to approximately 0 as the std log of zero and non negative number is not defined - 
return pixels - - def get_acquisition_data(self, dimensions=None): - ''' - This method load the acquisition data and given dimension and returns an AcquisitionData Object - ''' - data = self.load_projection(dimensions) - return AcquisitionData(data, geometry=self.geometry) - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py deleted file mode 100644 index cf2d93d..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py deleted file mode 100644 index ed95c3f..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import time -from numbers import Integral - -class Algorithm(object): - '''Base class for iterative algorithms - - provides the minimal infrastructure. - Algorithms are iterables so can be easily run in a for loop. They will - stop as soon as the stop cryterion is met. - The user is required to implement the set_up, __init__, update and - and update_objective methods - - A courtesy method run is available to run n iterations. The method accepts - a callback function that receives the current iteration number and the actual objective - value and can be used to trigger print to screens and other user interactions. The run - method will stop when the stopping cryterion is met. - ''' - - def __init__(self): - '''Constructor - - Set the minimal number of parameters: - iteration: current iteration number - max_iteration: maximum number of iterations - memopt: whether to use memory optimisation () - timing: list to hold the times it took to run each iteration - update_objectice_interval: the interval every which we would save the current - objective. 1 means every iteration, 2 every 2 iteration - and so forth. 
This is by default 1 and should be increased - when evaluating the objective is computationally expensive. - ''' - self.iteration = 0 - self.__max_iteration = 0 - self.__loss = [] - self.memopt = False - self.timing = [] - self.update_objective_interval = 1 - def set_up(self, *args, **kwargs): - '''Set up the algorithm''' - raise NotImplementedError() - def update(self): - '''A single iteration of the algorithm''' - raise NotImplementedError() - - def should_stop(self): - '''default stopping cryterion: number of iterations - - The user can change this in concrete implementatition of iterative algorithms.''' - return self.max_iteration_stop_cryterion() - - def max_iteration_stop_cryterion(self): - '''default stop cryterion for iterative algorithm: max_iteration reached''' - return self.iteration >= self.max_iteration - def __iter__(self): - '''Algorithm is an iterable''' - return self - def next(self): - '''Algorithm is an iterable - - python2 backwards compatibility''' - return self.__next__() - def __next__(self): - '''Algorithm is an iterable - - calling this method triggers update and update_objective - ''' - if self.should_stop(): - raise StopIteration() - else: - time0 = time.time() - self.update() - self.timing.append( time.time() - time0 ) - if self.iteration % self.update_objective_interval == 0: - self.update_objective() - self.iteration += 1 - def get_output(self): - '''Returns the solution found''' - return self.x - def get_last_loss(self): - '''Returns the last stored value of the loss function - - if update_objective_interval is 1 it is the value of the objective at the current - iteration. If update_objective_interval > 1 it is the last stored value. - ''' - return self.__loss[-1] - def get_last_objective(self): - '''alias to get_last_loss''' - return self.get_last_loss() - def update_objective(self): - '''calculates the objective with the current solution''' - raise NotImplementedError() - @property - def loss(self): - '''returns the list of the values of the objective during the iteration - - The length of this list may be shorter than the number of iterations run when - the update_objective_interval > 1 - ''' - return self.__loss - @property - def objective(self): - '''alias of loss''' - return self.loss - @property - def max_iteration(self): - '''gets the maximum number of iterations''' - return self.__max_iteration - @max_iteration.setter - def max_iteration(self, value): - '''sets the maximum number of iterations''' - assert isinstance(value, int) - self.__max_iteration = value - @property - def update_objective_interval(self): - return self.__update_objective_interval - @update_objective_interval.setter - def update_objective_interval(self, value): - if isinstance(value, Integral): - if value >= 1: - self.__update_objective_interval = value - else: - raise ValueError('Update objective interval must be an integer >= 1') - else: - raise ValueError('Update objective interval must be an integer >= 1') - def run(self, iterations, verbose=True, callback=None): - '''run n iterations and update the user with the callback if specified''' - if self.should_stop(): - print ("Stop cryterion has been reached.") - i = 0 - for _ in self: - if verbose and self.iteration % self.update_objective_interval == 0: - print ("Iteration {}/{}, objective {}".format(self.iteration, - self.max_iteration, self.get_last_objective()) ) - else: - if callback is not None: - callback(self.iteration, self.get_last_objective()) - i += 1 - if i == iterations: - break - diff --git 
a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py deleted file mode 100644 index 7194eb8..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on Thu Feb 21 11:11:23 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -#from collections.abc import Iterable -class CGLS(Algorithm): - - '''Conjugate Gradient Least Squares algorithm - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - ''' - def __init__(self, **kwargs): - super(CGLS, self).__init__() - self.x = kwargs.get('x_init', None) - self.operator = kwargs.get('operator', None) - self.data = kwargs.get('data', None) - if self.x is not None and self.operator is not None and \ - self.data is not None: - print ("Calling from creator") - self.set_up(x_init =kwargs['x_init'], - operator=kwargs['operator'], - data =kwargs['data']) - - def set_up(self, x_init, operator , data ): - - self.r = data.copy() - self.x = x_init.copy() - - self.operator = operator - self.d = operator.adjoint(self.r) - - - self.normr2 = self.d.squared_norm() - #if isinstance(self.normr2, Iterable): - # self.normr2 = sum(self.normr2) - #self.normr2 = numpy.sqrt(self.normr2) - #print ("set_up" , self.normr2) - - def update(self): - - Ad = self.operator.direct(self.d) - #norm = (Ad*Ad).sum() - #if isinstance(norm, Iterable): - # norm = sum(norm) - norm = Ad.squared_norm() - - alpha = self.normr2/norm - self.x += (self.d * alpha) - self.r -= (Ad * alpha) - s = self.operator.adjoint(self.r) - - normr2_new = s.squared_norm() - #if isinstance(normr2_new, Iterable): - # normr2_new = sum(normr2_new) - #normr2_new = numpy.sqrt(normr2_new) - #print (normr2_new) - - beta = normr2_new/self.normr2 - self.normr2 = normr2_new - self.d = s + beta*self.d - - def update_objective(self): - self.loss.append(self.r.squared_norm()) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py deleted file mode 100644 index 445ba7a..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
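# Editor's illustration: a self-contained NumPy version of the CGLS recurrence used by
# the CGLS class above (same roles for r, d, normr2, alpha and beta), written for a
# dense matrix A instead of the framework's Operator. The name cgls_numpy and the use
# of a dense matrix are assumptions for illustration only, not part of the ccpi API.
def cgls_numpy(A, b, x0, iterations=20):
    x = x0.copy()
    r = b - A @ x                 # residual
    d = A.T @ r                   # search direction, d = A^T r
    normr2 = d @ d
    for _ in range(iterations):
        Ad = A @ d
        alpha = normr2 / (Ad @ Ad)
        x = x + alpha * d
        r = r - alpha * Ad
        s = A.T @ r
        normr2_new = s @ s
        beta = normr2_new / normr2
        normr2 = normr2_new
        d = s + beta * d
    return x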
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Created on Thu Feb 21 11:09:03 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun - -class FBPD(Algorithm): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - constraint = None - data_fidelity = None - regulariser = None - def __init__(self, **kwargs): - pass - def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - - # default inputs - if constraint is None: - self.constraint = ZeroFun() - else: - self.constraint = constraint - if data_fidelity is None: - data_fidelity = ZeroFun() - else: - self.data_fidelity = data_fidelity - if regulariser is None: - self.regulariser = ZeroFun() - else: - self.regulariser = regulariser - - # algorithmic parameters - - - # step-sizes - self.tau = 2 / (self.data_fidelity.L + 2) - self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L - - self.inv_sigma = 1/self.sigma - - # initialization - self.x = x_init - self.y = operator.direct(self.x) - - - def update(self): - - # primal forward-backward step - x_old = self.x - self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) - self.x = self.constraint.prox(self.x, self.tau); - - # dual forward-backward step - self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); - self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma); - - # time and criterion - self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py deleted file mode 100644 index 93ba178..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 21 11:07:30 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun -import numpy - -class FISTA(Algorithm): - '''Fast Iterative Shrinkage-Thresholding Algorithm - - Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding - algorithm for linear inverse problems. - SIAM journal on imaging sciences,2(1), pp.183-202. 
- - Parameters: - x_init: initial guess - f: data fidelity - g: regularizer - h: - opt: additional algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(FISTA, self).__init__() - self.f = None - self.g = None - self.invL = None - self.t_old = 1 - args = ['x_init', 'f', 'g', 'opt'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(kwargs['x_init'], - f=kwargs['f'], - g=kwargs['g'], - opt=kwargs['opt']) - - def set_up(self, x_init, f=None, g=None, opt=None): - - # default inputs - if f is None: - self.f = ZeroFun() - else: - self.f = f - if g is None: - g = ZeroFun() - self.g = g - else: - self.g = g - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'memopt':False} - - self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - self.memopt = memopt - - # initialization - if memopt: - self.y = x_init.clone() - self.x_old = x_init.clone() - self.x = x_init.clone() - self.u = x_init.clone() - else: - self.x_old = x_init.copy() - self.y = x_init.copy() - - #timing = numpy.zeros(max_iter) - #criter = numpy.zeros(max_iter) - - - self.invL = 1/f.L - - self.t_old = 1 - - def update(self): - # algorithm loop - #for it in range(0, max_iter): - - if self.memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - self.f.gradient(self.y, out=self.u) - self.u.__imul__( -self.invL ) - self.u.__iadd__( self.y ) - # x = g.prox(u,invL) - self.g.proximal(self.u, self.invL, out=self.x) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - self.x.subtract(self.x_old, out=self.y) - self.y.__imul__ ((self.t_old-1)/self.t) - self.y.__iadd__( self.x ) - - self.x_old.fill(self.x) - self.t_old = self.t - - - else: - u = self.y - self.invL*self.f.grad(self.y) - - self.x = self.g.prox(u,self.invL) - - self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) - - self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) - - self.x_old = self.x.copy() - self.t_old = self.t - - def update_objective(self): - self.loss.append( self.f(self.x) + self.g(self.x) ) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py deleted file mode 100644 index f1e4132..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
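# Editor's illustration: a minimal NumPy sketch of the FISTA recursion implemented by
# the class above, specialised to f(x) = 0.5*||Ax - b||^2 and g(x) = lam*||x||_1, so
# that the proximal step reduces to soft-thresholding. fista_numpy, A, b and lam are
# illustrative assumptions; they are not names provided by the ccpi package.
import numpy

def fista_numpy(A, b, lam, iterations=100):
    L = numpy.linalg.norm(A, 2) ** 2                  # Lipschitz constant of grad f
    invL = 1.0 / L
    x_old = numpy.zeros(A.shape[1])
    y = x_old.copy()
    t_old = 1.0
    for _ in range(iterations):
        u = y - invL * (A.T @ (A @ y - b))            # gradient step on f at y
        x = numpy.sign(u) * numpy.maximum(numpy.abs(u) - lam * invL, 0.0)  # prox of g
        t = 0.5 * (1 + numpy.sqrt(1 + 4 * t_old ** 2))
        y = x + (t_old - 1) / t * (x - x_old)         # momentum extrapolation
        x_old, t_old = x, t
    return x_old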
-""" -Created on Thu Feb 21 11:05:09 2019 - -@author: ofn77899 -""" -from ccpi.optimisation.algorithms import Algorithm - -class GradientDescent(Algorithm): - '''Implementation of Gradient Descent algorithm - ''' - - def __init__(self, **kwargs): - '''initialisation can be done at creation time if all - proper variables are passed or later with set_up''' - super(GradientDescent, self).__init__() - self.x = None - self.rate = 0 - self.objective_function = None - self.regulariser = None - args = ['x_init', 'objective_function', 'rate'] - for k,v in kwargs.items(): - if k in args: - args.pop(args.index(k)) - if len(args) == 0: - return self.set_up(x_init=kwargs['x_init'], - objective_function=kwargs['objective_function'], - rate=kwargs['rate']) - - def should_stop(self): - '''stopping cryterion, currently only based on number of iterations''' - return self.iteration >= self.max_iteration - - def set_up(self, x_init, objective_function, rate): - '''initialisation of the algorithm''' - self.x = x_init.copy() - self.objective_function = objective_function - self.rate = rate - self.loss.append(objective_function(x_init)) - self.iteration = 0 - try: - self.memopt = self.objective_function.memopt - except AttributeError as ae: - self.memopt = False - if self.memopt: - self.x_update = x_init.copy() - - def update(self): - '''Single iteration''' - if self.memopt: - self.objective_function.gradient(self.x, out=self.x_update) - self.x_update *= -self.rate - self.x += self.x_update - else: - self.x += -self.rate * self.objective_function.gradient(self.x) - - def update_objective(self): - self.loss.append(self.objective_function(self.x)) - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py deleted file mode 100644 index d0e27ae..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 4 16:18:06 2019 - -@author: evangelos -""" -from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData -import numpy as np -import matplotlib.pyplot as plt -import time -from ccpi.optimisation.operators import BlockOperator -from ccpi.framework import BlockDataContainer - - -import matplotlib.pyplot as plt - -class PDHG(Algorithm): - '''Primal Dual Hybrid Gradient''' - - def __init__(self, **kwargs): - super(PDHG, self).__init__() - self.f = kwargs.get('f', None) - self.operator = kwargs.get('operator', None) - self.g = kwargs.get('g', None) - self.tau = kwargs.get('tau', None) - self.sigma = kwargs.get('sigma', None) - - if self.f is not None and self.operator is not None and \ - self.g is not None: - print ("Calling from creator") - self.set_up(self.f, - self.operator, - self.g, - self.tau, - self.sigma) - - def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs): - # algorithmic parameters - - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - - - self.x_old = self.operator.domain_geometry().allocate() - self.y_old = self.operator.range_geometry().allocate() - - self.xbar = self.x_old.copy() - #x_tmp = x_old - self.x = self.x_old.copy() - self.y = self.y_old.copy() - #y_tmp = y_old - #y = y_tmp - - # relaxation parameter - self.theta = 1 - - def update(self): - # Gradient descent, Dual problem solution - self.y_old += self.sigma * self.operator.direct(self.xbar) - self.y = 
self.f.proximal_conjugate(self.y_old, self.sigma) - - # Gradient ascent, Primal problem solution - self.x_old -= self.tau * self.operator.adjoint(self.y) - self.x = self.g.proximal(self.x_old, self.tau) - - #Update - #xbar = x + theta * (x - x_old) - self.xbar.fill(self.x) - self.xbar -= self.x_old - self.xbar *= self.theta - self.xbar += self.x - -# self.x_old.fill(self.x) -# self.y_old.fill(self.y) - self.y_old = self.y.copy() - self.x_old = self.x.copy() - #self.y = self.y_old - - def update_objective(self): - self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x), - -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) - ]) - - - -def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ - 'memopt': False} - - if sigma is None and tau is None: - raise ValueError('Need sigma*tau||K||^2<1') - - niter = opt['niter'] if 'niter' in opt.keys() else 1000 - tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False - stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - - - x_old = operator.domain_geometry().allocate() - y_old = operator.range_geometry().allocate() - - - xbar = x_old - x_tmp = x_old - x = x_old - - y_tmp = y_old - y = y_tmp - - # relaxation parameter - theta = 1 - - t = time.time() - - objective = [] - - - for i in range(niter): - - # Gradient descent, Dual problem solution - y_tmp = y_old + sigma * operator.direct(xbar) - y = f.proximal_conjugate(y_tmp, sigma) - - # Gradient ascent, Primal problem solution - x_tmp = x_old - tau * operator.adjoint(y) - x = g.proximal(x_tmp, tau) - - #Update - xbar = x + theta * (x - x_old) - - x_old = x - y_old = y - - if i%100==0: - - primal = f(operator.direct(x)) + g(x) - dual = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) - print( i, primal, dual, primal-dual) - -# plt.imshow(x.as_array()) -# plt.show() -# print(f(operator.direct(x)) + g(x), i) - - t_end = time.time() - - return x, t_end - t, objective - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py deleted file mode 100644 index f562973..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2019 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
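# Editor's illustration: a plain NumPy rendering of the PDHG iteration used by the
# class above, applied to min_x 0.5*||x - b||^2 + lam*||K x||_1 with a dense matrix K.
# The prox of the convex conjugate of lam*||.||_1 is a clip to [-lam, lam] and the prox
# of the quadratic term has a closed form. pdhg_numpy, K, b and lam are illustrative only.
import numpy

def pdhg_numpy(K, b, lam, iterations=200):
    sigma = tau = 1.0 / numpy.linalg.norm(K, 2)       # so that sigma*tau*||K||^2 <= 1
    theta = 1.0                                       # relaxation parameter
    x = numpy.zeros(K.shape[1])
    x_bar = x.copy()
    y = numpy.zeros(K.shape[0])
    for _ in range(iterations):
        y = numpy.clip(y + sigma * (K @ x_bar), -lam, lam)        # dual prox step
        x_new = (x - tau * (K.T @ y) + tau * b) / (1.0 + tau)     # primal prox step
        x_bar = x_new + theta * (x_new - x)                       # over-relaxation
        x = x_new
    return x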
-""" -Created on Thu Feb 21 11:03:13 2019 - -@author: ofn77899 -""" - -from .Algorithm import Algorithm -from .CGLS import CGLS -from .GradientDescent import GradientDescent -from .FISTA import FISTA -from .FBPD import FBPD -from .PDHG import PDHG -from .PDHG import PDHG_old - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py b/Wrappers/Python/build/lib/ccpi/optimisation/algs.py deleted file mode 100644 index 6b6ae2c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy -import time - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ZeroFun -from ccpi.framework import ImageData -from ccpi.framework import AcquisitionData -from ccpi.optimisation.spdhg import spdhg -from ccpi.optimisation.spdhg import KullbackLeibler -from ccpi.optimisation.spdhg import KullbackLeiblerConvexConjugate - -def FISTA(x_init, f=None, g=None, opt=None): - '''Fast Iterative Shrinkage-Thresholding Algorithm - - Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding - algorithm for linear inverse problems. - SIAM journal on imaging sciences,2(1), pp.183-202. 
- - Parameters: - x_init: initial guess - f: data fidelity - g: regularizer - h: - opt: additional algorithm - ''' - # default inputs - if f is None: f = ZeroFun() - if g is None: g = ZeroFun() - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} - - max_iter = opt['iter'] if 'iter' in opt.keys() else 1000 - tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 - memopt = opt['memopt'] if 'memopt' in opt.keys() else False - - - # initialization - if memopt: - y = x_init.clone() - x_old = x_init.clone() - x = x_init.clone() - u = x_init.clone() - else: - x_old = x_init - y = x_init; - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - invL = 1/f.L - - t_old = 1 - - c = f(x_init) + g(x_init) - - # algorithm loop - for it in range(0, max_iter): - - time0 = time.time() - if memopt: - # u = y - invL*f.grad(y) - # store the result in x_old - f.gradient(y, out=u) - u.__imul__( -invL ) - u.__iadd__( y ) - # x = g.prox(u,invL) - g.proximal(u, invL, out=x) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - # y = x + (t_old-1)/t*(x-x_old) - x.subtract(x_old, out=y) - y.__imul__ ((t_old-1)/t) - y.__iadd__( x ) - - x_old.fill(x) - t_old = t - - - else: - u = y - invL*f.grad(y) - - x = g.prox(u,invL) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - y = x + (t_old-1)/t*(x-x_old) - - x_old = x.copy() - t_old = t - - # time and criterion - timing[it] = time.time() - time0 - criter[it] = f(x) + g(x); - - # stopping rule - #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: - # break - - #print(it, 'out of', 10, 'iterations', end='\r'); - - #criter = criter[0:it+1]; - timing = numpy.cumsum(timing[0:it+1]); - - return x, it, timing, criter - -def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ - regulariser=None, opt=None): - '''FBPD Algorithm - - Parameters: - x_init: initial guess - f: constraint - g: data fidelity - h: regularizer - opt: additional algorithm - ''' - # default inputs - if constraint is None: constraint = ZeroFun() - if data_fidelity is None: data_fidelity = ZeroFun() - if regulariser is None: regulariser = ZeroFun() - - # algorithmic parameters - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - memopt = opt['memopts'] if 'memopts' in opt.keys() else False - - # step-sizes - tau = 2 / (data_fidelity.L + 2) - sigma = (1/tau - data_fidelity.L/2) / regulariser.L - inv_sigma = 1/sigma - - # initialization - x = x_init - y = operator.direct(x); - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - - - - # algorithm loop - for it in range(0, max_iter): - - t = time.time() - - # primal forward-backward step - x_old = x; - x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) ); - x = constraint.prox(x, tau); - - # dual forward-backward step - y = y + sigma * operator.direct(2*x - x_old); - y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma); - - # time and criterion - timing[it] = time.time() - t - criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x)) - - # stopping rule - #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: - # break - - criter = criter[0:it+1] - timing = numpy.cumsum(timing[0:it+1]) - - return x, it, timing, criter - -def CGLS(x_init, operator , data , opt=None): - '''Conjugate Gradient Least Squares algorithm 
- - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - opt: additional algorithm - ''' - - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - - r = data.copy() - x = x_init.copy() - - d = operator.adjoint(r) - - normr2 = (d**2).sum() - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - # algorithm loop - for it in range(0, max_iter): - - t = time.time() - - Ad = operator.direct(d) - alpha = normr2/( (Ad**2).sum() ) - x = x + alpha*d - r = r - alpha*Ad - s = operator.adjoint(r) - - normr2_new = (s**2).sum() - beta = normr2_new/normr2 - normr2 = normr2_new - d = s + beta*d - - # time and criterion - timing[it] = time.time() - t - criter[it] = (r**2).sum() - - return x, it, timing, criter - -def SIRT(x_init, operator , data , opt=None, constraint=None): - '''Simultaneous Iterative Reconstruction Technique - - Parameters: - x_init: initial guess - operator: operator for forward/backward projections - data: data to operate on - opt: additional algorithm - constraint: func of Indicator type specifying convex constraint. - ''' - - if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] - - # Set default constraint to unconstrained - if constraint==None: - constraint = Function() - - x = x_init.clone() - - timing = numpy.zeros(max_iter) - criter = numpy.zeros(max_iter) - - # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0 - relax_par = 1.0 - - # Set up scaling matrices D and M. - im1 = ImageData(geometry=x_init.geometry) - im1.array[:] = 1.0 - M = 1/operator.direct(im1) - del im1 - aq1 = AcquisitionData(geometry=M.geometry) - aq1.array[:] = 1.0 - D = 1/operator.adjoint(aq1) - del aq1 - - # algorithm loop - for it in range(0, max_iter): - t = time.time() - r = data - operator.direct(x) - - x = constraint.prox(x + relax_par * (D*operator.adjoint(M*r)),None) - - timing[it] = time.time() - t - if it > 0: - criter[it-1] = (r**2).sum() - - r = data - operator.direct(x) - criter[it] = (r**2).sum() - return x, it, timing, criter - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py deleted file mode 100644 index efc465c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py +++ /dev/null @@ -1,272 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
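# Editor's illustration: the SIRT update defined in algs.py above, rewritten for a
# dense NumPy matrix A with strictly positive row and column sums. M and D are the
# inverse row and column sums used as scaling operators; sirt_numpy and relax are
# illustrative names, not part of the ccpi API.
import numpy

def sirt_numpy(A, b, x0, iterations=50, relax=1.0):
    M = 1.0 / (A @ numpy.ones(A.shape[1]))            # inverse row sums,    1/(A 1)
    D = 1.0 / (A.T @ numpy.ones(A.shape[0]))          # inverse column sums, 1/(A^T 1)
    x = x0.copy()
    for _ in range(iterations):
        r = b - A @ x                                 # residual in data space
        x = x + relax * D * (A.T @ (M * r))           # scaled back-projection update
    return x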
- -from ccpi.optimisation.ops import Identity, FiniteDiff2D -import numpy -from ccpi.framework import DataContainer -import warnings -from ccpi.optimisation.functions import Function -def isSizeCorrect(data1 ,data2): - if issubclass(type(data1), DataContainer) and \ - issubclass(type(data2), DataContainer): - # check dimensionality - if data1.check_dimensions(data2): - return True - elif issubclass(type(data1) , numpy.ndarray) and \ - issubclass(type(data2) , numpy.ndarray): - return data1.shape == data2.shape - else: - raise ValueError("{0}: getting two incompatible types: {1} {2}"\ - .format('Function', type(data1), type(data2))) - return False -class Norm2(Function): - - def __init__(self, - gamma=1.0, - direction=None): - super(Norm2, self).__init__() - self.gamma = gamma; - self.direction = direction; - - def __call__(self, x, out=None): - - if out is None: - xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, - keepdims=True)) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - arr = out.as_array() - numpy.square(x.as_array(), out=arr) - xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - p = numpy.sum(self.gamma*xx) - - return p - - def prox(self, x, tau): - - xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - p = x.as_array() * xx - - return type(x)(p,geometry=x.geometry) - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x,tau) - else: - if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - numpy.square(x.as_array(), out = out.as_array()) - xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, - keepdims=True )) - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out.as_array()) - - - elif issubclass(type(out) , numpy.ndarray): - numpy.square(x.as_array(), out=out) - xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) - - xx = numpy.maximum(0, 1 - tau*self.gamma / xx) - x.multiply(xx, out= out) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - - -class TV2D(Norm2): - - def __init__(self, gamma): - super(TV2D,self).__init__(gamma, 0) - self.op = FiniteDiff2D() - self.L = self.op.get_max_sing_val() - - -# Define a class for squared 2-norm -class Norm2sq(Function): - ''' - f(x) = c*||A*x-b||_2^2 - - which has - - grad[f](x) = 2*c*A^T*(A*x-b) - - and Lipschitz constant - - L = 2*c*||A||_2^2 = 2*s1(A)^2 - - where s1(A) is the largest singular value of A. - - ''' - - def __init__(self,A,b,c=1.0,memopt=False): - super(Norm2sq, self).__init__() - - self.A = A # Should be an operator, default identity - self.b = b # Default zero DataSet? - self.c = c # Default 1. 
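        # Editor's note: when memopt is requested the branch below pre-allocates
        # range_tmp/domain_tmp scratch containers so that gradient() can be evaluated
        # in place through the operator's out= arguments instead of allocating
        # temporaries on every call; it falls back to memopt=False if the geometries
        # cannot be allocated.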
- if memopt: - try: - self.range_tmp = A.range_geometry().allocate() - self.domain_tmp = A.domain_geometry().allocate() - self.memopt = True - except NameError as ne: - warnings.warn(str(ne)) - self.memopt = False - except NotImplementedError as nie: - print (nie) - warnings.warn(str(nie)) - self.memopt = False - else: - self.memopt = False - - # Compute the Lipschitz parameter from the operator if possible - # Leave it initialised to None otherwise - try: - self.L = 2.0*self.c*(self.A.norm()**2) - except AttributeError as ae: - pass - except NotImplementedError as noe: - pass - - #def grad(self,x): - # return self.gradient(x, out=None) - - def __call__(self,x): - #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - #if out is None: - # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) - #else: - y = self.A.direct(x) - y.__isub__(self.b) - #y.__imul__(y) - #return y.sum() * self.c - try: - return y.squared_norm() * self.c - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) * self.c - - def gradient(self, x, out = None): - if self.memopt: - #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - - self.A.direct(x, out=self.range_tmp) - self.range_tmp -= self.b - self.A.adjoint(self.range_tmp, out=out) - #self.direct_placehold.multiply(2.0*self.c, out=out) - out *= (self.c * 2.0) - else: - return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - - -# Box constraints indicator function. Calling returns 0 if argument is within -# the box. The prox operator is projection onto the box. Only implements one -# scalar lower and one upper as constraint on all elements. Should generalise -# to vectors to allow different constraints one elements. -class IndicatorBox(Function): - - def __init__(self,lower=-numpy.inf,upper=numpy.inf): - # Do nothing - super(IndicatorBox, self).__init__() - self.lower = lower - self.upper = upper - - - def __call__(self,x): - - if (numpy.all(x.array>=self.lower) and - numpy.all(x.array <= self.upper) ): - val = 0 - else: - val = numpy.inf - return val - - def prox(self,x,tau=None): - return (x.maximum(self.lower)).minimum(self.upper) - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - #(x.abs() - tau*self.gamma).maximum(0) * x.sign() - x.abs(out = out) - out.__isub__(tau*self.gamma) - out.maximum(0, out=out) - if self.sign_x is None or not x.shape == self.sign_x.shape: - self.sign_x = x.sign() - else: - x.sign(out=self.sign_x) - - out.__imul__( self.sign_x ) - -# A more interesting example, least squares plus 1-norm minimization. 
-# Define class to represent 1-norm including prox function -class Norm1(Function): - - def __init__(self,gamma): - super(Norm1, self).__init__() - self.gamma = gamma - self.L = 1 - self.sign_x = None - - def __call__(self,x,out=None): - if out is None: - return self.gamma*(x.abs().sum()) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - x.abs(out=out) - return out.sum() * self.gamma - - def prox(self,x,tau): - return (x.abs() - tau*self.gamma).maximum(0) * x.sign() - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if isSizeCorrect(x,out): - # check dimensionality - if issubclass(type(out), DataContainer): - v = (x.abs() - tau*self.gamma).maximum(0) - x.sign(out=out) - out *= v - #out.fill(self.prox(x,tau)) - elif issubclass(type(out) , numpy.ndarray): - v = (x.abs() - tau*self.gamma).maximum(0) - out[:] = x.sign() - out *= v - #out[:] = self.prox(x,tau) - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py deleted file mode 100644 index 81c16cd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 8 10:01:31 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import BlockDataContainer -from numbers import Number - -class BlockFunction(Function): - '''A Block vector of Functions - - .. math:: - - f = [f_1,f_2,f_3] - f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3) - - ''' - def __init__(self, *functions): - '''Creator''' - self.functions = functions - self.length = len(self.functions) - - super(BlockFunction, self).__init__() - - def __call__(self, x): - '''evaluates the BlockFunction on the BlockDataContainer - - :param: x (BlockDataContainer): must have as many rows as self.length - - returns sum(f_i(x_i)) - ''' - if self.length != x.shape[0]: - raise ValueError('BlockFunction and BlockDataContainer have incompatible size') - t = 0 - for i in range(x.shape[0]): - t += self.functions[i](x.get_item(i)) - return t - - def convex_conjugate(self, x): - '''Convex_conjugate does not take into account the BlockOperator''' - t = 0 - for i in range(x.shape[0]): - t += self.functions[i].convex_conjugate(x.get_item(i)) - return t - - - def proximal_conjugate(self, x, tau, out = None): - '''proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) - else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) - - def proximal(self, x, tau, out = None): - '''proximal does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau) - else: - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) - - def gradient(self,x, out=None): - '''FIXME: gradient returns pass''' - pass \ No newline at end of file diff 
--git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py deleted file mode 100644 index 82f24a6..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import warnings -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction - -class Function(object): - '''Abstract class representing a function - - Members: - L is the Lipschitz constant of the gradient of the Function - ''' - def __init__(self): - self.L = None - - def __call__(self,x, out=None): - '''Evaluates the function at x ''' - raise NotImplementedError - - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - raise NotImplementedError - - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau''' - raise NotImplementedError - - def convex_conjugate(self, x, out=None): - '''This evaluates the convex conjugate of the function at x''' - raise NotImplementedError - - def proximal_conjugate(self, x, tau, out = None): - '''This returns the proximal operator for the convex conjugate of the function at x, tau''' - raise NotImplementedError - - def grad(self, x): - '''Alias of gradient(x,None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use gradient instead''', DeprecationWarning) - return self.gradient(x, out=None) - - def prox(self, x, tau): - '''Alias of proximal(x, tau, None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. 
Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) - - def __rmul__(self, scalar): - '''Defines the multiplication by a scalar on the left - - returns a ScaledFunction''' - return ScaledFunction(self, scalar) - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py deleted file mode 100644 index 34b7e35..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 8 09:55:36 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ScaledFunction - - -class FunctionOperatorComposition(Function): - - def __init__(self, operator, function): - super(FunctionOperatorComposition, self).__init__() - self.function = function - self.operator = operator - alpha = 1 - if isinstance (function, ScaledFunction): - alpha = function.scalar - self.L = 2 * alpha * operator.norm()**2 - - - def __call__(self, x): - - return self.function(self.operator.direct(x)) - - def call_adjoint(self, x): - - return self.function(self.operator.adjoint(x)) - - def convex_conjugate(self, x): - - ''' convex_conjugate does not take into account the Operator''' - return self.function.convex_conjugate(x) - - def proximal(self, x, tau, out=None): - - '''proximal does not take into account the Operator''' - - return self.function.proximal(x, tau, out=out) - - def proximal_conjugate(self, x, tau, out=None): - - ''' proximal conjugate does not take into account the Operator''' - - return self.function.proximal_conjugate(x, tau, out=out) - - def gradient(self, x, out=None): - - ''' Gradient takes into account the Operator''' - if out is None: - return self.operator.adjoint( - self.function.gradient(self.operator.direct(x)) - ) - else: - self.operator.adjoint( - self.function.gradient(self.operator.direct(x), - out=out) - ) - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py deleted file mode 100644 index df8dc89..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ccpi.optimisation.functions import Function -import numpy - -class IndicatorBox(Function): - '''Box constraints indicator function. - - Calling returns 0 if argument is within the box. The prox operator is projection onto the box. 
- Only implements one scalar lower and one upper as constraint on all elements. Should generalise - to vectors to allow different constraints one elements. -''' - - def __init__(self,lower=-numpy.inf,upper=numpy.inf): - # Do nothing - super(IndicatorBox, self).__init__() - self.lower = lower - self.upper = upper - - - def __call__(self,x): - - if (numpy.all(x.array>=self.lower) and - numpy.all(x.array <= self.upper) ): - val = 0 - else: - val = numpy.inf - return val - - def prox(self,x,tau=None): - return (x.maximum(self.lower)).minimum(self.upper) - - def proximal(self, x, tau, out=None): - if out is None: - return self.prox(x, tau) - else: - if not x.shape == out.shape: - raise ValueError('Norm1 Incompatible size:', - x.shape, out.shape) - #(x.abs() - tau*self.gamma).maximum(0) * x.sign() - x.abs(out = out) - out.__isub__(tau*self.gamma) - out.maximum(0, out=out) - if self.sign_x is None or not x.shape == self.sign_x.shape: - self.sign_x = x.sign() - else: - x.sign(out=self.sign_x) - - out.__imul__( self.sign_x ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py deleted file mode 100644 index 5a47edd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
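IndicatorBox above returns 0 when its argument lies inside [lower, upper] and +inf otherwise, and its prox is the element-wise projection onto the box, independent of tau (note that the in-place `out` branch of its proximal method still refers to self.gamma and self.sign_x, attributes apparently copied from Norm1 that IndicatorBox never defines). A minimal NumPy sketch of the intended behaviour, with hypothetical helper names, is:

import numpy as np

# Sketch (not part of the changeset): box indicator and its proximal map.
def box_indicator(x, lower=-np.inf, upper=np.inf):
    return 0.0 if (np.all(x >= lower) and np.all(x <= upper)) else np.inf

def box_prox(x, lower=-np.inf, upper=np.inf, tau=None):
    # projection onto [lower, upper]; tau plays no role
    return np.clip(x, lower, upper)

x = np.array([-2.0, 0.3, 5.0])
print(box_indicator(x, 0.0, 1.0))   # inf: x lies outside [0, 1]
print(box_prox(x, 0.0, 1.0))        # entries clipped to [0, 1]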
-""" -Created on Wed Mar 6 19:42:34 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - self.b = kwargs.get('b',None) - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, x) - else: - return SimpleL1Norm.__call__(self, x - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py deleted file mode 100644 index 889d703..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py +++ /dev/null @@ -1,233 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
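SimpleL1Norm.proximal above is the classic soft-thresholding (shrinkage) operator, and L1Norm applies it in shifted form when a term b is present. A standalone NumPy sketch of the same maps (helper names are illustrative only):

import numpy as np

# prox_{tau*alpha*||.||_1}(x) = sign(x) * max(|x| - tau*alpha, 0)
def soft_threshold(x, tau, alpha=1.0):
    return np.sign(x) * np.maximum(np.abs(x) - tau * alpha, 0.0)

# prox of alpha*||x - b||_1: shift, threshold, shift back
def l1_prox_shifted(x, tau, b, alpha=1.0):
    return b + soft_threshold(x - b, tau, alpha)

x = np.array([-3.0, -0.2, 0.5, 2.0])
print(soft_threshold(x, tau=1.0))   # small entries vanish, large ones shrink by tau*alpha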
- -import numpy -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.framework import DataContainer, ImageData, ImageGeometry - -############################ L2NORM FUNCTION ############################# -class L2NormSquared(Function): - - def __init__(self, **kwargs): - - ''' L2NormSquared class - f : ImageGeometry --> R - - Cases: f(x) = ||x||^{2}_{2} - f(x) = || x - b ||^{2}_{2} - - ''' - - #TODO need x, b to live in the same geometry if b is not None - - super(L2NormSquared, self).__init__() - self.b = kwargs.get('b',None) - - def __call__(self, x): - ''' Evaluates L2NormSq at point x''' - - y = x - if self.b is not None: -# x.subtract(self.b, out = x) - y = x - self.b -# else: -# y -# if out is None: -# return x.squared_norm() -# else: - try: - return y.squared_norm() - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) - - - - def gradient(self, x, out=None): - ''' Evaluates gradient of L2NormSq at point x''' - if out is not None: - out.fill(x) - if self.b is not None: - out -= self.b - out *= 2 - else: - y = x - if self.b is not None: -# x.subtract(self.b, out=x) - y = x - self.b - return 2*y - - - def convex_conjugate(self, x, out=None): - ''' Evaluate convex conjugate of L2NormSq''' - - tmp = 0 - if self.b is not None: -# tmp = (self.b * x).sum() - tmp = (x * self.b).sum() - - if out is None: - # FIXME: this is a number - return (1./4.) * x.squared_norm() + tmp - else: - # FIXME: this is a DataContainer - out.fill((1./4.) * x.squared_norm() + tmp) - - - def proximal(self, x, tau, out = None): - - ''' The proximal operator ( prox_\{tau * f\}(x) ) evaluates i.e., - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if out is None: - if self.b is not None: - return (x - self.b)/(1+2*tau) + self.b - else: - return x/(1+2*tau) - else: - out.fill(x) - if self.b is not None: - out -= self.b - out /= (1+2*tau) - if self.b is not None: - out += self.b - #out.fill((x - self.b)/(1+2*tau) + self.b) - #else: - # out.fill(x/(1+2*tau)) - - - def proximal_conjugate(self, x, tau, out=None): - - if out is None: - if self.b is not None: - # change the order cannot add ImageData + NestedBlock - return (-1* tau*self.b + x)/(1 + tau/2) - else: - return x/(1 + tau/2 ) - else: - if self.b is not None: - out.fill((x - tau*self.b)/(1 + tau/2)) - else: - out.fill(x/(1 + tau/2 )) - - def __rmul__(self, scalar): - return ScaledFunction(self, scalar) - - -if __name__ == '__main__': - - - # TESTS for L2 and scalar * L2 - - M, N, K = 2,3,5 - ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) - u = ig.allocate('random_int') - b = ig.allocate('random_int') - - # check grad/call no data - f = L2NormSquared() - a1 = f.gradient(u) - a2 = 2 * u - numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) - numpy.testing.assert_equal(f(u), u.squared_norm()) - - # check grad/call with data - f1 = L2NormSquared(b=b) - b1 = f1.gradient(u) - b2 = 2 * (u-b) - - numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) - numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) - - #check convex conjuagate no data - c1 = f.convex_conjugate(u) - c2 = 1/4 * u.squared_norm() - numpy.testing.assert_equal(c1, c2) - - #check convex conjuagate with data - d1 = f1.convex_conjugate(u) - d2 = (1/4) * u.squared_norm() + (u*b).sum() - numpy.testing.assert_equal(d1, d2) - - # check proximal no data - tau = 5 - e1 = f.proximal(u, tau) - e2 = u/(1+2*tau) - 
numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) - - # check proximal with data - tau = 5 - h1 = f1.proximal(u, tau) - h2 = (u-b)/(1+2*tau) + b - numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) - - # check proximal conjugate no data - tau = 0.2 - k1 = f.proximal_conjugate(u, tau) - k2 = u/(1 + tau/2 ) - numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) - - # check proximal conjugate with data - l1 = f1.proximal_conjugate(u, tau) - l2 = (u - tau * b)/(1 + tau/2 ) - numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) - - - # check scaled function properties - - # scalar - scalar = 100 - f_scaled_no_data = scalar * L2NormSquared() - f_scaled_data = scalar * L2NormSquared(b=b) - - # call - numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) - numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) - - # grad - numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) - numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) - - # conj - numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ - f.convex_conjugate(u/scalar) * scalar, decimal=4) - - numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ - scalar * f1.convex_conjugate(u/scalar), decimal=4) - - # proximal - numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ - f.proximal(u, tau*scalar).as_array()) - - - numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ - f1.proximal(u, tau*scalar).as_array()) - - - # proximal conjugate - numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \ - (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) - - numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ - ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py deleted file mode 100644 index 1c51236..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
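The proximal maps of L2NormSquared deleted above have the closed forms x/(1+2*tau) and (x - b)/(1+2*tau) + b, which the unit tests embedded in that file check numerically. A small standalone sketch of the same identity, using plain NumPy arrays rather than ImageData:

import numpy as np

# For f(x) = ||x - b||_2^2 the proximal map
#     prox_{tau f}(x) = (x - b)/(1 + 2*tau) + b
# is the unique v solving the optimality condition (v - x) + 2*tau*(v - b) = 0
# of argmin_v { 0.5*||v - x||^2 + tau*f(v) }.
rng = np.random.default_rng(1)
x = rng.standard_normal(4)
b = rng.standard_normal(4)
tau = 5.0

v = (x - b) / (1.0 + 2.0 * tau) + b
assert np.allclose((v - x) + 2.0 * tau * (v - b), 0.0)

# The deleted tests above also exercise the scaling rule used by ScaledFunction:
#     prox_{tau * (scalar*f)}(x) = prox_{(tau*scalar) * f}(x)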
- -import numpy as np -from ccpi.optimisation.functions import Function, ScaledFunction -from ccpi.framework import DataContainer, ImageData, \ - ImageGeometry, BlockDataContainer - -############################ mixed_L1,2NORM FUNCTIONS ##################### -class MixedL21Norm(Function): - - def __init__(self, **kwargs): - - super(MixedL21Norm, self).__init__() - self.SymTensor = kwargs.get('SymTensor',False) - - def __call__(self, x, out=None): - - ''' Evaluates L1,2Norm at point x - - :param: x is a BlockDataContainer - - ''' - if self.SymTensor: - - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - res = sum(tmp).sqrt().sum() - else: - -# tmp = [ x[i]**2 for i in range(x.shape[0])] - tmp = [ el**2 for el in x.containers ] - -# print(x.containers) -# print(tmp) -# print(type(sum(tmp))) -# print(type(tmp)) - res = sum(tmp).sqrt().sum() -# print(res) - return res - - def gradient(self, x, out=None): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - - ''' This is the Indicator function of ||\cdot||_{2, \infty} - which is either 0 if ||x||_{2, \infty} or \infty - ''' - return 0.0 - - def proximal(self, x, tau, out=None): - - ''' - For this we need to define a MixedL2,2 norm acting on BDC, - different form L2NormSquared which acts on DC - - ''' - - pass - - def proximal_conjugate(self, x, tau, out=None): - - if self.SymTensor: - - param = [1]*x.shape[0] - param[-1] = 2 - tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - frac = [x[i]/(sum(tmp).sqrt()).maximum(1.0) for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - -# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - - def __rmul__(self, scalar): - return ScaledFunction(self, scalar) - -#class MixedL21Norm_tensor(Function): -# -# def __init__(self): -# print("feerf") -# -# -if __name__ == '__main__': - - M, N, K = 2,3,5 - ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) - u1 = ig.allocate('random_int') - u2 = ig.allocate('random_int') - - U = BlockDataContainer(u1, u2, shape=(2,1)) - - # Define no scale and scaled - f_no_scaled = MixedL21Norm() - f_scaled = 0.5 * MixedL21Norm() - - # call - - a1 = f_no_scaled(U) - a2 = f_scaled(U) - - z = f_no_scaled.proximal_conjugate(U, 1) - - f_no_scaled = MixedL21Norm() - - tmp = [el*el for el in U] - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py deleted file mode 100644 index b553d7c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ccpi.optimisation.functions import Function -import numpy -import warnings - -# Define a class for squared 2-norm -class Norm2sq(Function): - ''' - f(x) = c*||A*x-b||_2^2 - - which has - - grad[f](x) = 2*c*A^T*(A*x-b) - - and Lipschitz constant - - L = 2*c*||A||_2^2 = 2*s1(A)^2 - - where s1(A) is the largest singular value of A. - - ''' - - def __init__(self,A,b,c=1.0,memopt=False): - super(Norm2sq, self).__init__() - - self.A = A # Should be an operator, default identity - self.b = b # Default zero DataSet? - self.c = c # Default 1. - if memopt: - try: - self.range_tmp = A.range_geometry().allocate() - self.domain_tmp = A.domain_geometry().allocate() - self.memopt = True - except NameError as ne: - warnings.warn(str(ne)) - self.memopt = False - except NotImplementedError as nie: - print (nie) - warnings.warn(str(nie)) - self.memopt = False - else: - self.memopt = False - - # Compute the Lipschitz parameter from the operator if possible - # Leave it initialised to None otherwise - try: - self.L = 2.0*self.c*(self.A.norm()**2) - except AttributeError as ae: - pass - except NotImplementedError as noe: - pass - - #def grad(self,x): - # return self.gradient(x, out=None) - - def __call__(self,x): - #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - #if out is None: - # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) - #else: - y = self.A.direct(x) - y.__isub__(self.b) - #y.__imul__(y) - #return y.sum() * self.c - try: - return y.squared_norm() * self.c - except AttributeError as ae: - # added for compatibility with SIRF - return (y.norm()**2) * self.c - - def gradient(self, x, out = None): - if self.memopt: - #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - - self.A.direct(x, out=self.range_tmp) - self.range_tmp -= self.b - self.A.adjoint(self.range_tmp, out=out) - #self.direct_placehold.multiply(2.0*self.c, out=out) - out *= (self.c * 2.0) - else: - return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py deleted file mode 100644 index 046a4a6..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
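MixedL21Norm above sums the point-wise Euclidean norm over the components of a BlockDataContainer, and its proximal_conjugate divides each component by that point-wise norm clipped below at 1. A plain-NumPy sketch of both operations (the helper names and the 2x2 test arrays are illustrative only, with lists of arrays standing in for a BlockDataContainer):

import numpy as np

# mixed L2,1 norm of a block of components (e.g. a 2D gradient field):
# sum over points of the point-wise Euclidean norm
def l21_norm(components):
    return np.sqrt(sum(c ** 2 for c in components)).sum()

# point-wise projection onto the dual unit ball, as in proximal_conjugate
def l21_dual_projection(components):
    denom = np.maximum(np.sqrt(sum(c ** 2 for c in components)), 1.0)
    return [c / denom for c in components]

gx = np.array([[3.0, 0.1], [0.0, 0.4]])
gy = np.array([[4.0, 0.2], [0.0, 0.3]])
print(l21_norm([gx, gy]))   # 5.0 + sqrt(0.05) + 0.0 + 0.5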
-from numbers import Number -import numpy - -class ScaledFunction(object): - '''ScaledFunction - - A class to represent the scalar multiplication of an Function with a scalar. - It holds a function and a scalar. Basically it returns the multiplication - of the product of the function __call__, convex_conjugate and gradient with the scalar. - For the rest it behaves like the function it holds. - - Args: - function (Function): a Function or BlockOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - - ''' - def __init__(self, function, scalar): - super(ScaledFunction, self).__init__() - self.L = None - if not isinstance (scalar, Number): - raise TypeError('expected scalar: got {}'.format(type(scalar))) - self.scalar = scalar - self.function = function - - def __call__(self,x, out=None): - '''Evaluates the function at x ''' - return self.scalar * self.function(x) - - def convex_conjugate(self, x): - '''returns the convex_conjugate of the scaled function ''' - # if out is None: - # return self.scalar * self.function.convex_conjugate(x/self.scalar) - # else: - # out.fill(self.function.convex_conjugate(x/self.scalar)) - # out *= self.scalar - return self.scalar * self.function.convex_conjugate(x/self.scalar) - - def proximal_conjugate(self, x, tau, out = None): - '''This returns the proximal operator for the function at x, tau - ''' - if out is None: - return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) - else: - out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) - - def grad(self, x): - '''Alias of gradient(x,None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use gradient instead''', DeprecationWarning) - return self.gradient(x, out=None) - - def prox(self, x, tau): - '''Alias of proximal(x, tau, None)''' - warnings.warn('''This method will disappear in following - versions of the CIL. Use proximal instead''', DeprecationWarning) - return self.proximal(x, out=None) - - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - if out is None: - return self.scalar * self.function.gradient(x) - else: - out.fill( self.scalar * self.function.gradient(x) ) - - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau - ''' - if out is None: - return self.function.proximal(x, tau*self.scalar) - else: - out.fill( self.function.proximal(x, tau*self.scalar) ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py deleted file mode 100644 index 88d9b64..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFun.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData -from ccpi.framework import BlockDataContainer - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - - if x.shape[0]==1: - return x.maximum(0).sum() - else: - if isinstance(x, BlockDataContainer): - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - else: - return x.maximum(0).sum() + x.maximum(0).sum() - - def proximal(self,x,tau, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def proximal_conjugate(self, x, tau): - return 0 - - def domain_geometry(self): - pass - def range_geometry(self): - pass \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py deleted file mode 100644 index 2ed36f5..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -from .Function import Function -from .ZeroFun import ZeroFun -from .L1Norm import SimpleL1Norm, L1Norm -#from .L2NormSquared import L2NormSq, SimpleL2NormSq -from .L2NormSquared import L2NormSquared -from .BlockFunction import BlockFunction -from .ScaledFunction import ScaledFunction -from .FunctionOperatorComposition import FunctionOperatorComposition -from .MixedL21Norm import MixedL21Norm -from .IndicatorBox import IndicatorBox -from .Norm2Sq import Norm2sq diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py deleted file mode 100644 index 8632920..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/functions.py +++ /dev/null @@ -1,312 +0,0 @@ -# -*- coding: utf-8 -*- - -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 7 13:10:56 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry -from operators import CompositeDataContainer, Identity, CompositeOperator -from numbers import Number - - -############################ L2NORM FUNCTIONS ############################# -class SimpleL2NormSq(Function): - - def __init__(self, alpha=1): - - super(SimpleL2NormSq, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.power(2).sum() - - def gradient(self,x): - return 2 * self.alpha * x - - def convex_conjugate(self,x): - return (1/4*self.alpha) * x.power(2).sum() - - def proximal(self, x, tau): - return x.divide(1+2*tau*self.alpha) - - def proximal_conjugate(self, x, tau): - return x.divide(1 + tau/2*self.alpha ) - - -class L2NormSq(SimpleL2NormSq): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L2NormSq, self).__init__(alpha=alpha) - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL2NormSq.__call__(self, self.A.direct(x)) - else: - return SimpleL2NormSq.__call__(self, self.A.direct(x) - 
self.b) - - def convex_conjugate(self, x): - - ''' The convex conjugate corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - ''' - - if self.b is None: - return SimpleL2NormSq.convex_conjugate(self, x) - else: - return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum() - - def gradient(self, x): - - if self.b is None: - return 2*self.alpha * self.A.adjoint(self.A.direct(x)) - else: - return 2*self.alpha * self.A.adjoint(self.A.direct(x) - self.b) - - def proximal(self, x, tau): - - ''' The proximal operator corresponds to the simple functional i.e., - f(x) = alpha * ||x - b||_{2}^{2} - - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - - if self.b is None: - return SimpleL2NormSq.proximal(self, x, tau) - else: - return self.b + SimpleL2NormSq.proximal(self, x - self.b , tau) - - - def proximal_conjugate(self, x, tau): - - ''' The proximal operator corresponds to the simple convex conjugate - functional i.e., f^{*}(x^{) - argmin_x { 0.5||x - u||^{2} + tau f(x) } - ''' - if self.b is None: - return SimpleL2NormSq.proximal_conjugate(self, x, tau) - else: - return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau) - - -############################ L1NORM FUNCTIONS ############################# -class SimpleL1Norm(Function): - - def __init__(self, alpha=1): - - super(SimpleL1Norm, self).__init__() - self.alpha = alpha - - def __call__(self, x): - return self.alpha * x.abs().sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - ''' Soft Threshold''' - return x.sign() * (x.abs() - tau * self.alpha).maximum(1.0) - - def proximal_conjugate(self, x, tau): - return x.divide((x.abs()/self.alpha).maximum(1.0)) - -class L1Norm(SimpleL1Norm): - - def __init__(self, A, b = None, alpha=1, **kwargs): - - super(L1Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - def __call__(self, x): - - if self.b is None: - return SimpleL1Norm.__call__(self, self.A.direct(x)) - else: - return SimpleL1Norm.__call__(self, self.A.direct(x) - self.b) - - def gradient(self, x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - if self.b is None: - return SimpleL1Norm.convex_conjugate(self, x) - else: - return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() - - def proximal(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal(self, x, tau) - else: - return self.b + SimpleL1Norm.proximal(self, x + self.b , tau) - - def proximal_conjugate(self, x, tau): - - if self.b is None: - return SimpleL1Norm.proximal_conjugate(self, x, tau) - else: - return SimpleL1Norm.proximal_conjugate(self, x - tau*self.b, tau) - - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, A, b=None, alpha=1, **kwargs): - - super(mixed_L12Norm, self).__init__() - self.alpha = alpha - self.A = A - self.b = b - - self.sym_grad = kwargs.get('sym_grad',False) - - - - def gradient(self,x): - return ValueError('Not Differentiable') - - - def __call__(self,x): - - y = self.A.direct(x) - eucl_norm = ImageData(y.power(2).sum(axis=0)).sqrt() - eucl_norm.__isub__(self.b) - return eucl_norm.sum() * self.alpha - - def convex_conjugate(self,x): - return 0 - - def proximal_conjugate(self, x, tau): - - if self.b is None: - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = 
x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - else: - res = (x - tau*self.b)/ ((x - tau*self.b)).abs().maximum(1.0) - - return res - - -#%% - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - return x.maximum(0).sum() - - def proximal(self,x,tau): - return x.copy() - - def proximal_conjugate(self, x, tau): - return 0 - - -class CompositeFunction(Function): - - def __init__(self, *args): - self.functions = args - self.length = len(self.functions) - - def get_item(self, ind): - return self.functions[ind] - - def __call__(self,x): - - t = 0 - for i in range(self.length): - for j in range(x.shape[0]): - t +=self.functions[i](x.get_item(j)) - return t - - def convex_conjugate(self, x): - - z = 0 - t = 0 - for i in range(x.shape[0]): - t += self.functions[z].convex_conjugate(x.get_item(i)) - z += 1 - - return t - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - def proximal_conjugate(self, x, tau, out = None): - - if isinstance(tau, Number): - tau = CompositeDataContainer(tau) - out = [None]*self.length - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(0)) - return CompositeDataContainer(*out) - - - - -if __name__ == '__main__': - - N = 3 - ig = (N,N) - ag = ig - op1 = Gradient(ig) - op2 = Identity(ig, ag) - - # Form Composite Operator - operator = CompositeOperator((2,1), op1, op2 ) - - # Create functions - alpha = 1 - noisy_data = ImageData(np.random.randint(10, size=ag)) - f = CompositeFunction(L1Norm(op1,alpha), \ - L2NormSq(op2, noisy_data, c = 0.5, memopt = False) ) - - u = ImageData(np.random.randint(10, size=ig)) - uComp = CompositeDataContainer(u) - - print(f(uComp)) # This is f(Kx) = f1(K1*u) + f2(K2*u) - - f1 = L1Norm(op1,alpha) - f2 = L2NormSq(op2, noisy_data, c = 0.5, memopt = False) - - print(f1(u) + f2(u)) - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py deleted file mode 100644 index ffeb32e..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/mixed_L12Norm.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:43:12 2019 - -@author: evangelos -""" - -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData, ImageGeometry - -############################ mixed_L1,2NORM FUNCTIONS ############################# -class mixed_L12Norm(Function): - - def __init__(self, alpha, **kwargs): - - super(mixed_L12Norm, self).__init__() - - self.alpha = alpha - self.b = kwargs.get('b',None) - self.sym_grad = kwargs.get('sym_grad',False) - - def __call__(self,x): - - if self.b is None: - tmp1 = x - else: - tmp1 = x - self.b -# - if self.sym_grad: - tmp = np.sqrt(tmp1.as_array()[0]**2 + tmp1.as_array()[1]**2 + 2*tmp1.as_array()[2]**2) - else: - tmp = 
ImageData(tmp1.power(2).sum(axis=0)).sqrt() - - return self.alpha*tmp.sum() - - def gradient(self,x): - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - return 0 - - def proximal(self, x, tau): - pass - - def proximal_conjugate(self, x, tau): - - if self.sym_grad: - tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha - res = x.divide(ImageData(tmp2).maximum(1.0)) - else: - res = x.divide((ImageData(x.power(2).sum(axis=0)).sqrt()/self.alpha).maximum(1.0)) - - return res diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py deleted file mode 100644 index ee8f609..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" -#from ccpi.optimisation.ops import Operator -import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer -from ccpi.optimisation.operators import Operator, LinearOperator -from ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator -from ccpi.framework import BlockGeometry - -class BlockOperator(Operator): - '''A Block matrix containing Operators - - The Block Framework is a generic strategy to treat variational problems in the - following form: - - .. math:: - - min Regulariser + Fidelity - - - BlockOperators have a generic shape M x N, and when applied on an - Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. - Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with - N rows and 1 column. - - User may specify the shape of the block, by default is a row vector - - Operators in a Block are required to have the same domain column-wise and the - same range row-wise. - ''' - __array_priority__ = 1 - def __init__(self, *args, **kwargs): - ''' - Class creator - - Note: - Do not include the `self` parameter in the ``Args`` section. - - Args: - :param: vararg (Operator): Operators in the block. - :param: shape (:obj:`tuple`, optional): If shape is passed the Operators in - vararg are considered input in a row-by-row fashion. - Shape and number of Operators must match. 
- - Example: - BlockOperator(op0,op1) results in a row block - BlockOperator(op0,op1,shape=(1,2)) results in a column block - ''' - self.operators = args - shape = kwargs.get('shape', None) - if shape is None: - shape = (len(args),1) - self.shape = shape - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements,len(args))) - # test if operators are compatible - if not self.column_wise_compatible(): - raise ValueError('Operators in each column must have the same domain') - if not self.row_wise_compatible(): - raise ValueError('Operators in each row must have the same range') - - def column_wise_compatible(self): - '''Operators in a Block should have the same domain per column''' - rows, cols = self.shape - compatible = True - for col in range(cols): - column_compatible = True - for row in range(1,rows): - dg0 = self.get_item(row-1,col).domain_geometry() - dg1 = self.get_item(row,col).domain_geometry() - column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible - compatible = compatible and column_compatible - return compatible - - def row_wise_compatible(self): - '''Operators in a Block should have the same range per row''' - rows, cols = self.shape - compatible = True - for row in range(rows): - row_compatible = True - for col in range(1,cols): - dg0 = self.get_item(row,col-1).range_geometry() - dg1 = self.get_item(row,col).range_geometry() - row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible - compatible = compatible and row_compatible - return compatible - - def get_item(self, row, col): - '''returns the Operator at specified row and col''' - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - if col > self.shape[1]: - raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) - - index = row*self.shape[1]+col - return self.operators[index] - - def norm(self): - norm = [op.norm()**2 for op in self.operators] - return numpy.sqrt(sum(norm)) - - def direct(self, x, out=None): - '''Direct operation for the BlockOperator - - BlockOperator work on BlockDataContainer, but they will work on DataContainers - and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) - ''' - if not isinstance (x, BlockDataContainer): - x_b = BlockDataContainer(x) - else: - x_b = x - shape = self.get_output_shape(x_b.shape) - res = [] - for row in range(self.shape[0]): - for col in range(self.shape[1]): - if col == 0: - prod = self.get_item(row,col).direct(x_b.get_item(col)) - else: - prod += self.get_item(row,col).direct(x_b.get_item(col)) - res.append(prod) - return BlockDataContainer(*res, shape=shape) - - def adjoint(self, x, out=None): - '''Adjoint operation for the BlockOperator - - BlockOperator may contain both LinearOperator and Operator - This method exists in BlockOperator as it is not known what type of - Operator it will contain. 
- - BlockOperator work on BlockDataContainer, but they will work on DataContainers - and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) - - Raises: ValueError if the contained Operators are not linear - ''' - if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True): - raise ValueError('Not all operators in Block are linear.') - if not isinstance (x, BlockDataContainer): - x_b = BlockDataContainer(x) - else: - x_b = x - shape = self.get_output_shape(x_b.shape, adjoint=True) - res = [] - for row in range(self.shape[1]): - for col in range(self.shape[0]): - if col == 0: - prod = self.get_item(row, col).adjoint(x_b.get_item(col)) - else: - prod += self.get_item(row, col).adjoint(x_b.get_item(col)) - res.append(prod) - if self.shape[1]==1: - return ImageData(*res) - else: - return BlockDataContainer(*res, shape=shape) - - def get_output_shape(self, xshape, adjoint=False): - sshape = self.shape[1] - oshape = self.shape[0] - if adjoint: - sshape = self.shape[0] - oshape = self.shape[1] - if sshape != xshape[0]: - raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) - return (oshape, xshape[-1]) - - def __rmul__(self, scalar): - '''Defines the left multiplication with a scalar - - Args: scalar (number or iterable containing numbers): - - Returns: a block operator with Scaled Operators inside''' - if isinstance (scalar, list) or isinstance(scalar, tuple) or \ - isinstance(scalar, numpy.ndarray): - if len(scalar) != len(self.operators): - raise ValueError('dimensions of scalars and operators do not match') - scalars = scalar - else: - scalars = [scalar for _ in self.operators] - # create a list of ScaledOperator-s - ops = [ v * op for v,op in zip(scalars, self.operators)] - #return BlockScaledOperator(self, scalars ,shape=self.shape) - return type(self)(*ops, shape=self.shape) - @property - def T(self): - '''Return the transposed of self - - input in a row-by-row''' - newshape = (self.shape[1], self.shape[0]) - oplist = [] - for col in range(newshape[1]): - for row in range(newshape[0]): - oplist.append(self.get_item(col,row)) - return type(self)(*oplist, shape=newshape) - - def domain_geometry(self): - '''returns the domain of the BlockOperator - - If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry. - Otherwise it is a BlockGeometry. - ''' - if self.shape[1] == 1: - # column BlockOperator - return self.get_item(0,0).domain_geometry() - else: - shape = (self.shape[0], 1) - return BlockGeometry(*[el.domain_geometry() for el in self.operators], - shape=shape) - - def range_geometry(self): - '''returns the range of the BlockOperator''' - shape = (self.shape[1], 1) - return BlockGeometry(*[el.range_geometry() for el in self.operators], - shape=shape) -if __name__ == '__main__': - pass diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py deleted file mode 100644 index aeb6c53..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py +++ /dev/null @@ -1,67 +0,0 @@ -from numbers import Number -import numpy -from ccpi.optimisation.operators import ScaledOperator -import functools - -class BlockScaledOperator(ScaledOperator): - '''ScaledOperator - - A class to represent the scalar multiplication of an Operator with a scalar. - It holds an operator and a scalar. 
Basically it returns the multiplication - of the result of direct and adjoint of the operator with the scalar. - For the rest it behaves like the operator it holds. - - Args: - operator (Operator): a Operator or LinearOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - sop = ScaledOperator(operator, scalar) - sop.direct(x) = scalar * operator.direct(x) - sop.adjoint(x) = scalar * operator.adjoint(x) - sop.norm() = operator.norm() - sop.range_geometry() = operator.range_geometry() - sop.domain_geometry() = operator.domain_geometry() - ''' - def __init__(self, operator, scalar, shape=None): - if shape is None: - shape = operator.shape - - if isinstance(scalar, (list, tuple, numpy.ndarray)): - size = functools.reduce(lambda x,y:x*y, shape, 1) - if len(scalar) != size: - raise ValueError('Scalar and operators size do not match: {}!={}' - .format(len(scalar), len(operator))) - self.scalar = scalar[:] - print ("BlockScaledOperator ", self.scalar) - elif isinstance (scalar, Number): - self.scalar = scalar - else: - raise TypeError('expected scalar to be a number of an iterable: got {}'.format(type(scalar))) - self.operator = operator - self.shape = shape - def direct(self, x, out=None): - print ("BlockScaledOperator self.scalar", self.scalar) - #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array()) - return self.scalar * (self.operator.direct(x, out=out)) - def adjoint(self, x, out=None): - if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) - else: - raise TypeError('Operator is not linear') - def norm(self): - return numpy.abs(self.scalar) * self.operator.norm() - def range_geometry(self): - return self.operator.range_geometry() - def domain_geometry(self): - return self.operator.domain_geometry() - @property - def T(self): - '''Return the transposed of self''' - #print ("transpose before" , self.shape) - #shape = (self.shape[1], self.shape[0]) - ##self.shape = shape - ##self.operator.shape = shape - #print ("transpose" , shape) - #return self - return type(self)(self.operator.T, self.scalar) \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py deleted file mode 100644 index 24c4e4b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ /dev/null @@ -1,322 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:51:17 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np - -class FiniteDiff(Operator): - - # Works for Neum/Symmetric & periodic boundary conditions - # TODO add central differences??? 
- # TODO not very well optimised, too many conditions - # TODO add discretisation step, should get that from imageGeometry - - # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] - # Grad_order = ['channels', 'direction_y', 'direction_x'] - # Grad_order = ['direction_z', 'direction_y', 'direction_x'] - # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] - - def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): - '''''' - super(FiniteDiff, self).__init__() - '''FIXME: domain and range should be geometries''' - self.gm_domain = gm_domain - self.gm_range = gm_range - self.direction = direction - self.bnd_cond = bnd_cond - - # Domain Geometry = Range Geometry if not stated - if self.gm_range is None: - self.gm_range = self.gm_domain - # check direction and "length" of geometry - if self.direction + 1 > len(self.gm_domain.shape): - raise ValueError('Gradient directions more than geometry domain') - - #self.voxel_size = kwargs.get('voxel_size',1) - # this wrongly assumes a homogeneous voxel size - self.voxel_size = self.gm_domain.voxel_size_x - - - def direct(self, x, out=None): - - x_asarr = x.as_array() - x_sz = len(x.shape) - - if out is None: - out = np.zeros(x.shape) - - fd_arr = out - - ######################## Direct for 2D ############################### - if x_sz == 2: - - if self.direction == 1: - - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 0: - - np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Direct for 3D ############################### - elif x_sz == 3: - - if self.direction == 0: - - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - - if self.direction == 2: - - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Direct for 4D ############################### - elif x_sz == 4: - - if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] ) - - if self.bnd_cond == 
'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] ) - - if self.bnd_cond == 'Neumann': - pass - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) - else: - raise ValueError('No valid boundary conditions') - - else: - raise NotImplementedError - - res = out/self.voxel_size - return type(x)(res) - - def adjoint(self, x, out=None): - - x_asarr = x.as_array() - #x_asarr = x - x_sz = len(x.shape) - - if out is None: - out = np.zeros(x.shape) - - fd_arr = out - - ######################## Adjoint for 2D ############################### - if x_sz == 2: - - if self.direction == 1: - - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) - np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] ) - - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 0: - - np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) - np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] ) - - else: - raise ValueError('No valid boundary conditions') - - ######################## Adjoint for 3D ############################### - elif x_sz == 3: - - if self.direction == 0: - - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) - np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) - np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] ) - np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] ) - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) - else: - raise ValueError('No valid boundary conditions') - - ######################## Adjoint for 4D ############################### - elif x_sz == 4: - - if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) - - if 
self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) - np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) - np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) - else: - raise ValueError('No valid boundary conditions') - - - if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] ) - np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) - else: - raise ValueError('No valid boundary conditions') - - if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) - - if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] ) - np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] ) - - elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) - else: - raise ValueError('No valid boundary conditions') - - else: - raise NotImplementedError - - res = out/self.voxel_size - return type(x)(-res) - - def range_geometry(self): - '''Returns the range geometry''' - return self.gm_range - - def domain_geometry(self): - '''Returns the domain geometry''' - return self.gm_domain - - def norm(self): - x0 = self.gm_domain.allocate() - x0.fill( np.random.random_sample(x0.shape) ) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) - return self.s1 - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py deleted file mode 100644 index e00de0c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:50:04 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer -import numpy -from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff - -#%% - -class Gradient(LinearOperator): - - def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): - - super(Gradient, self).__init__() - - self.gm_domain = gm_domain # Domain of Grad Operator - - self.correlation = kwargs.get('correlation','Space') - - if self.correlation=='Space': - if self.gm_domain.channels>1: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] ) - self.ind = numpy.arange(1,self.gm_domain.length) - else: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] ) - self.ind = numpy.arange(self.gm_domain.length) - elif self.correlation=='SpaceChannels': - if 
self.gm_domain.channels>1: - self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)]) - self.ind = range(self.gm_domain.length) - else: - raise ValueError('No channels to correlate') - - self.bnd_cond = bnd_cond - - - def direct(self, x, out=None): - - tmp = self.gm_range.allocate() - - for i in range(tmp.shape[0]): - tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) - return tmp - - def adjoint(self, x, out=None): - - tmp = self.gm_domain.allocate() - for i in range(x.shape[0]): - tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) - return tmp - - - def domain_geometry(self): - return self.gm_domain - - def range_geometry(self): - return self.gm_range - - def norm(self): - - x0 = self.gm_domain.allocate('random') - self.s1, sall, svec = PowerMethodNonsquare(self, 10, x0) - return self.s1 - - def __rmul__(self, scalar): - return ScaledOperator(self, scalar) - - - def matrix(self): - - tmp = self.gm_range.allocate() - - mat = [] - for i in range(tmp.shape[0]): - - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - mat.append(spMat.matrix()) - - return BlockDataContainer(*mat) - - - def sum_abs_row(self): - - tmp = self.gm_range.allocate() - res = self.gm_domain.allocate() - for i in range(tmp.shape[0]): - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - res += spMat.sum_abs_row() - return res - - def sum_abs_col(self): - - tmp = self.gm_range.allocate() - res = [] - for i in range(tmp.shape[0]): - spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) - res.append(spMat.sum_abs_col()) - return BlockDataContainer(*res) - - -if __name__ == '__main__': - - - from ccpi.optimisation.operators import Identity, BlockOperator - - M, N = 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int' ) - - # check direct of Gradient and sparse matrix - G = Gradient(ig) - G_sp = G.matrix() - - res1 = G.direct(arr) - res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') - - print(res1[0].as_array()) - print(res1y) - - res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') - - print(res1[1].as_array()) - print(res1x) - - #check sum abs row - conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))) - print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F')) - print(G.sum_abs_row().as_array()) - - print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F')) - - print(G.sum_abs_col()[0].as_array()) - print(G.sum_abs_col()[1].as_array()) - - # Check Blockoperator sum abs col and row - - op1 = Gradient(ig) - op2 = Identity(ig) - - B = BlockOperator( op1, op2) - - Brow = B.sum_abs_row() - Bcol = B.sum_abs_col() - - concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray())) - - print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F')) - print(Brow.as_array()) - - print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F')) - print(Bcol[1].as_array()) - - -# print(numpy.concatene(G_sp[0].toarray()+ )) -# print(G_sp[1].toarray()) -# -# d1 = G.sum_abs_row() -# print(d1.as_array()) -# -# d2 = G_neum.sum_abs_col() -## print(d2) -# -# -# ########################################################### - a = BlockDataContainer( BlockDataContainer(arr, arr), arr) - b = BlockDataContainer( 
BlockDataContainer(arr+5, arr+3), arr+2) - c = a/b - - print(c[0][0].as_array(), (arr/(arr+5)).as_array()) - print(c[0][1].as_array(), (arr/(arr+3)).as_array()) - print(c[1].as_array(), (arr/(arr+2)).as_array()) - - - a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) -# -# c1 = arr + a -# c2 = arr + a -# c2 = a1 + arr -# diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py deleted file mode 100644 index a58a296..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:30:51 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import LinearOperator -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - - -class Identity(LinearOperator): - - def __init__(self, gm_domain, gm_range=None): - - self.gm_domain = gm_domain - self.gm_range = gm_range - if self.gm_range is None: - self.gm_range = self.gm_domain - - super(Identity, self).__init__() - - def direct(self,x,out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def adjoint(self,x, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def norm(self): - return 1.0 - - def domain_geometry(self): - return self.gm_domain - - def range_geometry(self): - return self.gm_range - - def matrix(self): - - return sp.eye(np.prod(self.gm_domain.shape)) - - def sum_abs_row(self): - - return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) - - def sum_abs_col(self): - - return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) - - -if __name__ == '__main__': - - from ccpi.framework import ImageGeometry - - M, N = 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int') - - Id = Identity(ig) - d = Id.matrix() - print(d.toarray()) - - d1 = Id.sum_abs_col() - print(d1.as_array()) - - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py deleted file mode 100644 index e19304f..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:57:52 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.operators import Operator - - -class LinearOperator(Operator): - '''A Linear Operator that maps from a space X <-> Y''' - def __init__(self): - super(LinearOperator, self).__init__() - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - '''returns the adjoint/inverse operation - - only available to linear operators''' - raise NotImplementedError diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py deleted file mode 100644 index 2d2089b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:55:56 2019 - -@author: ofn77899 -""" -from ccpi.optimisation.operators.ScaledOperator import ScaledOperator - -class Operator(object): - '''Operator that maps from a space X -> Y''' - def 
is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - '''Returns the application of the Operator on x''' - raise NotImplementedError - def norm(self): - '''Returns the norm of the Operator''' - raise NotImplementedError - def range_geometry(self): - '''Returns the range of the Operator: Y space''' - raise NotImplementedError - def domain_geometry(self): - '''Returns the domain of the Operator: X space''' - raise NotImplementedError - def __rmul__(self, scalar): - '''Defines the multiplication by a scalar on the left - - returns a ScaledOperator''' - return ScaledOperator(self, scalar) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py deleted file mode 100644 index adcc6d9..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py +++ /dev/null @@ -1,42 +0,0 @@ -from numbers import Number -import numpy - -class ScaledOperator(object): - '''ScaledOperator - - A class to represent the scalar multiplication of an Operator with a scalar. - It holds an operator and a scalar. Basically it returns the multiplication - of the result of direct and adjoint of the operator with the scalar. - For the rest it behaves like the operator it holds. - - Args: - operator (Operator): a Operator or LinearOperator - scalar (Number): a scalar multiplier - Example: - The scaled operator behaves like the following: - sop = ScaledOperator(operator, scalar) - sop.direct(x) = scalar * operator.direct(x) - sop.adjoint(x) = scalar * operator.adjoint(x) - sop.norm() = operator.norm() - sop.range_geometry() = operator.range_geometry() - sop.domain_geometry() = operator.domain_geometry() - ''' - def __init__(self, operator, scalar): - super(ScaledOperator, self).__init__() - if not isinstance (scalar, Number): - raise TypeError('expected scalar: got {}'.format(type(scalar))) - self.scalar = scalar - self.operator = operator - def direct(self, x, out=None): - return self.scalar * self.operator.direct(x, out=out) - def adjoint(self, x, out=None): - if self.operator.is_linear(): - return self.scalar * self.operator.adjoint(x, out=out) - else: - raise TypeError('Operator is not linear') - def norm(self): - return numpy.abs(self.scalar) * self.operator.norm() - def range_geometry(self): - return self.operator.range_geometry() - def domain_geometry(self): - return self.operator.domain_geometry() diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py deleted file mode 100644 index 1b88cba..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Apr 2 14:06:15 2019 - -@author: vaggelis -""" - -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - -class SparseFiniteDiff(): - - def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): - - super(SparseFiniteDiff, self).__init__() - self.gm_domain = gm_domain - self.gm_range = gm_range - self.direction = direction - self.bnd_cond = bnd_cond - - if self.gm_range is None: - self.gm_range = self.gm_domain - - self.get_dims = [i for i in gm_domain.shape] - - if self.direction + 1 > len(self.gm_domain.shape): - raise ValueError('Gradient directions more than geometry domain') - - def matrix(self): - - i = 
self.direction - - mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil') - - if self.bnd_cond == 'Neumann': - mat[-1,:] = 0 - elif self.bnd_cond == 'Periodic': - mat[-1,0] = 1 - - tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0]) - - for j in range(1, self.gm_domain.length): - - tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad ) - - return tmpGrad - - def T(self): - return self.matrix().T - - def direct(self, x): - - x_asarr = x.as_array() - res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F') - return type(x)(res) - - def adjoint(self, x): - - x_asarr = x.as_array() - res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F') - return type(x)(res) - - def sum_abs_row(self): - - res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')) - res[res==0]=1 - return ImageData(res) - - def sum_abs_col(self): - - res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'C')) - res[res==0]=1 - return ImageData(res) - -if __name__ == '__main__': - - from ccpi.framework import ImageGeometry - from ccpi.optimisation.operators import FiniteDiff - - # 2D - M, N= 2, 3 - ig = ImageGeometry(M, N) - arr = ig.allocate('random_int') - - for i in [0,1]: - - # Neumann - sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann') - G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann') - - # Periodic - sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic') - G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic') - - u_neum_direct = G_neum.direct(arr) - u_neum_sp_direct = sFD_neum.direct(arr) - np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4) - - u_neum_adjoint = G_neum.adjoint(arr) - u_neum_sp_adjoint = sFD_neum.adjoint(arr) - np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4) - - u_per_direct = G_neum.direct(arr) - u_per_sp_direct = sFD_neum.direct(arr) - np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4) - - u_per_adjoint = G_per.adjoint(arr) - u_per_sp_adjoint = sFD_per.adjoint(arr) - np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4) - - # 3D - M, N, K = 2, 3, 4 - ig3D = ImageGeometry(M, N, K) - arr3D = ig3D.allocate('random_int') - - for i in [0,1,2]: - - # Neumann - sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann') - G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann') - - # Periodic - sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic') - G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic') - - u_neum_direct3D = G_neum3D.direct(arr3D) - u_neum_sp_direct3D = sFD_neum3D.direct(arr3D) - np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4) - - u_neum_adjoint3D = G_neum3D.adjoint(arr3D) - u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D) - np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4) - - u_per_direct3D = G_neum3D.direct(arr3D) - u_per_sp_direct3D = sFD_neum3D.direct(arr3D) - np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4) - - u_per_adjoint3D = G_per3D.adjoint(arr3D) - u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D) - 
np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4) - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py deleted file mode 100644 index d908e49..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar 1 22:53:55 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator -from ccpi.optimisation.operators import FiniteDiff -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, DataContainer -import numpy as np - - -class SymmetrizedGradient(Operator): - - def __init__(self, gm_domain, gm_range, bnd_cond = 'Neumann', **kwargs): - - super(SymmetrizedGradient, self).__init__() - - self.gm_domain = gm_domain # Domain of Grad Operator - self.gm_range = gm_range # Range of Grad Operator - self.bnd_cond = bnd_cond # Boundary conditions of Finite Differences - - # Kwargs Default options - self.memopt = kwargs.get('memopt',False) - self.correlation = kwargs.get('correlation','Space') - - #TODO not tested yet, operator norm??? - self.voxel_size = kwargs.get('voxel_size',[1]*len(gm_domain)) - - - def direct(self, x, out=None): - - tmp = np.zeros(self.gm_range) - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) - tmp[2] = 0.5 * (FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).adjoint(x.as_array()[0]) + - FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).adjoint(x.as_array()[1]) ) - - return type(x)(tmp) - - - def adjoint(self, x, out=None): - - tmp = np.zeros(self.gm_domain) - - tmp[0] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[0]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) - - tmp[1] = FiniteDiff(self.gm_domain[1:], direction = 1, bnd_cond = self.bnd_cond).direct(x.as_array()[2]) + \ - FiniteDiff(self.gm_domain[1:], direction = 0, bnd_cond = self.bnd_cond).direct(x.as_array()[1]) - - return type(x)(tmp) - - def alloc_domain_dim(self): - return ImageData(np.zeros(self.gm_domain)) - - def alloc_range_dim(self): - return ImageData(np.zeros(self.range_dim)) - - def domain_dim(self): - return self.gm_domain - - def range_dim(self): - return self.gm_range - - def norm(self): -# return np.sqrt(4*len(self.domainDim())) - #TODO this takes time for big ImageData - # for 2D ||grad|| = sqrt(8), 3D ||grad|| = sqrt(12) - x0 = ImageData(np.random.random_sample(self.domain_dim())) - self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) - return self.s1 - - - -if __name__ == '__main__': - - ########################################################################### - ## Symmetrized Gradient - - N, M = 2, 3 - ig = (N,M) - ig2 = (2,) + ig - ig3 = (3,) + ig - u1 = DataContainer(np.random.randint(10, size=ig2)) - w1 = DataContainer(np.random.randint(10, size=ig3)) - - E = SymmetrizedGradient(ig2,ig3) - - d1 = E.direct(u1) - d2 = E.adjoint(w1) - - LHS = (d1.as_array()[0]*w1.as_array()[0] + \ - d1.as_array()[1]*w1.as_array()[1] + \ - 2*d1.as_array()[2]*w1.as_array()[2]).sum() - - 
RHS = (u1.as_array()[0]*d2.as_array()[0] + \ - u1.as_array()[1]*d2.as_array()[1]).sum() - - - print(LHS, RHS, E.norm()) - - -# - - - - - - - - - - - \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py deleted file mode 100644 index a7c5f09..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar 6 19:25:53 2019 - -@author: evangelos -""" - -import numpy as np -from ccpi.framework import ImageData -from ccpi.optimisation.operators import Operator - -class ZeroOp(Operator): - - def __init__(self, gm_domain, gm_range): - self.gm_domain = gm_domain - self.gm_range = gm_range - super(ZeroOp, self).__init__() - - def direct(self,x,out=None): - if out is None: - return ImageData(np.zeros(self.gm_range)) - else: - return ImageData(np.zeros(self.gm_range)) - - def adjoint(self,x, out=None): - if out is None: - return ImageData(np.zeros(self.gm_domain)) - else: - return ImageData(np.zeros(self.gm_domain)) - - def norm(self): - return 0 - - def domain_dim(self): - return self.gm_domain - - def range_dim(self): - return self.gm_range \ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py deleted file mode 100644 index 1c09faf..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 15:56:27 2019 - -@author: ofn77899 -""" - -from .Operator import Operator -from .LinearOperator import LinearOperator -from .ScaledOperator import ScaledOperator -from .BlockOperator import BlockOperator -from .BlockScaledOperator import BlockScaledOperator - - -from .FiniteDifferenceOperator import FiniteDiff -from .GradientOperator import Gradient -from .SymmetrizedGradientOperator import SymmetrizedGradient -from .IdentityOperator import Identity -from .ZeroOperator import ZeroOp diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py b/Wrappers/Python/build/lib/ccpi/optimisation/ops.py deleted file mode 100644 index 6afb97a..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/ops.py +++ /dev/null @@ -1,294 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
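The LHS/RHS printout in the SymmetrizedGradient test above is an adjoint (dot) test: for a linear operator A it checks <A u, w> = <u, adjoint(A) w> on random inputs (the factor 2 on the third component accounts for the symmetric off-diagonal entry being stored only once). A generic version can be sketched as below; this is illustrative and assumes only that the operator exposes direct/adjoint and that the containers support element-wise multiplication and sum().

import numpy

def dot_test(operator, u, w, rtol=1e-6):
    '''Return True if <A u, w> matches <u, adjoint(A) w> to relative tolerance rtol.'''
    lhs = (operator.direct(u) * w).sum()
    rhs = (u * operator.adjoint(w)).sum()
    return numpy.abs(lhs - rhs) <= rtol * max(numpy.abs(lhs), numpy.abs(rhs), 1.0)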
- -import numpy -from scipy.sparse.linalg import svds -from ccpi.framework import DataContainer -from ccpi.framework import AcquisitionData -from ccpi.framework import ImageData -from ccpi.framework import ImageGeometry -from ccpi.framework import AcquisitionGeometry -from numbers import Number -# Maybe operators need to know what types they take as inputs/outputs -# to not just use generic DataContainer - - -class Operator(object): - '''Operator that maps from a space X -> Y''' - def __init__(self, **kwargs): - self.scalar = 1 - def is_linear(self): - '''Returns if the operator is linear''' - return False - def direct(self,x, out=None): - raise NotImplementedError - def size(self): - # To be defined for specific class - raise NotImplementedError - def norm(self): - raise NotImplementedError - def allocate_direct(self): - '''Allocates memory on the Y space''' - raise NotImplementedError - def allocate_adjoint(self): - '''Allocates memory on the X space''' - raise NotImplementedError - def range_geometry(self): - raise NotImplementedError - def domain_geometry(self): - raise NotImplementedError - def __rmul__(self, other): - '''reverse multiplication of Operator with number sets the variable scalar in the Operator''' - assert isinstance(other, Number) - self.scalar = other - return self - -class LinearOperator(Operator): - '''Operator that maps from a space X -> Y''' - def is_linear(self): - '''Returns if the operator is linear''' - return True - def adjoint(self,x, out=None): - raise NotImplementedError - -class Identity(Operator): - def __init__(self): - self.s1 = 1.0 - self.L = 1 - super(Identity, self).__init__() - - def direct(self,x,out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def adjoint(self,x, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - -class TomoIdentity(Operator): - def __init__(self, geometry, **kwargs): - super(TomoIdentity, self).__init__() - self.s1 = 1.0 - self.geometry = geometry - - def is_linear(self): - return True - def direct(self,x,out=None): - - if out is None: - if self.scalar != 1: - return x * self.scalar - return x.copy() - else: - if self.scalar != 1: - out.fill(x * self.scalar) - return - out.fill(x) - return - - def adjoint(self,x, out=None): - return self.direct(x, out) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - def allocate_direct(self): - if issubclass(type(self.geometry), ImageGeometry): - return ImageData(geometry=self.geometry) - elif issubclass(type(self.geometry), AcquisitionGeometry): - return AcquisitionData(geometry=self.geometry) - else: - raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry)) - def allocate_adjoint(self): - return self.allocate_direct() - def range_geometry(self): - return self.geometry - def domain_geometry(self): - return self.geometry - - - -class FiniteDiff2D(Operator): - def __init__(self): - self.s1 = 8.0 - super(FiniteDiff2D, self).__init__() - - def direct(self,x, out=None): - '''Forward differences with Neumann BC.''' - # FIXME this seems to be working only with numpy arrays - - d1 = numpy.zeros_like(x.as_array()) - d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1] - d2 = numpy.zeros_like(x.as_array()) - d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:] - d = numpy.stack((d1,d2),0) - #x.geometry.voxel_num_z = 2 - return type(x)(d,False,geometry=x.geometry) - - 
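# The adjoint defined next is the transpose of the forward-difference gradient,
# i.e. the negative of a backward-difference divergence; the pairing
# <grad u, p> = <u, -div p> can be checked numerically with a small NumPy-only
# sketch (illustrative; grad2d/div2d are not part of this file).
import numpy

def grad2d(u):
    # forward differences with Neumann boundary (last difference is zero)
    d1 = numpy.zeros_like(u); d1[:, :-1] = u[:, 1:] - u[:, :-1]
    d2 = numpy.zeros_like(u); d2[:-1, :] = u[1:, :] - u[:-1, :]
    return d1, d2

def div2d(p1, p2):
    # backward-difference divergence, the negative adjoint of grad2d
    d1 = numpy.zeros_like(p1); d1[:, 0] = p1[:, 0]; d1[:, 1:-1] = p1[:, 1:-1] - p1[:, :-2]; d1[:, -1] = -p1[:, -2]
    d2 = numpy.zeros_like(p2); d2[0, :] = p2[0, :]; d2[1:-1, :] = p2[1:-1, :] - p2[:-2, :]; d2[-1, :] = -p2[-2, :]
    return d1 + d2

u = numpy.random.rand(4, 5)
p = (numpy.random.rand(4, 5), numpy.random.rand(4, 5))
lhs = sum((g * q).sum() for g, q in zip(grad2d(u), p))
rhs = -(u * div2d(*p)).sum()
assert numpy.isclose(lhs, rhs)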
def adjoint(self,x, out=None): - '''Backward differences, Neumann BC.''' - Nrows = x.get_dimension_size('horizontal_x') - Ncols = x.get_dimension_size('horizontal_y') - Nchannels = 1 - if len(x.shape) == 4: - Nchannels = x.get_dimension_size('channel') - zer = numpy.zeros((Nrows,1)) - xxx = x.as_array()[0,:,:-1] - # - h = numpy.concatenate((zer,xxx), 1) - h -= numpy.concatenate((xxx,zer), 1) - - zer = numpy.zeros((1,Ncols)) - xxx = x.as_array()[1,:-1,:] - # - v = numpy.concatenate((zer,xxx), 0) - v -= numpy.concatenate((xxx,zer), 0) - return type(x)(h + v, False, geometry=x.geometry) - - def size(self): - return NotImplemented - - def get_max_sing_val(self): - return self.s1 - -def PowerMethodNonsquareOld(op,numiters): - # Initialise random - # Jakob's - #inputsize = op.size()[1] - #x0 = ImageContainer(numpy.random.randn(*inputsize) - # Edo's - #vg = ImageGeometry(voxel_num_x=inputsize[0], - # voxel_num_y=inputsize[1], - # voxel_num_z=inputsize[2]) - # - #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x']) - #print (x0) - #x0.fill(numpy.random.randn(*x0.shape)) - - x0 = op.create_image_data() - - s = numpy.zeros(numiters) - # Loop - for it in numpy.arange(numiters): - x1 = op.adjoint(op.direct(x0)) - x1norm = numpy.sqrt((x1**2).sum()) - #print ("x0 **********" ,x0) - #print ("x1 **********" ,x1) - s[it] = (x1*x0).sum() / (x0*x0).sum() - x0 = (1.0/x1norm)*x1 - return numpy.sqrt(s[-1]), numpy.sqrt(s), x0 - -#def PowerMethod(op,numiters): -# # Initialise random -# x0 = np.random.randn(400) -# s = np.zeros(numiters) -# # Loop -# for it in np.arange(numiters): -# x1 = np.dot(op.transpose(),np.dot(op,x0)) -# x1norm = np.sqrt(np.sum(np.dot(x1,x1))) -# s[it] = np.dot(x1,x0) / np.dot(x1,x0) -# x0 = (1.0/x1norm)*x1 -# return s, x0 - - -def PowerMethodNonsquare(op,numiters , x0=None): - # Initialise random - # Jakob's - # inputsize , outputsize = op.size() - #x0 = ImageContainer(numpy.random.randn(*inputsize) - # Edo's - #vg = ImageGeometry(voxel_num_x=inputsize[0], - # voxel_num_y=inputsize[1], - # voxel_num_z=inputsize[2]) - # - #x0 = ImageData(geometry = vg, dimension_labels=['vertical','horizontal_y','horizontal_x']) - #print (x0) - #x0.fill(numpy.random.randn(*x0.shape)) - - if x0 is None: - #x0 = op.create_image_data() - x0 = op.allocate_direct() - x0.fill(numpy.random.randn(*x0.shape)) - - s = numpy.zeros(numiters) - # Loop - for it in numpy.arange(numiters): - x1 = op.adjoint(op.direct(x0)) - #x1norm = numpy.sqrt((x1*x1).sum()) - x1norm = x1.norm() - #print ("x0 **********" ,x0) - #print ("x1 **********" ,x1) - s[it] = (x1*x0).sum() / (x0.squared_norm()) - x0 = (1.0/x1norm)*x1 - return numpy.sqrt(s[-1]), numpy.sqrt(s), x0 - -class LinearOperatorMatrix(Operator): - def __init__(self,A): - self.A = A - self.s1 = None # Largest singular value, initially unknown - super(LinearOperatorMatrix, self).__init__() - - def direct(self,x, out=None): - if out is None: - return type(x)(numpy.dot(self.A,x.as_array())) - else: - numpy.dot(self.A, x.as_array(), out=out.as_array()) - - - def adjoint(self,x, out=None): - if out is None: - return type(x)(numpy.dot(self.A.transpose(),x.as_array())) - else: - numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) - - - def size(self): - return self.A.shape - - def get_max_sing_val(self): - # If unknown, compute and store. If known, simply return it. 
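# For reference, PowerMethodNonsquare above estimates the same quantity without
# forming a dense matrix: power iteration on op.adjoint(op.direct(.)) converges
# to the largest singular value of op. A minimal sketch (illustrative; assumes the
# operator exposes direct/adjoint/allocate_direct and the containers support
# arithmetic, .fill(), .sum() and .norm()):
import numpy

def power_method_sketch(op, num_iters=25, x0=None):
    '''Estimate the largest singular value of a linear operator by power iteration.'''
    if x0 is None:
        x0 = op.allocate_direct()
        x0.fill(numpy.random.randn(*x0.shape))
    s = 0.0
    for _ in range(num_iters):
        x1 = op.adjoint(op.direct(x0))          # one step of power iteration on op^T op
        s = (x1 * x0).sum() / (x0 * x0).sum()   # Rayleigh quotient, estimates ||op||^2
        x0 = (1.0 / x1.norm()) * x1             # renormalise for the next step
    return numpy.sqrt(s)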
- if self.s1 is None: - self.s1 = svds(self.A,1,return_singular_vectors=False)[0] - return self.s1 - else: - return self.s1 - def allocate_direct(self): - '''allocates the memory to hold the result of adjoint''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((N_A,1)) - return DataContainer(out) - def allocate_adjoint(self): - '''allocate the memory to hold the result of direct''' - #numpy.dot(self.A.transpose(),x.as_array()) - M_A, N_A = self.A.shape - out = numpy.zeros((M_A,1)) - return DataContainer(out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py b/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py deleted file mode 100644 index 263a7cd..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/spdhg.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy - -from ccpi.optimisation.funcs import Function -from ccpi.framework import ImageData -from ccpi.framework import AcquisitionData - - -class spdhg(): - """Computes a saddle point with a stochastic PDHG. - - This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that - - (x*, y*) in arg min_x max_y sum_i=1^n - f*[i](y_i) + g(x) - - where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and - proper functionals. For this algorithm, they all may be non-smooth and no - strong convexity is assumed. - - Parameters - ---------- - f : list of functions - Functionals Y[i] -> IR_infty that all have a convex conjugate with a - proximal operator, i.e. - f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i]. - g : function - Functional X -> IR_infty that has a proximal operator, i.e. - g.prox(tau) : X -> X. - A : list of functions - Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint - x : primal variable, optional - By default equals 0. - y : dual variable, optional - Part of a product space. By default equals 0. - z : variable, optional - Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0. - tau : scalar / vector / matrix, optional - Step size for primal variable. Note that the proximal operator of g - has to be well-defined for this input. - sigma : scalar, optional - Scalar / vector / matrix used as step size for dual variable. Note that - the proximal operator related to f (see above) has to be well-defined - for this input. - prob : list of scalars, optional - Probabilities prob[i] that a subset i is selected in each iteration. - If fun_select is not given, then the sum of all probabilities must - equal 1. - A_norms : list of scalars, optional - Norms of the operators in A. Can be used to determine the step sizes - tau and sigma and the probabilities prob. - fun_select : function, optional - Function that selects blocks at every iteration IN -> {1,...,n}. 
By - default this is serial sampling, fun_select(k) selects an index - i \in {1,...,n} with probability prob[i]. - - References - ---------- - [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, - *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling - and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808 - (2018) http://doi.org/10.1007/s10851-010-0251-1 - - [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott, - A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a - stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII, - 58 (2017) http://doi.org/10.1117/12.2272946. - - [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster - PET Reconstruction with Non-Smooth Priors by Randomization and - Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150 - """ - - def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None, - prob=None, A_norms=None, fun_select=None): - # fun_select is optional and by default performs serial sampling - - if x is None: - x = A[0].allocate_direct(0) - - if y is None: - if z is not None: - raise ValueError('y and z have to be defaulted together') - - y = [Ai.allocate_adjoint(0) for Ai in A] - z = 0 * x.copy() - - else: - if z is None: - raise ValueError('y and z have to be defaulted together') - - if A_norms is not None: - if tau is not None or sigma is not None or prob is not None: - raise ValueError('Either A_norms or (tau, sigma, prob) must ' - 'be given') - - tau = 1 / sum(A_norms) - sigma = [1 / nA for nA in A_norms] - prob = [nA / sum(A_norms) for nA in A_norms] - - #uniform prob, needs different sigma and tau - #n = len(A) - #prob = [1./n] * n - - if fun_select is None: - if prob is None: - raise ValueError('prob was not determined') - - def fun_select(k): - return [int(numpy.random.choice(len(A), 1, p=prob))] - - self.iter = 0 - self.x = x - - self.y = y - self.z = z - - self.f = f - self.g = g - self.A = A - self.tau = tau - self.sigma = sigma - self.prob = prob - self.fun_select = fun_select - - # Initialize variables - self.z_relax = z.copy() - self.tmp = self.x.copy() - - def update(self): - # select block - selected = self.fun_select(self.iter) - - # update primal variable - #tmp = (self.x - self.tau * self.z_relax).as_array() - #self.x.fill(self.g.prox(tmp, self.tau)) - self.tmp = - self.tau * self.z_relax - self.tmp += self.x - self.x = self.g.prox(self.tmp, self.tau) - - # update dual variable and z, z_relax - self.z_relax = self.z.copy() - for i in selected: - # save old yi - y_old = self.y[i].copy() - - # y[i]= prox(tmp) - tmp = y_old + self.sigma[i] * self.A[i].direct(self.x) - self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i]) - - # update adjoint of dual variable - dz = self.A[i].adjoint(self.y[i] - y_old) - self.z += dz - - # compute extrapolation - self.z_relax += (1 + 1 / self.prob[i]) * dz - - self.iter += 1 - - -## Functions - -class KullbackLeibler(Function): - def __init__(self, data, background): - self.data = data - self.background = background - self.__offset = None - - def __call__(self, x): - """Return the KL-diveregnce in the point ``x``. - - If any components of ``x`` is non-positive, the value is positive - infinity. - - Needs one extra array of memory of the size of `prior`. 
- """ - - # define short variable names - y = self.data - r = self.background - - # Compute - # sum(x + r - y + y * log(y / (x + r))) - # = sum(x - y * log(x + r)) + self.offset - # Assume that - # x + r > 0 - - # sum the result up - obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset() - - if numpy.isnan(obj): - # In this case, some element was less than or equal to zero - return numpy.inf - else: - return obj - - @property - def convex_conj(self): - """The convex conjugate functional of the KL-functional.""" - return KullbackLeiblerConvexConjugate(self.data, self.background) - - def offset(self): - """The offset which is independent of the unknown.""" - - if self.__offset is None: - tmp = self.domain.element() - - # define short variable names - y = self.data - r = self.background - - tmp = self.domain.element(numpy.maximum(y, 1)) - tmp = r - y + y * numpy.log(tmp) - - # sum the result up - self.__offset = numpy.sum(tmp) - - return self.__offset - -# def __repr__(self): -# """to be added???""" -# """Return ``repr(self)``.""" - # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__, - ## self.domain, self.data, - # self.background) - - -class KullbackLeiblerConvexConjugate(Function): - """The convex conjugate of Kullback-Leibler divergence functional. - - Notes - ----- - The functional :math:`F^*` with prior :math:`g>0` is given by: - - .. math:: - F^*(x) - = - \\begin{cases} - \\sum_{i} \left( -g_i \ln(1 - x_i) \\right) - & \\text{if } x_i < 1 \\forall i - \\\\ - +\\infty & \\text{else} - \\end{cases} - - See Also - -------- - KullbackLeibler : convex conjugate functional - """ - - def __init__(self, data, background): - self.data = data - self.background = background - - def __call__(self, x): - y = self.data - r = self.background - - tmp = numpy.sum(- x * r - y * numpy.log(1 - x)) - - if numpy.isnan(tmp): - # In this case, some element was larger than or equal to one - return numpy.inf - else: - return tmp - - - def prox(self, x, tau, out=None): - # Let y = data, r = background, z = x + tau * r - # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y)) - # Currently it needs 3 extra copies of memory. - - if out is None: - out = x.copy() - - # define short variable names - try: # this should be standard SIRF/CIL mode - y = self.data.as_array() - r = self.background.as_array() - x = x.as_array() - - try: - taua = tau.as_array() - except: - taua = tau - - z = x + tau * r - - out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))) - - return out - - except: # e.g. 
for NumPy - y = self.data - r = self.background - - try: - taua = tau.as_array() - except: - taua = tau - - z = x + tau * r - - out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)) - - return out - - @property - def convex_conj(self): - return KullbackLeibler(self.data, self.background) - - -def mult(x, y): - try: - xa = x.as_array() - except: - xa = x - - out = y.clone() - out.fill(xa * y.as_array()) - - return out diff --git a/Wrappers/Python/build/lib/ccpi/processors.py b/Wrappers/Python/build/lib/ccpi/processors.py deleted file mode 100644 index ccef410..0000000 --- a/Wrappers/Python/build/lib/ccpi/processors.py +++ /dev/null @@ -1,514 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018 Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License - -from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ - AcquisitionGeometry, ImageGeometry, ImageData -from ccpi.reconstruction.parallelbeam import alg as pbalg -import numpy -from scipy import ndimage - -import matplotlib.pyplot as plt - - -class Normalizer(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): - kwargs = { - 'flat_field' : flat_field, - 'dark_field' : dark_field, - # very small number. 
Used when there is a division by zero - 'tolerance' : tolerance - } - - #DataProcessor.__init__(self, **kwargs) - super(Normalizer, self).__init__(**kwargs) - if not flat_field is None: - self.set_flat_field(flat_field) - if not dark_field is None: - self.set_dark_field(dark_field) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3 or\ - dataset.number_of_dimensions == 2: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def set_dark_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Dark Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.dark_field = df - elif issubclass(type(df), DataContainer): - self.dark_field = self.set_dark_field(df.as_array()) - - def set_flat_field(self, df): - if type(df) is numpy.ndarray: - if len(numpy.shape(df)) == 3: - raise ValueError('Flat Field should be 2D') - elif len(numpy.shape(df)) == 2: - self.flat_field = df - elif issubclass(type(df), DataContainer): - self.flat_field = self.set_flat_field(df.as_array()) - - @staticmethod - def normalize_projection(projection, flat, dark, tolerance): - a = (projection - dark) - b = (flat-dark) - with numpy.errstate(divide='ignore', invalid='ignore'): - c = numpy.true_divide( a, b ) - c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0 - return c - - @staticmethod - def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): - '''returns the estimated relative error of the normalised projection - - n = (projection - dark) / (flat - dark) - Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) - ''' - a = (projection - dark) - b = (flat-dark) - df = delta_flat / flat - dd = delta_dark / dark - rel_norm_error = (b + a) / (b * a) * (df + dd) - return rel_norm_error - - def process(self, out=None): - - projections = self.get_input() - dark = self.dark_field - flat = self.flat_field - - if projections.number_of_dimensions == 3: - if not (projections.shape[1:] == dark.shape and \ - projections.shape[1:] == flat.shape): - raise ValueError('Flats/Dark and projections size do not match.') - - - a = numpy.asarray( - [ Normalizer.normalize_projection( - projection, flat, dark, self.tolerance) \ - for projection in projections.as_array() ] - ) - elif projections.number_of_dimensions == 2: - a = Normalizer.normalize_projection(projections.as_array(), - flat, dark, self.tolerance) - y = type(projections)( a , True, - dimension_labels=projections.dimension_labels, - geometry=projections.geometry) - return y - - -class CenterOfRotationFinder(DataProcessor): - '''Processor to find the center of rotation in a parallel beam experiment - - This processor read in a AcquisitionDataSet and finds the center of rotation - based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078 - - Input: AcquisitionDataSet - - Output: float. 
center of rotation in pixel coordinate - ''' - - def __init__(self): - kwargs = { - - } - - #DataProcessor.__init__(self, **kwargs) - super(CenterOfRotationFinder, self).__init__(**kwargs) - - def check_input(self, dataset): - if dataset.number_of_dimensions == 3: - if dataset.geometry.geom_type == 'parallel': - return True - else: - raise ValueError('{0} is suitable only for parallel beam geometry'\ - .format(self.__class__.__name__)) - else: - raise ValueError("Expected input dimensions is 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - - # ######################################################################### - # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. # - # # - # Copyright 2015. UChicago Argonne, LLC. This software was produced # - # under U.S. Government contract DE-AC02-06CH11357 for Argonne National # - # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the # - # U.S. Department of Energy. The U.S. Government has rights to use, # - # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR # - # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR # - # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is # - # modified to produce derivative works, such modified software should # - # be clearly marked, so as not to confuse it with the version available # - # from ANL. # - # # - # Additionally, redistribution and use in source and binary forms, with # - # or without modification, are permitted provided that the following # - # conditions are met: # - # # - # * Redistributions of source code must retain the above copyright # - # notice, this list of conditions and the following disclaimer. # - # # - # * Redistributions in binary form must reproduce the above copyright # - # notice, this list of conditions and the following disclaimer in # - # the documentation and/or other materials provided with the # - # distribution. # - # # - # * Neither the name of UChicago Argonne, LLC, Argonne National # - # Laboratory, ANL, the U.S. Government, nor the names of its # - # contributors may be used to endorse or promote products derived # - # from this software without specific prior written permission. # - # # - # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS # - # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # - # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # - # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago # - # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # - # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # - # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # - # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # - # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # - # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # - # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # - # POSSIBILITY OF SUCH DAMAGE. 
# - # ######################################################################### - - @staticmethod - def as_ndarray(arr, dtype=None, copy=False): - if not isinstance(arr, numpy.ndarray): - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_dtype(arr, dtype, copy=False): - if not arr.dtype == dtype: - arr = numpy.array(arr, dtype=dtype, copy=copy) - return arr - - @staticmethod - def as_float32(arr): - arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32) - return CenterOfRotationFinder.as_dtype(arr, numpy.float32) - - - - - @staticmethod - def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5, - ratio=2., drop=20): - """ - Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. - - Parameters - ---------- - tomo : ndarray - 3D tomographic data. - ind : int, optional - Index of the slice to be used for reconstruction. - smin, smax : int, optional - Reference to the horizontal center of the sinogram. - srad : float, optional - Fine search radius. - step : float, optional - Step of fine searching. - ratio : float, optional - The ratio between the FOV of the camera and the size of object. - It's used to generate the mask. - drop : int, optional - Drop lines around vertical center of the mask. - - Returns - ------- - float - Rotation axis location. - - Notes - ----- - The function may not yield a correct estimate, if: - - - the sample size is bigger than the field of view of the camera. - In this case the ``ratio`` argument need to be set larger - than the default of 2.0. - - - there is distortion in the imaging hardware. If there's - no correction applied, the center of the projection image may - yield a better estimate. - - - the sample contrast is weak. Paganin's filter need to be applied - to overcome this. - - - the sample was changed during the scan. - """ - tomo = CenterOfRotationFinder.as_float32(tomo) - - if ind is None: - ind = tomo.shape[1] // 2 - _tomo = tomo[:, ind, :] - - - - # Reduce noise by smooth filters. Use different filters for coarse and fine search - _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) - _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) - - # Coarse and fine searches for finding the rotation center. - if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k) - #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :] - #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop) - #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin, - smax, ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - else: - init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, - smin, smax, - ratio, drop) - fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad, - step, init_cen, - ratio, drop) - - #logger.debug('Rotation center search finished: %i', fine_cen) - return fine_cen - - - @staticmethod - def _search_coarse(sino, smin, smax, ratio, drop): - """ - Coarse search for finding the rotation center. 
- """ - (Nrow, Ncol) = sino.shape - centerfliplr = (Ncol - 1.0) / 2.0 - - # Copy the sinogram and flip left right, the purpose is to - # make a full [0;2Pi] sinogram - _copy_sino = numpy.fliplr(sino[1:]) - - # This image is used for compensating the shift of sinogram 2 - temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32') - temp_img[:] = sino[-1] - - # Start coarse search in which the shift step is 1 - listshift = numpy.arange(smin, smax + 1) - listmetric = numpy.zeros(len(listshift), dtype='float32') - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol, - 0.5 * ratio * Ncol, drop) - for i in listshift: - _sino = numpy.roll(_copy_sino, i, axis=1) - if i >= 0: - _sino[:, 0:i] = temp_img[:, 0:i] - else: - _sino[:, i:] = temp_img[:, i:] - listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # numpy.vstack((sino, _sino))) - numpy.fft.fft2(numpy.vstack((sino, _sino))) - )) * mask) - minpos = numpy.argmin(listmetric) - return centerfliplr + listshift[minpos] / 2.0 - - @staticmethod - def _search_fine(sino, srad, step, init_cen, ratio, drop): - """ - Fine search for finding the rotation center. - """ - Nrow, Ncol = sino.shape - centerfliplr = (Ncol + 1.0) / 2.0 - 1.0 - # Use to shift the sinogram 2 to the raw CoR. - shiftsino = numpy.int16(2 * (init_cen - centerfliplr)) - _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1) - if init_cen <= centerfliplr: - lefttake = numpy.int16(numpy.ceil(srad + 1)) - righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1)) - else: - lefttake = numpy.int16(numpy.ceil( - init_cen - (Ncol - 1 - init_cen) + srad + 1)) - righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1)) - Ncol1 = righttake - lefttake + 1 - mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1, - 0.5 * ratio * Ncol, drop) - numshift = numpy.int16((2 * srad) / step) + 1 - listshift = numpy.linspace(-srad, srad, num=numshift) - listmetric = numpy.zeros(len(listshift), dtype='float32') - factor1 = numpy.mean(sino[-1, lefttake:righttake]) - num1 = 0 - for i in listshift: - _sino = ndimage.interpolation.shift( - _copy_sino, (0, i), prefilter=False) - factor2 = numpy.mean(_sino[0,lefttake:righttake]) - _sino = _sino * factor1 / factor2 - sinojoin = numpy.vstack((sino, _sino)) - listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift( - #pyfftw.interfaces.numpy_fft.fft2( - # sinojoin[:, lefttake:righttake + 1]) - numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1]) - )) * mask) - num1 = num1 + 1 - minpos = numpy.argmin(listmetric) - return init_cen + listshift[minpos] / 2.0 - - @staticmethod - def _create_mask(nrow, ncol, radius, drop): - du = 1.0 / ncol - dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi) - centerrow = numpy.ceil(nrow / 2) - 1 - centercol = numpy.ceil(ncol / 2) - 1 - # added by Edoardo Pasca - centerrow = int(centerrow) - centercol = int(centercol) - mask = numpy.zeros((nrow, ncol), dtype='float32') - for i in range(nrow): - num1 = numpy.round(((i - centerrow) * dv / radius) / du) - (p1, p2) = numpy.int16(numpy.clip(numpy.sort( - (-num1 + centercol, num1 + centercol)), 0, ncol - 1)) - mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32') - if drop < centerrow: - mask[centerrow - drop:centerrow + drop + 1, - :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32') - mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32') - return mask - - def process(self, out=None): - - projections = self.get_input() - - cor = CenterOfRotationFinder.find_center_vo(projections.as_array()) 
- - return cor - - -class AcquisitionDataPadder(DataProcessor): - '''Normalization based on flat and dark - - This processor read in a AcquisitionData and normalises it based on - the instrument reading with and without incident photons or neutrons. - - Input: AcquisitionData - Parameter: 2D projection with flat field (or stack) - 2D projection with dark field (or stack) - Output: AcquisitionDataSetn - ''' - - def __init__(self, - center_of_rotation = None, - acquisition_geometry = None, - pad_value = 1e-5): - kwargs = { - 'acquisition_geometry' : acquisition_geometry, - 'center_of_rotation' : center_of_rotation, - 'pad_value' : pad_value - } - - super(AcquisitionDataPadder, self).__init__(**kwargs) - - def check_input(self, dataset): - if self.acquisition_geometry is None: - self.acquisition_geometry = dataset.geometry - if dataset.number_of_dimensions == 3: - return True - else: - raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ - .format(dataset.number_of_dimensions)) - - def process(self, out=None): - projections = self.get_input() - w = projections.get_dimension_size('horizontal') - delta = w - 2 * self.center_of_rotation - - padded_width = int ( - numpy.ceil(abs(delta)) + w - ) - delta_pix = padded_width - w - - voxel_per_pixel = 1 - geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), - self.acquisition_geometry.angles, - self.center_of_rotation, - voxel_per_pixel ) - - padded_geometry = self.acquisition_geometry.clone() - - padded_geometry.pixel_num_h = geom['n_h'] - padded_geometry.pixel_num_v = geom['n_v'] - - delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h - delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v - - if delta_pix_h == 0: - delta_pix_h = delta_pix - padded_geometry.pixel_num_h = padded_width - #initialize a new AcquisitionData with values close to 0 - out = AcquisitionData(geometry=padded_geometry) - out = out + self.pad_value - - - #pad in the horizontal-vertical plane -> slice on angles - if delta > 0: - #pad left of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - else: - #pad right of middle - command = "out.array[" - for i in range(out.number_of_dimensions): - if out.dimension_labels[i] == 'horizontal': - value = '{0}:{1}'.format(0, w) - command = command + str(value) - else: - if out.dimension_labels[i] == 'vertical' : - value = '{0}:'.format(delta_pix_v) - command = command + str(value) - else: - command = command + ":" - if i < out.number_of_dimensions -1: - command = command + ',' - command = command + '] = projections.array' - #print (command) - #cleaned = eval(command) - exec(command) - return out \ No newline at end of file -- cgit v1.2.3 From 584df30253cc36920f17f6b0bc438c978c3d8462 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 15:24:35 +0100 Subject: working no optimisation --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 143 +++++++++++++++------ 1 file changed, 102 insertions(+), 41 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py 
b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 5bf96cc..0b9921c 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -6,7 +6,7 @@ Created on Mon Feb 4 16:18:06 2019 @author: evangelos """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData +from ccpi.framework import ImageData, DataContainer import numpy as np import time from ccpi.optimisation.operators import BlockOperator @@ -23,6 +23,7 @@ class PDHG(Algorithm): self.g = kwargs.get('g', None) self.tau = kwargs.get('tau', None) self.sigma = kwargs.get('sigma', None) + self.memopt = kwargs.get('memopt', False) if self.f is not None and self.operator is not None and \ self.g is not None: @@ -76,11 +77,32 @@ class PDHG(Algorithm): #self.y = self.y_old def update_objective(self): - self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x), - -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) - ]) - - + p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) + d1 = -(self.f.convex_conjugate(self.y) + self.g(-1*self.operator.adjoint(self.y))) + + self.loss.append([p1,d1,p1-d1]) + + +def assertBlockDataContainerEqual(container1, container2): + print ("assert Block Data Container Equal") + assert issubclass(container1.__class__, container2.__class__) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + +def assertNumpyArrayEqual(first, second): + res = True + try: + np.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + assert res def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): @@ -98,16 +120,19 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - + if memopt: + print ("memopt") + else: + print("no memopt") x_old = operator.domain_geometry().allocate() y_old = operator.range_geometry().allocate() - xbar = x_old - x_tmp = x_old - x = x_old + xbar = x_old.copy() + x_tmp = x_old.copy() + x = x_old.copy() - y_tmp = y_old - y = y_tmp + y_tmp = y_old.copy() + y = y_tmp.copy() # relaxation parameter theta = 1 @@ -120,36 +145,72 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): for i in range(niter): - -# # Gradient descent, Dual problem solution -# y_tmp = y_old + sigma * operator.direct(xbar) - y_tmp = operator.direct(xbar) - y_tmp *= sigma - y_tmp +=y_old - - y = f.proximal_conjugate(y_tmp, sigma) + if memopt: + # # Gradient descent, Dual problem solution + # y_tmp = y_old + sigma * operator.direct(xbar) + #y_tmp = operator.direct(xbar) + operator.direct(xbar, out=y_tmp) + y_tmp *= sigma + y_tmp +=y_old + + y = f.proximal_conjugate(y_tmp, sigma) + #f.proximal_conjugate(y_tmp, sigma, out=y) + + # Gradient ascent, Primal problem solution + # x_tmp = x_old - tau * operator.adjoint(y) + + #x_tmp = operator.adjoint(y) + operator.adjoint(y, out=x_tmp) + x_tmp *=-tau + x_tmp +=x_old + + #x = g.proximal(x_tmp, tau) + g.proximal(x_tmp, tau, out=x) + + #Update + # xbar = x + theta * (x - x_old) + x.subtract(x_old, out=xbar) + xbar *= theta + xbar += 
x + + x_old.fill(x) + y_old.fill(y) + else: + + # # Gradient descent, Dual problem solution + y_tmp1 = y_old + sigma * operator.direct(xbar) + # y_tmp = operator.direct(xbar) + operator.direct(xbar, out=y_tmp) + y_tmp *= sigma + y_tmp +=y_old + #print ("y_tmp1 equale y_tmp?") + #assertBlockDataContainerEqual(y_tmp1, y_tmp) + + y = f.proximal_conjugate(y_tmp, sigma) + #f.proximal_conjugate(y_tmp, sigma, out=y) + #print ("y1 equale y?") + #assertBlockDataContainerEqual(y1, y) + # Gradient ascent, Primal problem solution + x_tmp1 = x_old - tau * operator.adjoint(y) + + # x_tmp = operator.adjoint(y) + operator.adjoint(y, out=x_tmp) + x_tmp *=-tau + x_tmp +=x_old + + assertNumpyArrayEqual(x_tmp.as_array(),x_tmp1.as_array()) - # Gradient ascent, Primal problem solution -# x_tmp = x_old - tau * operator.adjoint(y) - - x_tmp = operator.adjoint(y) - x_tmp *=-tau - x_tmp +=x_old - - x = g.proximal(x_tmp, tau) - - #Update -# xbar = x + theta * (x - x_old) - xbar = x - x_old - xbar *= theta - xbar += x - - x_old = x - y_old = y - -# operator.direct(xbar, out = y_tmp) -# y_tmp *= sigma -# y_tmp +=y_old + x = g.proximal(x_tmp, tau) + # g.proximal(x_tmp, tau, out=x) + + #Update + xbar = x + theta * (x - x_old) + # xbar = x - x_old + # xbar *= theta + # xbar += x + + x_old = x + y_old = y -- cgit v1.2.3 From d1cd883ce417ae08cfc7f853377f3e17fa55be01 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:36:25 +0100 Subject: set output image to 0 --- .../Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 3d2a96b..db9f09d 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -54,7 +54,9 @@ class FiniteDiff(LinearOperator): out = np.zeros_like(x_asarr) fd_arr = out else: - fd_arr = out.as_array() + fd_arr = out.as_array() + # set the array to 0 + fd_arr[:] = 0 # if out is None: # out = self.gm_domain.allocate().as_array() @@ -70,7 +72,7 @@ class FiniteDiff(LinearOperator): np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) if self.bnd_cond == 'Neumann': - pass + pass elif self.bnd_cond == 'Periodic': np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) else: -- cgit v1.2.3 From c20f443cb80221260e284668fad2053798f9000a Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:37:20 +0100 Subject: fix out --- .../ccpi/optimisation/functions/BlockFunction.py | 26 +++++++++++++++------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 81c16cd..bbf1d29 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -52,15 +52,25 @@ class BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): '''proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + if out is not None: + if isinstance(tau, Number): + for i in range(self.length): + 
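# Hedged aside, not part of this hunk: with or without `out`, the intent of
# proximal_conjugate here is block-wise application of each component
# function, e.g. for the TV-denoising setup used elsewhere in this tree,
#     F = BlockFunction(MixedL21Norm(), L2NormSquared(b=noisy_data))
#     z = F.proximal_conjugate(x, tau)   # z.get_item(i) comes from functions[i]
# where tau may be a scalar or a BlockDataContainer matched element-wise.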
self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) + else: + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) + else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) + + out = [None]*self.length + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) + + return BlockDataContainer(*out) def proximal(self, x, tau, out = None): '''proximal does not take into account the BlockOperator''' -- cgit v1.2.3 From 430618bd44aec77468900222b996e1d3c32dae03 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:38:19 +0100 Subject: wrong indentation --- Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index f96c7a1..3c06641 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -69,10 +69,10 @@ class L2NormSquared(Function): out *= 2 else: y = x - if self.b is not None: -# x.subtract(self.b, out=x) - y = x - self.b - return 2*y + if self.b is not None: + # x.subtract(self.b, out=x) + y = x - self.b + return 2*y def convex_conjugate(self, x, out=None): -- cgit v1.2.3 From da02e629dcd85b2dc1e06a4a8d8bff973fc70a88 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:39:58 +0100 Subject: nothing really --- .../ccpi/optimisation/functions/MixedL21Norm.py | 28 +++++++++++++++------- .../ccpi/optimisation/functions/ScaledFunction.py | 4 +++- 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 4266e51..ed1d5e5 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -87,17 +87,29 @@ class MixedL21Norm(Function): res = BlockDataContainer(*frac) return res - -# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# res = x.divide(ImageData(tmp2).maximum(1.0)) else: +# pass + - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) +# # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha +# # res = x.divide(ImageData(tmp2).maximum(1.0)) +# if out is None: + + tmp = [ el*el for el in x] + res = (sum(tmp).sqrt()).maximum(1.0) + frac = [x[i]/res for i in range(x.shape[0])] + res = BlockDataContainer(*frac) - return res + return res + # else: + # tmp = [ el*el for el in x] + # res = (sum(tmp).sqrt()).maximum(1.0) + # #frac = [x[i]/res for i in range(x.shape[0])] + # for i in range(x.shape[0]): + # a = out.get_item(i) + # b = x.get_item(i) + # b /= res + # a.fill( b ) def __rmul__(self, scalar): return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 
046a4a6..9e2ba0c 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -61,7 +61,9 @@ class ScaledFunction(object): if out is None: return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) else: - out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + out.fill(self.scalar*self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + #self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out) + #out *= self.scalar def grad(self, x): '''Alias of gradient(x,None)''' -- cgit v1.2.3 From 5c74019510f95599b87ba869a7b8efc71edcde23 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:01:14 +0100 Subject: fix fill method --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 9664037..13663c2 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -186,9 +186,14 @@ class BlockDataContainer(object): return self.clone() def clone(self): return type(self)(*[el.copy() for el in self.containers], shape=self.shape) - def fill(self, x): - for el,ot in zip(self.containers, x): - el.fill(ot) + def fill(self, other): + if isinstance (other, BlockDataContainer): + if not self.is_compatible(other): + raise ValueError('Incompatible containers') + for el,ot in zip(self.containers, other.containers): + el.fill(ot) + else: + return ValueError('Cannot fill with object provided {}'.format(type(other))) def __add__(self, other): return self.add( other ) -- cgit v1.2.3 From d8c15850eb715f5865c9bdf6bada6a8fb1602518 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 11 Apr 2019 12:01:41 +0100 Subject: memopt test --- .../Python/ccpi/framework/BlockDataContainer.py | 2 +- .../Python/ccpi/optimisation/algorithms/PDHG.py | 71 +++++---- .../ccpi/optimisation/functions/MixedL21Norm.py | 24 ++- .../Python/ccpi/optimisation/functions/ZeroFun.py | 9 +- .../operators/FiniteDifferenceOperator.py | 169 ++++++++++++--------- .../optimisation/operators/GradientOperator.py | 34 +---- Wrappers/Python/wip/pdhg_TV_denoising.py | 14 +- Wrappers/Python/wip/test_profile.py | 101 +++++++----- 8 files changed, 236 insertions(+), 188 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 9664037..888950d 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -187,7 +187,7 @@ class BlockDataContainer(object): def clone(self): return type(self)(*[el.copy() for el in self.containers], shape=self.shape) def fill(self, x): - for el,ot in zip(self.containers, x): + for el,ot in zip(self.containers, x.containers): el.fill(ot) def __add__(self, other): diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 46a1969..d07005a 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -6,8 +6,9 @@ Created on Mon Feb 4 16:18:06 2019 @author: evangelos """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData +from 
ccpi.framework import ImageData, DataContainer import numpy as np +import numpy import time from ccpi.optimisation.operators import BlockOperator from ccpi.framework import BlockDataContainer @@ -80,6 +81,29 @@ class PDHG(Algorithm): -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) ]) + + +def assertBlockDataContainerEqual( container1, container2): + print ("assert Block Data Container Equal") + assert issubclass(container1.__class__, container2.__class__) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + +def assertNumpyArrayEqual(first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + assert res def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): @@ -102,12 +126,12 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old = operator.domain_geometry().allocate() y_old = operator.range_geometry().allocate() - xbar = x_old - x_tmp = x_old - x = x_old + xbar = x_old.copy() + x_tmp = x_old.copy() + x = x_old.copy() - y_tmp = y_old - y = y_old + y_tmp = y_old.copy() + y = y_old.copy() # relaxation parameter theta = 1 @@ -137,37 +161,26 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): else: - operator.direct(xbar, out = y_tmp) - - y_tmp.multiply(sigma, out = y_tmp) - y_tmp.add(y_old, out = y_tmp) -# y_tmp.__imul__(sigma) -# y_tmp.__iadd__(y_old) - -# y_tmp *= sigma -# y_tmp += y_old - -# y_tmp = y_old + sigma * operator.direct(xbar) + + operator.direct(xbar, out = y_tmp) + y_tmp *= sigma + y_tmp += y_old f.proximal_conjugate(y_tmp, sigma, out=y) - -# x_tmp = x_old - tau * operator.adjoint(y) - + operator.adjoint(y, out = x_tmp) - x_tmp.multiply(-tau, out = x_tmp) - x_tmp.add(x_old, out = x_tmp) - - -# x_tmp *= -tau -# x_tmp += x_old + x_tmp *= -tau + x_tmp += x_old g.proximal(x_tmp, tau, out = x) xbar = x - x_old xbar *= theta xbar += x - - x_old.fill(x) - y_old.fill(y) + + + x_old = x.copy() + y_old = y.copy() + # pass # diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index dd463c0..639f7bf 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -91,6 +91,8 @@ class MixedL21Norm(Function): res = BlockDataContainer(*frac) return res else: + + pass # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha @@ -101,19 +103,25 @@ class MixedL21Norm(Function): tmp = [ el*el for el in x] res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - return res +# res = sum(x**2).sqrt().maximum(1.0) + + #frac = [x[i]/res for i in range(x.shape[0])] + #res = BlockDataContainer(*frac) + + return x/res else: - tmp = [ el*el for el in x] + tmp = x**2 #[ el*el for el in x] res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] -# res = (sum(x**2).sqrt()).maximum(1.0) -# return x/res - out.fill(frac) + +# res = sum(x**2).sqrt().maximum(1.0) +# frac = [x[i]/res for i in range(x.shape[0])] +# res = 
BlockDataContainer(*frac) +# res = sum(x**2).sqrt().maximum(1.0) + out.fill(x/res) +# return res diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py index 88d9b64..6d21acb 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py @@ -45,14 +45,17 @@ class ZeroFun(Function): else: return x.maximum(0).sum() + x.maximum(0).sum() - def proximal(self,x,tau, out=None): + def proximal(self, x, tau, out=None): if out is None: return x.copy() else: out.fill(x) - def proximal_conjugate(self, x, tau): - return 0 + def proximal_conjugate(self, x, tau, out = None): + if out is None: + return 0 + else: + return 0 def domain_geometry(self): pass diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 3d2a96b..8e73ff2 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -52,38 +52,33 @@ class FiniteDiff(LinearOperator): if out is None: out = np.zeros_like(x_asarr) - fd_arr = out else: - fd_arr = out.as_array() - -# if out is None: -# out = self.gm_domain.allocate().as_array() -# -# fd_arr = out.as_array() -# fd_arr = self.gm_domain.allocate().as_array() - + out = out.as_array() + out[:]=0 + + ######################## Direct for 2D ############################### if x_sz == 2: if self.direction == 1: - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = out[:,0:-1] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = out[:,-1] ) else: raise ValueError('No valid boundary conditions') if self.direction == 0: - np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) + np.subtract( x_asarr[1:], x_asarr[0:-1], out = out[0:-1,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] ) + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = out[-1,:] ) else: raise ValueError('No valid boundary conditions') @@ -92,35 +87,35 @@ class FiniteDiff(LinearOperator): if self.direction == 0: - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = out[0:-1,:,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] ) + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = out[-1,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 1: - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] ) + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = out[:,0:-1,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = out[:,-1,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 2: - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] ) + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = out[:,:,0:-1] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( 
x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = out[:,:,-1] ) else: raise ValueError('No valid boundary conditions') @@ -128,42 +123,42 @@ class FiniteDiff(LinearOperator): elif x_sz == 4: if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = out[0:-1,:,:,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] ) + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = out[-1,:,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] ) + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = out[:,0:-1,:,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = out[:,-1,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] ) + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = out[:,:,0:-1,:] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] ) + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = out[:,:,-1,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] ) + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = out[:,:,:,0:-1] ) if self.bnd_cond == 'Neumann': pass elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = out[:,:,:,-1] ) else: raise ValueError('No valid boundary conditions') @@ -177,14 +172,18 @@ class FiniteDiff(LinearOperator): def adjoint(self, x, out=None): x_asarr = x.as_array() - #x_asarr = x x_sz = len(x.shape) if out is None: out = np.zeros_like(x_asarr) - fd_arr = out else: - fd_arr = out.as_array() + out = out.as_array() + +# if out is None: +# out = np.zeros_like(x_asarr) +# fd_arr = out +# else: +# fd_arr = out.as_array() # if out is None: # out = self.gm_domain.allocate().as_array() @@ -198,28 +197,28 @@ class FiniteDiff(LinearOperator): if self.direction == 1: - np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = out[:,1:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) - np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) + np.subtract( x_asarr[:,0], 0, out = out[:,0] ) + np.subtract( -x_asarr[:,-2], 0, out = out[:,-1] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] ) + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = out[:,0] ) else: raise ValueError('No valid boundary conditions') if self.direction == 0: - np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) + np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = out[1:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) - np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] ) + np.subtract( x_asarr[0,:], 0, out = out[0,:] ) + np.subtract( 
-x_asarr[-2,:], 0, out = out[-1,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] ) + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = out[0,:] ) else: raise ValueError('No valid boundary conditions') @@ -229,35 +228,35 @@ class FiniteDiff(LinearOperator): if self.direction == 0: - np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = out[1:,:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) - np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) + np.subtract( x_asarr[0,:,:], 0, out = out[0,:,:] ) + np.subtract( -x_asarr[-2,:,:], 0, out = out[-1,:,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] ) + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = out[0,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 1: - np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = out[:,1:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) - np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) + np.subtract( x_asarr[:,0,:], 0, out = out[:,0,:] ) + np.subtract( -x_asarr[:,-2,:], 0, out = out[:,-1,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = out[:,0,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 2: - np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = out[:,:,1:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] ) - np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] ) + np.subtract( x_asarr[:,:,0], 0, out = out[:,:,0] ) + np.subtract( -x_asarr[:,:,-2], 0, out = out[:,:,-1] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = out[:,:,0] ) else: raise ValueError('No valid boundary conditions') @@ -265,51 +264,51 @@ class FiniteDiff(LinearOperator): elif x_sz == 4: if self.direction == 0: - np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = out[1:,:,:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) - np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) + np.subtract( x_asarr[0,:,:,:], 0, out = out[0,:,:,:] ) + np.subtract( -x_asarr[-2,:,:,:], 0, out = out[-1,:,:,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = out[0,:,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 1: - np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = out[:,1:,:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) - np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) + np.subtract( x_asarr[:,0,:,:], 0, out = out[:,0,:,:] ) + np.subtract( -x_asarr[:,-2,:,:], 0, out = out[:,-1,:,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,0,:,:], 
x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = out[:,0,:,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 2: - np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = out[:,:,1:,:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] ) - np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] ) + np.subtract( x_asarr[:,:,0,:], 0, out = out[:,:,0,:] ) + np.subtract( -x_asarr[:,:,-2,:], 0, out = out[:,:,-1,:] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = out[:,:,0,:] ) else: raise ValueError('No valid boundary conditions') if self.direction == 3: - np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = out[:,:,:,1:] ) if self.bnd_cond == 'Neumann': - np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] ) - np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] ) + np.subtract( x_asarr[:,:,:,0], 0, out = out[:,:,:,0] ) + np.subtract( -x_asarr[:,:,:,-2], 0, out = out[:,:,:,-1] ) elif self.bnd_cond == 'Periodic': - np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = out[:,:,:,0] ) else: raise ValueError('No valid boundary conditions') @@ -328,8 +327,7 @@ class FiniteDiff(LinearOperator): return self.gm_domain def norm(self): - x0 = self.gm_domain.allocate() - x0.fill( np.random.random_sample(x0.shape) ) + x0 = self.gm_domain.allocate('random_int') self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) return self.s1 @@ -337,23 +335,46 @@ class FiniteDiff(LinearOperator): if __name__ == '__main__': from ccpi.framework import ImageGeometry + import numpy N, M = 2, 3 ig = ImageGeometry(N, M) - FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') + FD = FiniteDiff(ig, direction = 1, bnd_cond = 'Neumann') u = FD.domain_geometry().allocate('random_int') - - + res = FD.domain_geometry().allocate() + res1 = FD.range_geometry().allocate() FD.direct(u, out=res) - print(res.as_array()) -# z = FD.direct(u) - + + z = FD.direct(u) # print(z.as_array(), res.as_array()) + for i in range(10): +# + z1 = FD.direct(u) + FD.direct(u, out=res) + + u = ig.allocate('random_int') + res = u + z1 = u + numpy.testing.assert_array_almost_equal(z1.as_array(), \ + res.as_array(), decimal=4) + +# print(z1.as_array(), res.as_array()) + z2 = FD.adjoint(z1) + FD.adjoint(z1, out=res1) + numpy.testing.assert_array_almost_equal(z2.as_array(), \ + res1.as_array(), decimal=4) + + + + + + + # w = G.range_geometry().allocate('random_int') diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 9c573cb..e535847 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -49,35 +49,32 @@ class Gradient(LinearOperator): if out is not None: + for i in range(self.gm_range.shape[0]): - self.FD.direction=self.ind[i] + self.FD.direction = self.ind[i] self.FD.direct(x, out = out[i]) -# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x, out=out[i]) - return out else: tmp = self.gm_range.allocate() for i in range(tmp.shape[0]): 
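# Hedged aside, not part of this hunk: Gradient stacks one FiniteDiff per
# axis into a BlockDataContainer, so for a 2D ImageGeometry, assuming the
# names used in this repository,
#     G = Gradient(ig)
#     u = ig.allocate('random_int')
#     v = G.direct(u)                    # BlockDataContainer, one entry per axis
#     dy, dx = v.get_item(0), v.get_item(1)
# and adjoint() accumulates the per-direction adjoints back into one image.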
self.FD.direction=self.ind[i] tmp.get_item(i).fill(self.FD.direct(x)) -# tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x)) return tmp def adjoint(self, x, out=None): if out is not None: - tmp = self.gm_domain.allocate() + tmp = self.gm_domain.allocate() + for i in range(x.shape[0]): self.FD.direction=self.ind[i] self.FD.adjoint(x.get_item(i), out = tmp) -# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) out+=tmp else: tmp = self.gm_domain.allocate() for i in range(x.shape[0]): self.FD.direction=self.ind[i] tmp+=self.FD.adjoint(x.get_item(i)) -# tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) return tmp @@ -96,7 +93,9 @@ class Gradient(LinearOperator): def __rmul__(self, scalar): return ScaledOperator(self, scalar) - + ########################################################################### + ############### For preconditioning ###################################### + ########################################################################### def matrix(self): tmp = self.gm_range.allocate() @@ -238,21 +237,4 @@ if __name__ == '__main__': -# -# -# res = G.range_geometry().allocate() -## -# G.direct(u, out=res) -# z = G.direct(u) -## -# print(res[0].as_array()) -# print(z[0].as_array()) -# - - - - -## LHS = (G.direct(u)*w).sum() -## RHS = (u * G.adjoint(w)).sum() - -# + diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index feb09ee..d0cb198 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -58,10 +58,10 @@ if method == '0': #### Create functions - f1 = alpha * MixedL21Norm() - f2 = 0.5 * L2NormSquared(b = noisy_data) - - f = BlockFunction(f1, f2 ) + f1 = MixedL21Norm() + f2 = L2NormSquared(b = noisy_data) + f = BlockFunction(f1, f2) + g = ZeroFun() else: @@ -79,6 +79,7 @@ else: # Compute operator Norm normK = operator.norm() print ("normK", normK) + # Primal & dual stepsizes sigma = 1 tau = 1/(sigma*normK**2) @@ -86,9 +87,12 @@ tau = 1/(sigma*normK**2) opt = {'niter':100} opt1 = {'niter':100, 'memopt': True} -res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +print("with memopt \n") + +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) + plt.figure(figsize=(5,5)) plt.imshow(res.as_array()) plt.colorbar() diff --git a/Wrappers/Python/wip/test_profile.py b/Wrappers/Python/wip/test_profile.py index a97ad8d..f14c0c3 100644 --- a/Wrappers/Python/wip/test_profile.py +++ b/Wrappers/Python/wip/test_profile.py @@ -10,58 +10,75 @@ Created on Mon Apr 8 13:57:46 2019 from ccpi.framework import ImageGeometry from ccpi.optimisation.operators import Gradient, BlockOperator, Identity +from ccpi.optimisation.functions import MixedL21Norm, L2NormSquared, BlockFunction +import numpy -N, M, K = 200, 300, 100 +N, M, K = 2, 3, 2 -ig = ImageGeometry(N, M, K) +ig = ImageGeometry(N, M) +b = ig.allocate('random_int') G = Gradient(ig) Id = Identity(ig) -u = G.domain_geometry().allocate('random_int') -w = G.range_geometry().allocate('random_int') - - -res = G.range_geometry().allocate() -res1 = G.domain_geometry().allocate() -# -# -#LHS = (G.direct(u)*w).sum() -#RHS = (u * G.adjoint(w)).sum() -# -#print(G.norm()) -#print(LHS, RHS) -# -##%%%re -## 
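# Hedged sketch, not part of the patch: the commented-out lines here check the
# adjoint identity <G u, w> = <u, G^T w>, the usual sanity test for a
# LinearOperator. Using the Gradient G defined above:
#     u = G.domain_geometry().allocate('random_int')
#     w = G.range_geometry().allocate('random_int')
#     lhs = (G.direct(u) * w).sum()
#     rhs = (u * G.adjoint(w)).sum()
#     assert abs(lhs - rhs) < 1e-6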
-#G.direct(u, out=res) -#G.adjoint(w, out=res1) -## -#LHS1 = (res * w).sum() -#RHS1 = (u * res1).sum() -## -#print(LHS1, RHS1) - -B = BlockOperator(2*G, 3*Id) -uB = B.domain_geometry().allocate('random_int') -resB = B.range_geometry().allocate() - -#z2 = B.direct(uB) -#B.direct(uB, out = resB) - -#%% +#operator = BlockOperator(G, Id) +operator = G + +f1 = MixedL21Norm() +f2 = L2NormSquared(b = b) + +f = BlockFunction( f1, f2) + + +x_old = operator.domain_geometry().allocate() +y_old = operator.range_geometry().allocate('random_int') + + +xbar = operator.domain_geometry().allocate('random_int') + +x_tmp = x_old.copy() +x = x_old.copy() + +y_tmp = operator.range_geometry().allocate() +y = y_old.copy() + +y1 = y.copy() + +sigma = 20 for i in range(100): -# -# z2 = B.direct(uB) -# - B.direct(uB, out = resB) -# z1 = G.adjoint(w) -# z = G.direct(u) + operator.direct(xbar, out = y_tmp) + y_tmp *= sigma + y_tmp += y_old + + + y_tmp1 = sigma * operator.direct(xbar) + y_old + + print(i) + print(" y_old :", y_old[0].as_array(), "\n") + print(" y_tmp[0] :", y_tmp[0].as_array(),"\n") + print(" y_tmp1[0] :", y_tmp1[0].as_array()) + + + numpy.testing.assert_array_equal(y_tmp[0].as_array(), \ + y_tmp1[0].as_array()) + + numpy.testing.assert_array_equal(y_tmp[1].as_array(), \ + y_tmp1[1].as_array()) + + + y1 = f.proximal_conjugate(y_tmp1, sigma) + f.proximal_conjugate(y_tmp, sigma, y) + + + + + + + + -# G.adjoint(w, out=res1) -# G.direct(u, out=res) \ No newline at end of file -- cgit v1.2.3 From 3ff8a543fb4ef59179ce3490bc28b8f61bf979ac Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:02:02 +0100 Subject: fix PDHG optimised --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 74 +++++++++++++++------- 1 file changed, 52 insertions(+), 22 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 0b9921c..086e322 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -45,36 +45,66 @@ class PDHG(Algorithm): self.y_old = self.operator.range_geometry().allocate() self.xbar = self.x_old.copy() - #x_tmp = x_old + self.x = self.x_old.copy() self.y = self.y_old.copy() - #y_tmp = y_old + if self.memopt: + self.y_tmp = self.y_old.copy() + self.x_tmp = self.x_old.copy() #y = y_tmp # relaxation parameter self.theta = 1 def update(self): - # Gradient descent, Dual problem solution - self.y_old += self.sigma * self.operator.direct(self.xbar) - self.y = self.f.proximal_conjugate(self.y_old, self.sigma) - - # Gradient ascent, Primal problem solution - self.x_old -= self.tau * self.operator.adjoint(self.y) - self.x = self.g.proximal(self.x_old, self.tau) - - #Update - #xbar = x + theta * (x - x_old) - self.xbar.fill(self.x) - self.xbar -= self.x_old - self.xbar *= self.theta - self.xbar += self.x - -# self.x_old.fill(self.x) -# self.y_old.fill(self.y) - self.y_old = self.y.copy() - self.x_old = self.x.copy() - #self.y = self.y_old + if self.memopt: + # Gradient descent, Dual problem solution + # self.y_old += self.sigma * self.operator.direct(self.xbar) + self.operator.direct(self.xbar, out=self.y_tmp) + self.y_tmp *= self.sigma + self.y_old += self.y_tmp + + #self.y = self.f.proximal_conjugate(self.y_old, self.sigma) + self.f.proximal_conjugate(self.y_old, self.sigma, out=self.y) + + # Gradient ascent, Primal problem solution + self.operator.adjoint(self.y, out=self.x_tmp) + self.x_tmp *= self.tau + self.x_old -= self.x_tmp 
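# Hedged aside, not part of the commit: the in-place sequence above computes
# the primal argument x_old <- x_old - tau * K^T y without allocating new
# containers. The same iteration in its allocating form, as in the
# non-memopt branch below, reads
#     y    = f.proximal_conjugate(y_old + sigma * operator.direct(xbar), sigma)
#     x    = g.proximal(x_old - tau * operator.adjoint(y), tau)
#     xbar = x + theta * (x - x_old)
# which the out= / in-place variants reproduce term by term.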
+ + self.g.proximal(self.x_old, self.tau, out=self.x) + + #Update + self.x.subtract(self.x_old, out=self.xbar) + #self.xbar -= self.x_old + self.xbar *= self.theta + self.xbar += self.x + + self.x_old.fill(self.x) + self.y_old.fill(self.y) + #self.y_old = self.y.copy() + #self.x_old = self.x.copy() + else: + # Gradient descent, Dual problem solution + self.y_old += self.sigma * self.operator.direct(self.xbar) + self.y = self.f.proximal_conjugate(self.y_old, self.sigma) + + # Gradient ascent, Primal problem solution + self.x_old -= self.tau * self.operator.adjoint(self.y) + self.x = self.g.proximal(self.x_old, self.tau) + + #Update + #xbar = x + theta * (x - x_old) + self.xbar.fill(self.x) + self.xbar -= self.x_old + self.xbar *= self.theta + self.xbar += self.x + + self.x_old.fill(self.x) + self.y_old.fill(self.y) + #self.y_old = self.y.copy() + #self.x_old = self.x.copy() + #self.y = self.y_old def update_objective(self): p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) -- cgit v1.2.3 From b9cf8208aa69eaa40e74d8f1d36c455f1f6c9548 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 11 Apr 2019 12:03:11 +0100 Subject: memopt test --- .../ccpi/optimisation/functions/ScaledFunction.py | 6 +- .../operators/FiniteDifferenceOperator_old.py | 374 +++++++++++++++++++++ 2 files changed, 377 insertions(+), 3 deletions(-) create mode 100644 Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 9fcd4fc..c3d5ab9 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -59,10 +59,10 @@ class ScaledFunction(object): '''This returns the proximal operator for the function at x, tau ''' if out is None: - return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) + return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) else: - self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out = out) - out *= self.scalar + out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + def grad(self, x): '''Alias of gradient(x,None)''' diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py new file mode 100644 index 0000000..387fb4b --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py @@ -0,0 +1,374 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Mar 1 22:51:17 2019 + +@author: evangelos +""" + +from ccpi.optimisation.operators import LinearOperator +from ccpi.optimisation.ops import PowerMethodNonsquare +from ccpi.framework import ImageData, BlockDataContainer +import numpy as np + +class FiniteDiff(LinearOperator): + + # Works for Neum/Symmetric & periodic boundary conditions + # TODO add central differences??? 
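# Hedged illustration, not part of this file: with the 'Neumann' boundary
# condition used throughout, the forward difference along an axis is
#     (D u)[i] = u[i+1] - u[i]  for i < n-1,   (D u)[n-1] = 0
# which in plain numpy (illustrative values) is
#     u = np.array([1., 3., 6.])
#     d = np.zeros_like(u)
#     np.subtract(u[1:], u[:-1], out=d[:-1])   # d == [2., 3., 0.]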
+ # TODO not very well optimised, too many conditions + # TODO add discretisation step, should get that from imageGeometry + + # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] + # Grad_order = ['channels', 'direction_y', 'direction_x'] + # Grad_order = ['direction_z', 'direction_y', 'direction_x'] + # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] + + def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): + '''''' + super(FiniteDiff, self).__init__() + '''FIXME: domain and range should be geometries''' + self.gm_domain = gm_domain + self.gm_range = gm_range + + self.direction = direction + self.bnd_cond = bnd_cond + + # Domain Geometry = Range Geometry if not stated + if self.gm_range is None: + self.gm_range = self.gm_domain + # check direction and "length" of geometry + if self.direction + 1 > len(self.gm_domain.shape): + raise ValueError('Gradient directions more than geometry domain') + + #self.voxel_size = kwargs.get('voxel_size',1) + # this wrongly assumes a homogeneous voxel size + self.voxel_size = self.gm_domain.voxel_size_x + + + def direct(self, x, out=None): + + x_asarr = x.as_array() + x_sz = len(x.shape) + + if out is None: + out = np.zeros_like(x_asarr) + fd_arr = out + else: + fd_arr = out.as_array() +# fd_arr[:]=0 + +# if out is None: +# out = self.gm_domain.allocate().as_array() +# +# fd_arr = out.as_array() +# fd_arr = self.gm_domain.allocate().as_array() + + ######################## Direct for 2D ############################### + if x_sz == 2: + + if self.direction == 1: + + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 0: + + np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Direct for 3D ############################### + elif x_sz == 3: + + if self.direction == 0: + + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + + if self.direction == 2: + + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Direct for 4D ############################### + elif x_sz == 4: + + if self.direction == 0: + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] 
) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,-1,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 3: + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] ) + + if self.bnd_cond == 'Neumann': + pass + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) + else: + raise ValueError('No valid boundary conditions') + + else: + raise NotImplementedError + +# res = out #/self.voxel_size + return type(x)(out) + + + def adjoint(self, x, out=None): + + x_asarr = x.as_array() + #x_asarr = x + x_sz = len(x.shape) + + if out is None: + out = np.zeros_like(x_asarr) + fd_arr = out + else: + fd_arr = out.as_array() + +# if out is None: +# out = self.gm_domain.allocate().as_array() +# fd_arr = out +# else: +# fd_arr = out.as_array() +## fd_arr = self.gm_domain.allocate().as_array() + + ######################## Adjoint for 2D ############################### + if x_sz == 2: + + if self.direction == 1: + + np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) + np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] ) + + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 0: + + np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) + np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] ) + + else: + raise ValueError('No valid boundary conditions') + + ######################## Adjoint for 3D ############################### + elif x_sz == 3: + + if self.direction == 0: + + np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) + np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) + np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] ) + np.subtract( -x_asarr[:,:,-2], 0, 
out = fd_arr[:,:,-1] ) + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) + else: + raise ValueError('No valid boundary conditions') + + ######################## Adjoint for 4D ############################### + elif x_sz == 4: + + if self.direction == 0: + np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) + np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 1: + np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) + np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) + else: + raise ValueError('No valid boundary conditions') + + + if self.direction == 2: + np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] ) + np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) + else: + raise ValueError('No valid boundary conditions') + + if self.direction == 3: + np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) + + if self.bnd_cond == 'Neumann': + np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] ) + np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] ) + + elif self.bnd_cond == 'Periodic': + np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) + else: + raise ValueError('No valid boundary conditions') + + else: + raise NotImplementedError + + out *= -1 #/self.voxel_size + return type(x)(out) + + def range_geometry(self): + '''Returns the range geometry''' + return self.gm_range + + def domain_geometry(self): + '''Returns the domain geometry''' + return self.gm_domain + + def norm(self): + x0 = self.gm_domain.allocate() + x0.fill( np.random.random_sample(x0.shape) ) + self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) + return self.s1 + + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry + import numpy + + N, M = 2, 3 + + ig = ImageGeometry(N, M) + + + FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') + u = FD.domain_geometry().allocate('random_int') + + + res = FD.domain_geometry().allocate() + FD.direct(u, out=res) + + z = FD.direct(u) + print(z.as_array(), res.as_array()) + + for i in range(10): + + z1 = FD.direct(u) + FD.direct(u, out=res) + numpy.testing.assert_array_almost_equal(z1.as_array(), \ + res.as_array(), decimal=4) + + + + + + +# w = G.range_geometry().allocate('random_int') + + + + \ No newline at end of file -- cgit v1.2.3 From 2415b5a334d3bdf87fec47ca3ec290a6602ee13c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:03:45 +0100 Subject: set to zero the output variable on input if memopt --- .../Python/ccpi/optimisation/operators/GradientOperator.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py 
b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 9c639df..d655653 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -65,19 +65,19 @@ class Gradient(LinearOperator): def adjoint(self, x, out=None): if out is not None: - tmp = self.gm_domain.allocate() for i in range(x.shape[0]): self.FD.direction=self.ind[i] self.FD.adjoint(x.get_item(i), out = tmp) -# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) - out+=tmp + if i == 0: + out.fill(tmp) + else: + out += tmp else: tmp = self.gm_domain.allocate() for i in range(x.shape[0]): self.FD.direction=self.ind[i] - tmp+=self.FD.adjoint(x.get_item(i)) -# tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) + tmp += self.FD.adjoint(x.get_item(i)) return tmp -- cgit v1.2.3 From 362e786b86a1ae8c7b1e88b45fc553e8f57b7dfa Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:04:48 +0100 Subject: output variable is required to contain zeros on start --- .../Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index db9f09d..954f022 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -186,7 +186,9 @@ class FiniteDiff(LinearOperator): out = np.zeros_like(x_asarr) fd_arr = out else: - fd_arr = out.as_array() + #out *= 0 + fd_arr = out.as_array() + fd_arr[:] = 0 # if out is None: # out = self.gm_domain.allocate().as_array() -- cgit v1.2.3 From a7bb88da8e8d4e94a3dbeb04f95928cb7d1fbd48 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:07:36 +0100 Subject: updated tests --- Wrappers/Python/test/test_BlockDataContainer.py | 51 ++++++++++- Wrappers/Python/test/test_Operator.py | 108 ++++++++++++++++++++---- Wrappers/Python/test/test_functions.py | 108 +++++++++++++++++++++++- Wrappers/Python/wip/pdhg_TV_denoising.py | 88 +++++++++++++------ 4 files changed, 304 insertions(+), 51 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 2ee0e94..2fca23c 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -14,7 +14,7 @@ from ccpi.optimisation.funcs import Norm2sq, Norm1 from ccpi.framework import ImageGeometry, AcquisitionGeometry from ccpi.framework import ImageData, AcquisitionData #from ccpi.optimisation.algorithms import GradientDescent -from ccpi.framework import BlockDataContainer +from ccpi.framework import BlockDataContainer, DataContainer #from ccpi.optimisation.Algorithms import CGLS import functools @@ -402,3 +402,52 @@ class TestBlockDataContainer(unittest.TestCase): c5 = d.get_item(0).power(2).sum() + def test_BlockDataContainer_fill(self): + print ("test block data container") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(2,3,5) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + #cp1 = BlockDataContainer(data2,data3) + + 
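# Hedged aside, not part of this test: the fill() fix being exercised below
# copies a compatible BlockDataContainer element by element,
#     cp0.fill(cp2)   # each cp0.get_item(i) now equals cp2.get_item(i)
# and raises ValueError('Incompatible containers') when the shapes differ.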
cp2 = BlockDataContainer(data0+1, data1+1) + + data0.fill(data2) + self.assertNumpyArrayEqual(data0.as_array(), data2.as_array()) + data0 = ImageData(geometry=ig0) + + for el,ot in zip(cp0, cp2): + print (el.shape, ot.shape) + cp0.fill(cp2) + self.assertBlockDataContainerEqual(cp0, cp2) + + + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py index 6656d34..293fb43 100644 --- a/Wrappers/Python/test/test_Operator.py +++ b/Wrappers/Python/test/test_Operator.py @@ -2,7 +2,8 @@ import unittest #from ccpi.optimisation.operators import Operator from ccpi.optimisation.ops import TomoIdentity from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer -from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator +from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator,\ + FiniteDiff import numpy from timeit import default_timer as timer from ccpi.framework import ImageGeometry @@ -11,7 +12,43 @@ from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff def dt(steps): return steps[-1] - steps[-2] -class TestOperator(unittest.TestCase): +class CCPiTestClass(unittest.TestCase): + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print(err) + print("expected " , second) + print("actual " , first) + + self.assertTrue(res) + + +class TestOperator(CCPiTestClass): def test_ScaledOperator(self): ig = ImageGeometry(10,20,30) img = ig.allocate() @@ -29,6 +66,40 @@ class TestOperator(unittest.TestCase): y = Id.direct(img) numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + def test_FiniteDifference(self): + ## + N, M = 2, 3 + + ig = ImageGeometry(N, M) + Id = Identity(ig) + + FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') + u = FD.domain_geometry().allocate('random_int') + + + res = FD.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + FD.adjoint(u, out=res) + w = 
FD.adjoint(u) + + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + + res = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + Id.adjoint(u, out=res) + w = Id.adjoint(u) + + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + self.assertNumpyArrayEqual(u.as_array(), w.as_array()) + + G = Gradient(ig) + + u = G.range_geometry().allocate(ImageGeometry.RANDOM_INT) + res = G.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + G.adjoint(u, out=res) + w = G.adjoint(u) + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + + + class TestBlockOperator(unittest.TestCase): @@ -90,22 +161,23 @@ class TestBlockOperator(unittest.TestCase): print (z1.shape) print(z1[0][0].as_array()) print(res[0][0].as_array()) + self.assertBlockDataContainerEqual(z1, res) + # for col in range(z1.shape[0]): + # a = z1.get_item(col) + # b = res.get_item(col) + # if isinstance(a, BlockDataContainer): + # for col2 in range(a.shape[0]): + # self.assertNumpyArrayEqual( + # a.get_item(col2).as_array(), + # b.get_item(col2).as_array() + # ) + # else: + # self.assertNumpyArrayEqual( + # a.as_array(), + # b.as_array() + # ) + z1 = B.range_geometry().allocate(ImageGeometry.RANDOM_INT) - for col in range(z1.shape[0]): - a = z1.get_item(col) - b = res.get_item(col) - if isinstance(a, BlockDataContainer): - for col2 in range(a.shape[0]): - self.assertNumpyArrayEqual( - a.get_item(col2).as_array(), - b.get_item(col2).as_array() - ) - else: - self.assertNumpyArrayEqual( - a.as_array(), - b.as_array() - ) - z1 = B.direct(u) res1 = B.adjoint(z1) res2 = B.domain_geometry().allocate() B.adjoint(z1, out=res2) @@ -264,7 +336,7 @@ class TestBlockOperator(unittest.TestCase): u = ig.allocate('random_int') steps = [timer()] i = 0 - n = 25. + n = 2. t1 = t2 = 0 res = B.range_geometry().allocate() diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 19cb65f..1891afd 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -65,6 +65,9 @@ class TestFunction(unittest.TestCase): a3 = 0.5 * d.squared_norm() + d.dot(noisy_data) self.assertEqual(a3, g.convex_conjugate(d)) #print( a3, g.convex_conjugate(d)) + + #test proximal conjugate + def test_L2NormSquared(self): # TESTS for L2 and scalar * L2 @@ -94,7 +97,7 @@ class TestFunction(unittest.TestCase): c2 = 1/4. * u.squared_norm() numpy.testing.assert_equal(c1, c2) - #check convex conjuagate with data + #check convex conjugate with data d1 = f1.convex_conjugate(u) d2 = (1./4.) * u.squared_norm() + (u*b).sum() numpy.testing.assert_equal(d1, d2) @@ -121,10 +124,9 @@ class TestFunction(unittest.TestCase): l1 = f1.proximal_conjugate(u, tau) l2 = (u - tau * b)/(1 + tau/2 ) numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) - - + # check scaled function properties - + # scalar scalar = 100 f_scaled_no_data = scalar * L2NormSquared() @@ -161,7 +163,105 @@ class TestFunction(unittest.TestCase): numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + def test_L2NormSquaredOut(self): + # TESTS for L2 and scalar * L2 + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate(ImageGeometry.RANDOM_INT) + b = ig.allocate(ImageGeometry.RANDOM_INT) + + # check grad/call no data + f = L2NormSquared() + a1 = f.gradient(u) + a2 = a1 * 0. 
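The closed form asserted above for the proximal map of the conjugate of x -> ||x - b||^2, namely (u - tau*b)/(1 + tau/2), can be checked independently of the library by verifying the first-order optimality condition of the proximal objective. A short NumPy sketch (purely illustrative):

    import numpy as np

    # f*(y) = <y, b> + ||y||^2/4 is the convex conjugate of x -> ||x - b||^2.
    rng = np.random.default_rng(1)
    u, b = rng.normal(size=4), rng.normal(size=4)
    tau = 0.2

    y = (u - tau * b) / (1 + tau / 2)      # the closed form used in the test

    # gradient of (1/(2*tau))*||y - u||^2 + <y, b> + ||y||^2/4 at that point
    grad = (y - u) / tau + b + y / 2
    print(np.allclose(grad, 0))            # True: y is the proximal point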
+ f.gradient(u, out=a2) + numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) + #numpy.testing.assert_equal(f(u), u.squared_norm()) + + # check grad/call with data + f1 = L2NormSquared(b=b) + b1 = f1.gradient(u) + b2 = b1 * 0. + f1.gradient(u, out=b2) + + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + #numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) + + # check proximal no data + tau = 5 + e1 = f.proximal(u, tau) + e2 = e1 * 0. + f.proximal(u, tau, out=e2) + numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) + + # check proximal with data + tau = 5 + h1 = f1.proximal(u, tau) + h2 = h1 * 0. + f1.proximal(u, tau, out=h2) + numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) + + # check proximal conjugate no data + tau = 0.2 + k1 = f.proximal_conjugate(u, tau) + k2 = k1 * 0. + f.proximal_conjugate(u, tau, out=k2) + + numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) + + # check proximal conjugate with data + l1 = f1.proximal_conjugate(u, tau) + l2 = l1 * 0. + f1.proximal_conjugate(u, tau, out=l2) + numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) + + # check scaled function properties + + # scalar + scalar = 100 + f_scaled_no_data = scalar * L2NormSquared() + f_scaled_data = scalar * L2NormSquared(b=b) + + # grad + w = f_scaled_no_data.gradient(u) + ww = w * 0 + f_scaled_no_data.gradient(u, out=ww) + + numpy.testing.assert_array_almost_equal(w.as_array(), + ww.as_array(), decimal=4) + + # numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) + + # # conj + # numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ + # f.convex_conjugate(u/scalar) * scalar, decimal=4) + + # numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ + # scalar * f1.convex_conjugate(u/scalar), decimal=4) + + # # proximal + w = f_scaled_no_data.proximal(u, tau) + ww = w * 0 + f_scaled_no_data.proximal(u, tau, out=ww) + numpy.testing.assert_array_almost_equal(w.as_array(), \ + ww.as_array()) + + + # numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ + # f1.proximal(u, tau*scalar).as_array()) + + + # proximal conjugate + w = f_scaled_no_data.proximal_conjugate(u, tau) + ww = w * 0 + f_scaled_no_data.proximal_conjugate(u, tau, out=ww) + numpy.testing.assert_array_almost_equal(w.as_array(), \ + ww.as_array(), decimal=4) + # numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ + # ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + def test_Norm2sq_as_FunctionOperatorComposition(self): M, N, K = 2,3,5 ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index d871ba0..f569fa7 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -19,12 +19,15 @@ from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ from skimage.util import random_noise +from timeit import default_timer as timer +def dt(steps): + return steps[-1] - steps[-2] # ############################################################################ # Create phantom for TV denoising -N = 200 +N = 512 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 
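test_L2NormSquaredOut and test_FiniteDifference both rely on the same idiom: run the operation twice, once allocating a fresh result and once writing into a preallocated out buffer, then require the two arrays to agree. Reduced to a toy operator (purely illustrative, not the library API):

    import numpy as np

    def check_out_matches_return(op, x, out):
        # The out= path must reproduce the allocating path to precision.
        returned = op(x)
        op(x, out=out)
        np.testing.assert_array_almost_equal(returned, out, decimal=4)

    def double(x, out=None):
        # toy stand-in for gradient/proximal calls with an optional buffer
        if out is None:
            return 2 * x
        np.multiply(x, 2, out=out)
        return out

    x = np.arange(5, dtype=float)
    check_out_matches_return(double, x, np.empty_like(x))
    print("out= path agrees with the allocating path")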
data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -36,8 +39,8 @@ ag = ig n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) noisy_data = ImageData(n1) -plt.imshow(noisy_data.as_array()) -plt.show() +#plt.imshow(noisy_data.as_array()) +#plt.show() #%% @@ -45,7 +48,7 @@ plt.show() alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' +method = '0' if method == '0': @@ -83,34 +86,63 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -opt = {'niter':2000} +# opt = {'niter':2000, 'memopt': True} -res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -plt.figure(figsize=(5,5)) -plt.imshow(res.as_array()) -plt.colorbar() -plt.show() -#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -#pdhg.max_iteration = 2000 -#pdhg.update_objective_interval = 10 -# -#pdhg.run(2000) -# -# + +# opt = {'niter':2000, 'memopt': False} +# res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +# plt.figure(figsize=(5,5)) +# plt.subplot(1,3,1) +# plt.imshow(res.as_array()) +# plt.title('memopt') +# plt.colorbar() +# plt.subplot(1,3,2) +# plt.imshow(res1.as_array()) +# plt.title('no memopt') +# plt.colorbar() +# plt.subplot(1,3,3) +# plt.imshow((res1 - res).abs().as_array()) +# plt.title('diff') +# plt.colorbar() +# plt.show() +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 2000 +pdhg.update_objective_interval = 100 + + +pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +pdhgo.max_iteration = 2000 +pdhgo.update_objective_interval = 100 + +steps = [timer()] +pdhgo.run(200) +steps.append(timer()) +t1 = dt(steps) + +pdhg.run(200) +steps.append(timer()) +t2 = dt(steps) + +print ("Time difference {} {} {}".format(t1,t2,t2-t1)) +sol = pdhg.get_output().as_array() +#sol = result.as_array() # -#sol = pdhg.get_output().as_array() -##sol = result.as_array() -## -#fig = plt.figure() -#plt.subplot(1,2,1) -#plt.imshow(noisy_data.as_array()) -##plt.colorbar() -#plt.subplot(1,2,2) -#plt.imshow(sol) -##plt.colorbar() -#plt.show() +fig = plt.figure() +plt.subplot(1,3,1) +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.subplot(1,3,2) +plt.imshow(sol) +plt.colorbar() +plt.subplot(1,3,3) +plt.imshow(pdhgo.get_output().as_array()) +plt.colorbar() + +plt.show() ## # ### -- cgit v1.2.3 From 877c91a73833313885daae2ae8e73a7b5b0a9950 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 11 Apr 2019 12:08:18 +0100 Subject: memopt test --- Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index d07005a..ac37e13 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -161,7 +161,6 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): else: - operator.direct(xbar, out = y_tmp) y_tmp *= sigma y_tmp += y_old @@ -178,8 +177,8 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): xbar += x - x_old = x.copy() - y_old = y.copy() + x_old.fill(x) + y_old.fill(y) # pass -- cgit v1.2.3 From 1dec48d390df5cbc3436832cedf559b64f4651bc Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: 
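Replacing x_old = x.copy() with x_old.fill(x) in PDHG_old is the core of the memopt option: every iteration reuses preallocated containers instead of allocating new ones. A rough NumPy analogue of the two styles (np.copyto plays the role of fill; absolute timings will vary):

    import numpy as np
    from timeit import default_timer as timer

    n, iters = 1_000_000, 50
    x_old = np.random.rand(n)

    t0 = timer()
    for _ in range(iters):
        x = x_old + 0.1              # new array every iteration
        x_old = x.copy()             # and another one here
    t_alloc = timer() - t0

    x_old = np.random.rand(n)
    x = np.empty_like(x_old)
    t0 = timer()
    for _ in range(iters):
        np.add(x_old, 0.1, out=x)    # update written into a reused buffer
        np.copyto(x_old, x)          # analogue of x_old.fill(x)
    t_reuse = timer() - t0

    print(t_alloc, t_reuse)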
Thu, 11 Apr 2019 15:43:31 +0100 Subject: fix out call --- .../ccpi/optimisation/functions/MixedL21Norm.py | 58 +++++++++++++--------- 1 file changed, 34 insertions(+), 24 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index ed1d5e5..c6b6e95 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -21,6 +21,7 @@ import numpy as np from ccpi.optimisation.functions import Function, ScaledFunction from ccpi.framework import DataContainer, ImageData, \ ImageGeometry, BlockDataContainer +import functools ############################ mixed_L1,2NORM FUNCTIONS ##################### class MixedL21Norm(Function): @@ -36,7 +37,9 @@ class MixedL21Norm(Function): :param: x is a BlockDataContainer - ''' + ''' + if not isinstance(x, BlockDataContainer): + raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x))) if self.SymTensor: param = [1]*x.shape[0] @@ -73,7 +76,6 @@ class MixedL21Norm(Function): different form L2NormSquared which acts on DC ''' - pass def proximal_conjugate(self, x, tau, out=None): @@ -88,29 +90,37 @@ class MixedL21Norm(Function): return res else: -# pass + if out is None: + tmp = [ el*el for el in x.containers] + res = sum(tmp).sqrt().maximum(1.0) + frac = [el/res for el in x.containers] + res = BlockDataContainer(*frac) + return res + else: + res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) + res = res1.sqrt().maximum(1.0) + + if False: + # works but not memory efficient as allocating a new BlockDataContainer + a = x / res + out.fill(a) + elif False: + # this leads to error +# File "ccpi\framework\BlockDataContainer.py", line 142, in divide +# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# File "ccpi\framework\BlockDataContainer.py", line 142, in +# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# File "ccpi\framework\framework.py", line 814, in divide +# return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) +# File "ccpi\framework\framework.py", line 802, in pixel_wise_binary +# raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) +# ValueError: ImageData: incompatible class: true_divide + x.divide(res, out=out) + else: + for i,el in enumerate(x.containers): + #a = out.get_item(i) + el.divide(res, out=out.get_item(i)) - -# # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# # res = x.divide(ImageData(tmp2).maximum(1.0)) -# if out is None: - - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res - # else: - # tmp = [ el*el for el in x] - # res = (sum(tmp).sqrt()).maximum(1.0) - # #frac = [x[i]/res for i in range(x.shape[0])] - # for i in range(x.shape[0]): - # a = out.get_item(i) - # b = x.get_item(i) - # b /= res - # a.fill( b ) - def __rmul__(self, scalar): return ScaledFunction(self, scalar) -- cgit v1.2.3 From e80f8c108871245f06dc3e570502e95a4acba64b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 15:43:54 +0100 Subject: clean code closes #240 --- .../ccpi/optimisation/functions/MixedL21Norm.py | 23 ++-------------------- 1 file changed, 2 insertions(+), 21 deletions(-) (limited to 
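The out branch added to MixedL21Norm.proximal_conjugate computes, per pixel, the Euclidean magnitude across the gradient components and divides each component by max(1, magnitude), i.e. a pointwise projection onto the unit 2-ball. The same operation on bare NumPy arrays (function name illustrative):

    import numpy as np

    def project_pointwise_unit_ball(components):
        # components: list of arrays, one per gradient direction
        mag = np.sqrt(sum(c * c for c in components))   # pointwise |y|
        scale = np.maximum(mag, 1.0)                     # max(1, |y|)
        return [c / scale for c in components]

    gx = np.array([[0.3, 2.0], [-1.5, 0.1]])
    gy = np.array([[0.4, 0.0], [ 2.0, 0.1]])
    px, py = project_pointwise_unit_ball([gx, gy])
    print(np.sqrt(px**2 + py**2))   # every entry is now <= 1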
'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index c6b6e95..a655e03 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -99,27 +99,8 @@ class MixedL21Norm(Function): else: res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) res = res1.sqrt().maximum(1.0) - - if False: - # works but not memory efficient as allocating a new BlockDataContainer - a = x / res - out.fill(a) - elif False: - # this leads to error -# File "ccpi\framework\BlockDataContainer.py", line 142, in divide -# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) -# File "ccpi\framework\BlockDataContainer.py", line 142, in -# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) -# File "ccpi\framework\framework.py", line 814, in divide -# return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) -# File "ccpi\framework\framework.py", line 802, in pixel_wise_binary -# raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) -# ValueError: ImageData: incompatible class: true_divide - x.divide(res, out=out) - else: - for i,el in enumerate(x.containers): - #a = out.get_item(i) - el.divide(res, out=out.get_item(i)) + for i,el in enumerate(x.containers): + el.divide(res, out=out.get_item(i)) def __rmul__(self, scalar): return ScaledFunction(self, scalar) -- cgit v1.2.3 From 350a889c38805dcda98a299315af1ab64510fa5b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 15:45:56 +0100 Subject: add test for mixed L21 Norm --- Wrappers/Python/test/test_functions.py | 70 ++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 1891afd..bc1f034 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -20,7 +20,7 @@ from ccpi.optimisation.operators import Gradient #from ccpi.optimisation.functions import SimpleL2NormSq from ccpi.optimisation.functions import L2NormSquared #from ccpi.optimisation.functions import SimpleL1Norm -from ccpi.optimisation.functions import L1Norm +from ccpi.optimisation.functions import L1Norm, MixedL21Norm from ccpi.optimisation.funcs import Norm2sq # from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq @@ -29,13 +29,46 @@ from ccpi.optimisation.funcs import Norm2sq from ccpi.optimisation.functions import ZeroFun from ccpi.optimisation.functions import FunctionOperatorComposition -import unittest -import numpy +import unittest +import numpy # class TestFunction(unittest.TestCase): + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except 
AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print(err) + print("expected " , second) + print("actual " , first) + + self.assertTrue(res) def test_Function(self): @@ -280,8 +313,37 @@ class TestFunction(unittest.TestCase): ynew = new_chisq.gradient(u) numpy.testing.assert_array_equal(yold.as_array(), ynew.as_array()) + def test_mixedL12Norm(self): + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) + u1 = ig.allocate('random_int') + u2 = ig.allocate('random_int') + + U = BlockDataContainer(u1, u2, shape=(2,1)) + + # Define no scale and scaled + f_no_scaled = MixedL21Norm() + #f_scaled = 0.5 * MixedL21Norm() + + # call + + # a1 = f_no_scaled(U) + # a2 = f_scaled(U) + # self.assertBlockDataContainerEqual(a1,a2) + tmp = [ el**2 for el in U.containers ] + self.assertBlockDataContainerEqual(BlockDataContainer(*tmp), + U.power(2)) + + z1 = f_no_scaled.proximal_conjugate(U, 1) + u3 = ig.allocate('random_int') + u4 = ig.allocate('random_int') + + z3 = BlockDataContainer(u3, u4, shape=(2,1)) + + + f_no_scaled.proximal_conjugate(U, 1, out=z3) + self.assertBlockDataContainerEqual(z3,z1) - # # f1 = L2NormSq(alpha=1, b=noisy_data) # print(f1(noisy_data)) -- cgit v1.2.3 From 6ce64e15b13cf7c6ae55cf9bc891980679268ac4 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 15:49:24 +0100 Subject: minor code beautification --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 25 ++++++++++------------ 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 086e322..2ac3eba 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -66,24 +66,22 @@ class PDHG(Algorithm): #self.y = self.f.proximal_conjugate(self.y_old, self.sigma) self.f.proximal_conjugate(self.y_old, self.sigma, out=self.y) - + # Gradient ascent, Primal problem solution self.operator.adjoint(self.y, out=self.x_tmp) self.x_tmp *= self.tau self.x_old -= self.x_tmp - + self.g.proximal(self.x_old, self.tau, out=self.x) - + #Update self.x.subtract(self.x_old, out=self.xbar) - #self.xbar -= self.x_old self.xbar *= self.theta self.xbar += self.x - + self.x_old.fill(self.x) self.y_old.fill(self.y) - #self.y_old = self.y.copy() - #self.x_old = self.x.copy() + else: # Gradient descent, Dual problem solution self.y_old += self.sigma * self.operator.direct(self.xbar) @@ -92,19 +90,18 @@ class PDHG(Algorithm): # Gradient ascent, Primal problem solution self.x_old -= self.tau * self.operator.adjoint(self.y) self.x = self.g.proximal(self.x_old, self.tau) - + #Update #xbar = x + theta * (x - x_old) self.xbar.fill(self.x) self.xbar -= self.x_old self.xbar *= self.theta self.xbar += self.x - - self.x_old.fill(self.x) - self.y_old.fill(self.y) - #self.y_old = self.y.copy() - #self.x_old = self.x.copy() - #self.y = self.y_old + + #self.x_old.fill(self.x) + #self.y_old.fill(self.y) + self.x_old = self.x + self.y_old = self.y def update_objective(self): p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) -- cgit v1.2.3 From 41b536a3f2a33d5e527d8b7ada63b47b1abbbca8 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 11 Apr 2019 16:55:20 +0100 Subject: changes for memopt option --- 
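The PDHG.update method being tidied here follows the standard primal-dual iteration: a proximal ascent step on the dual variable, a proximal descent step on the primal, then over-relaxation of the primal iterate. A self-contained toy instance, 1-D TV denoising with explicit proximal formulas; the problem, sizes and names are illustrative only, not the library interface:

    import numpy as np

    # min_x  0.5*||x - b||^2 + alpha*||K x||_1,   K = forward difference
    n, alpha = 50, 0.5
    rng = np.random.default_rng(0)
    b = np.concatenate([np.zeros(25), np.ones(25)]) + 0.1 * rng.normal(size=n)

    K = np.zeros((n - 1, n))
    K[np.arange(n - 1), np.arange(n - 1)] = -1.0
    K[np.arange(n - 1), np.arange(1, n)] = 1.0

    L = np.linalg.norm(K, 2)           # operator norm of K
    sigma = 1.0
    tau = 1.0 / (sigma * L ** 2)       # same step-size rule as the scripts above
    theta = 1.0

    x = np.zeros(n)
    xbar = x.copy()
    y = np.zeros(n - 1)
    for _ in range(500):
        # dual ascent + prox of f*: projection onto the l-inf ball of radius alpha
        y = np.clip(y + sigma * (K @ xbar), -alpha, alpha)
        # primal descent + prox of g(x) = 0.5*||x - b||^2
        x_new = (x - tau * (K.T @ y) + tau * b) / (1.0 + tau)
        # over-relaxation, xbar = x + theta*(x - x_old)
        xbar = x_new + theta * (x_new - x)
        x = x_new

    print(np.round(x[20:30], 2))       # roughly a denoised step from 0 to 1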
.../Python/ccpi/framework/BlockDataContainer.py | 35 +++++++++++++++++-- Wrappers/Python/ccpi/framework/framework.py | 2 ++ .../Python/ccpi/optimisation/algorithms/PDHG.py | 22 ++++++------ .../ccpi/optimisation/functions/MixedL21Norm.py | 40 ++++++---------------- Wrappers/Python/wip/pdhg_TV_denoising.py | 13 ++++--- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 31 ++++++++++++++--- 6 files changed, 93 insertions(+), 50 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 69b6931..8934f49 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -53,9 +53,9 @@ class BlockDataContainer(object): def is_compatible(self, other): '''basic check if the size of the 2 objects fit''' - for i in range(len(self.containers)): - if type(self.containers[i])==type(self): - self = self.containers[i] +# for i in range(len(self.containers)): +# if type(self.containers[i])==type(self): +# self = self.containers[i] if isinstance(other, Number): return True @@ -341,3 +341,32 @@ class BlockDataContainer(object): '''Inline truedivision''' return self.__idiv__(other) +if __name__ == '__main__': + + M, N, K = 2,3,5 + from ccpi.framework import ImageGeometry, BlockGeometry, BlockDataContainer + + ig = ImageGeometry(N, M) + u = ig.allocate('random_int') + + BG = BlockGeometry(ig, ig) + U = BG.allocate('random_int') + + U_nested = BlockDataContainer(BlockDataContainer(u, u), u) + + + res1 = U + u + res2 = U_nested + u +# res2 = u + U + + + + + + + + + + + + \ No newline at end of file diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 07c2ead..e03a29c 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -1109,6 +1109,7 @@ class AcquisitionData(DataContainer): class DataProcessor(object): + '''Defines a generic DataContainer processor accepts DataContainer as inputs and @@ -1154,6 +1155,7 @@ class DataProcessor(object): raise NotImplementedError('Implement basic checks for input DataContainer') def get_output(self, out=None): + for k,v in self.__dict__.items(): if v is None and k != 'output': raise ValueError('Key {0} is None'.format(k)) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 2a69857..5323e76 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -161,18 +161,20 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): if not memopt: - y_tmp = y_old + sigma * operator.direct(xbar) - y = f.proximal_conjugate(y_tmp, sigma) + y_old += sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_old, sigma) - x_tmp = x_old - tau * operator.adjoint(y) - x = g.proximal(x_tmp, tau) - - xbar = x + theta * (x - x_old) + x_old -= tau*operator.adjoint(y) + x = g.proximal(x_old, tau) + + xbar.fill(x) + xbar -= x_old + xbar *= theta + xbar += x - x_old = x - y_old = y - - + x_old.fill(x) + y_old.fill(y) + else: operator.direct(xbar, out = y_tmp) diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 0ce7d8a..0c658a4 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -77,9 +77,7 @@ class 
MixedL21Norm(Function): pass def proximal_conjugate(self, x, tau, out=None): - - - + if self.SymTensor: param = [1]*x.shape[0] @@ -91,38 +89,22 @@ class MixedL21Norm(Function): return res else: -# pass - -# # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# # res = x.divide(ImageData(tmp2).maximum(1.0)) + if out is None: - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) - - return res + tmp = [ el*el for el in x.containers] + res = (sum(tmp).sqrt()).maximum(1.0) + return x/res else: - - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - out.fill(x/res) - - - - # tmp = [ el*el for el in x] - # res = (sum(tmp).sqrt()).maximum(1.0) - # #frac = [x[i]/res for i in range(x.shape[0])] - # for i in range(x.shape[0]): - # a = out.get_item(i) - # b = x.get_item(i) - # b /= res - # a.fill( b ) + + res = (sum(x**2).sqrt()).maximum(1.0) + out.fill(x/res) + + + - def __rmul__(self, scalar): return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index 598acb0..22fee90 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -23,11 +23,13 @@ from timeit import default_timer as timer def dt(steps): return steps[-1] - steps[-2] +#%% + # ############################################################################ # Create phantom for TV denoising -N = 100 +N = 200 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 @@ -62,8 +64,8 @@ if method == '0': #### Create functions - f1 = MixedL21Norm() - f2 = L2NormSquared(b = noisy_data) + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) f = BlockFunction(f1, f2) g = ZeroFun() @@ -88,15 +90,18 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -<<<<<<< HEAD opt = {'niter':100} opt1 = {'niter':100, 'memopt': True} +t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +print(timer()-t1) print("with memopt \n") +t2 = timer() res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +print(timer()-t2) plt.figure(figsize=(5,5)) plt.imshow(res.as_array()) diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index f06f166..159f2ea 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -21,6 +21,7 @@ from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ from ccpi.astra.ops import AstraProjectorSimple from skimage.util import random_noise +from timeit import default_timer as timer #%%############################################################################### @@ -118,15 +119,37 @@ else: # #pdhg.run(5000) -opt = {'niter':2000} -# +opt = {'niter':300} +opt1 = {'niter':300, 'memopt': True} + + +t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +print(timer()-t1) plt.figure(figsize=(5,5)) plt.imshow(res.as_array()) plt.colorbar() -plt.show() - +plt.show() + +#%% +print("with memopt \n") +# +t2 = timer() +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +#print(timer()-t2) +# +# +plt.figure(figsize=(5,5)) +plt.imshow(res1.as_array()) +plt.colorbar() +plt.show() +# +#%% +plt.figure(figsize=(5,5)) 
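The scripts above time the memopt and plain paths with default_timer and then compare the two reconstructions element by element. The same check in miniature, with a matrix product standing in for the reconstruction (illustrative only):

    import numpy as np
    from timeit import default_timer as timer

    def run(f):
        # time a callable, return (elapsed seconds, result)
        t0 = timer()
        out = f()
        return timer() - t0, out

    a = np.random.rand(1000, 1000)
    t_plain, r_plain = run(lambda: a @ a)
    t_out, r_out = run(lambda: np.matmul(a, a, out=np.empty_like(a)))

    # the two paths must agree; report times and the largest discrepancy
    print(t_plain, t_out, np.abs(r_plain - r_out).max())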
+plt.imshow(np.abs(res1.as_array()-res.as_array())) +plt.colorbar() +plt.show() #%% #sol = pdhg.get_output().as_array() #fig = plt.figure() -- cgit v1.2.3 From 3c83e80c7a22817c49ad39da07e061f25dd3ac70 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Thu, 11 Apr 2019 16:58:40 +0100 Subject: fix is compatible for nested Block --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 8934f49..9bec1fe 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -70,8 +70,17 @@ class BlockDataContainer(object): return len(self.containers) == len(other) elif isinstance(other, numpy.ndarray): return len(self.containers) == len(other) + elif issubclass(other.__class__, DataContainer): - return self.get_item(0).shape == other.shape + ret = True + for i, el in enumerate(self.containers): + if isinstance(el, BlockDataContainer): + a = el.is_compatible(other) + else: + a = el.shape == other.shape + ret = ret and a + return ret + return len(self.containers) == len(other.containers) def get_item(self, row): @@ -344,7 +353,7 @@ class BlockDataContainer(object): if __name__ == '__main__': M, N, K = 2,3,5 - from ccpi.framework import ImageGeometry, BlockGeometry, BlockDataContainer + from ccpi.framework import ImageGeometry, BlockGeometry ig = ImageGeometry(N, M) u = ig.allocate('random_int') -- cgit v1.2.3 From 10aae87e1416d291906b94927acb4aac5737a44e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 17:13:08 +0100 Subject: fixing algebra with nested block data containers --- .../Python/ccpi/framework/BlockDataContainer.py | 45 +++++++++++++++++++--- 1 file changed, 40 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 13663c2..85cd05a 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -53,9 +53,9 @@ class BlockDataContainer(object): def is_compatible(self, other): '''basic check if the size of the 2 objects fit''' - for i in range(len(self.containers)): - if type(self.containers[i])==type(self): - self = self.containers[i] + #for i in range(len(self.containers)): + # if type(self.containers[i])==type(self): + # self = self.containers[i] if isinstance(other, Number): return True @@ -71,7 +71,16 @@ class BlockDataContainer(object): elif isinstance(other, numpy.ndarray): return len(self.containers) == len(other) elif issubclass(other.__class__, DataContainer): - return self.get_item(0).shape == other.shape + ret = True + for i, el in enumerate(self.containers): + if isinstance(el, BlockDataContainer): + a = el.is_compatible(other) + else: + a = el.shape == other.shape + print ("current element" , el.shape, "other ", other.shape, "same shape" , a) + ret = ret and a + return ret + #return self.get_item(0).shape == other.shape return len(self.containers) == len(other.containers) def get_item(self, row): @@ -139,10 +148,36 @@ class BlockDataContainer(object): return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) elif issubclass(other.__class__, DataContainer): # try to do algebra with one DataContainer. 
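The is_compatible change above lets a whole (possibly nested) block be combined with a single DataContainer by recursing into nested blocks and checking each leaf's shape. The same logic with plain Python lists standing in for BlockDataContainer (illustrative sketch):

    import numpy as np

    def is_compatible(block, other):
        # every leaf of the (possibly nested) block must match other's shape
        ok = True
        for el in block:
            if isinstance(el, list):               # nested block: recurse
                ok = ok and is_compatible(el, other)
            else:
                ok = ok and (el.shape == other.shape)
        return ok

    u = np.zeros((2, 3))
    nested = [[np.ones((2, 3)), np.ones((2, 3))], np.ones((2, 3))]
    print(is_compatible(nested, u))                # True
    print(is_compatible([np.ones((4, 4))], u))     # False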
Will raise error if not compatible - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) + if out is not None: + kw = kwargs.copy() + for i,el in enumerate(self.containers): + kw['out'] = out.get_item(i) + el.divide(other, *args, **kw) + return + else: + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + def binary_operations(self, operation, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for divide') + out = kwargs.get('out', None) + if isinstance(other, Number) or issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + if out is not None: + kw = kwargs.copy() + for i,el in enumerate(self.containers): + kw['out'] = out.get_item(i) + el.divide(other, *args, **kw) + return + else: + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) + def power(self, other, *args, **kwargs): if not self.is_compatible(other): raise ValueError('Incompatible for power') -- cgit v1.2.3 From 4129fe6a81d20b5f7d05554222d3a78f18f014f0 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 11:56:51 +0100 Subject: convert eol to unix --- .../Python/ccpi/framework/BlockDataContainer.py | 827 +++++++++++---------- 1 file changed, 450 insertions(+), 377 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 85cd05a..fee0cda 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -1,377 +1,450 @@ - # -*- coding: utf-8 -*- -""" -Created on Tue Mar 5 16:04:45 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import DataContainer -#from ccpi.framework import AcquisitionData, ImageData -#from ccpi.optimisation.operators import Operator, LinearOperator - -class BlockDataContainer(object): - '''Class to hold DataContainers as column vector''' - __array_priority__ = 1 - def __init__(self, *args, **kwargs): - '''''' - self.containers = args - self.index = 0 - shape = kwargs.get('shape', None) - if shape is None: - shape = (len(args),1) -# shape = (len(args),1) - self.shape = shape - - n_elements = functools.reduce(lambda x,y: x*y, shape, 1) - if len(args) != n_elements: - raise ValueError( - 'Dimension and size do not match: expected {} got {}' - .format(n_elements, len(args))) - - - def __iter__(self): - '''BlockDataContainer is Iterable''' - return self - def next(self): - '''python2 backwards compatibility''' - return self.__next__() - def __next__(self): - try: - out = self[self.index] - except IndexError as ie: - raise StopIteration() - self.index+=1 - return out - - def is_compatible(self, other): - '''basic check if the size of the 
2 objects fit''' - - #for i in range(len(self.containers)): - # if type(self.containers[i])==type(self): - # self = self.containers[i] - - if isinstance(other, Number): - return True - elif isinstance(other, list): - for ot in other: - if not isinstance(ot, (Number,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - raise ValueError('List/ numpy array can only contain numbers {}'\ - .format(type(ot))) - return len(self.containers) == len(other) - elif isinstance(other, numpy.ndarray): - return len(self.containers) == len(other) - elif issubclass(other.__class__, DataContainer): - ret = True - for i, el in enumerate(self.containers): - if isinstance(el, BlockDataContainer): - a = el.is_compatible(other) - else: - a = el.shape == other.shape - print ("current element" , el.shape, "other ", other.shape, "same shape" , a) - ret = ret and a - return ret - #return self.get_item(0).shape == other.shape - return len(self.containers) == len(other.containers) - - def get_item(self, row): - if row > self.shape[0]: - raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) - return self.containers[row] - - def __getitem__(self, row): - return self.get_item(row) - - def add(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for add') - out = kwargs.get('out', None) - #print ("args" , *args) - if isinstance(other, Number): - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - - return type(self)( - *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def subtract(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for subtract') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def multiply(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('{} Incompatible for multiply'.format(other)) - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def divide(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for divide') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - if out is not None: - kw = kwargs.copy() - for i,el in enumerate(self.containers): - kw['out'] = out.get_item(i) - el.divide(other, *args, **kw) - return - else: - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - def binary_operations(self, operation, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for divide') - out = kwargs.get('out', None) - if isinstance(other, Number) or issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - if out is not None: - kw = kwargs.copy() - for i,el in enumerate(self.containers): - kw['out'] = out.get_item(i) - el.divide(other, *args, **kw) - return - else: - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - - def power(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for power') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - def maximum(self,other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for maximum') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) - - ## unary operations - def abs(self, *args, **kwargs): - return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) - def sign(self, *args, **kwargs): - return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) - def sqrt(self, *args, **kwargs): - return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) - def conjugate(self, out=None): - return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) - - ## reductions - def sum(self, *args, **kwargs): - return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) - def squared_norm(self): - y = numpy.asarray([el.squared_norm() for el in self.containers]) - return y.sum() - def norm(self): - return numpy.sqrt(self.squared_norm()) - def copy(self): - '''alias of clone''' - return self.clone() - def clone(self): - return type(self)(*[el.copy() for el in self.containers], shape=self.shape) - def fill(self, other): - if isinstance (other, BlockDataContainer): - if not self.is_compatible(other): - raise ValueError('Incompatible containers') - for el,ot in zip(self.containers, other.containers): - el.fill(ot) - else: - return ValueError('Cannot fill with object provided {}'.format(type(other))) - - def __add__(self, other): - return self.add( other ) - # __radd__ - - def __sub__(self, other): - return self.subtract( other ) - # __rsub__ - - def __mul__(self, other): - return self.multiply(other) - # __rmul__ - - def __div__(self, other): - return self.divide(other) - # __rdiv__ - def __truediv__(self, other): - return self.divide(other) - - def __pow__(self, other): - return self.power(other) - # reverse operand - def __radd__(self, 
other): - '''Reverse addition - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self + other - # __radd__ - - def __rsub__(self, other): - '''Reverse subtraction - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return (-1 * self) + other - # __rsub__ - - def __rmul__(self, other): - '''Reverse multiplication - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self * other - # __rmul__ - - def __rdiv__(self, other): - '''Reverse division - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return pow(self / other, -1) - # __rdiv__ - def __rtruediv__(self, other): - '''Reverse truedivision - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return self.__rdiv__(other) - - def __rpow__(self, other): - '''Reverse power - - to make sure that this method is called rather than the __mul__ of a numpy array - the class constant __array_priority__ must be set > 0 - https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ - ''' - return other.power(self) - - def __iadd__(self, other): - '''Inline addition''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el += ot - elif isinstance(other, Number): - for el in self.containers: - el += other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __iadd__') - for el,ot in zip(self.containers, other): - el += ot - return self - # __iadd__ - - def __isub__(self, other): - '''Inline subtraction''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el -= ot - elif isinstance(other, Number): - for el in self.containers: - el -= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __isub__') - for el,ot in zip(self.containers, other): - el -= ot - return self - # __isub__ - - def __imul__(self, other): - '''Inline multiplication''' - if isinstance (other, BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el *= ot - elif isinstance(other, Number): - for el in self.containers: - el *= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __imul__') - for el,ot in zip(self.containers, other): - el *= ot - return self - # __imul__ - - def __idiv__(self, other): - '''Inline division''' - if isinstance (other, 
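The docstrings above explain why __array_priority__ is set on the class: without it, a product with a NumPy array on the left is handled by the array itself rather than by the reflected method. A minimal demonstration with a toy wrapper class (illustrative, relying on NumPy's documented deferral to higher-priority operands that define the reflected operator):

    import numpy as np

    class Wrapped:
        # With a positive __array_priority__, ndarray.__mul__ defers and
        # Python falls back to Wrapped.__rmul__; without it NumPy would try
        # to handle the product itself (typically as an object array).
        __array_priority__ = 1

        def __init__(self, data):
            self.data = np.asarray(data)

        def __rmul__(self, other):
            return Wrapped(other * self.data)

    w = np.arange(3.0) * Wrapped(np.ones(3))
    print(type(w).__name__, w.data)    # Wrapped [0. 1. 2.]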
BlockDataContainer): - for el,ot in zip(self.containers, other.containers): - el /= ot - elif isinstance(other, Number): - for el in self.containers: - el /= other - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - if not self.is_compatible(other): - raise ValueError('Incompatible for __idiv__') - for el,ot in zip(self.containers, other): - el /= ot - return self - # __rdiv__ - def __itruediv__(self, other): - '''Inline truedivision''' - return self.__idiv__(other) - + # -*- coding: utf-8 -*- +""" +Created on Tue Mar 5 16:04:45 2019 + +@author: ofn77899 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy +from numbers import Number +import functools +from ccpi.framework import DataContainer +#from ccpi.framework import AcquisitionData, ImageData +#from ccpi.optimisation.operators import Operator, LinearOperator + +class BlockDataContainer(object): + '''Class to hold DataContainers as column vector + + Provides basic algebra between BlockDataContainer's, DataContainer's and + subclasses and Numbers + + 1) algebra between `BlockDataContainer`s will be element-wise, only if + the shape of the 2 `BlockDataContainer`s is the same, otherwise it + will fail + 2) algebra between `BlockDataContainer`s and `list` or `numpy array` will + work as long as the number of `rows` and element of the arrays match, + indipendently on the fact that the `BlockDataContainer` could be nested + 3) algebra between `BlockDataContainer` and one `DataContainer` is possible. + It will require that all the `DataContainers` in the block to be + compatible with the `DataContainer` we want to algebra with. Should we + require that the `DataContainer` is the same type? Like `ImageData` or `AcquisitionData`? 
+ 4) algebra between `BlockDataContainer` and a `Number` is possible and it + will be done with each element of the `BlockDataContainer` even if nested + + A = [ [B,C] , D] + A * 3 = [ 3 * [B,C] , 3* D] = [ [ 3*B, 3*C] , 3*D ] + + ''' + ADD = 'add' + SUBTRACT = 'subtract' + MULTIPLY = 'multiply' + DIVIDE = 'divide' + POWER = 'power' + __array_priority__ = 1 + def __init__(self, *args, **kwargs): + '''''' + self.containers = args + self.index = 0 + shape = kwargs.get('shape', None) + if shape is None: + shape = (len(args),1) +# shape = (len(args),1) + self.shape = shape + + n_elements = functools.reduce(lambda x,y: x*y, shape, 1) + if len(args) != n_elements: + raise ValueError( + 'Dimension and size do not match: expected {} got {}' + .format(n_elements, len(args))) + + + def __iter__(self): + '''BlockDataContainer is Iterable''' + return self + def next(self): + '''python2 backwards compatibility''' + return self.__next__() + def __next__(self): + try: + out = self[self.index] + except IndexError as ie: + raise StopIteration() + self.index+=1 + return out + + def is_compatible(self, other): + '''basic check if the size of the 2 objects fit''' + + if isinstance(other, Number): + return True + elif isinstance(other, (list, numpy.ndarray)) : + for ot in other: + if not isinstance(ot, (Number,\ + numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ + numpy.float, numpy.float16, numpy.float32, numpy.float64, \ + numpy.complex)): + raise ValueError('List/ numpy array can only contain numbers {}'\ + .format(type(ot))) + return len(self.containers) == len(other) + elif issubclass(other.__class__, DataContainer): + ret = True + for i, el in enumerate(self.containers): + if isinstance(el, BlockDataContainer): + a = el.is_compatible(other) + else: + a = el.shape == other.shape + print ("current element" , el.shape, "other ", other.shape, "same shape" , a) + ret = ret and a + return ret + #return self.get_item(0).shape == other.shape + return len(self.containers) == len(other.containers) + + def get_item(self, row): + if row > self.shape[0]: + raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) + return self.containers[row] + + def __getitem__(self, row): + return self.get_item(row) + + def add(self, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for add') + out = kwargs.get('out', None) + #print ("args" , *args) + if isinstance(other, Number): + return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. 
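The nesting rule spelled out in the docstring, [[B, C], D] * 3 == [[3*B, 3*C], 3*D], boils down to applying the scalar at every leaf while preserving the structure. In plain Python lists (illustrative only):

    import numpy as np

    def scale_nested(block, s):
        # multiply every leaf by s, keeping the nesting intact
        return [scale_nested(el, s) if isinstance(el, list) else el * s
                for el in block]

    B, C, D = np.array([1.0]), np.array([2.0]), np.array([3.0])
    A = [[B, C], D]
    print(scale_nested(A, 3))   # [[array([3.]), array([6.])], array([9.])]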
Will raise error if not compatible + return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) + + return type(self)( + *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + + def subtract(self, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for subtract') + out = kwargs.get('out', None) + if isinstance(other, Number): + return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + + def multiply(self, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('{} Incompatible for multiply'.format(other)) + out = kwargs.get('out', None) + if isinstance(other, Number): + return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list): + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif isinstance(other, numpy.ndarray): + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + + def divide_old(self, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for divide') + out = kwargs.get('out', None) + if isinstance(other, Number): + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. 
Will raise error if not compatible + if out is not None: + kw = kwargs.copy() + for i,el in enumerate(self.containers): + kw['out'] = out.get_item(i) + el.divide(other, *args, **kw) + return + else: + return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + def divide(self, other, *args, **kwargs): + out = kwargs.get('out', None) + if out is not None: + self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) + else: + return self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) + + def binary_operations(self, operation, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for divide') + out = kwargs.get('out', None) + if isinstance(other, Number) or issubclass(other.__class__, DataContainer): + # try to do algebra with one DataContainer. Will raise error if not compatible + kw = kwargs.copy() + res = [] + for i,el in enumerate(self.containers): + if operation == BlockDataContainer.ADD: + op = el.add + elif operation == BlockDataContainer.SUBTRACT: + op = el.subtract + elif operation == BlockDataContainer.MULTIPLY: + op = el.multiply + elif operation == BlockDataContainer.DIVIDE: + op = el.divide + elif operation == BlockDataContainer.POWER: + op = el.power + else: + raise ValueError('Unsupported operation', operation) + if out is not None: + kw['out'] = out.get_item(i) + op(other, *args, **kw) + else: + res.append(op(other, *args, **kw)) + if out is not None: + return + else: + return type(self)(*res, shape=self.shape) + elif isinstance(other, (list, numpy.ndarray)): + # try to do algebra with one DataContainer. 
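A short sketch of the two calling styles the methods above support: with out omitted a new BlockDataContainer is allocated and returned, with out supplied the result is written element by element into an existing container and None is returned (geometry and values are illustrative):

from ccpi.framework import ImageGeometry, BlockDataContainer

ig = ImageGeometry(2, 3, 4)
u = BlockDataContainer(ig.allocate(4), ig.allocate(8))

v = u.divide(2)                                    # allocates a new container
out = BlockDataContainer(ig.allocate(0), ig.allocate(0))
u.divide(2, out=out)                               # fills out in place instead

assert v.get_item(1).as_array()[0][0][0] == out.get_item(1).as_array()[0][0][0] == 4
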
Will raise error if not compatible + kw = kwargs.copy() + res = [] + for i,zel in enumerate(zip ( self.containers, other) ): + el = zel[0] + ot = zel[1] + if operation == BlockDataContainer.ADD: + op = el.add + elif operation == BlockDataContainer.SUBTRACT: + op = el.subtract + elif operation == BlockDataContainer.MULTIPLY: + op = el.multiply + elif operation == BlockDataContainer.DIVIDE: + op = el.divide + elif operation == BlockDataContainer.POWER: + op = el.power + else: + raise ValueError('Unsupported operation', operation) + if out is not None: + kw['out'] = out.get_item(i) + op(ot, *args, **kw) + else: + res.append(op(ot, *args, **kw)) + if out is not None: + return + else: + return type(self)(*res, shape=self.shape) + return type(self)(*[ operation(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + elif isinstance(other, BlockDataContainer): + return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], + shape=self.shape) + else: + raise ValueError('Incompatible type {}'.format(type(other))) + + + def power(self, other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for power') + out = kwargs.get('out', None) + if isinstance(other, Number): + return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) + + def maximum(self,other, *args, **kwargs): + if not self.is_compatible(other): + raise ValueError('Incompatible for maximum') + out = kwargs.get('out', None) + if isinstance(other, Number): + return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) + return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) + + ## unary operations + def abs(self, *args, **kwargs): + return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) + def sign(self, *args, **kwargs): + return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) + def sqrt(self, *args, **kwargs): + return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) + def conjugate(self, out=None): + return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) + + ## reductions + def sum(self, *args, **kwargs): + return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) + def squared_norm(self): + y = numpy.asarray([el.squared_norm() for el in self.containers]) + return y.sum() + def norm(self): + return numpy.sqrt(self.squared_norm()) + def copy(self): + '''alias of clone''' + return self.clone() + def clone(self): + return type(self)(*[el.copy() for el in self.containers], shape=self.shape) + def fill(self, other): + if isinstance (other, BlockDataContainer): + if not self.is_compatible(other): + raise ValueError('Incompatible containers') + for el,ot in zip(self.containers, other.containers): + el.fill(ot) + else: + return ValueError('Cannot fill with object 
provided {}'.format(type(other))) + + def __add__(self, other): + return self.add( other ) + # __radd__ + + def __sub__(self, other): + return self.subtract( other ) + # __rsub__ + + def __mul__(self, other): + return self.multiply(other) + # __rmul__ + + def __div__(self, other): + return self.divide(other) + # __rdiv__ + def __truediv__(self, other): + return self.divide(other) + + def __pow__(self, other): + return self.power(other) + # reverse operand + def __radd__(self, other): + '''Reverse addition + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self + other + # __radd__ + + def __rsub__(self, other): + '''Reverse subtraction + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return (-1 * self) + other + # __rsub__ + + def __rmul__(self, other): + '''Reverse multiplication + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self * other + # __rmul__ + + def __rdiv__(self, other): + '''Reverse division + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return pow(self / other, -1) + # __rdiv__ + def __rtruediv__(self, other): + '''Reverse truedivision + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return self.__rdiv__(other) + + def __rpow__(self, other): + '''Reverse power + + to make sure that this method is called rather than the __mul__ of a numpy array + the class constant __array_priority__ must be set > 0 + https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ + ''' + return other.power(self) + + def __iadd__(self, other): + '''Inline addition''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el += ot + elif isinstance(other, Number): + for el in self.containers: + el += other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + if not self.is_compatible(other): + raise ValueError('Incompatible for __iadd__') + for el,ot in zip(self.containers, other): + el += ot + return self + # __iadd__ + + def __isub__(self, other): + '''Inline subtraction''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el -= ot + elif isinstance(other, Number): + for el in self.containers: + el -= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + if not self.is_compatible(other): + raise ValueError('Incompatible for __isub__') + for el,ot in zip(self.containers, other): + el -= ot + return self + # __isub__ + + def __imul__(self, other): + '''Inline multiplication''' + if isinstance (other, 
BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el *= ot + elif isinstance(other, Number): + for el in self.containers: + el *= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + if not self.is_compatible(other): + raise ValueError('Incompatible for __imul__') + for el,ot in zip(self.containers, other): + el *= ot + return self + # __imul__ + + def __idiv__(self, other): + '''Inline division''' + if isinstance (other, BlockDataContainer): + for el,ot in zip(self.containers, other.containers): + el /= ot + elif isinstance(other, Number): + for el in self.containers: + el /= other + elif isinstance(other, list) or isinstance(other, numpy.ndarray): + if not self.is_compatible(other): + raise ValueError('Incompatible for __idiv__') + for el,ot in zip(self.containers, other): + el /= ot + return self + # __rdiv__ + def __itruediv__(self, other): + '''Inline truedivision''' + return self.__idiv__(other) + -- cgit v1.2.3 From 4049917fa20353da6f88ff03b7c5b11d67743a00 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 12:09:39 +0100 Subject: fix BlockDataContainer algebra closes #242 --- .../Python/ccpi/framework/BlockDataContainer.py | 152 ++++++++++++--------- 1 file changed, 86 insertions(+), 66 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index fee0cda..529a1ce 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -111,79 +111,98 @@ class BlockDataContainer(object): def __getitem__(self, row): return self.get_item(row) +# def add(self, other, *args, **kwargs): +# if not self.is_compatible(other): +# raise ValueError('Incompatible for add') +# out = kwargs.get('out', None) +# #print ("args" , *args) +# if isinstance(other, Number): +# return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# elif isinstance(other, list) or isinstance(other, numpy.ndarray): +# return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) +# elif issubclass(other.__class__, DataContainer): +# # try to do algebra with one DataContainer. Will raise error if not compatible +# return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# +# return type(self)( +# *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], +# shape=self.shape) +# +# def subtract(self, other, *args, **kwargs): +# if not self.is_compatible(other): +# raise ValueError('Incompatible for subtract') +# out = kwargs.get('out', None) +# if isinstance(other, Number): +# return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# elif isinstance(other, list) or isinstance(other, numpy.ndarray): +# return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) +# elif issubclass(other.__class__, DataContainer): +# # try to do algebra with one DataContainer. 
Will raise error if not compatible +# return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], +# shape=self.shape) +# +# def multiply(self, other, *args, **kwargs): +# if not self.is_compatible(other): +# raise ValueError('{} Incompatible for multiply'.format(other)) +# out = kwargs.get('out', None) +# if isinstance(other, Number): +# return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# elif isinstance(other, list): +# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) +# elif isinstance(other, numpy.ndarray): +# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) +# elif issubclass(other.__class__, DataContainer): +# # try to do algebra with one DataContainer. Will raise error if not compatible +# return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], +# shape=self.shape) +# +# def divide_old(self, other, *args, **kwargs): +# if not self.is_compatible(other): +# raise ValueError('Incompatible for divide') +# out = kwargs.get('out', None) +# if isinstance(other, Number): +# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# elif isinstance(other, list) or isinstance(other, numpy.ndarray): +# return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) +# elif issubclass(other.__class__, DataContainer): +# # try to do algebra with one DataContainer. Will raise error if not compatible +# if out is not None: +# kw = kwargs.copy() +# for i,el in enumerate(self.containers): +# kw['out'] = out.get_item(i) +# el.divide(other, *args, **kw) +# return +# else: +# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) +# return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], +# shape=self.shape) def add(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for add') out = kwargs.get('out', None) - #print ("args" , *args) - if isinstance(other, Number): - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) - - return type(self)( - *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - + if out is not None: + self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs) + else: + return self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs) def subtract(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for subtract') out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - + if out is not None: + self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs) + else: + return self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs) def multiply(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('{} Incompatible for multiply'.format(other)) out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif isinstance(other, numpy.ndarray): - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. Will raise error if not compatible - return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) - - def divide_old(self, other, *args, **kwargs): - if not self.is_compatible(other): - raise ValueError('Incompatible for divide') - out = kwargs.get('out', None) - if isinstance(other, Number): - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - elif isinstance(other, list) or isinstance(other, numpy.ndarray): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif issubclass(other.__class__, DataContainer): - # try to do algebra with one DataContainer. 
Will raise error if not compatible - if out is not None: - kw = kwargs.copy() - for i,el in enumerate(self.containers): - kw['out'] = out.get_item(i) - el.divide(other, *args, **kw) - return - else: - return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape) - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) + if out is not None: + self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs) + else: + return self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs) def divide(self, other, *args, **kwargs): out = kwargs.get('out', None) if out is not None: self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) else: return self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) + def binary_operations(self, operation, other, *args, **kwargs): if not self.is_compatible(other): @@ -215,11 +234,15 @@ class BlockDataContainer(object): return else: return type(self)(*res, shape=self.shape) - elif isinstance(other, (list, numpy.ndarray)): + elif isinstance(other, (list, numpy.ndarray, BlockDataContainer)): # try to do algebra with one DataContainer. Will raise error if not compatible kw = kwargs.copy() res = [] - for i,zel in enumerate(zip ( self.containers, other) ): + if isinstance(other, BlockDataContainer): + the_other = other.containers + else: + the_other = other + for i,zel in enumerate(zip ( self.containers, the_other) ): el = zel[0] ot = zel[1] if operation == BlockDataContainer.ADD: @@ -244,9 +267,6 @@ class BlockDataContainer(object): else: return type(self)(*res, shape=self.shape) return type(self)(*[ operation(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) - elif isinstance(other, BlockDataContainer): - return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], - shape=self.shape) else: raise ValueError('Incompatible type {}'.format(type(other))) -- cgit v1.2.3 From 04c3f99648e38756e8180519db5f32ac3344ea2b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 12:10:46 +0100 Subject: update test --- Wrappers/Python/test/test_BlockDataContainer.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 2fca23c..0dd0657 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -136,12 +136,12 @@ class TestBlockDataContainer(unittest.TestCase): print (a[0][0].shape) #cp2 = BlockDataContainer(*a) cp2 = cp0.add(cp1) - assert (cp2.get_item(0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1).as_array()[0][0][0] == 4.) + self.assertEqual (cp2.get_item(0).as_array()[0][0][0] , 2.) + self.assertEqual (cp2.get_item(1).as_array()[0][0][0] , 4.) cp2 = cp0 + cp1 - assert (cp2.get_item(0).as_array()[0][0][0] == 2.) - assert (cp2.get_item(1).as_array()[0][0][0] == 4.) + self.assertTrue (cp2.get_item(0).as_array()[0][0][0] == 2.) + self.assertTrue (cp2.get_item(1).as_array()[0][0][0] == 4.) cp2 = cp0 + 1 numpy.testing.assert_almost_equal(cp2.get_item(0).as_array()[0][0][0] , 1. 
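With BlockDataContainer operands now routed through the same branch as lists and numpy arrays, the out= form also covers block-on-block algebra, which is what the nested divide test added below exercises; a hedged sketch with illustrative geometry:

from ccpi.framework import ImageGeometry, BlockDataContainer

ig = ImageGeometry(2, 3, 4)
u = BlockDataContainer(ig.allocate(6), ig.allocate(9))
v = BlockDataContainer(ig.allocate(3), ig.allocate(3))

out = BlockDataContainer(ig.allocate(0), ig.allocate(0))
u.divide(v, out=out)     # element-wise division written into out, no new container

assert out.get_item(1).as_array()[0][0][0] == 3
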
, decimal=5) numpy.testing.assert_almost_equal(cp2.get_item(1).as_array()[0][0][0] , 2., decimal = 5) @@ -427,6 +427,21 @@ class TestBlockDataContainer(unittest.TestCase): cp0.fill(cp2) self.assertBlockDataContainerEqual(cp0, cp2) + def test_NestedBlockDataContainer(self): + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(2,3,5) + + data0 = ig0.allocate(0) + data2 = ig0.allocate(1) + + cp0 = BlockDataContainer(data0,data2) + #cp1 = BlockDataContainer(data2,data3) + + nested = BlockDataContainer(cp0, data2, data2) + out = BlockDataContainer(BlockDataContainer(data0 , data0), data0, data0) + nested.divide(data2,out=out) + self.assertBlockDataContainerEqual(out, nested) + def assertBlockDataContainerEqual(self, container1, container2): print ("assert Block Data Container Equal") -- cgit v1.2.3 From 60f73fe79526dc61d721b2dc76a942f2e9a082d1 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 12:11:49 +0100 Subject: uses new Algebra of BDC --- Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index a655e03..c5be084 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -99,8 +99,9 @@ class MixedL21Norm(Function): else: res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) res = res1.sqrt().maximum(1.0) - for i,el in enumerate(x.containers): - el.divide(res, out=out.get_item(i)) + x.divide(res, out=out) + #for i,el in enumerate(x.containers): + # el.divide(res, out=out.get_item(i)) def __rmul__(self, scalar): return ScaledFunction(self, scalar) -- cgit v1.2.3 From 7fc291f6d2d71b0d5aa7f3fcf11966910dcea7ab Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 12:12:25 +0100 Subject: removes copy in non optimised alg --- Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 2ac3eba..835c979 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -98,8 +98,6 @@ class PDHG(Algorithm): self.xbar *= self.theta self.xbar += self.x - #self.x_old.fill(self.x) - #self.y_old.fill(self.y) self.x_old = self.x self.y_old = self.y -- cgit v1.2.3 From 7a019f3ccf4d19181ffb5dc5a92d3096cab4b12b Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 12 Apr 2019 12:49:06 +0100 Subject: memopt fixes --- .../Python/ccpi/framework/BlockDataContainer.py | 35 +++---------------- .../ccpi/optimisation/functions/BlockFunction.py | 2 +- .../ccpi/optimisation/functions/L2NormSquared.py | 40 +++++++++++----------- .../ccpi/optimisation/functions/MixedL21Norm.py | 3 -- Wrappers/Python/wip/pdhg_TV_denoising.py | 4 +-- 5 files changed, 28 insertions(+), 56 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 9bec1fe..f1d6d9a 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -78,9 +78,13 @@ class BlockDataContainer(object): a = el.is_compatible(other) else: a = el.shape == other.shape +# print ("current element" , el.shape, 
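The MixedL21Norm change above replaces the per-element loop with a single x.divide(res, out=out); since res is the maximum of 1 and the per-pixel Euclidean length of the block, the whole step is a pointwise projection onto the unit ball of the dual norm. A small numpy sketch of that rule, where the two arrays stand in for, say, x- and y-derivative channels:

import numpy

gx = numpy.array([0.3, 3.0, -4.0])
gy = numpy.array([0.4, 4.0,  0.0])

norm = numpy.sqrt(gx**2 + gy**2)        # res1.sqrt() in the code above
scale = numpy.maximum(norm, 1.0)        # .maximum(1.0)

px, py = gx / scale, gy / scale         # x.divide(res, out=out)
assert numpy.allclose(numpy.sqrt(px**2 + py**2), [0.5, 1.0, 1.0])
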
"other ", other.shape, "same shape" , a) ret = ret and a return ret - + + +# elif issubclass(other.__class__, DataContainer): +# return self.get_item(0).shape == other.shape return len(self.containers) == len(other.containers) def get_item(self, row): @@ -350,32 +354,3 @@ class BlockDataContainer(object): '''Inline truedivision''' return self.__idiv__(other) -if __name__ == '__main__': - - M, N, K = 2,3,5 - from ccpi.framework import ImageGeometry, BlockGeometry - - ig = ImageGeometry(N, M) - u = ig.allocate('random_int') - - BG = BlockGeometry(ig, ig) - U = BG.allocate('random_int') - - U_nested = BlockDataContainer(BlockDataContainer(u, u), u) - - - res1 = U + u - res2 = U_nested + u -# res2 = u + U - - - - - - - - - - - - \ No newline at end of file diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 9917d99..8cce290 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -7,7 +7,7 @@ Created on Fri Mar 8 10:01:31 2019 """ import numpy as np -#from ccpi.optimisation.funcs import Function + from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer from numbers import Number diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 9e0e424..9508c13 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -20,38 +20,36 @@ import numpy from ccpi.optimisation.functions import Function from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.framework import DataContainer, ImageData, ImageGeometry -############################ L2NORM FUNCTION ############################# class L2NormSquared(Function): - def __init__(self, **kwargs): - - ''' L2NormSquared class - f : ImageGeometry --> R - - Cases: f(x) = ||x||^{2}_{2} - f(x) = || x - b ||^{2}_{2} - - ''' + ''' + + Class: L2NormSquared - #TODO need x, b to live in the same geometry if b is not None - + Cases: a) f(x) = ||x||^{2} + + b) f(x) = ||x - b||^{2}, b + + ''' + + def __init__(self, **kwargs): + super(L2NormSquared, self).__init__() self.b = kwargs.get('b',None) def __call__(self, x): - ''' Evaluates L2NormSq at point x''' + """ + + Evaluate L2NormSquared at x: f(x) + + + """ + y = x if self.b is not None: -# x.subtract(self.b, out = x) y = x - self.b -# else: -# y -# if out is None: -# return x.squared_norm() -# else: try: return y.squared_norm() except AttributeError as ae: @@ -61,6 +59,8 @@ class L2NormSquared(Function): def gradient(self, x, out=None): + + ''' Evaluates gradient of L2NormSq at point x''' if out is not None: out.fill(x) diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 0c658a4..0c7eb85 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -102,9 +102,6 @@ class MixedL21Norm(Function): res = (sum(x**2).sqrt()).maximum(1.0) out.fill(x/res) - - - def __rmul__(self, scalar): return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index 22fee90..f276b46 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -90,8 +90,8 
@@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -opt = {'niter':100} -opt1 = {'niter':100, 'memopt': True} +opt = {'niter':1000} +opt1 = {'niter':1000, 'memopt': True} t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -- cgit v1.2.3 From 474767cce1d559b7790824b33ed6244be62e9666 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 12 Apr 2019 13:47:56 +0100 Subject: add docstrings --- .../ccpi/optimisation/functions/L2NormSquared.py | 59 ++++++++++++---------- 1 file changed, 32 insertions(+), 27 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 9508c13..903dafa 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -40,12 +40,7 @@ class L2NormSquared(Function): def __call__(self, x): - """ - - Evaluate L2NormSquared at x: f(x) - - - """ + ''' Evaluate L2NormSquared at x: f(x) ''' y = x if self.b is not None: @@ -58,43 +53,43 @@ class L2NormSquared(Function): - def gradient(self, x, out=None): + def gradient(self, x, out=None): - - ''' Evaluates gradient of L2NormSq at point x''' + ''' Evaluate gradient of L2NormSquared at x: f'(x) ''' + if out is not None: + out.fill(x) if self.b is not None: out -= self.b out *= 2 + else: + y = x if self.b is not None: - # x.subtract(self.b, out=x) y = x - self.b return 2*y - def convex_conjugate(self, x, out=None): - ''' Evaluate convex conjugate of L2NormSq''' + def convex_conjugate(self, x): + + ''' Evaluate convex conjugate of L2NormSquared at x: f^{*}(x)''' tmp = 0 + if self.b is not None: -# tmp = (self.b * x).sum() tmp = (x * self.b).sum() - if out is None: - # FIXME: this is a number - return (1./4.) * x.squared_norm() + tmp - else: - # FIXME: this is a DataContainer - out.fill((1./4.) * x.squared_norm() + tmp) - + return (1./4.) 
* x.squared_norm() + tmp + def proximal(self, x, tau, out = None): - ''' The proximal operator ( prox_\{tau * f\}(x) ) evaluates i.e., - argmin_x { 0.5||x - u||^{2} + tau f(x) } + ''' Evaluate Proximal Operator of tau * f(\cdot) at x: + + prox_{tau*f(\cdot)}(x) = \argmin_{z} \frac{1}{2}|| z - x ||^{2}_{2} + tau * f(z) + ''' if out is None: @@ -108,17 +103,19 @@ class L2NormSquared(Function): out -= self.b out /= (1+2*tau) if self.b is not None: - out += self.b - #out.fill((x - self.b)/(1+2*tau) + self.b) - #else: - # out.fill(x/(1+2*tau)) + out += self.b def proximal_conjugate(self, x, tau, out=None): + ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x (i.e., the convex conjugate of f) : + + prox_{tau*f(\cdot)}(x) = \argmin_{z} \frac{1}{2}|| z - x ||^{2}_{2} + tau * f^{*}(z) + + ''' + if out is None: if self.b is not None: - # change the order cannot add ImageData + NestedBlock return (x - tau*self.b)/(1 + tau/2) else: return x/(1 + tau/2) @@ -129,6 +126,14 @@ class L2NormSquared(Function): out.fill( x/(1 + tau/2) ) def __rmul__(self, scalar): + + ''' Allows multiplication of L2NormSquared with a scalar + + Returns: ScaledFunction + + + ''' + return ScaledFunction(self, scalar) -- cgit v1.2.3 From b4f439677fa9a8d8235b2ddc0dcbda88cab7b76b Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 13:58:46 +0100 Subject: commutative algebra between BlockDataContainers and DataContainers simplified algebra in DataContainer --- .../Python/ccpi/framework/BlockDataContainer.py | 1 + Wrappers/Python/ccpi/framework/framework.py | 348 ++++++++++++--------- Wrappers/Python/test/test_BlockDataContainer.py | 6 + 3 files changed, 204 insertions(+), 151 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 529a1ce..060b130 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -45,6 +45,7 @@ class BlockDataContainer(object): DIVIDE = 'divide' POWER = 'power' __array_priority__ = 1 + __container_priority__ = 2 def __init__(self, *args, **kwargs): '''''' self.containers = args diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 07c2ead..3982965 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -320,6 +320,7 @@ class DataContainer(object): Data is currently held in a numpy arrays''' + __container_priority__ = 1 def __init__ (self, array, deep_copy=True, dimension_labels=None, **kwargs): '''Holds the data''' @@ -495,119 +496,134 @@ class DataContainer(object): return self.shape == other.shape ## algebra - def __add__(self, other, *args, **kwargs): - out = kwargs.get('out', None) - - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() + other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)( - self.as_array() + other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , - type(other))) - # __add__ - +# def __add__(self, other, *args, **kwargs): +# out = kwargs.get('out', None) +# +# if issubclass(type(other), 
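The proximal map documented above has the closed form prox_{tau*f}(x) = (x + 2*tau*b)/(1 + 2*tau) when f(x) = ||x - b||^2; a quick numpy check that this agrees with the (x - b)/(1 + 2*tau) + b arrangement the code uses (the vectors and tau are arbitrary test values):

import numpy

b = numpy.array([1.0, -2.0, 0.5])
x = numpy.array([0.3, 0.7, -1.1])
tau = 0.4

# stationarity of 0.5*||z - x||^2 + tau*||z - b||^2 gives (z - x) + 2*tau*(z - b) = 0
z = (x + 2 * tau * b) / (1 + 2 * tau)
z_code = (x - b) / (1 + 2 * tau) + b     # as computed in proximal above
assert numpy.allclose(z, z_code)
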
DataContainer): +# if self.check_dimensions(other): +# out = self.as_array() + other.as_array() +# return type(self)(out, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, +# other.shape)) +# elif isinstance(other, (int, float, complex)): +# return type(self)( +# self.as_array() + other, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , +# type(other))) +# # __add__ +# +# def __sub__(self, other): +# if issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# out = self.as_array() - other.as_array() +# return type(self)(out, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, +# other.shape)) +# elif isinstance(other, (int, float, complex)): +# return type(self)(self.as_array() - other, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , +# type(other))) +# # __sub__ +# def __truediv__(self,other): +# return self.__div__(other) +# +# def __div__(self, other): +# if issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# out = self.as_array() / other.as_array() +# return type(self)(out, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, +# other.shape)) +# elif isinstance(other, (int, float, complex)): +# return type(self)(self.as_array() / other, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , +# type(other))) +# # __div__ +# +# def __pow__(self, other): +# if issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# out = self.as_array() ** other.as_array() +# return type(self)(out, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, +# other.shape)) +# elif isinstance(other, (int, float, complex)): +# return type(self)(self.as_array() ** other, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , +# type(other))) +# # __pow__ +# +# def __mul__(self, other): +# if issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# out = self.as_array() * other.as_array() +# return type(self)(out, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# else: +# raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, +# other.shape)) +# elif isinstance(other, (int, float, complex,\ +# numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ +# numpy.float, numpy.float16, numpy.float32, numpy.float64, \ +# numpy.complex)): +# return type(self)(self.as_array() * other, +# deep_copy=True, +# dimension_labels=self.dimension_labels, +# geometry=self.geometry) +# +# else: +# raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , +# type(other))) +# # __mul__ + + def __add__(self, other): + return self.add(other) + 
def __mul__(self, other): + return self.multiply(other) def __sub__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() - other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() - other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , - type(other))) - # __sub__ - def __truediv__(self,other): - return self.__div__(other) - + return self.subtract(other) def __div__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() / other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() / other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , - type(other))) - # __div__ - + return self.divide(other) + def __truediv__(self, other): + return self.divide(other) def __pow__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() ** other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex)): - return type(self)(self.as_array() ** other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , - type(other))) - # __pow__ + return self.power(other) + - def __mul__(self, other): - if issubclass(type(other), DataContainer): - if self.check_dimensions(other): - out = self.as_array() * other.as_array() - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - else: - raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, - other.shape)) - elif isinstance(other, (int, float, complex,\ - numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ - numpy.float, numpy.float16, numpy.float32, numpy.float64, \ - numpy.complex)): - return type(self)(self.as_array() * other, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - else: - raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , - type(other))) - # __mul__ # reverse operand def __radd__(self, other): @@ -648,54 +664,72 @@ class DataContainer(object): # (+=, -=, *=, /= , //=, # must return self - - def __iadd__(self, other): - if isinstance(other, (int, float)) : - numpy.add(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.add(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __iadd__ + kw = {'out':self} + return self.add(other, **kw) def __imul__(self, other): - 
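The in-place operators being rewritten here all funnel through the corresponding named method with out=self, so augmented assignment updates the existing array rather than allocating a new container; a small sketch under that assumption (geometry is illustrative):

from ccpi.framework import ImageGeometry

ig = ImageGeometry(2, 3, 4)
u = ig.allocate(1)

u += 2      # equivalent to u.add(2, out=u)
u -= 1      # equivalent to u.subtract(1, out=u)

assert u.as_array()[0][0][0] == 2
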
if isinstance(other, (int, float)) : - arr = self.as_array() - numpy.multiply(arr, other, out=arr) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.multiply(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __imul__ + kw = {'out':self} + return self.multiply(other, **kw) def __isub__(self, other): - if isinstance(other, (int, float)) : - numpy.subtract(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.subtract(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __isub__ + kw = {'out':self} + return self.subtract(other, **kw) def __idiv__(self, other): - return self.__itruediv__(other) + kw = {'out':self} + return self.divide(other, **kw) + def __itruediv__(self, other): - if isinstance(other, (int, float)) : - numpy.divide(self.array, other, out=self.array) - elif issubclass(type(other), DataContainer): - if self.check_dimensions(other): - numpy.divide(self.array, other.array, out=self.array) - else: - raise ValueError('Dimensions do not match') - return self - # __idiv__ + kw = {'out':self} + return self.divide(other, **kw) + +# def __iadd__(self, other): +# if isinstance(other, (int, float)) : +# numpy.add(self.array, other, out=self.array) +# elif issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# numpy.add(self.array, other.array, out=self.array) +# else: +# raise ValueError('Dimensions do not match') +# return self +# # __iadd__ +# +# def __imul__(self, other): +# if isinstance(other, (int, float)) : +# arr = self.as_array() +# numpy.multiply(arr, other, out=arr) +# elif issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# numpy.multiply(self.array, other.array, out=self.array) +# else: +# raise ValueError('Dimensions do not match') +# return self +# # __imul__ +# +# def __isub__(self, other): +# if isinstance(other, (int, float)) : +# numpy.subtract(self.array, other, out=self.array) +# elif issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# numpy.subtract(self.array, other.array, out=self.array) +# else: +# raise ValueError('Dimensions do not match') +# return self +# # __isub__ +# +# def __idiv__(self, other): +# return self.__itruediv__(other) +# def __itruediv__(self, other): +# if isinstance(other, (int, float)) : +# numpy.divide(self.array, other, out=self.array) +# elif issubclass(type(other), DataContainer): +# if self.check_dimensions(other): +# numpy.divide(self.array, other.array, out=self.array) +# else: +# raise ValueError('Dimensions do not match') +# return self +# # __idiv__ def __str__ (self, representation=False): repres = "" @@ -802,15 +836,27 @@ class DataContainer(object): raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) def add(self, other, *args, **kwargs): + if hasattr(other, '__container_priority__') and \ + self.__class__.__container_priority__ < other.__class__.__container_priority__: + return other.add(self, *args, **kwargs) return self.pixel_wise_binary(numpy.add, other, *args, **kwargs) def subtract(self, other, *args, **kwargs): + if hasattr(other, '__container_priority__') and \ + self.__class__.__container_priority__ < other.__class__.__container_priority__: + return other.subtract(self, *args, **kwargs) return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs) def multiply(self, 
other, *args, **kwargs): + if hasattr(other, '__container_priority__') and \ + self.__class__.__container_priority__ < other.__class__.__container_priority__: + return other.multiply(self, *args, **kwargs) return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs) def divide(self, other, *args, **kwargs): + if hasattr(other, '__container_priority__') and \ + self.__class__.__container_priority__ < other.__class__.__container_priority__: + return other.divide(self, *args, **kwargs) return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) def power(self, other, *args, **kwargs): @@ -883,7 +929,7 @@ class DataContainer(object): class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' - + __container_priority__ = 1 def __init__(self, array = None, deep_copy=False, @@ -1006,7 +1052,7 @@ class ImageData(DataContainer): class AcquisitionData(DataContainer): '''DataContainer for holding 2D or 3D sinogram''' - + __container_priority__ = 1 def __init__(self, array = None, deep_copy=True, diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 0dd0657..a20f289 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -369,6 +369,12 @@ class TestBlockDataContainer(unittest.TestCase): c5 = nbdc.get_item(0).power(2).sum() c5a = nbdc.power(2).sum() print ("sum", c5a, c5) + + cp0 = BlockDataContainer(data0,data2) + a = cp0 * data2 + b = data2 * cp0 + self.assertBlockDataContainerEqual(a,b) + print ("test_Nested_BlockDataContainer OK") def stest_NestedBlockDataContainer2(self): -- cgit v1.2.3 From 849e40b45284bc879eaa2c6bc1fe1ba2a15de6c3 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 14:03:17 +0100 Subject: removed comments --- Wrappers/Python/ccpi/framework/framework.py | 160 +--------------------------- 1 file changed, 1 insertion(+), 159 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 3982965..2453986 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -496,119 +496,6 @@ class DataContainer(object): return self.shape == other.shape ## algebra -# def __add__(self, other, *args, **kwargs): -# out = kwargs.get('out', None) -# -# if issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# out = self.as_array() + other.as_array() -# return type(self)(out, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise ValueError('Wrong shape: {0} and {1}'.format(self.shape, -# other.shape)) -# elif isinstance(other, (int, float, complex)): -# return type(self)( -# self.as_array() + other, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise TypeError('Cannot {0} DataContainer with {1}'.format("add" , -# type(other))) -# # __add__ -# -# def __sub__(self, other): -# if issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# out = self.as_array() - other.as_array() -# return type(self)(out, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise ValueError('__sub__ Wrong shape: {0} and {1}'.format(self.shape, -# other.shape)) -# elif isinstance(other, (int, float, complex)): -# return type(self)(self.as_array() - other, -# deep_copy=True, -# 
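The __container_priority__ checks above are what make mixed expressions commute: when a plain DataContainer meets a BlockDataContainer it defers to the block's own method, so data * block and block * data agree, as the updated test asserts. A hedged sketch with illustrative values:

from ccpi.framework import ImageGeometry, BlockDataContainer

ig = ImageGeometry(2, 3, 4)
data = ig.allocate(2)
block = BlockDataContainer(ig.allocate(3), ig.allocate(5))

a = block * data    # BlockDataContainer.multiply broadcasts data over each element
b = data * block    # DataContainer.multiply sees the higher priority and defers

assert a.get_item(1).as_array()[0][0][0] == b.get_item(1).as_array()[0][0][0] == 10
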
dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise TypeError('Cannot {0} DataContainer with {1}'.format("subtract" , -# type(other))) -# # __sub__ -# def __truediv__(self,other): -# return self.__div__(other) -# -# def __div__(self, other): -# if issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# out = self.as_array() / other.as_array() -# return type(self)(out, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise ValueError('__div__ Wrong shape: {0} and {1}'.format(self.shape, -# other.shape)) -# elif isinstance(other, (int, float, complex)): -# return type(self)(self.as_array() / other, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise TypeError('Cannot {0} DataContainer with {1}'.format("divide" , -# type(other))) -# # __div__ -# -# def __pow__(self, other): -# if issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# out = self.as_array() ** other.as_array() -# return type(self)(out, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise ValueError('__pow__ Wrong shape: {0} and {1}'.format(self.shape, -# other.shape)) -# elif isinstance(other, (int, float, complex)): -# return type(self)(self.as_array() ** other, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise TypeError('pow: Cannot {0} DataContainer with {1}'.format("power" , -# type(other))) -# # __pow__ -# -# def __mul__(self, other): -# if issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# out = self.as_array() * other.as_array() -# return type(self)(out, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# else: -# raise ValueError('*:Wrong shape: {0} and {1}'.format(self.shape, -# other.shape)) -# elif isinstance(other, (int, float, complex,\ -# numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ -# numpy.float, numpy.float16, numpy.float32, numpy.float64, \ -# numpy.complex)): -# return type(self)(self.as_array() * other, -# deep_copy=True, -# dimension_labels=self.dimension_labels, -# geometry=self.geometry) -# -# else: -# raise TypeError('Cannot {0} DataContainer with {1}'.format("multiply" , -# type(other))) -# # __mul__ def __add__(self, other): return self.add(other) @@ -684,52 +571,7 @@ class DataContainer(object): kw = {'out':self} return self.divide(other, **kw) -# def __iadd__(self, other): -# if isinstance(other, (int, float)) : -# numpy.add(self.array, other, out=self.array) -# elif issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# numpy.add(self.array, other.array, out=self.array) -# else: -# raise ValueError('Dimensions do not match') -# return self -# # __iadd__ -# -# def __imul__(self, other): -# if isinstance(other, (int, float)) : -# arr = self.as_array() -# numpy.multiply(arr, other, out=arr) -# elif issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# numpy.multiply(self.array, other.array, out=self.array) -# else: -# raise ValueError('Dimensions do not match') -# return self -# # __imul__ -# -# def __isub__(self, other): -# if isinstance(other, (int, float)) : -# numpy.subtract(self.array, other, out=self.array) -# elif issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# numpy.subtract(self.array, other.array, out=self.array) -# else: -# raise 
ValueError('Dimensions do not match') -# return self -# # __isub__ -# -# def __idiv__(self, other): -# return self.__itruediv__(other) -# def __itruediv__(self, other): -# if isinstance(other, (int, float)) : -# numpy.divide(self.array, other, out=self.array) -# elif issubclass(type(other), DataContainer): -# if self.check_dimensions(other): -# numpy.divide(self.array, other.array, out=self.array) -# else: -# raise ValueError('Dimensions do not match') -# return self -# # __idiv__ + def __str__ (self, representation=False): repres = "" -- cgit v1.2.3 From 9ede79fb417bdbbb21e94045af16d62c366e35ee Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 14:54:09 +0100 Subject: added docstrings --- .../Python/ccpi/framework/BlockDataContainer.py | 95 +++++++--------------- 1 file changed, 28 insertions(+), 67 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 060b130..20efbc3 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -112,92 +112,45 @@ class BlockDataContainer(object): def __getitem__(self, row): return self.get_item(row) -# def add(self, other, *args, **kwargs): -# if not self.is_compatible(other): -# raise ValueError('Incompatible for add') -# out = kwargs.get('out', None) -# #print ("args" , *args) -# if isinstance(other, Number): -# return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) -# elif isinstance(other, list) or isinstance(other, numpy.ndarray): -# return type(self)(*[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) -# elif issubclass(other.__class__, DataContainer): -# # try to do algebra with one DataContainer. Will raise error if not compatible -# return type(self)(*[ el.add(other, *args, **kwargs) for el in self.containers], shape=self.shape) -# -# return type(self)( -# *[ el.add(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], -# shape=self.shape) -# -# def subtract(self, other, *args, **kwargs): -# if not self.is_compatible(other): -# raise ValueError('Incompatible for subtract') -# out = kwargs.get('out', None) -# if isinstance(other, Number): -# return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape) -# elif isinstance(other, list) or isinstance(other, numpy.ndarray): -# return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) -# elif issubclass(other.__class__, DataContainer): -# # try to do algebra with one DataContainer. 
Will raise error if not compatible
-# return type(self)(*[ el.subtract(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-# return type(self)(*[ el.subtract(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
-# shape=self.shape)
-#
-# def multiply(self, other, *args, **kwargs):
-# if not self.is_compatible(other):
-# raise ValueError('{} Incompatible for multiply'.format(other))
-# out = kwargs.get('out', None)
-# if isinstance(other, Number):
-# return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-# elif isinstance(other, list):
-# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
-# elif isinstance(other, numpy.ndarray):
-# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
-# elif issubclass(other.__class__, DataContainer):
-# # try to do algebra with one DataContainer. Will raise error if not compatible
-# return type(self)(*[ el.multiply(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-# return type(self)(*[ el.multiply(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
-# shape=self.shape)
-#
-# def divide_old(self, other, *args, **kwargs):
-# if not self.is_compatible(other):
-# raise ValueError('Incompatible for divide')
-# out = kwargs.get('out', None)
-# if isinstance(other, Number):
-# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-# elif isinstance(other, list) or isinstance(other, numpy.ndarray):
-# return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape)
-# elif issubclass(other.__class__, DataContainer):
-# # try to do algebra with one DataContainer. Will raise error if not compatible
-# if out is not None:
-# kw = kwargs.copy()
-# for i,el in enumerate(self.containers):
-# kw['out'] = out.get_item(i)
-# el.divide(other, *args, **kw)
-# return
-# else:
-# return type(self)(*[ el.divide(other, *args, **kwargs) for el in self.containers], shape=self.shape)
-# return type(self)(*[ el.divide(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)],
-# shape=self.shape)
 def add(self, other, *args, **kwargs):
+ '''Algebra: add method of BlockDataContainer with number/DataContainer or BlockDataContainer
+
+ :param: other (number, DataContainer or subclasses, or BlockDataContainer)
+ :param: out (optional): provides a placeholder for the result.
+ '''
 out = kwargs.get('out', None)
 if out is not None:
 self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
 else:
 return self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
 def subtract(self, other, *args, **kwargs):
+ '''Algebra: subtract method of BlockDataContainer with number/DataContainer or BlockDataContainer
+
+ :param: other (number, DataContainer or subclasses, or BlockDataContainer)
+ :param: out (optional): provides a placeholder for the result.
+ '''
 out = kwargs.get('out', None)
 if out is not None:
 self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
 else:
 return self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
 def multiply(self, other, *args, **kwargs):
+ '''Algebra: multiply method of BlockDataContainer with number/DataContainer or BlockDataContainer
+
+ :param: other (number, DataContainer or subclasses, or BlockDataContainer)
+ :param: out (optional): provides a placeholder for the result.
+ '''
 out = kwargs.get('out', None)
 if out is not None:
 self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
 else:
 return self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
 def divide(self, other, *args, **kwargs):
+ '''Algebra: divide method of BlockDataContainer with number/DataContainer or BlockDataContainer
+
+ :param: other (number, DataContainer or subclasses, or BlockDataContainer)
+ :param: out (optional): provides a placeholder for the result.
+ '''
 out = kwargs.get('out', None)
 if out is not None:
 self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs)
@@ -206,6 +159,14 @@ class BlockDataContainer(object):
 def binary_operations(self, operation, other, *args, **kwargs):
+ '''Algebra: generic method for algebraic operations of a BlockDataContainer with a number/DataContainer or BlockDataContainer
+
+ Provides commutativity with DataContainer and subclasses, i.e. this
+ class's reverse algebraic methods take precedence w.r.t. the direct algebraic
+ methods of DataContainer and subclasses.
+
+ This method is not intended to be used directly.
+ '''
 if not self.is_compatible(other):
 raise ValueError('Incompatible for divide')
 out = kwargs.get('out', None)
-- cgit v1.2.3

From 9c4776d7da4a599755badf018292d54317a2817b Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Fri, 12 Apr 2019 15:16:06 +0100
Subject: scaled function proximal conjugate uses out

---
 Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 464b944..3fbb858 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -61,7 +61,8 @@ class ScaledFunction(object):
 if out is None:
 return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
 else:
- out.fill(self.scalar*self.function.proximal_conjugate(x/self.scalar, tau/self.scalar))
+ self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out)
+ out *= self.scalar
 def grad(self, x):
 '''Alias of gradient(x,None)'''
-- cgit v1.2.3

From 168f8b09c56b06ab21f09e0ff2906e4ac18bf9d6 Mon Sep 17 00:00:00 2001
From: Edoardo Pasca
Date: Fri, 12 Apr 2019 15:23:28 +0100
Subject: removed print

---
 Wrappers/Python/ccpi/framework/BlockDataContainer.py | 1 -
 1 file changed, 1 deletion(-)
(limited to 'Wrappers')

diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
index 20efbc3..afdf617 100755
--- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -98,7 +98,6 @@ class BlockDataContainer(object):
 a = el.is_compatible(other)
 else:
 a = el.shape == other.shape
- print ("current element" , el.shape, "other ", other.shape, "same shape" , a)
 ret = ret and a
 return ret
 #return
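Editorial aside, not part of the patches above: a minimal usage sketch of the BlockDataContainer algebra documented in the docstrings just introduced. It assumes the ImageGeometry/BlockGeometry allocation API that appears in the test snippets later in this series; the optional out argument writes the result into an existing container instead of returning a new one.

# sketch only, illustrative rather than repository code
from ccpi.framework import ImageGeometry, BlockGeometry

ig = ImageGeometry(4, 4)
bg = BlockGeometry(ig, ig)

x = bg.allocate('random_int')   # BlockDataContainer holding two image blocks
y = bg.allocate('random_int')

z = x.add(y)                    # returns a new BlockDataContainer
res = bg.allocate(0)            # pre-allocated placeholder
x.add(y, out=res)               # same result written into res, no new allocation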
self.get_item(0).shape == other.shape -- cgit v1.2.3 From 3a24350a3c3e617434885728c0afed8c1891d3c4 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 15:57:58 +0100 Subject: use PDHG class --- Wrappers/Python/wip/pdhg_TV_denoising.py | 52 +++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 11 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index f276b46..9bd5221 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -93,28 +93,58 @@ tau = 1/(sigma*normK**2) opt = {'niter':1000} opt1 = {'niter':1000, 'memopt': True} -t1 = timer() -res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -print(timer()-t1) +#t1 = timer() +#res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +#print(timer()-t1) +# +#print("with memopt \n") +# +#t2 = timer() +#res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +#print(timer()-t2) + +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 2000 +pdhg.update_objective_interval = 100 + + +pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +pdhgo.max_iteration = 2000 +pdhgo.update_objective_interval = 100 + +steps = [timer()] +pdhgo.run(2000) +steps.append(timer()) +t1 = dt(steps) + +pdhg.run(2000) +steps.append(timer()) +t2 = dt(steps) -print("with memopt \n") +print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +res = pdhg.get_output() +res1 = pdhgo.get_output() + +diff = (res-res1) +print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) +print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) -t2 = timer() -res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) -print(timer()-t2) plt.figure(figsize=(5,5)) +plt.subplot(1,3,1) plt.imshow(res.as_array()) plt.colorbar() -plt.show() +#plt.show() -plt.figure(figsize=(5,5)) +#plt.figure(figsize=(5,5)) +plt.subplot(1,3,2) plt.imshow(res1.as_array()) plt.colorbar() -plt.show() +#plt.show() -plt.figure(figsize=(5,5)) +#plt.figure(figsize=(5,5)) +plt.subplot(1,3,3) plt.imshow(np.abs(res1.as_array()-res.as_array())) plt.colorbar() plt.show() -- cgit v1.2.3 From 4c88537805e864b1d6fdf2d40a9d147bf72bcbe3 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 12 Apr 2019 16:00:51 +0100 Subject: fix memopt and docstrings --- .../Python/ccpi/framework/BlockDataContainer.py | 6 +- .../Python/ccpi/optimisation/functions/L1Norm.py | 141 +++++++++++---------- .../ccpi/optimisation/functions/L2NormSquared.py | 23 ++-- .../ccpi/optimisation/functions/MixedL21Norm.py | 5 +- .../ccpi/optimisation/functions/ScaledFunction.py | 3 +- 5 files changed, 95 insertions(+), 83 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 529a1ce..75ee4b2 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -97,7 +97,7 @@ class BlockDataContainer(object): a = el.is_compatible(other) else: a = el.shape == other.shape - print ("current element" , el.shape, "other ", other.shape, "same shape" , a) +# print ("current element" , el.shape, "other ", other.shape, "same shape" , a) ret = ret and a return ret #return self.get_item(0).shape == 
other.shape @@ -468,3 +468,7 @@ class BlockDataContainer(object): '''Inline truedivision''' return self.__idiv__(other) + + + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py index 163eefa..4e53f2c 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py @@ -16,11 +16,82 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -Created on Wed Mar 6 19:42:34 2019 -@author: evangelos -""" + +from ccpi.optimisation.functions import Function +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.optimisation.operators import ShrinkageOperator + + +class L1Norm(Function): + + ''' + + Class: L1Norm + + Cases: a) f(x) = ||x||_{1} + + b) f(x) = ||x - b||_{1} + + ''' + + def __init__(self, **kwargs): + + super(L1Norm, self).__init__() + self.b = kwargs.get('b',None) + + def __call__(self, x): + + ''' Evaluate L1Norm at x: f(x) ''' + + y = x + if self.b is not None: + y = x - self.b + return y.abs().sum() + + def gradient(self,x): + #TODO implement subgradient??? + return ValueError('Not Differentiable') + + def convex_conjugate(self,x): + #TODO implement Indicator infty??? + + y = 0 + if self.b is not None: + y = 0 + (self.b * x).sum() + return y + + def proximal(self, x, tau, out=None): + + # TODO implement shrinkage operator, we will need it later e.g SplitBregman + + if out is None: + if self.b is not None: + return self.b + ShrinkageOperator.__call__(self, x - self.b, tau) + else: + return ShrinkageOperator.__call__(self, x, tau) + else: + if self.b is not None: + out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau)) + else: + out.fill(ShrinkageOperator.__call__(self, x, tau)) + + def proximal_conjugate(self, x, tau, out=None): + + if out is None: + if self.b is not None: + return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)) + else: + return x.divide(x.abs().maximum(1.0)) + else: + if self.b is not None: + out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))) + else: + out.fill(x.divide(x.abs().maximum(1.0)) ) + + def __rmul__(self, scalar): + return ScaledFunction(self, scalar) + #import numpy as np ##from ccpi.optimisation.funcs import Function @@ -92,67 +163,7 @@ Created on Wed Mar 6 19:42:34 2019 # ############################################################################### -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.optimisation.operators import ShrinkageOperator - - -class L1Norm(Function): - - def __init__(self, **kwargs): - - super(L1Norm, self).__init__() - self.b = kwargs.get('b',None) - - def __call__(self, x): - - y = x - if self.b is not None: - y = x - self.b - return y.abs().sum() - - def gradient(self,x): - #TODO implement subgradient??? - return ValueError('Not Differentiable') - - def convex_conjugate(self,x): - #TODO implement Indicator infty??? 
- - y = 0 - if self.b is not None: - y = 0 + (self.b * x).sum() - return y - - def proximal(self, x, tau, out=None): - - # TODO implement shrinkage operator, we will need it later e.g SplitBregman - - if out is None: - if self.b is not None: - return self.b + ShrinkageOperator.__call__(self, x - self.b, tau) - else: - return ShrinkageOperator.__call__(self, x, tau) - else: - if self.b is not None: - out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau)) - else: - out.fill(ShrinkageOperator.__call__(self, x, tau)) - - def proximal_conjugate(self, x, tau, out=None): - - if out is None: - if self.b is not None: - return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)) - else: - return x.divide(x.abs().maximum(1.0)) - else: - if self.b is not None: - out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))) - else: - out.fill(x.divide(x.abs().maximum(1.0)) ) - - def __rmul__(self, scalar): - return ScaledFunction(self, scalar) + diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 903dafa..7397cfb 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -17,19 +17,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy from ccpi.optimisation.functions import Function from ccpi.optimisation.functions.ScaledFunction import ScaledFunction class L2NormSquared(Function): - ''' - - Class: L2NormSquared - - Cases: a) f(x) = ||x||^{2} + ''' + + Cases: a) f(x) = \|x\|^{2}_{2} - b) f(x) = ||x - b||^{2}, b + b) f(x) = ||x - b||^{2}_{2} ''' @@ -50,9 +47,7 @@ class L2NormSquared(Function): except AttributeError as ae: # added for compatibility with SIRF return (y.norm()**2) - - - + def gradient(self, x, out=None): ''' Evaluate gradient of L2NormSquared at x: f'(x) ''' @@ -127,11 +122,10 @@ class L2NormSquared(Function): def __rmul__(self, scalar): - ''' Allows multiplication of L2NormSquared with a scalar + ''' Multiplication of L2NormSquared with a scalar Returns: ScaledFunction - - + ''' return ScaledFunction(self, scalar) @@ -139,7 +133,8 @@ class L2NormSquared(Function): if __name__ == '__main__': - + from ccpi.framework import ImageGeometry + import numpy # TESTS for L2 and scalar * L2 M, N, K = 2,3,5 diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 24c47f4..c0e8a6a 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -99,11 +99,12 @@ class MixedL21Norm(Function): res = BlockDataContainer(*frac) return res else: + + res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) res = res1.sqrt().maximum(1.0) x.divide(res, out=out) - #for i,el in enumerate(x.containers): - # el.divide(res, out=out.get_item(i)) + def __rmul__(self, scalar): return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 464b944..3fbb858 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -61,7 +61,8 @@ class ScaledFunction(object): if out is None: return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) else: - 
out.fill(self.scalar*self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out) + out *= self.scalar def grad(self, x): '''Alias of gradient(x,None)''' -- cgit v1.2.3 From 76906f3a5941b45441ee0209605d73ea6fb445f2 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Fri, 12 Apr 2019 16:01:12 +0100 Subject: fix memopt and docstrings --- Wrappers/Python/wip/pdhg_TV_denoising.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index f276b46..eedeeb8 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -25,8 +25,6 @@ def dt(steps): #%% - -# ############################################################################ # Create phantom for TV denoising N = 200 @@ -90,8 +88,8 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -opt = {'niter':1000} -opt1 = {'niter':1000, 'memopt': True} +opt = {'niter':2000} +opt1 = {'niter':2000, 'memopt': True} t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) @@ -114,10 +112,15 @@ plt.colorbar() plt.show() +diff = np.abs(res1.as_array()-res.as_array()) plt.figure(figsize=(5,5)) -plt.imshow(np.abs(res1.as_array()-res.as_array())) +plt.imshow(diff) plt.colorbar() plt.show() + +diff = (res-res1) +print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) +print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) #======= ## opt = {'niter':2000, 'memopt': True} # -- cgit v1.2.3 From 18ec4ecfe286d04868ddcd2febd94affd77e3a57 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 16:12:50 +0100 Subject: added test for mixedL21Norm closes #231 --- Wrappers/Python/test/test_functions.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index bc1f034..22721fa 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -323,13 +323,15 @@ class TestFunction(unittest.TestCase): # Define no scale and scaled f_no_scaled = MixedL21Norm() - #f_scaled = 0.5 * MixedL21Norm() + f_scaled = 1 * MixedL21Norm() # call - # a1 = f_no_scaled(U) - # a2 = f_scaled(U) - # self.assertBlockDataContainerEqual(a1,a2) + a1 = f_no_scaled(U) + a2 = f_scaled(U) + self.assertAlmostEqual(a1,a2) + + tmp = [ el**2 for el in U.containers ] self.assertBlockDataContainerEqual(BlockDataContainer(*tmp), U.power(2)) -- cgit v1.2.3 From d1e26ec31df5a2b269e021e4a2c039e0e265a353 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 12 Apr 2019 17:28:17 +0100 Subject: adds exception for incompatible geometry/array --- Wrappers/Python/ccpi/framework/framework.py | 227 ++++++++++++++++------------ Wrappers/Python/test/test_DataContainer.py | 16 ++ 2 files changed, 143 insertions(+), 100 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index af4139b..7874813 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -772,61 +772,18 @@ class DataContainer(object): class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' __container_priority__ = 1 + + def __init__(self, array = None, deep_copy=False, 
dimension_labels=None, **kwargs): - self.geometry = None + self.geometry = kwargs.get('geometry', None) if array is None: - if 'geometry' in kwargs.keys(): - geometry = kwargs['geometry'] - self.geometry = geometry - channels = geometry.channels - horiz_x = geometry.voxel_num_x - horiz_y = geometry.voxel_num_y - vert = 1 if geometry.voxel_num_z is None\ - else geometry.voxel_num_z # this should be 1 for 2D - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (channels , horiz_y, horiz_x) - dim_labels = [ImageGeometry.CHANNEL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - if vert > 1: - shape = (vert, horiz_y, horiz_x) - dim_labels = [ImageGeometry.VERTICAL, - ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - else: - shape = (horiz_y, horiz_x) - dim_labels = [ImageGeometry.HORIZONTAL_Y, - ImageGeometry.HORIZONTAL_X] - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == ImageGeometry.CHANNEL: - shape.append(channels) - elif dim == ImageGeometry.HORIZONTAL_Y: - shape.append(horiz_y) - elif dim == ImageGeometry.VERTICAL: - shape.append(vert) - elif dim == ImageGeometry.HORIZONTAL_X: - shape.append(horiz_x) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes'.format( - len(dimension_labels) - len(shape))) - shape = tuple(shape) + if self.geometry is not None: + shape, dimension_labels = self.get_shape_labels(self.geometry) array = numpy.zeros( shape , dtype=numpy.float32) super(ImageData, self).__init__(array, deep_copy, @@ -836,6 +793,11 @@ class ImageData(DataContainer): raise ValueError('Please pass either a DataContainer, ' +\ 'a numpy array or a geometry') else: + if self.geometry is not None: + shape, labels = self.get_shape_labels(self.geometry, dimension_labels) + if array.shape != shape: + raise ValueError('Shape mismatch {} {}'.format(shape, array.shape)) + if issubclass(type(array) , DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ @@ -890,11 +852,62 @@ class ImageData(DataContainer): #out.geometry = self.recalculate_geometry(dimensions , **kw) out.geometry = self.geometry return out - + + def get_shape_labels(self, geometry, dimension_labels=None): + channels = geometry.channels + horiz_x = geometry.voxel_num_x + horiz_y = geometry.voxel_num_y + vert = 1 if geometry.voxel_num_z is None\ + else geometry.voxel_num_z # this should be 1 for 2D + if dimension_labels is None: + if channels > 1: + if vert > 1: + shape = (channels, vert, horiz_y, horiz_x) + dim_labels = [ImageGeometry.CHANNEL, + ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] + else: + shape = (channels , horiz_y, horiz_x) + dim_labels = [ImageGeometry.CHANNEL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] + else: + if vert > 1: + shape = (vert, horiz_y, horiz_x) + dim_labels = [ImageGeometry.VERTICAL, + ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] + else: + shape = (horiz_y, horiz_x) + dim_labels = [ImageGeometry.HORIZONTAL_Y, + ImageGeometry.HORIZONTAL_X] + dimension_labels = dim_labels + else: + shape = [] + for i in range(len(dimension_labels)): + dim = dimension_labels[i] + if dim == ImageGeometry.CHANNEL: + shape.append(channels) + elif dim == ImageGeometry.HORIZONTAL_Y: + 
shape.append(horiz_y) + elif dim == ImageGeometry.VERTICAL: + shape.append(vert) + elif dim == ImageGeometry.HORIZONTAL_X: + shape.append(horiz_x) + if len(shape) != len(dimension_labels): + raise ValueError('Missing {0} axes {1} shape {2}'.format( + len(dimension_labels) - len(shape), dimension_labels, shape)) + shape = tuple(shape) + + return (shape, dimension_labels) + class AcquisitionData(DataContainer): '''DataContainer for holding 2D or 3D sinogram''' __container_priority__ = 1 + + def __init__(self, array = None, deep_copy=True, @@ -905,63 +918,20 @@ class AcquisitionData(DataContainer): if 'geometry' in kwargs.keys(): geometry = kwargs['geometry'] self.geometry = geometry - channels = geometry.channels - horiz = geometry.pixel_num_h - vert = geometry.pixel_num_v - angles = geometry.angles - num_of_angles = numpy.shape(angles)[0] - if dimension_labels is None: - if channels > 1: - if vert > 1: - shape = (channels, num_of_angles , vert, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL] - else: - shape = (channels , num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.CHANNEL, - AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL] - else: - if vert > 1: - shape = (num_of_angles, vert, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.VERTICAL, - AcquisitionGeometry.HORIZONTAL - ] - else: - shape = (num_of_angles, horiz) - dim_labels = [AcquisitionGeometry.ANGLE, - AcquisitionGeometry.HORIZONTAL - ] - - dimension_labels = dim_labels - else: - shape = [] - for dim in dimension_labels: - if dim == AcquisitionGeometry.CHANNEL: - shape.append(channels) - elif dim == AcquisitionGeometry.ANGLE: - shape.append(num_of_angles) - elif dim == AcquisitionGeometry.VERTICAL: - shape.append(vert) - elif dim == AcquisitionGeometry.HORIZONTAL: - shape.append(horiz) - if len(shape) != len(dimension_labels): - raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ - .format( - len(dimension_labels) - len(shape), - dimension_labels, shape) - ) - shape = tuple(shape) + shape, dimension_labels = self.get_shape_labels(geometry, dimension_labels) + array = numpy.zeros( shape , dtype=numpy.float32) super(AcquisitionData, self).__init__(array, deep_copy, dimension_labels, **kwargs) else: - + if self.geometry is not None: + shape, labels = self.get_shape_labels(self.geometry, dimension_labels) + print('Shape mismatch {} {}'.format(shape, array.shape)) + if array.shape != shape: + raise ValueError('Shape mismatch {} {}'.format(shape, array.shape)) + if issubclass(type(array) ,DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ @@ -995,6 +965,63 @@ class AcquisitionData(DataContainer): super(AcquisitionData, self).__init__(array, deep_copy, dimension_labels, **kwargs) + def get_shape_labels(self, geometry, dimension_labels=None): + channels = geometry.channels + horiz = geometry.pixel_num_h + vert = geometry.pixel_num_v + angles = geometry.angles + num_of_angles = numpy.shape(angles)[0] + + if dimension_labels is None: + if channels > 1: + if vert > 1: + shape = (channels, num_of_angles , vert, horiz) + dim_labels = [AcquisitionGeometry.CHANNEL, + AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL] + else: + shape = (channels , num_of_angles, horiz) + dim_labels = [AcquisitionGeometry.CHANNEL, + AcquisitionGeometry.ANGLE, + AcquisitionGeometry.HORIZONTAL] + else: + if vert 
> 1: + shape = (num_of_angles, vert, horiz) + dim_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL + ] + else: + shape = (num_of_angles, horiz) + dim_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.HORIZONTAL + ] + + dimension_labels = dim_labels + else: + shape = [] + for i in range(len(dimension_labels)): + dim = dimension_labels[i] + + if dim == AcquisitionGeometry.CHANNEL: + shape.append(channels) + elif dim == AcquisitionGeometry.ANGLE: + shape.append(num_of_angles) + elif dim == AcquisitionGeometry.VERTICAL: + shape.append(vert) + elif dim == AcquisitionGeometry.HORIZONTAL: + shape.append(horiz) + if len(shape) != len(dimension_labels): + raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ + .format( + len(dimension_labels) - len(shape), + dimension_labels, shape) + ) + shape = tuple(shape) + return (shape, dimension_labels) + + class DataProcessor(object): diff --git a/Wrappers/Python/test/test_DataContainer.py b/Wrappers/Python/test/test_DataContainer.py index 8edfd8b..40cd244 100755 --- a/Wrappers/Python/test/test_DataContainer.py +++ b/Wrappers/Python/test/test_DataContainer.py @@ -494,6 +494,14 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[0], image.dimension_labels[0]) self.assertEqual(order[1], image.dimension_labels[1]) self.assertEqual(order[2], image.dimension_labels[2]) + + ig = ImageGeometry(2,3,2) + try: + z = ImageData(numpy.random.randint(10, size=(2,3)), geometry=ig) + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) #vgeometry.allocate('') def test_AcquisitionGeometry_allocate(self): @@ -525,6 +533,14 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(order[1], sino.dimension_labels[1]) self.assertEqual(order[2], sino.dimension_labels[2]) self.assertEqual(order[2], sino.dimension_labels[2]) + + + try: + z = AcquisitionData(numpy.random.randint(10, size=(2,3)), geometry=ageometry) + self.assertTrue(False) + except ValueError as ve: + print (ve) + self.assertTrue(True) def assertNumpyArrayEqual(self, first, second): res = True -- cgit v1.2.3 From c6b643e939b0c26e41fea4a86d81178af2481387 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:03:01 +0100 Subject: add pnorm method --- .../Python/ccpi/framework/BlockDataContainer.py | 46 +++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 75ee4b2..4655e1b 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -302,13 +302,26 @@ class BlockDataContainer(object): return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) ## reductions + def sum(self, *args, **kwargs): return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) + def squared_norm(self): y = numpy.asarray([el.squared_norm() for el in self.containers]) return y.sum() + def norm(self): - return numpy.sqrt(self.squared_norm()) + return numpy.sqrt(self.squared_norm()) + + def pnorm(self, p=2): + + if p==1: + return sum(self.abs()) + elif p==2: + return sum([el*el for el in self.containers]).sqrt() + else: + return ValueError('Not implemented') + def copy(self): '''alias of clone''' return self.clone() @@ -467,7 +480,38 @@ class BlockDataContainer(object): def __itruediv__(self, other): '''Inline truedivision''' return 
self.__idiv__(other) + + + + + +if __name__ == '__main__': + + from ccpi.framework import ImageGeometry, BlockGeometry + import numpy + + N, M = 2, 3 + ig = ImageGeometry(N, M) + BG = BlockGeometry(ig, ig) + + U = BG.allocate('random_int') + + + print ("test sum BDC " ) + w = U[0].as_array() + U[1].as_array() + w1 = sum(U).as_array() + numpy.testing.assert_array_equal(w, w1) + + print ("test sum BDC " ) + z = numpy.sqrt(U[0].as_array()**2 + U[1].as_array()**2) + z1 = sum(U**2).sqrt().as_array() + numpy.testing.assert_array_equal(z, z1) + + + z2 = U.pnorm(2) + + -- cgit v1.2.3 From 0c0c274a4566dfa46bac56d61dc59d9c97dc8dbc Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:03:53 +0100 Subject: add docstrings and fix pnorm --- .../ccpi/optimisation/functions/BlockFunction.py | 63 ++++++++++++++------ .../ccpi/optimisation/functions/MixedL21Norm.py | 67 ++++++++++++---------- .../ccpi/optimisation/functions/ScaledFunction.py | 34 ++++++----- 3 files changed, 98 insertions(+), 66 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 8cce290..bf627a5 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -6,36 +6,35 @@ Created on Fri Mar 8 10:01:31 2019 @author: evangelos """ -import numpy as np - from ccpi.optimisation.functions import Function from ccpi.framework import BlockDataContainer from numbers import Number class BlockFunction(Function): - '''A Block vector of Functions + '''BlockFunction acts as a separable sum function, i.e., - .. math:: - - f = [f_1,f_2,f_3] - f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3) + f = [f_1,...,f_n] + + f([x_1,...,x_n]) = f_1(x_1) + .... 
+ f_n(x_n) ''' def __init__(self, *functions): - '''Creator''' + self.functions = functions self.length = len(self.functions) super(BlockFunction, self).__init__() def __call__(self, x): - '''evaluates the BlockFunction on the BlockDataContainer + + '''Evaluates the BlockFunction at a BlockDataContainer x :param: x (BlockDataContainer): must have as many rows as self.length returns sum(f_i(x_i)) ''' + if self.length != x.shape[0]: raise ValueError('BlockFunction and BlockDataContainer have incompatible size') t = 0 @@ -44,7 +43,12 @@ class BlockFunction(Function): return t def convex_conjugate(self, x): - '''Convex_conjugate does not take into account the BlockOperator''' + + ''' Evaluate convex conjugate of BlockFunction at x + + returns sum(f_i^{*}(x_i)) + + ''' t = 0 for i in range(x.shape[0]): t += self.functions[i].convex_conjugate(x.get_item(i)) @@ -52,7 +56,13 @@ class BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): - '''proximal_conjugate does not take into account the BlockOperator''' + + ''' Evaluate Proximal Operator of tau * f(\cdot) at x + + prox_{tau*f}(x) = sum_{i} prox_{tau*f_{i}}(x_{i}) + + + ''' if out is not None: if isinstance(tau, Number): @@ -76,7 +86,14 @@ class BlockFunction(Function): def proximal(self, x, tau, out = None): - '''proximal does not take into account the BlockOperator''' + + ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x + + prox_{tau*f^{*}}(x) = sum_{i} prox_{tau*f^{*}_{i}}(x_{i}) + + + ''' + out = [None]*self.length if isinstance(tau, Number): for i in range(self.length): @@ -88,8 +105,19 @@ class BlockFunction(Function): return BlockDataContainer(*out) def gradient(self,x, out=None): - '''FIXME: gradient returns pass''' - pass + + ''' Evaluate gradient of f at x: f'(x) + + returns: BlockDataContainer [f_{1}'(x_{1}), ... , f_{n}'(x_{n})] + + ''' + + out = [None]*self.length + for i in range(self.length): + out[i] = self.functions[i].gradient(x.get_item(i)) + + return BlockDataContainer(*out) + if __name__ == '__main__': @@ -100,6 +128,7 @@ if __name__ == '__main__': from ccpi.framework import ImageGeometry, BlockGeometry from ccpi.optimisation.operators import Gradient, Identity, BlockOperator import numpy + import numpy as np ig = ImageGeometry(M, N) @@ -131,11 +160,7 @@ if __name__ == '__main__': numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \ res_out[1].as_array(), decimal=4) - - - - - + ########################################################################## diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index c0e8a6a..f524c5f 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -17,47 +17,48 @@ # See the License for the specific language governing permissions and # limitations under the License. 
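Editorial aside, not part of the patch: a small sketch of the separable-sum behaviour documented for BlockFunction above. It reuses the BlockGeometry and L2NormSquared API shown elsewhere in this series; the assertion is illustrative, not repository test code.

# sketch only: BlockFunction evaluates as f([x1, x2]) = f1(x1) + f2(x2)
from ccpi.framework import ImageGeometry, BlockGeometry
from ccpi.optimisation.functions import BlockFunction, L2NormSquared

ig = ImageGeometry(4, 4)
bg = BlockGeometry(ig, ig)
x = bg.allocate('random_int')

f1 = L2NormSquared()
f2 = 0.5 * L2NormSquared()
f = BlockFunction(f1, f2)

# the separable sum equals the sum of the blockwise evaluations
assert abs(f(x) - (f1(x.get_item(0)) + f2(x.get_item(1)))) < 1e-6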
-import numpy as np from ccpi.optimisation.functions import Function, ScaledFunction -from ccpi.framework import DataContainer, ImageData, \ - ImageGeometry, BlockDataContainer +from ccpi.framework import BlockDataContainer + import functools -############################ mixed_L1,2NORM FUNCTIONS ##################### class MixedL21Norm(Function): + + ''' + f(x) = ||x||_{2,1} = \sum |x|_{2} + ''' + def __init__(self, **kwargs): super(MixedL21Norm, self).__init__() self.SymTensor = kwargs.get('SymTensor',False) - def __call__(self, x, out=None): + def __call__(self, x): - ''' Evaluates L1,2Norm at point x + ''' Evaluates L2,1Norm at point x :param: x is a BlockDataContainer ''' if not isinstance(x, BlockDataContainer): - raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x))) + raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x))) + if self.SymTensor: + #TODO fix this case param = [1]*x.shape[0] param[-1] = 2 tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])] - res = sum(tmp).sqrt().sum() - else: - -# tmp = [ x[i]**2 for i in range(x.shape[0])] - tmp = [ el**2 for el in x.containers ] + res = sum(tmp).sqrt().sum() -# print(x.containers) -# print(tmp) -# print(type(sum(tmp))) -# print(type(tmp)) - res = sum(tmp).sqrt().sum() -# print(res) - return res + else: + + #tmp = [ el**2 for el in x.containers ] + #res = sum(tmp).sqrt().sum() + res = x.pnorm() + + return res def gradient(self, x, out=None): return ValueError('Not Differentiable') @@ -93,20 +94,28 @@ class MixedL21Norm(Function): else: if out is None: - tmp = [ el*el for el in x.containers] - res = sum(tmp).sqrt().maximum(1.0) - frac = [el/res for el in x.containers] - res = BlockDataContainer(*frac) - return res +# tmp = [ el*el for el in x.containers] +# res = sum(tmp).sqrt().maximum(1.0) +# frac = [el/res for el in x.containers] +# res = BlockDataContainer(*frac) +# return res + + return x.divide(x.pnorm().maximum(1.0)) else: - - - res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) - res = res1.sqrt().maximum(1.0) - x.divide(res, out=out) + +# res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) +# res = res1.sqrt().maximum(1.0) +# x.divide(res, out=out) + x.divide(x.pnorm().maximum(1.0), out=out) def __rmul__(self, scalar): + + ''' Multiplication of L2NormSquared with a scalar + + Returns: ScaledFunction + + ''' return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 3fbb858..cb85249 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -20,6 +20,7 @@ from numbers import Number import numpy class ScaledFunction(object): + '''ScaledFunction A class to represent the scalar multiplication of an Function with a scalar. 
@@ -48,12 +49,22 @@ class ScaledFunction(object): def convex_conjugate(self, x): '''returns the convex_conjugate of the scaled function ''' - # if out is None: - # return self.scalar * self.function.convex_conjugate(x/self.scalar) - # else: - # out.fill(self.function.convex_conjugate(x/self.scalar)) - # out *= self.scalar return self.scalar * self.function.convex_conjugate(x/self.scalar) + + def gradient(self, x, out=None): + '''Returns the gradient of the function at x, if the function is differentiable''' + if out is None: + return self.scalar * self.function.gradient(x) + else: + out.fill( self.scalar * self.function.gradient(x) ) + + def proximal(self, x, tau, out=None): + '''This returns the proximal operator for the function at x, tau + ''' + if out is None: + return self.function.proximal(x, tau*self.scalar) + else: + out.fill( self.function.proximal(x, tau*self.scalar) ) def proximal_conjugate(self, x, tau, out = None): '''This returns the proximal operator for the function at x, tau @@ -76,20 +87,7 @@ class ScaledFunction(object): versions of the CIL. Use proximal instead''', DeprecationWarning) return self.proximal(x, out=None) - def gradient(self, x, out=None): - '''Returns the gradient of the function at x, if the function is differentiable''' - if out is None: - return self.scalar * self.function.gradient(x) - else: - out.fill( self.scalar * self.function.gradient(x) ) - def proximal(self, x, tau, out=None): - '''This returns the proximal operator for the function at x, tau - ''' - if out is None: - return self.function.proximal(x, tau*self.scalar) - else: - out.fill( self.function.proximal(x, tau*self.scalar) ) if __name__ == '__main__': -- cgit v1.2.3 From b59b6e08ca9f6a553007de0be7a764e1c20a3831 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:04:16 +0100 Subject: change to ZeroFunction --- .../Python/ccpi/optimisation/functions/ZeroFun.py | 63 ---------------------- 1 file changed, 63 deletions(-) delete mode 100644 Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py deleted file mode 100644 index 6d21acb..0000000 --- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# This work is part of the Core Imaging Library developed by -# Visual Analytics and Imaging System Group of the Science Technology -# Facilities Council, STFC - -# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import numpy as np -#from ccpi.optimisation.funcs import Function -from ccpi.optimisation.functions import Function -from ccpi.framework import DataContainer, ImageData -from ccpi.framework import BlockDataContainer - -class ZeroFun(Function): - - def __init__(self): - super(ZeroFun, self).__init__() - - def __call__(self,x): - return 0 - - def convex_conjugate(self, x): - ''' This is the support function sup which in fact is the - indicator function for the set = {0} - So 0 if x=0, or inf if x neq 0 - ''' - - if x.shape[0]==1: - return x.maximum(0).sum() - else: - if isinstance(x, BlockDataContainer): - return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() - else: - return x.maximum(0).sum() + x.maximum(0).sum() - - def proximal(self, x, tau, out=None): - if out is None: - return x.copy() - else: - out.fill(x) - - def proximal_conjugate(self, x, tau, out = None): - if out is None: - return 0 - else: - return 0 - - def domain_geometry(self): - pass - def range_geometry(self): - pass \ No newline at end of file -- cgit v1.2.3 From 580b6224747325f6540330d469ae8a2aefc9c5e3 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:04:32 +0100 Subject: add ZeroFunction --- Wrappers/Python/ccpi/optimisation/functions/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py index 65e8848..a82ee3e 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py +++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from .Function import Function -from .ZeroFun import ZeroFun +from .ZeroFunction import ZeroFunction from .L1Norm import L1Norm from .L2NormSquared import L2NormSquared from .ScaledFunction import ScaledFunction -- cgit v1.2.3 From 8c6e2906397d9543756abf9b69088a94c980c216 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:05:07 +0100 Subject: fix pdhg examples --- Wrappers/Python/wip/pdhg_TV_denoising.py | 4 ++-- Wrappers/Python/wip/pdhg_TV_tomography2D.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index eedeeb8..2072ea3 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -14,7 +14,7 @@ import matplotlib.pyplot as plt from ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient -from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ +from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction from skimage.util import random_noise @@ -66,7 +66,7 @@ if method == '0': f2 = 0.5 * L2NormSquared(b = noisy_data) f = BlockFunction(f1, f2) - g = ZeroFun() + g = ZeroFunction() else: diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index 159f2ea..e0868f7 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -16,7 +16,7 @@ import matplotlib.pyplot as plt from ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient -from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ +from ccpi.optimisation.functions import 
ZeroFunction, L2NormSquared, \ MixedL21Norm, BlockFunction, ScaledFunction from ccpi.astra.ops import AstraProjectorSimple @@ -90,7 +90,7 @@ operator = BlockOperator(op1, op2, shape=(2,1) ) alpha = 50 f = BlockFunction( alpha * MixedL21Norm(), \ 0.5 * L2NormSquared(b = noisy_data) ) -g = ZeroFun() +g = ZeroFunction() # Compute operator Norm normK = operator.norm() -- cgit v1.2.3 From 47424426aa54ac629c1cb70efcfe9ef3c23f9ddf Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:05:34 +0100 Subject: change to ZeroFunction --- .../ccpi/optimisation/functions/ZeroFunction.py | 61 ++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py new file mode 100644 index 0000000..cce519a --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# This work is part of the Core Imaging Library developed by +# Visual Analytics and Imaging System Group of the Science Technology +# Facilities Council, STFC + +# Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ccpi.optimisation.functions import Function +from ccpi.framework import BlockDataContainer + +class ZeroFunction(Function): + + ''' ZeroFunction: f(x) = 0 + + + ''' + + def __init__(self): + super(ZeroFunction, self).__init__() + + def __call__(self,x): + return 0 + + def convex_conjugate(self, x): + + ''' This is the support function sup which in fact is the + indicator function for the set = {0} + So 0 if x=0, or inf if x neq 0 + ''' + + if x.shape[0]==1: + return x.maximum(0).sum() + else: + if isinstance(x, BlockDataContainer): + return x.get_item(0).maximum(0).sum() + x.get_item(1).maximum(0).sum() + else: + return x.maximum(0).sum() + x.maximum(0).sum() + + def proximal(self, x, tau, out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def proximal_conjugate(self, x, tau, out = None): + if out is None: + return 0 + else: + return 0 -- cgit v1.2.3 From a48f4a3e132f18e34cd47988e2e117090f734999 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 20:06:03 +0100 Subject: check algebra FunctionComposition --- .../functions/FunctionOperatorComposition.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py index 34b7e35..38bc458 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -6,32 +6,47 @@ Created on Fri Mar 8 09:55:36 2019 @author: evangelos """ -import numpy as np -#from ccpi.optimisation.funcs import Function from ccpi.optimisation.functions import Function from ccpi.optimisation.functions import ScaledFunction class FunctionOperatorComposition(Function): + ''' Function composition with Operator, i.e., f(Ax) + + A: operator + f: function + + ''' + def __init__(self, operator, function): + super(FunctionOperatorComposition, self).__init__() self.function = function self.operator = operator alpha = 1 + if isinstance (function, ScaledFunction): alpha = function.scalar self.L = 2 * alpha * operator.norm()**2 def __call__(self, x): + + ''' Evaluate FunctionOperatorComposition at x + + returns f(Ax) + + ''' return self.function(self.operator.direct(x)) + #TODO do not know if we need it def call_adjoint(self, x): return self.function(self.operator.adjoint(x)) + def convex_conjugate(self, x): ''' convex_conjugate does not take into account the Operator''' -- cgit v1.2.3 From 1eba627e28552985642b9eaf77ba43bf41191566 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Sun, 14 Apr 2019 22:17:46 +0100 Subject: fix test TV denoising --- Wrappers/Python/wip/pdhg_TV_denoising.py | 1 + 1 file changed, 1 insertion(+) (limited to 'Wrappers') diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index 73fd57a..d885bca 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -138,6 +138,7 @@ plt.colorbar() plt.subplot(1,3,2) plt.imshow(res1.as_array()) plt.colorbar() + #plt.show() -- cgit v1.2.3 From 1a5da33eb2c7bfde2224d634cb34d17b18d7cf72 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 15 Apr 2019 12:16:21 +0100 Subject: add test for allocate when passed a geometry closes #223 --- Wrappers/Python/ccpi/framework/framework.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'Wrappers') diff --git 
a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py index 7874813..7516447 100755 --- a/Wrappers/Python/ccpi/framework/framework.py +++ b/Wrappers/Python/ccpi/framework/framework.py @@ -913,7 +913,7 @@ class AcquisitionData(DataContainer): deep_copy=True, dimension_labels=None, **kwargs): - self.geometry = None + self.geometry = kwargs.get('geometry', None) if array is None: if 'geometry' in kwargs.keys(): geometry = kwargs['geometry'] @@ -928,7 +928,6 @@ class AcquisitionData(DataContainer): else: if self.geometry is not None: shape, labels = self.get_shape_labels(self.geometry, dimension_labels) - print('Shape mismatch {} {}'.format(shape, array.shape)) if array.shape != shape: raise ValueError('Shape mismatch {} {}'.format(shape, array.shape)) @@ -952,16 +951,18 @@ class AcquisitionData(DataContainer): if dimension_labels is None: if array.ndim == 4: - dimension_labels = ['channel' ,'angle' , 'vertical' , - 'horizontal'] + dimension_labels = [AcquisitionGeometry.CHANNEL, + AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL] elif array.ndim == 3: - dimension_labels = ['angle' , 'vertical' , - 'horizontal'] + dimension_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.VERTICAL, + AcquisitionGeometry.HORIZONTAL] else: - dimension_labels = ['angle' , - 'horizontal'] - - #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) + dimension_labels = [AcquisitionGeometry.ANGLE, + AcquisitionGeometry.HORIZONTAL] + super(AcquisitionData, self).__init__(array, deep_copy, dimension_labels, **kwargs) -- cgit v1.2.3 From bac56c8103bd70e6e343d4a1495df85b6491b62b Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Apr 2019 12:51:14 +0100 Subject: fix out, mixedL21Norm, add 3D denoising --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 60 +--- .../ccpi/optimisation/functions/L2NormSquared.py | 5 +- .../ccpi/optimisation/functions/MixedL21Norm.py | 25 +- Wrappers/Python/wip/pdhg_TV_denoising.py | 146 ++++++--- Wrappers/Python/wip/pdhg_TV_denoising3D.py | 360 +++++++++++++++++++++ Wrappers/Python/wip/pdhg_TV_tomography2D.py | 47 +-- 6 files changed, 497 insertions(+), 146 deletions(-) create mode 100644 Wrappers/Python/wip/pdhg_TV_denoising3D.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 439149c..5e92767 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -126,10 +126,6 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - if memopt: - print ("memopt") - else: - print("no memopt") x_old = operator.domain_geometry().allocate() y_old = operator.range_geometry().allocate() @@ -183,65 +179,13 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): g.proximal(x_tmp, tau, out = x) - xbar = x - x_old + x.subtract(x_old, out=xbar) xbar *= theta xbar += x - - + x_old.fill(x) y_old.fill(y) - -# pass -# -## # Gradient descent, Dual problem solution -## y_tmp = y_old + sigma * operator.direct(xbar) -# y_tmp = operator.direct(xbar) -# y_tmp *= sigma -# y_tmp +=y_old -# -# y = f.proximal_conjugate(y_tmp, sigma) -## f.proximal_conjugate(y_tmp, sigma, out = y) -# -# # Gradient ascent, Primal problem solution -## x_tmp = x_old - tau * 
operator.adjoint(y) -# -# x_tmp = operator.adjoint(y) -# x_tmp *=-tau -# x_tmp +=x_old -# -# x = g.proximal(x_tmp, tau) -## g.proximal(x_tmp, tau, out = x) -# -# #Update -## xbar = x + theta * (x - x_old) -# xbar = x - x_old -# xbar *= theta -# xbar += x -# -# x_old = x -# y_old = y -# -## operator.direct(xbar, out = y_tmp) -## y_tmp *= sigma -## y_tmp +=y_old -# if isinstance(f, FunctionOperatorComposition): -# p1 = f(x) + g(x) -# else: -# p1 = f(operator.direct(x)) + g(x) -# d1 = -(f.convex_conjugate(y) + g(-1*operator.adjoint(y))) -# pd1 = p1 - d1 - -# primal.append(p1) -# dual.append(d1) -# pdgap.append(pd1) - -# if i%100==0: -# print(p1, d1, pd1) -# if isinstance(f, FunctionOperatorComposition): -# p1 = f(x) + g(x) -# else: - t_end = time.time() diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 7397cfb..2d0a00a 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -116,9 +116,10 @@ class L2NormSquared(Function): return x/(1 + tau/2) else: if self.b is not None: - out.fill( (x - tau*self.b)/(1 + tau/2) ) + x.subtract(tau*self.b, out=out) + out.divide(1+tau/2, out=out) else: - out.fill( x/(1 + tau/2) ) + x.divide(1 + tau/2, out=out) def __rmul__(self, scalar): diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index f524c5f..3541bc2 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -94,19 +94,22 @@ class MixedL21Norm(Function): else: if out is None: -# tmp = [ el*el for el in x.containers] -# res = sum(tmp).sqrt().maximum(1.0) -# frac = [el/res for el in x.containers] -# res = BlockDataContainer(*frac) -# return res - - return x.divide(x.pnorm().maximum(1.0)) + tmp = [ el*el for el in x.containers] + res = sum(tmp).sqrt().maximum(1.0) + frac = [el/res for el in x.containers] + return BlockDataContainer(*frac) + + #TODO this is slow, why??? +# return x.divide(x.pnorm().maximum(1.0)) else: -# res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) -# res = res1.sqrt().maximum(1.0) -# x.divide(res, out=out) - x.divide(x.pnorm().maximum(1.0), out=out) + res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) + res = res1.sqrt().maximum(1.0) + x.divide(res, out=out) + +# x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out) + #TODO this is slow, why ??? 
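Editorial aside, not part of the patch: both branches of proximal_conjugate above implement the closed form of the proximal map of the convex conjugate of the mixed L2,1 norm. Since that conjugate is the indicator of the set where the pointwise Euclidean norm across the blocks is at most one, the proximal operator reduces, for any tau, to the projection y_i = x_i / max(1, sqrt(sum_j x_j**2)); the functools.reduce call builds exactly that pointwise norm before the maximum(1.0) and divide.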
+# x.divide(x.norm().maximum(1.0), out=out) def __rmul__(self, scalar): diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index d885bca..e142d94 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -27,7 +27,7 @@ def dt(steps): # Create phantom for TV denoising -N = 200 +N = 500 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 @@ -40,8 +40,8 @@ ag = ig n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) noisy_data = ImageData(n1) -#plt.imshow(noisy_data.as_array()) -#plt.show() +plt.imshow(noisy_data.as_array()) +plt.show() #%% @@ -82,7 +82,6 @@ else: # Compute operator Norm normK = operator.norm() -print ("normK", normK) # Primal & dual stepsizes sigma = 1 @@ -91,54 +90,113 @@ tau = 1/(sigma*normK**2) opt = {'niter':2000} opt1 = {'niter':2000, 'memopt': True} -#t1 = timer() -#res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -#print(timer()-t1) -# -#print("with memopt \n") -# -#t2 = timer() -#res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) -#print(timer()-t2) - -pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -pdhg.max_iteration = 2000 -pdhg.update_objective_interval = 100 - +t1 = timer() +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +t2 = timer() -pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) -pdhgo.max_iteration = 2000 -pdhgo.update_objective_interval = 100 -steps = [timer()] -pdhgo.run(2000) -steps.append(timer()) -t1 = dt(steps) - -pdhg.run(2000) -steps.append(timer()) -t2 = dt(steps) +t3 = timer() +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +t4 = timer() +# +print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3)) -print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) -res = pdhg.get_output() -res1 = pdhgo.get_output() +# +#%% +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 100 +## +#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +#pdhgo.max_iteration = 2000 +#pdhgo.update_objective_interval = 100 +## +#steps = [timer()] +#pdhgo.run(2000) +#steps.append(timer()) +#t1 = dt(steps) +## +#pdhg.run(2000) +#steps.append(timer()) +#t2 = dt(steps) +# +#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +#res = pdhg.get_output() +#res1 = pdhgo.get_output() -diff = (res-res1) -print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) -print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) +#%% +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((res1 - res).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() -plt.figure(figsize=(5,5)) -plt.subplot(1,3,1) -plt.imshow(res.as_array()) -plt.colorbar() +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhg.get_output().as_array()) +#plt.title('no memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhg.get_output() - res).abs().as_array()) 
+#plt.title('diff') +#plt.colorbar() #plt.show() - -#plt.figure(figsize=(5,5)) -plt.subplot(1,3,2) -plt.imshow(res1.as_array()) -plt.colorbar() +# +# +# +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhgo.get_output().as_array()) +#plt.title('memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhgo.get_output() - res1).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() + + + + +# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +# res = pdhg.get_output() +# res1 = pdhgo.get_output() +# +# diff = (res-res1) +# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) +# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) +# +# +# plt.figure(figsize=(5,5)) +# plt.subplot(1,3,1) +# plt.imshow(res.as_array()) +# plt.colorbar() +# #plt.show() +# +# #plt.figure(figsize=(5,5)) +# plt.subplot(1,3,2) +# plt.imshow(res1.as_array()) +# plt.colorbar() + #plt.show() diff --git a/Wrappers/Python/wip/pdhg_TV_denoising3D.py b/Wrappers/Python/wip/pdhg_TV_denoising3D.py new file mode 100644 index 0000000..06ecfa2 --- /dev/null +++ b/Wrappers/Python/wip/pdhg_TV_denoising3D.py @@ -0,0 +1,360 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction + +from skimage.util import random_noise + +from timeit import default_timer as timer +def dt(steps): + return steps[-1] - steps[-2] + +#%% + +# Create phantom for TV denoising + +import timeit +import os +from tomophantom import TomoP3D +import tomophantom + +print ("Building 3D phantom using TomoPhantom software") +tic=timeit.default_timer() +model = 13 # select a model number from the library +N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom) +path = os.path.dirname(tomophantom.__file__) +path_library3D = os.path.join(path, "Phantom3DLibrary.dat") +#This will generate a N_size x N_size x N_size phantom (3D) +phantom_tm = TomoP3D.Model(model, N_size, path_library3D) +#toc=timeit.default_timer() +#Run_time = toc - tic +#print("Phantom has been built in {} seconds".format(Run_time)) +# +#sliceSel = int(0.5*N_size) +##plt.gray() +#plt.figure() +#plt.subplot(131) +#plt.imshow(phantom_tm[sliceSel,:,:],vmin=0, vmax=1) +#plt.title('3D Phantom, axial view') +# +#plt.subplot(132) +#plt.imshow(phantom_tm[:,sliceSel,:],vmin=0, vmax=1) +#plt.title('3D Phantom, coronal view') +# +#plt.subplot(133) +#plt.imshow(phantom_tm[:,:,sliceSel],vmin=0, vmax=1) +#plt.title('3D Phantom, sagittal view') +#plt.show() + +#%% + +N = N_size +ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N, voxel_num_z=N) + +n1 = random_noise(phantom_tm, mode = 'gaussian', mean=0, var = 0.001, seed=10) +noisy_data = ImageData(n1) +#plt.imshow(noisy_data.as_array()[:,:,32]) + +#%% + +# Regularisation Parameter +alpha = 0.02 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '0' + +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig) + + # Form Composite Operator + 
operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions + + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) + f = BlockFunction(f1, f2) + + g = ZeroFunction() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = L2NormSquared(b = noisy_data) + + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() + +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) + +opt = {'niter':2000} +opt1 = {'niter':2000, 'memopt': True} + +#t1 = timer() +#res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +#t2 = timer() + + +t3 = timer() +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +t4 = timer() + +#import cProfile +#cProfile.run('res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) ') +### +print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3)) +# +## +##%% +# +#plt.figure(figsize=(10,10)) +#plt.subplot(311) +#plt.imshow(res1.as_array()[sliceSel,:,:]) +#plt.colorbar() +#plt.title('3D Phantom, axial view') +# +#plt.subplot(312) +#plt.imshow(res1.as_array()[:,sliceSel,:]) +#plt.colorbar() +#plt.title('3D Phantom, coronal view') +# +#plt.subplot(313) +#plt.imshow(res1.as_array()[:,:,sliceSel]) +#plt.colorbar() +#plt.title('3D Phantom, sagittal view') +#plt.show() +# +#plt.figure(figsize=(10,10)) +#plt.subplot(311) +#plt.imshow(res.as_array()[sliceSel,:,:]) +#plt.colorbar() +#plt.title('3D Phantom, axial view') +# +#plt.subplot(312) +#plt.imshow(res.as_array()[:,sliceSel,:]) +#plt.colorbar() +#plt.title('3D Phantom, coronal view') +# +#plt.subplot(313) +#plt.imshow(res.as_array()[:,:,sliceSel]) +#plt.colorbar() +#plt.title('3D Phantom, sagittal view') +#plt.show() +# +#diff = (res1 - res).abs() +# +#plt.figure(figsize=(10,10)) +#plt.subplot(311) +#plt.imshow(diff.as_array()[sliceSel,:,:]) +#plt.colorbar() +#plt.title('3D Phantom, axial view') +# +#plt.subplot(312) +#plt.imshow(diff.as_array()[:,sliceSel,:]) +#plt.colorbar() +#plt.title('3D Phantom, coronal view') +# +#plt.subplot(313) +#plt.imshow(diff.as_array()[:,:,sliceSel]) +#plt.colorbar() +#plt.title('3D Phantom, sagittal view') +#plt.show() +# +# +# +# +##%% +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 100 +#### +#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +#pdhgo.max_iteration = 2000 +#pdhgo.update_objective_interval = 100 +#### +#steps = [timer()] +#pdhgo.run(2000) +#steps.append(timer()) +#t1 = dt(steps) +## +#pdhg.run(2000) +#steps.append(timer()) +#t2 = dt(steps) +# +#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +#res = pdhg.get_output() +#res1 = pdhgo.get_output() + +#%% +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((res1 - res).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() + + +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhg.get_output().as_array()) 
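#
# The stepsize choice in this script, sigma = 1 and tau = 1/(sigma*normK**2),
# enforces the usual PDHG convergence condition sigma*tau*||K||^2 <= 1, with
# operator.norm() supplying an estimate of ||K||. A minimal power-iteration
# sketch of that norm estimate on a plain matrix (NumPy stand-in, not the
# BlockOperator API):
import numpy as np

def power_iteration_norm(A, n_iter=50, seed=0):
    # largest singular value of A = sqrt of the dominant eigenvalue of A^T A
    x = np.random.default_rng(seed).standard_normal(A.shape[1])
    for _ in range(n_iter):
        x = A.T @ (A @ x)
        x /= np.linalg.norm(x)
    return np.sqrt(np.linalg.norm(A.T @ (A @ x)))

A = np.random.default_rng(1).standard_normal((20, 10))
normK = power_iteration_norm(A)
sigma = 1.0
tau = 1.0 / (sigma * normK ** 2)
assert sigma * tau * normK ** 2 <= 1.0 + 1e-10
print(normK, np.linalg.norm(A, 2))   # the estimate should match the exact spectral norm
#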
+#plt.title('no memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhg.get_output() - res).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() +# +# +# +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhgo.get_output().as_array()) +#plt.title('memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhgo.get_output() - res1).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() + + + + + +# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +# res = pdhg.get_output() +# res1 = pdhgo.get_output() +# +# diff = (res-res1) +# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) +# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) +# +# +# plt.figure(figsize=(5,5)) +# plt.subplot(1,3,1) +# plt.imshow(res.as_array()) +# plt.colorbar() +# #plt.show() +# +# #plt.figure(figsize=(5,5)) +# plt.subplot(1,3,2) +# plt.imshow(res1.as_array()) +# plt.colorbar() + +#plt.show() + + + +#======= +## opt = {'niter':2000, 'memopt': True} +# +## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# +#>>>>>>> origin/pdhg_fix +# +# +## opt = {'niter':2000, 'memopt': False} +## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# +## plt.figure(figsize=(5,5)) +## plt.subplot(1,3,1) +## plt.imshow(res.as_array()) +## plt.title('memopt') +## plt.colorbar() +## plt.subplot(1,3,2) +## plt.imshow(res1.as_array()) +## plt.title('no memopt') +## plt.colorbar() +## plt.subplot(1,3,3) +## plt.imshow((res1 - res).abs().as_array()) +## plt.title('diff') +## plt.colorbar() +## plt.show() +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 100 +# +# +#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +#pdhgo.max_iteration = 2000 +#pdhgo.update_objective_interval = 100 +# +#steps = [timer()] +#pdhgo.run(200) +#steps.append(timer()) +#t1 = dt(steps) +# +#pdhg.run(200) +#steps.append(timer()) +#t2 = dt(steps) +# +#print ("Time difference {} {} {}".format(t1,t2,t2-t1)) +#sol = pdhg.get_output().as_array() +##sol = result.as_array() +## +#fig = plt.figure() +#plt.subplot(1,3,1) +#plt.imshow(noisy_data.as_array()) +#plt.colorbar() +#plt.subplot(1,3,2) +#plt.imshow(sol) +#plt.colorbar() +#plt.subplot(1,3,3) +#plt.imshow(pdhgo.get_output().as_array()) +#plt.colorbar() +# +#plt.show() +### +## +#### +##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +##plt.legend() +##plt.show() +# +# +##%% +## diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py index e0868f7..3fec34e 100644 --- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py +++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py @@ -56,7 +56,7 @@ detectors = 150 angles = np.linspace(0,np.pi,100) ag = AcquisitionGeometry('parallel','2D',angles, detectors) -Aop = AstraProjectorSimple(ig, ag, 'cpu') +Aop = AstraProjectorSimple(ig, ag, 'gpu') sin = Aop.direct(data) plt.imshow(sin.as_array()) @@ -113,43 +113,28 @@ else: sigma = 1 tau = 1/(sigma*normK**2) -#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -#pdhg.max_iteration = 5000 
-#pdhg.update_objective_interval = 250 -# -#pdhg.run(5000) - -opt = {'niter':300} -opt1 = {'niter':300, 'memopt': True} +# Compute operator Norm +normK = operator.norm() + +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) +opt = {'niter':2000} +opt1 = {'niter':2000, 'memopt': True} t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) - -print(timer()-t1) -plt.figure(figsize=(5,5)) -plt.imshow(res.as_array()) -plt.colorbar() -plt.show() - -#%% -print("with memopt \n") -# t2 = timer() + + +t3 = timer() res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) -#print(timer()-t2) -# -# -plt.figure(figsize=(5,5)) -plt.imshow(res1.as_array()) -plt.colorbar() -plt.show() +t4 = timer() # -#%% -plt.figure(figsize=(5,5)) -plt.imshow(np.abs(res1.as_array()-res.as_array())) -plt.colorbar() -plt.show() +print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3)) + + #%% #sol = pdhg.get_output().as_array() #fig = plt.figure() -- cgit v1.2.3 From f1e095e530c0e06007344fbd3f40bf4dcad9686c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 15 Apr 2019 14:20:57 +0100 Subject: update ZeroFun to ZeroFunction --- Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py | 6 +++--- Wrappers/Python/test/test_functions.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index 93ba178..064cb33 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -6,7 +6,7 @@ Created on Thu Feb 21 11:07:30 2019 """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun +from ccpi.optimisation.functions import ZeroFunction import numpy class FISTA(Algorithm): @@ -46,11 +46,11 @@ class FISTA(Algorithm): # default inputs if f is None: - self.f = ZeroFun() + self.f = ZeroFunction() else: self.f = f if g is None: - g = ZeroFun() + g = ZeroFunction() self.g = g else: self.g = g diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 22721fa..05bdd7a 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -26,7 +26,7 @@ from ccpi.optimisation.funcs import Norm2sq # from ccpi.optimisation.functions.L2NormSquared import SimpleL2NormSq, L2NormSq # from ccpi.optimisation.functions.L1Norm import SimpleL1Norm, L1Norm #from ccpi.optimisation.functions import mixed_L12Norm -from ccpi.optimisation.functions import ZeroFun +from ccpi.optimisation.functions import ZeroFunction from ccpi.optimisation.functions import FunctionOperatorComposition import unittest @@ -329,7 +329,7 @@ class TestFunction(unittest.TestCase): a1 = f_no_scaled(U) a2 = f_scaled(U) - self.assertAlmostEqual(a1,a2) + self.assertNumpyArrayAlmostEqual(a1.as_array(),a2.as_array()) tmp = [ el**2 for el in U.containers ] -- cgit v1.2.3 From 617f2e71dd34b3c1fe2997ffbaeefd7f030ec3aa Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 15 Apr 2019 15:11:30 +0100 Subject: fixed load method --- Wrappers/Python/ccpi/io/reader.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/io/reader.py b/Wrappers/Python/ccpi/io/reader.py index 856f5e0..07e3bf9 100644 --- a/Wrappers/Python/ccpi/io/reader.py +++ 
b/Wrappers/Python/ccpi/io/reader.py @@ -241,26 +241,37 @@ class NexusReader(object): pass dims = file[self.data_path].shape if ymin is None and ymax is None: - data = np.array(file[self.data_path]) + + try: + image_keys = self.get_image_keys() + print ("image_keys", image_keys) + projections = np.array(file[self.data_path]) + data = projections[image_keys==0] + except KeyError as ke: + print (ke) + data = np.array(file[self.data_path]) + else: + image_keys = self.get_image_keys() + print ("image_keys", image_keys) + projections = np.array(file[self.data_path])[image_keys==0] if ymin is None: ymin = 0 if ymax > dims[1]: raise ValueError('ymax out of range') - data = np.array(file[self.data_path][:,:ymax,:]) + data = projections[:,:ymax,:] elif ymax is None: ymax = dims[1] if ymin < 0: raise ValueError('ymin out of range') - data = np.array(file[self.data_path][:,ymin:,:]) + data = projections[:,ymin:,:] else: if ymax > dims[1]: raise ValueError('ymax out of range') if ymin < 0: raise ValueError('ymin out of range') - data = np.array(file[self.data_path] - [: , ymin:ymax , :] ) + data = projections[: , ymin:ymax , :] except: print("Error reading nexus file") -- cgit v1.2.3 From e0ec99b8a8a0e55a53531612da38c378790bbb60 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 15 Apr 2019 16:11:16 +0100 Subject: use new algorithm class --- Wrappers/Python/test/test_run_test.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_run_test.py b/Wrappers/Python/test/test_run_test.py index 8cef925..c698032 100755 --- a/Wrappers/Python/test/test_run_test.py +++ b/Wrappers/Python/test/test_run_test.py @@ -6,10 +6,10 @@ from ccpi.framework import ImageData from ccpi.framework import AcquisitionData from ccpi.framework import ImageGeometry from ccpi.framework import AcquisitionGeometry -from ccpi.optimisation.algs import FISTA -from ccpi.optimisation.algs import FBPD +from ccpi.optimisation.algorithms import FISTA +#from ccpi.optimisation.algs import FBPD from ccpi.optimisation.funcs import Norm2sq -from ccpi.optimisation.functions import ZeroFun +from ccpi.optimisation.functions import ZeroFunction from ccpi.optimisation.funcs import Norm1 from ccpi.optimisation.funcs import TV2D from ccpi.optimisation.funcs import Norm2 @@ -82,7 +82,7 @@ class TestAlgorithms(unittest.TestCase): opt = {'memopt': True} # Create object instances with the test data A and b. f = Norm2sq(A, b, c=0.5, memopt=True) - g0 = ZeroFun() + g0 = ZeroFunction() # Initial guess x_init = DataContainer(np.zeros((n, 1))) @@ -90,12 +90,15 @@ class TestAlgorithms(unittest.TestCase): f.grad(x_init) # Run FISTA for least squares plus zero function. - x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt) - + #x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt) + fa = FISTA(x_init=x_init, f=f, g=g0) + fa.max_iteration = 10 + fa.run(10) + # Print solution and final objective/criterion value for comparison print("FISTA least squares plus zero function solution and objective value:") - print(x_fista0.array) - print(criter0[-1]) + print(fa.get_output()) + print(fa.get_last_objective()) # Compare to CVXPY @@ -143,7 +146,7 @@ class TestAlgorithms(unittest.TestCase): opt = {'memopt': True} # Create object instances with the test data A and b. 
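#
# The NexusReader.load change above keeps only the frames whose NeXus image_key
# is 0 (projections), discarding flat- and dark-field frames, before applying the
# vertical crop. A NumPy sketch of that selection on a fake stack (array names
# are illustrative; the key values 0/1/2 follow the usual NeXus convention for
# projection/flat/dark):
import numpy as np

frames = np.arange(6 * 4 * 5, dtype=np.float32).reshape(6, 4, 5)   # 6 frames of 4x5 pixels
image_keys = np.array([1, 0, 0, 2, 0, 0])

projections = frames[image_keys == 0]        # what the fixed load() returns
print(projections.shape)                     # (4, 4, 5)

ymin, ymax = 1, 3                            # optional vertical region of interest
roi = projections[:, ymin:ymax, :]
print(roi.shape)                             # (4, 2, 5)
#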
f = Norm2sq(A, b, c=0.5, memopt=True) - g0 = ZeroFun() + g0 = ZeroFunction() # Initial guess x_init = DataContainer(np.zeros((n, 1))) @@ -155,12 +158,16 @@ class TestAlgorithms(unittest.TestCase): g1.prox(x_init, 0.02) # Combine with least squares and solve using generic FISTA implementation - x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt) + #x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt) + fa = FISTA(x_init=x_init, f=f, g=g1) + fa.max_iteration = 10 + fa.run(10) + # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") - print(x_fista1.as_array().squeeze()) - print(criter1[-1]) + print(fa.get_output()) + print(fa.get_last_objective()) # Compare to CVXPY -- cgit v1.2.3 From 4d5161e6b98bed1766062fe69f6ee071e2e2e43c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 15 Apr 2019 11:37:56 -0400 Subject: fixed imports --- Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py | 3 +-- Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py | 2 +- Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py | 2 +- Wrappers/Python/ccpi/optimisation/algs.py | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py index 7194eb8..e65bc89 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py @@ -23,7 +23,6 @@ Created on Thu Feb 21 11:11:23 2019 """ from ccpi.optimisation.algorithms import Algorithm -#from collections.abc import Iterable class CGLS(Algorithm): '''Conjugate Gradient Least Squares algorithm @@ -84,4 +83,4 @@ class CGLS(Algorithm): self.d = s + beta*self.d def update_objective(self): - self.loss.append(self.r.squared_norm()) \ No newline at end of file + self.loss.append(self.r.squared_norm()) diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py index 445ba7a..aa07359 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FBPD.py @@ -23,7 +23,7 @@ Created on Thu Feb 21 11:09:03 2019 """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFun +from ccpi.optimisation.functions import ZeroFunction class FBPD(Algorithm): '''FBPD Algorithm diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py index f1e4132..14763c5 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py @@ -73,4 +73,4 @@ class GradientDescent(Algorithm): def update_objective(self): self.loss.append(self.objective_function(self.x)) - \ No newline at end of file + diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 6b6ae2c..c142eda 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -21,7 +21,7 @@ import numpy import time from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ZeroFun +from ccpi.optimisation.functions import ZeroFunction from ccpi.framework import ImageData from ccpi.framework import AcquisitionData from ccpi.optimisation.spdhg import spdhg -- cgit v1.2.3 From 85171b24b4bddf18b82a4fd5fd6e2bfe16f0afbf Mon Sep 17 00:00:00 2001 From: 
epapoutsellis Date: Mon, 15 Apr 2019 17:26:30 +0100 Subject: change grad & prox --- Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py index 93ba178..dbe8174 100755 --- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py @@ -106,9 +106,9 @@ class FISTA(Algorithm): else: - u = self.y - self.invL*self.f.grad(self.y) + u = self.y - self.invL*self.f.gradient(self.y) - self.x = self.g.prox(u,self.invL) + self.x = self.g.proximal(u,self.invL) self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) -- cgit v1.2.3 From c0bde2086da1b1160c956fafbbb666c256d3e4b9 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Apr 2019 17:27:51 +0100 Subject: fixing other method pf pdhg denoising --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 27 ++- .../functions/FunctionOperatorComposition.py | 15 +- .../ccpi/optimisation/functions/L2NormSquared.py | 34 +++- .../ccpi/optimisation/functions/MixedL21Norm.py | 7 +- .../ccpi/optimisation/functions/ScaledFunction.py | 7 +- Wrappers/Python/wip/fista_TV_denoising.py | 72 +++++++ Wrappers/Python/wip/pdhg_TV_denoising.py | 209 +++------------------ 7 files changed, 176 insertions(+), 195 deletions(-) create mode 100644 Wrappers/Python/wip/fista_TV_denoising.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 5e92767..d1b5351 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -158,14 +158,24 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old -= tau*operator.adjoint(y) x = g.proximal(x_old, tau) - xbar.fill(x) - xbar -= x_old + x.subtract(x_old, out=xbar) xbar *= theta xbar += x - + x_old.fill(x) - y_old.fill(y) + y_old.fill(y) + + +# if i%100==0: +# +# p1 = f(operator.direct(x)) + g(x) +# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) +# primal.append(p1) +# dual.append(d1) +# pdgap.append(p1-d1) +# print(p1, d1, p1-d1) + else: operator.direct(xbar, out = y_tmp) @@ -186,6 +196,15 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old.fill(x) y_old.fill(y) +# if i%100==0: +# +# p1 = f(operator.direct(x)) + g(x) +# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) +# primal.append(p1) +# dual.append(d1) +# pdgap.append(p1-d1) +# print(p1, d1, p1-d1) + t_end = time.time() diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py index 38bc458..70511bb 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py @@ -54,15 +54,20 @@ class FunctionOperatorComposition(Function): def proximal(self, x, tau, out=None): - '''proximal does not take into account the Operator''' - - return self.function.proximal(x, tau, out=out) + '''proximal does not take into account the Operator''' + if out is None: + return self.function.proximal(x, tau) + else: + self.function.proximal(x, tau, out=out) + def proximal_conjugate(self, x, tau, out=None): ''' proximal conjugate does not take into account the Operator''' - - return 
self.function.proximal_conjugate(x, tau, out=out) + if out is None: + return self.function.proximal_conjugate(x, tau) + else: + self.function.proximal_conjugate(x, tau, out=out) def gradient(self, x, out=None): diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 2d0a00a..2ac11ee 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -34,6 +34,7 @@ class L2NormSquared(Function): super(L2NormSquared, self).__init__() self.b = kwargs.get('b',None) + self.L = 2 def __call__(self, x): @@ -247,7 +248,38 @@ if __name__ == '__main__': print(res_out.as_array()) numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \ - res_out.as_array(), decimal=4) + res_out.as_array(), decimal=4) + + + + ig1 = ImageGeometry(2,3) + + tau = 0.1 + + u = ig1.allocate('random_int') + b = ig1.allocate('random_int') + + scalar = 0.5 + f_scaled = scalar * L2NormSquared(b=b) + f_noscaled = L2NormSquared(b=b) + + + res1 = f_scaled.proximal(u, tau) + res2 = f_noscaled.proximal(u, tau*scalar) + +# res2 = (u + tau*b)/(1+tau) + + numpy.testing.assert_array_almost_equal(res1.as_array(), \ + res2.as_array(), decimal=4) + + + + + + + + + diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 3541bc2..7e6b6e7 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -54,9 +54,8 @@ class MixedL21Norm(Function): else: - #tmp = [ el**2 for el in x.containers ] - #res = sum(tmp).sqrt().sum() - res = x.pnorm() + tmp = [ el**2 for el in x.containers ] + res = sum(tmp).sqrt().sum() return res @@ -109,7 +108,7 @@ class MixedL21Norm(Function): # x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out) #TODO this is slow, why ??? 
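#
# The ScaledFunction change above forwards proximal(x, tau) to
# function.proximal(x, tau*scalar), i.e. it relies on the identity
# prox_{tau*(c*f)} = prox_{(tau*c)*f}; the new L2NormSquared test checks exactly
# that. A NumPy check using the same closed form as the diff, f(x) = ||x - b||^2
# with prox_tau f(x) = b + (x - b)/(1 + 2*tau) (plain arrays, not DataContainers):
import numpy as np

def prox_l2sq(x, tau, b):
    # proximal map of tau * ||. - b||^2 (no 1/2 factor, matching L2NormSquared)
    return b + (x - b) / (1.0 + 2.0 * tau)

rng = np.random.default_rng(0)
x, b = rng.standard_normal(10), rng.standard_normal(10)
tau, scalar = 0.1, 0.5

u = prox_l2sq(x, tau * scalar, b)            # what ScaledFunction.proximal computes
# optimality condition of argmin_u 0.5*||u - x||^2 + tau*scalar*||u - b||^2
residual = (u - x) + 2.0 * tau * scalar * (u - b)
np.testing.assert_allclose(residual, 0.0, atol=1e-12)
#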
-# x.divide(x.norm().maximum(1.0), out=out) +# x.divide(x.pnorm().maximum(1.0), out=out) def __rmul__(self, scalar): diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index cb85249..7caeab2 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -37,11 +37,14 @@ class ScaledFunction(object): ''' def __init__(self, function, scalar): super(ScaledFunction, self).__init__() - self.L = None + if not isinstance (scalar, Number): raise TypeError('expected scalar: got {}'.format(type(scalar))) self.scalar = scalar self.function = function + + if self.function.L is not None: + self.L = self.scalar * self.function.L def __call__(self,x, out=None): '''Evaluates the function at x ''' @@ -64,7 +67,7 @@ class ScaledFunction(object): if out is None: return self.function.proximal(x, tau*self.scalar) else: - out.fill( self.function.proximal(x, tau*self.scalar) ) + self.function.proximal(x, tau*self.scalar, out = out) def proximal_conjugate(self, x, tau, out = None): '''This returns the proximal operator for the function at x, tau diff --git a/Wrappers/Python/wip/fista_TV_denoising.py b/Wrappers/Python/wip/fista_TV_denoising.py new file mode 100644 index 0000000..a9712fc --- /dev/null +++ b/Wrappers/Python/wip/fista_TV_denoising.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old, FISTA + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction + +from ccpi.optimisation.algs import FISTA + +from skimage.util import random_noise + +from timeit import default_timer as timer +def dt(steps): + return steps[-1] - steps[-2] + +# Create phantom for TV denoising + +N = 100 + +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) +noisy_data = ImageData(n1) + + +plt.imshow(noisy_data.as_array()) +plt.title('Noisy data') +plt.show() + +# Regularisation Parameter +alpha = 2 + +operator = Gradient(ig) +g = alpha * MixedL21Norm() +f = 0.5 * L2NormSquared(b = noisy_data) + +x_init = ig.allocate() +opt = {'niter':2000} + + +x = FISTA(x_init, f, g, opt) + +#fista = FISTA() +#fista.set_up(x_init, f, g, opt ) +#fista.max_iteration = 10 +# +#fista.run(2000) +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(fista.get_output().as_array()) +#plt.title('no memopt class') + + + diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index e142d94..a5cd1bf 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -23,11 +23,9 @@ from timeit import default_timer as timer def dt(steps): return steps[-1] - steps[-2] -#%% - # Create phantom for TV denoising -N = 500 +N = 100 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 @@ -40,16 +38,16 @@ ag = ig n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) noisy_data = ImageData(n1) + plt.imshow(noisy_data.as_array()) +plt.title('Noisy data') plt.show() -#%% - # Regularisation Parameter alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '0' +method = '1' if method == '0': @@ -74,12 +72,9 @@ else: # No Composite # ########################################################################### operator = Gradient(ig) - f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) - g = L2NormSquared(b = noisy_data) - - ########################################################################### -#%% - + f = alpha * MixedL21Norm() + g = 0.5 * L2NormSquared(b = noisy_data) + # Compute operator Norm normK = operator.norm() @@ -94,180 +89,36 @@ t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) t2 = timer() +print(" Run memopt") t3 = timer() res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) t4 = timer() -# -print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3)) - -# -#%% -#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -#pdhg.max_iteration = 2000 -#pdhg.update_objective_interval = 100 -## -#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) -#pdhgo.max_iteration = 2000 -#pdhgo.update_objective_interval = 100 -## -#steps = [timer()] -#pdhgo.run(2000) -#steps.append(timer()) -#t1 = dt(steps) -## -#pdhg.run(2000) -#steps.append(timer()) -#t2 = dt(steps) -# -#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) -#res = pdhg.get_output() -#res1 = pdhgo.get_output() #%% -#plt.figure(figsize=(15,15)) -#plt.subplot(3,1,1) -#plt.imshow(res.as_array()) -#plt.title('no memopt') -#plt.colorbar() -#plt.subplot(3,1,2) -#plt.imshow(res1.as_array()) -#plt.title('memopt') -#plt.colorbar() -#plt.subplot(3,1,3) -#plt.imshow((res1 - res).abs().as_array()) -#plt.title('diff') -#plt.colorbar() -#plt.show() - - -#plt.figure(figsize=(15,15)) -#plt.subplot(3,1,1) -#plt.imshow(pdhg.get_output().as_array()) -#plt.title('no memopt class') -#plt.colorbar() -#plt.subplot(3,1,2) -#plt.imshow(res.as_array()) -#plt.title('no memopt') -#plt.colorbar() -#plt.subplot(3,1,3) -#plt.imshow((pdhg.get_output() - res).abs().as_array()) -#plt.title('diff') 
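#
# fista_TV_denoising.py above drives the functional FISTA(x_init, f, g, opt); the
# iteration it performs (and which the grad->gradient / prox->proximal renames in
# the earlier commits touch) is the standard Beck-Teboulle update. A self-contained
# NumPy sketch on a least-squares + l1 toy problem, where soft-thresholding stands
# in for g.proximal (illustrative only, not the ccpi API):
import numpy as np

def fista(grad_f, prox_g, x0, step, n_iter=200):
    x_old, y, t_old = x0.copy(), x0.copy(), 1.0
    for _ in range(n_iter):
        x = prox_g(y - step * grad_f(y), step)                 # forward-backward step
        t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_old ** 2))      # momentum update
        y = x + ((t_old - 1.0) / t) * (x - x_old)              # extrapolation
        x_old, t_old = x, t
    return x

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 50))
b = A @ (rng.standard_normal(50) * (rng.random(50) < 0.1))
lam = 0.1
grad_f = lambda x: A.T @ (A @ x - b)                           # gradient of 0.5*||Ax - b||^2
prox_g = lambda x, s: np.sign(x) * np.maximum(np.abs(x) - lam * s, 0.0)
step = 1.0 / np.linalg.norm(A, 2) ** 2                         # 1/L with L = ||A||_2^2
x_hat = fista(grad_f, prox_g, np.zeros(50), step)
print(0.5 * np.linalg.norm(A @ x_hat - b) ** 2 + lam * np.abs(x_hat).sum())
#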
-#plt.colorbar() -#plt.show() -# -# -# -#plt.figure(figsize=(15,15)) -#plt.subplot(3,1,1) -#plt.imshow(pdhgo.get_output().as_array()) -#plt.title('memopt class') -#plt.colorbar() -#plt.subplot(3,1,2) -#plt.imshow(res1.as_array()) -#plt.title('no memopt') -#plt.colorbar() -#plt.subplot(3,1,3) -#plt.imshow((pdhgo.get_output() - res1).abs().as_array()) -#plt.title('diff') -#plt.colorbar() -#plt.show() - - - +plt.figure(figsize=(15,15)) +plt.subplot(3,1,1) +plt.imshow(res.as_array()) +plt.title('no memopt') +plt.colorbar() +plt.subplot(3,1,2) +plt.imshow(res1.as_array()) +plt.title('memopt') +plt.colorbar() +plt.subplot(3,1,3) +plt.imshow((res1 - res).abs().as_array()) +plt.title('diff') +plt.colorbar() +plt.show() +# +plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt') +plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt') +plt.legend() +plt.show() - -# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) -# res = pdhg.get_output() -# res1 = pdhgo.get_output() -# -# diff = (res-res1) -# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) -# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) -# -# -# plt.figure(figsize=(5,5)) -# plt.subplot(1,3,1) -# plt.imshow(res.as_array()) -# plt.colorbar() -# #plt.show() -# -# #plt.figure(figsize=(5,5)) -# plt.subplot(1,3,2) -# plt.imshow(res1.as_array()) -# plt.colorbar() - -#plt.show() +print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3)) +diff = (res1 - res).abs().as_array().max() +print(" Max of abs difference is {}".format(diff)) -#======= -## opt = {'niter':2000, 'memopt': True} -# -## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -# -#>>>>>>> origin/pdhg_fix -# -# -## opt = {'niter':2000, 'memopt': False} -## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -# -## plt.figure(figsize=(5,5)) -## plt.subplot(1,3,1) -## plt.imshow(res.as_array()) -## plt.title('memopt') -## plt.colorbar() -## plt.subplot(1,3,2) -## plt.imshow(res1.as_array()) -## plt.title('no memopt') -## plt.colorbar() -## plt.subplot(1,3,3) -## plt.imshow((res1 - res).abs().as_array()) -## plt.title('diff') -## plt.colorbar() -## plt.show() -#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -#pdhg.max_iteration = 2000 -#pdhg.update_objective_interval = 100 -# -# -#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) -#pdhgo.max_iteration = 2000 -#pdhgo.update_objective_interval = 100 -# -#steps = [timer()] -#pdhgo.run(200) -#steps.append(timer()) -#t1 = dt(steps) -# -#pdhg.run(200) -#steps.append(timer()) -#t2 = dt(steps) -# -#print ("Time difference {} {} {}".format(t1,t2,t2-t1)) -#sol = pdhg.get_output().as_array() -##sol = result.as_array() -## -#fig = plt.figure() -#plt.subplot(1,3,1) -#plt.imshow(noisy_data.as_array()) -#plt.colorbar() -#plt.subplot(1,3,2) -#plt.imshow(sol) -#plt.colorbar() -#plt.subplot(1,3,3) -#plt.imshow(pdhgo.get_output().as_array()) -#plt.colorbar() -# -#plt.show() -### -## -#### -##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') -##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') -##plt.legend() -##plt.show() -# -# -##%% -## -- cgit v1.2.3 From f00a2a988f38abf93aecf55f94196d55fc0ca968 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Apr 2019 17:28:34 +0100 Subject: fixing other method pf pdhg denoising --- Wrappers/Python/ccpi/optimisation/algs.py | 14 
+- .../test_pdhg_profile/profile_pdhg_TV_denoising.py | 273 +++++++++++++++++++++ 2 files changed, 280 insertions(+), 7 deletions(-) create mode 100644 Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 6b6ae2c..da6adc1 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -72,7 +72,7 @@ def FISTA(x_init, f=None, g=None, opt=None): t_old = 1 - c = f(x_init) + g(x_init) +# c = f(x_init) + g(x_init) # algorithm loop for it in range(0, max_iter): @@ -99,9 +99,9 @@ def FISTA(x_init, f=None, g=None, opt=None): else: - u = y - invL*f.grad(y) + u = y - invL*f.gradient(y) - x = g.prox(u,invL) + x = g.proximal(u,invL) t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) @@ -111,8 +111,8 @@ def FISTA(x_init, f=None, g=None, opt=None): t_old = t # time and criterion - timing[it] = time.time() - time0 - criter[it] = f(x) + g(x); +# timing[it] = time.time() - time0 +# criter[it] = f(x) + g(x); # stopping rule #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: @@ -121,9 +121,9 @@ def FISTA(x_init, f=None, g=None, opt=None): #print(it, 'out of', 10, 'iterations', end='\r'); #criter = criter[0:it+1]; - timing = numpy.cumsum(timing[0:it+1]); +# timing = numpy.cumsum(timing[0:it+1]); - return x, it, timing, criter + return x #, it, timing, criter def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ regulariser=None, opt=None): diff --git a/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py b/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py new file mode 100644 index 0000000..e142d94 --- /dev/null +++ b/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 22 14:53:03 2019 + +@author: evangelos +""" + +from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer + +import numpy as np +import matplotlib.pyplot as plt + +from ccpi.optimisation.algorithms import PDHG, PDHG_old + +from ccpi.optimisation.operators import BlockOperator, Identity, Gradient +from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ + MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction + +from skimage.util import random_noise + +from timeit import default_timer as timer +def dt(steps): + return steps[-1] - steps[-2] + +#%% + +# Create phantom for TV denoising + +N = 500 + +data = np.zeros((N,N)) +data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N) +ag = ig + +# Create noisy data. 
Add Gaussian noise +n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) +noisy_data = ImageData(n1) + +plt.imshow(noisy_data.as_array()) +plt.show() + +#%% + +# Regularisation Parameter +alpha = 2 + +#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") +method = '0' + +if method == '0': + + # Create operators + op1 = Gradient(ig) + op2 = Identity(ig, ag) + + # Form Composite Operator + operator = BlockOperator(op1, op2, shape=(2,1) ) + + #### Create functions + + f1 = alpha * MixedL21Norm() + f2 = 0.5 * L2NormSquared(b = noisy_data) + f = BlockFunction(f1, f2) + + g = ZeroFunction() + +else: + + ########################################################################### + # No Composite # + ########################################################################### + operator = Gradient(ig) + f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + g = L2NormSquared(b = noisy_data) + + ########################################################################### +#%% + +# Compute operator Norm +normK = operator.norm() + +# Primal & dual stepsizes +sigma = 1 +tau = 1/(sigma*normK**2) + +opt = {'niter':2000} +opt1 = {'niter':2000, 'memopt': True} + +t1 = timer() +res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +t2 = timer() + + +t3 = timer() +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +t4 = timer() +# +print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3)) + +# +#%% +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 100 +## +#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +#pdhgo.max_iteration = 2000 +#pdhgo.update_objective_interval = 100 +## +#steps = [timer()] +#pdhgo.run(2000) +#steps.append(timer()) +#t1 = dt(steps) +## +#pdhg.run(2000) +#steps.append(timer()) +#t2 = dt(steps) +# +#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +#res = pdhg.get_output() +#res1 = pdhgo.get_output() + +#%% +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((res1 - res).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() + + +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhg.get_output().as_array()) +#plt.title('no memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhg.get_output() - res).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() +# +# +# +#plt.figure(figsize=(15,15)) +#plt.subplot(3,1,1) +#plt.imshow(pdhgo.get_output().as_array()) +#plt.title('memopt class') +#plt.colorbar() +#plt.subplot(3,1,2) +#plt.imshow(res1.as_array()) +#plt.title('no memopt') +#plt.colorbar() +#plt.subplot(3,1,3) +#plt.imshow((pdhgo.get_output() - res1).abs().as_array()) +#plt.title('diff') +#plt.colorbar() +#plt.show() + + + + + +# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1)) +# res = pdhg.get_output() +# res1 = pdhgo.get_output() +# +# diff = (res-res1) +# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max())) +# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum())) +# +# +# plt.figure(figsize=(5,5)) +# 
plt.subplot(1,3,1) +# plt.imshow(res.as_array()) +# plt.colorbar() +# #plt.show() +# +# #plt.figure(figsize=(5,5)) +# plt.subplot(1,3,2) +# plt.imshow(res1.as_array()) +# plt.colorbar() + +#plt.show() + + + +#======= +## opt = {'niter':2000, 'memopt': True} +# +## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# +#>>>>>>> origin/pdhg_fix +# +# +## opt = {'niter':2000, 'memopt': False} +## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# +## plt.figure(figsize=(5,5)) +## plt.subplot(1,3,1) +## plt.imshow(res.as_array()) +## plt.title('memopt') +## plt.colorbar() +## plt.subplot(1,3,2) +## plt.imshow(res1.as_array()) +## plt.title('no memopt') +## plt.colorbar() +## plt.subplot(1,3,3) +## plt.imshow((res1 - res).abs().as_array()) +## plt.title('diff') +## plt.colorbar() +## plt.show() +#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +#pdhg.max_iteration = 2000 +#pdhg.update_objective_interval = 100 +# +# +#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +#pdhgo.max_iteration = 2000 +#pdhgo.update_objective_interval = 100 +# +#steps = [timer()] +#pdhgo.run(200) +#steps.append(timer()) +#t1 = dt(steps) +# +#pdhg.run(200) +#steps.append(timer()) +#t2 = dt(steps) +# +#print ("Time difference {} {} {}".format(t1,t2,t2-t1)) +#sol = pdhg.get_output().as_array() +##sol = result.as_array() +## +#fig = plt.figure() +#plt.subplot(1,3,1) +#plt.imshow(noisy_data.as_array()) +#plt.colorbar() +#plt.subplot(1,3,2) +#plt.imshow(sol) +#plt.colorbar() +#plt.subplot(1,3,3) +#plt.imshow(pdhgo.get_output().as_array()) +#plt.colorbar() +# +#plt.show() +### +## +#### +##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth') +##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon') +##plt.legend() +##plt.show() +# +# +##%% +## -- cgit v1.2.3 From b55208551c32a0e9e1ccf01c88083889d4179a40 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Mon, 15 Apr 2019 18:15:36 +0100 Subject: proximal is not working properly --- .../ccpi/optimisation/functions/L2NormSquared.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 2ac11ee..1946d67 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -89,10 +89,25 @@ class L2NormSquared(Function): ''' if out is None: - if self.b is not None: - return (x - self.b)/(1+2*tau) + self.b - else: + + if self.b is None: return x/(1+2*tau) + else: + tmp = x + tmp -= self.b + tmp /= (1+2*tau) + tmp += self.b + return tmp +# return (x-self.b)/(1+2*tau) + self.b + +# if self.b is not None: +# out=x +# if self.b is not None: +# out -= self.b +# out /= (1+2*tau) +# if self.b is not None: +# out += self.b +# return out else: out.fill(x) if self.b is not None: -- cgit v1.2.3 From e23a45a61a8f185efe564088daea45de714d94ac Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Apr 2019 19:04:46 +0100 Subject: fix not memopt case --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 48 ++++++++--------- .../ccpi/optimisation/functions/KullbackLeibler.py | 60 +++++++++++++++++----- 2 files changed, 71 insertions(+), 37 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py 
b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index d1b5351..a165e55 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -152,28 +152,28 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): if not memopt: - y_old += sigma * operator.direct(xbar) - y = f.proximal_conjugate(y_old, sigma) - - x_old -= tau*operator.adjoint(y) - x = g.proximal(x_old, tau) - + y_tmp = y_old + sigma * operator.direct(xbar) + y = f.proximal_conjugate(y_tmp, sigma) + + x_tmp = x_old - tau*operator.adjoint(y) + x = g.proximal(x_tmp, tau) + x.subtract(x_old, out=xbar) xbar *= theta xbar += x - + x_old.fill(x) - y_old.fill(y) + y_old.fill(y) -# if i%100==0: -# -# p1 = f(operator.direct(x)) + g(x) -# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) -# primal.append(p1) -# dual.append(d1) -# pdgap.append(p1-d1) -# print(p1, d1, p1-d1) + if i%100==0: + + p1 = f(operator.direct(x)) + g(x) + d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) + primal.append(p1) + dual.append(d1) + pdgap.append(p1-d1) + print(p1, d1, p1-d1) else: @@ -196,14 +196,14 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): x_old.fill(x) y_old.fill(y) -# if i%100==0: -# -# p1 = f(operator.direct(x)) + g(x) -# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) -# primal.append(p1) -# dual.append(d1) -# pdgap.append(p1-d1) -# print(p1, d1, p1-d1) + if i%100==0: + + p1 = f(operator.direct(x)) + g(x) + d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) ) + primal.append(p1) + dual.append(d1) + pdgap.append(p1-d1) + print(p1, d1, p1-d1) t_end = time.time() diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py index 18af154..ae25bdb 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py +++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py @@ -19,23 +19,29 @@ import numpy from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.framework import DataContainer, ImageData, ImageGeometry +from ccpi.optimisation.functions.ScaledFunction import ScaledFunction class KullbackLeibler(Function): - def __init__(self,data,**kwargs): + ''' Assume that data > 0 + + ''' + + def __init__(self,data, **kwargs): super(KullbackLeibler, self).__init__() self.b = data self.bnoise = kwargs.get('bnoise', 0) + + def __call__(self, x): + + # TODO check + self.sum_value = self.b + self.bnoise if (self.sum_value.as_array()<0).any(): self.sum_value = numpy.inf - - def __call__(self, x): if self.sum_value==numpy.inf: return numpy.inf @@ -43,22 +49,50 @@ class KullbackLeibler(Function): return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array())) - def gradient(self, x): + def gradient(self, x, out=None): #TODO Division check - return 1 - self.b/(x + self.bnoise) + if out is None: + return 1 - self.b/(x + self.bnoise) + else: + self.b.divide(x+self.bnoise, out=out) + out.subtract(1, out=out) - def convex_conjugate(self, x, out=None): - pass + def convex_conjugate(self, x): + + return self.b * ( numpy.log(self.b/(1-x)) - 1 ) - self.bnoise * (x - 1) def proximal(self, x, tau, out=None): - z = x + tau * self.bnoise - return (z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt() - + if out is None: + return 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() ) 
+ else: + tmp = 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b ) .sqrt() ) + out.fill(tmp) + def proximal_conjugate(self, x, tau, out=None): - pass + + + if out is None: + z = x + tau * self.bnoise + return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) + else: + z = x + tau * self.bnoise + res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) + out.fill(res) + + + + def __rmul__(self, scalar): + + ''' Multiplication of L2NormSquared with a scalar + + Returns: ScaledFunction + + ''' + + return ScaledFunction(self, scalar) -- cgit v1.2.3 From 48cffabc0879f9e61d932b654e497382ebfaa995 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Apr 2019 19:05:10 +0100 Subject: fix KL methods --- .../Python/ccpi/optimisation/functions/KullbackLeibler.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py index ae25bdb..40dddd7 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py +++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py @@ -20,6 +20,7 @@ import numpy from ccpi.optimisation.functions import Function from ccpi.optimisation.functions.ScaledFunction import ScaledFunction +from ccpi.framework import ImageData class KullbackLeibler(Function): @@ -39,14 +40,17 @@ class KullbackLeibler(Function): # TODO check - self.sum_value = self.b + self.bnoise + self.sum_value = x + self.bnoise if (self.sum_value.as_array()<0).any(): self.sum_value = numpy.inf if self.sum_value==numpy.inf: return numpy.inf else: - return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array())) + tmp = self.sum_value.as_array() + return (x - self.b * ImageData( numpy.log(tmp))).sum() + +# return numpy.sum( x.as_array() - self.b.as_array() * numpy.log(self.sum_value.as_array())) def gradient(self, x, out=None): @@ -60,7 +64,10 @@ class KullbackLeibler(Function): def convex_conjugate(self, x): - return self.b * ( numpy.log(self.b/(1-x)) - 1 ) - self.bnoise * (x - 1) + tmp = self.b.as_array()/( 1 - x.as_array() ) + + return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum() +# return self.b * ( ImageData(numpy.log(self.b/(1-x)) - 1 )) - self.bnoise * (x - 1) def proximal(self, x, tau, out=None): -- cgit v1.2.3 From 92a2dfaa652211d5472055c86cc90dfd02e1e400 Mon Sep 17 00:00:00 2001 From: epapoutsellis Date: Tue, 16 Apr 2019 19:05:37 +0100 Subject: add demos --- .../ccpi/optimisation/functions/L2NormSquared.py | 12 ++-- .../ccpi/optimisation/functions/MixedL21Norm.py | 3 +- Wrappers/Python/wip/pdhg_TV_denoising.py | 6 +- Wrappers/Python/wip/pdhg_tv_denoising_poisson.py | 71 ++++++++++++++-------- 4 files changed, 58 insertions(+), 34 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index 1946d67..6d3bf86 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -93,12 +93,12 @@ class L2NormSquared(Function): if self.b is None: return x/(1+2*tau) else: - tmp = x - tmp -= self.b - tmp /= (1+2*tau) - tmp += self.b - return tmp -# return (x-self.b)/(1+2*tau) + self.b +# tmp = x +# tmp -= self.b +# tmp /= (1+2*tau) +# tmp += self.b +# return tmp + return (x-self.b)/(1+2*tau) + self.b # if self.b is not None: # out=x 
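#
# The KullbackLeibler.proximal added above uses the closed form of the pointwise
# minimiser of 0.5*(u - x)^2 + tau*(u - b*log(u + r)), with r the bnoise background:
# the positive root 0.5*((x - r - tau) + sqrt((x + r - tau)^2 + 4*tau*b)). A NumPy
# check of that root against the first-order optimality condition (array names are
# illustrative):
import numpy as np

rng = np.random.default_rng(0)
b = rng.random(1000) + 0.1                  # positive data
x = 2.0 * rng.standard_normal(1000)
tau, r = 0.3, 0.05                          # step size and background (bnoise)

u = 0.5 * ((x - r - tau) + np.sqrt((x + r - tau) ** 2 + 4.0 * tau * b))

# optimality: (u - x) + tau*(1 - b/(u + r)) = 0, and u + r stays positive
np.testing.assert_allclose((u - x) + tau * (1.0 - b / (u + r)), 0.0, atol=1e-10)
assert np.all(u + r > 0)
#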
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 7e6b6e7..2004e5f 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -96,7 +96,8 @@ class MixedL21Norm(Function): tmp = [ el*el for el in x.containers] res = sum(tmp).sqrt().maximum(1.0) frac = [el/res for el in x.containers] - return BlockDataContainer(*frac) + return BlockDataContainer(*frac) + #TODO this is slow, why??? # return x.divide(x.pnorm().maximum(1.0)) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index a5cd1bf..e6d38e9 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -20,10 +20,10 @@ from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \ from skimage.util import random_noise from timeit import default_timer as timer -def dt(steps): - return steps[-1] - steps[-2] +#def dt(steps): +# return steps[-1] - steps[-2] -# Create phantom for TV denoising +# Create phantom for TV Gaussian denoising N = 100 diff --git a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py index 9fad6f8..fbe0d9b 100644 --- a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py +++ b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py @@ -6,24 +6,25 @@ Created on Fri Feb 22 14:53:03 2019 @author: evangelos """ -from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer - import numpy as np import matplotlib.pyplot as plt +from ccpi.framework import ImageData, ImageGeometry + from ccpi.optimisation.algorithms import PDHG, PDHG_old from ccpi.optimisation.operators import BlockOperator, Identity, Gradient -from ccpi.optimisation.functions import ZeroFun, KullbackLeibler, \ - MixedL21Norm, FunctionOperatorComposition, BlockFunction +from ccpi.optimisation.functions import ZeroFunction, KullbackLeibler, \ + MixedL21Norm, BlockFunction from skimage.util import random_noise +from timeit import default_timer as timer # ############################################################################ -# Create phantom for TV denoising +# Create phantom for TV Poisson denoising N = 100 data = np.zeros((N,N)) @@ -41,13 +42,11 @@ plt.imshow(noisy_data.as_array()) plt.colorbar() plt.show() -#%% - # Regularisation Parameter -alpha = 10 +alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' +method = '0' if method == '0': # Create operators @@ -57,15 +56,11 @@ if method == '0': # Form Composite Operator operator = BlockOperator(op1, op2, shape=(2,1) ) - #### Create functions -# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \ -# L2NormSq(0.5, b = noisy_data) ) - f1 = alpha * MixedL21Norm() - f2 = KullbackLeibler(b = noisy_data) + f2 = KullbackLeibler(noisy_data) f = BlockFunction(f1, f2 ) - g = ZeroFun() + g = ZeroFunction() else: @@ -73,29 +68,57 @@ else: # No Composite # ########################################################################### operator = Gradient(ig) - f = alpha * FunctionOperatorComposition(operator, MixedL21Norm()) + f = alpha * MixedL21Norm() g = KullbackLeibler(noisy_data) ########################################################################### #%% # Compute operator Norm normK = operator.norm() -print ("normK", normK) -# Primal & dual stepsizes -#sigma = 1 -#tau = 1/(sigma*normK**2) -sigma = 1/normK -tau = 1/normK +# Primal & dual stepsizes 
+sigma = 1 +tau = 1/(sigma*normK**2) opt = {'niter':2000} +opt1 = {'niter':2000, 'memopt': True} +t1 = timer() res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) - -plt.figure(figsize=(5,5)) +t2 = timer() + +print(" Run memopt") + +t3 = timer() +res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1) +t4 = timer() + +#%% +plt.figure(figsize=(15,15)) +plt.subplot(3,1,1) plt.imshow(res.as_array()) +plt.title('no memopt') +plt.colorbar() +plt.subplot(3,1,2) +plt.imshow(res1.as_array()) +plt.title('memopt') +plt.colorbar() +plt.subplot(3,1,3) +plt.imshow((res1 - res).abs().as_array()) +plt.title('diff') plt.colorbar() plt.show() +# +plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt') +plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt') +plt.legend() +plt.show() + +print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3)) +diff = (res1 - res).abs().as_array().max() + +print(" Max of abs difference is {}".format(diff)) + #pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) #pdhg.max_iteration = 2000 -- cgit v1.2.3
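#
# For reference, the update scheme that PDHG_old and the PDHG class run in the
# denoising scripts of this series, written as a minimal NumPy sketch for the ROF
# model min_u 0.5*||u - g||^2 + alpha*TV(u). The forward-difference gradient plays
# the role of the operator K and the bound ||K||^2 <= 8 stands in for
# operator.norm(); this illustrates the algorithm, not the ccpi implementation:
import numpy as np

def grad(u):                                   # K u, forward differences
    gx = np.zeros_like(u); gy = np.zeros_like(u)
    gx[:-1, :] = u[1:, :] - u[:-1, :]
    gy[:, :-1] = u[:, 1:] - u[:, :-1]
    return gx, gy

def div(px, py):                               # -K^T p, negative adjoint of grad
    dx = np.zeros_like(px); dy = np.zeros_like(py)
    dx[0, :] = px[0, :]; dx[1:-1, :] = px[1:-1, :] - px[:-2, :]; dx[-1, :] = -px[-2, :]
    dy[:, 0] = py[:, 0]; dy[:, 1:-1] = py[:, 1:-1] - py[:, :-2]; dy[:, -1] = -py[:, -2]
    return dx + dy

def pdhg_rof(g, alpha=0.1, n_iter=200):
    normK2 = 8.0                               # bound on ||grad||^2 in 2D
    sigma, theta = 1.0, 1.0
    tau = 1.0 / (sigma * normK2)
    u = g.copy(); u_bar = g.copy()
    px = np.zeros_like(g); py = np.zeros_like(g)
    for _ in range(n_iter):
        gx, gy = grad(u_bar)                   # dual ascent on p
        px += sigma * gx; py += sigma * gy
        scale = np.maximum(np.sqrt(px ** 2 + py ** 2) / alpha, 1.0)
        px /= scale; py /= scale               # projection onto {|p| <= alpha}
        u_old = u                              # primal step: prox of tau*0.5*||. - g||^2
        u = (u + tau * div(px, py) + tau * g) / (1.0 + tau)
        u_bar = u + theta * (u - u_old)        # over-relaxation
    return u

noisy = np.clip(np.eye(32) + 0.2 * np.random.default_rng(0).standard_normal((32, 32)), 0.0, 1.0)
print(pdhg_rof(noisy, alpha=0.2).shape)        # (32, 32)
#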