From c8a628773fa5fe281acd2c701dd96a9824f22713 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 31 Jul 2018 14:49:30 +0100 Subject: Added copy as alias of clone, use copy in algs (#131) closes #126 --- Wrappers/Python/ccpi/framework.py | 7 +++++-- Wrappers/Python/ccpi/optimisation/algs.py | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index f4cd406..485e96b 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -34,7 +34,7 @@ def find_key(dic, val): return [k for k, v in dic.items() if v == val][0] -class ImageGeometry: +class ImageGeometry(object): def __init__(self, voxel_num_x=0, @@ -105,7 +105,7 @@ class ImageGeometry: return repres -class AcquisitionGeometry: +class AcquisitionGeometry(object): def __init__(self, geom_type, @@ -571,6 +571,9 @@ class DataContainer(object): dimension_labels=self.dimension_labels, deep_copy=True, geometry=self.geometry ) + def copy(self): + '''alias of clone''' + return self.clone() def get_data_axes_order(self,new_order=None): '''returns the axes label of self as a list diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index a45100c..f025bbb 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -191,8 +191,8 @@ def CGLS(x_init, operator , data , opt=None): tol = opt['tol'] max_iter = opt['iter'] - r = data.clone() - x = x_init.clone() + r = data.copy() + x = x_init.copy() d = operator.adjoint(r) -- cgit v1.2.3 From 400e63f834f48b2e557e9cb1a71723d9253008a3 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 31 Jul 2018 14:52:52 +0100 Subject: added alias to prox and grad (#130) --- Wrappers/Python/ccpi/optimisation/funcs.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 
f5463a3..90bc9e3 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -24,9 +24,11 @@ import numpy class Function(object): def __init__(self): self.op = Identity() - def __call__(self,x): return 0 - def grad(self,x): return 0 - def prox(self,x,tau): return x + def __call__(self,x): return 0 + def grad(self, x): return 0 + def prox(self, x, tau): return x + def gradient(self, x): return self.grad(x) + def proximal(self, x, tau): return self.prox(x, tau) class Norm2(Function): -- cgit v1.2.3 From 59596ad67b5167a9d9ee9d353101bb4c5974541f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 15 Aug 2018 10:44:34 +0100 Subject: add scipy as dependency (#138) --- Wrappers/Python/conda-recipe/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index f72c980..8575b8a 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -18,6 +18,7 @@ requirements: run: - python - numpy + - scipy - matplotlib about: -- cgit v1.2.3 From 2557fb9765d8bdbb236d3b0e3b3d6bed486839f3 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 15 Aug 2018 12:23:49 +0100 Subject: replaces in with explicit test (#141) closes #139 --- Wrappers/Python/ccpi/framework.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 485e96b..d82010b 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -863,8 +863,9 @@ class DataProcessor(object): raise NotImplementedError('Implement basic checks for input DataContainer') def get_output(self): - if None in self.__dict__.values(): - raise ValueError('Not all parameters have been passed') + for k,v in self.__dict__.items(): + if v is None: + raise ValueError('Key {0} is None'.format(k)) shouldRun = False if self.runTime == -1: shouldRun = True -- cgit 
v1.2.3 From f8f294f51aac110049da3eb7c1cb8cbd6a46282f Mon Sep 17 00:00:00 2001 From: jakobsj Date: Tue, 18 Sep 2018 11:56:20 +0100 Subject: Add SIRT and Box constraints (#125) * Quick prototype of SIRT with nonnegativity added * Add indicator function for boxconstrint in SIRT and FISTA with demo --- Wrappers/Python/ccpi/framework.py | 7 ++ Wrappers/Python/ccpi/optimisation/algs.py | 63 +++++++++++ Wrappers/Python/ccpi/optimisation/funcs.py | 24 ++++ Wrappers/Python/wip/demo_test_sirt.py | 176 +++++++++++++++++++++++++++++ 4 files changed, 270 insertions(+) create mode 100644 Wrappers/Python/wip/demo_test_sirt.py diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index d82010b..0c2432f 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -490,6 +490,13 @@ class DataContainer(object): dimension_labels=self.dimension_labels, geometry=self.geometry) + def minimum(self,otherscalar): + out = numpy.minimum(self.as_array(),otherscalar) + return type(self)(out, + deep_copy=True, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + def sign(self): out = numpy.sign(self.as_array() ) return type(self)(out, diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index f025bbb..570df4b 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -21,6 +21,7 @@ import numpy import time from ccpi.optimisation.funcs import Function +from ccpi.framework import ImageData, AcquisitionData def FISTA(x_init, f=None, g=None, opt=None): '''Fast Iterative Shrinkage-Thresholding Algorithm @@ -222,4 +223,66 @@ def CGLS(x_init, operator , data , opt=None): criter[it] = (r**2).sum() return x, it, timing, criter + +def SIRT(x_init, operator , data , opt=None, constraint=None): + '''Simultaneous Iterative Reconstruction Technique + + Parameters: + x_init: initial guess + operator: operator for forward/backward projections + 
data: data to operate on + opt: additional algorithm + constraint: func of Indicator type specifying convex constraint. + ''' + + if opt is None: + opt = {'tol': 1e-4, 'iter': 1000} + else: + try: + max_iter = opt['iter'] + except KeyError as ke: + opt[ke] = 1000 + try: + opt['tol'] = 1000 + except KeyError as ke: + opt[ke] = 1e-4 + tol = opt['tol'] + max_iter = opt['iter'] + + # Set default constraint to unconstrained + if constraint==None: + constraint = Function() + + x = x_init.clone() + + timing = numpy.zeros(max_iter) + criter = numpy.zeros(max_iter) + + # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0 + relax_par = 1.0 + + # Set up scaling matrices D and M. + im1 = ImageData(geometry=x_init.geometry) + im1.array[:] = 1.0 + M = 1/operator.direct(im1) + del im1 + aq1 = AcquisitionData(geometry=M.geometry) + aq1.array[:] = 1.0 + D = 1/operator.adjoint(aq1) + del aq1 + + # algorithm loop + for it in range(0, max_iter): + t = time.time() + r = data - operator.direct(x) + + x = constraint.prox(x + relax_par * (D*operator.adjoint(M*r)),None) + + timing[it] = time.time() - t + if it > 0: + criter[it-1] = (r**2).sum() + + r = data - operator.direct(x) + criter[it] = (r**2).sum() + return x, it, timing, criter diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 90bc9e3..4a0a139 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -128,4 +128,28 @@ class Norm1(Function): def prox(self,x,tau): return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + +# Box constraints indicator function. Calling returns 0 if argument is within +# the box. The prox operator is projection onto the box. Only implements one +# scalar lower and one upper as constraint on all elements. Should generalise +# to vectors to allow different constraints one elements. 
+class IndicatorBox(Function): + + def __init__(self,lower=-numpy.inf,upper=numpy.inf): + # Do nothing + self.lower = lower + self.upper = upper + super(IndicatorBox, self).__init__() + + def __call__(self,x): + + if (numpy.all(x.array>=self.lower) and + numpy.all(x.array <= self.upper) ): + val = 0 + else: + val = numpy.inf + return val + + def prox(self,x,tau=None): + return (x.maximum(self.lower)).minimum(self.upper) diff --git a/Wrappers/Python/wip/demo_test_sirt.py b/Wrappers/Python/wip/demo_test_sirt.py new file mode 100644 index 0000000..6f5a44d --- /dev/null +++ b/Wrappers/Python/wip/demo_test_sirt.py @@ -0,0 +1,176 @@ +# This demo illustrates how to use the SIRT algorithm without and with +# nonnegativity and box constraints. The ASTRA 2D projectors are used. + +# First make all imports +from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \ + AcquisitionData +from ccpi.optimisation.algs import FISTA, FBPD, CGLS, SIRT +from ccpi.optimisation.funcs import Norm2sq, Norm1, TV2D, IndicatorBox +from ccpi.astra.ops import AstraProjectorSimple + +import numpy as np +import matplotlib.pyplot as plt + +# Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case +test_case = 1 + +# Set up phantom size NxN by creating ImageGeometry, initialising the +# ImageData object with this geometry and empty array and finally put some +# data into its array, and display as image. 
+N = 128 +ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) +Phantom = ImageData(geometry=ig) + +x = Phantom.as_array() +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +plt.imshow(x) +plt.title('Phantom image') +plt.show() + +# Set up AcquisitionGeometry object to hold the parameters of the measurement +# setup geometry: # Number of angles, the actual angles from 0 to +# pi for parallel beam and 0 to 2pi for fanbeam, set the width of a detector +# pixel relative to an object pixel, the number of detector pixels, and the +# source-origin and origin-detector distance (here the origin-detector distance +# set to 0 to simulate a "virtual detector" with same detector pixel size as +# object pixel size). +angles_num = 20 +det_w = 1.0 +det_num = N +SourceOrig = 200 +OrigDetec = 0 + +if test_case==1: + angles = np.linspace(0,np.pi,angles_num,endpoint=False) + ag = AcquisitionGeometry('parallel', + '2D', + angles, + det_num,det_w) +elif test_case==2: + angles = np.linspace(0,2*np.pi,angles_num,endpoint=False) + ag = AcquisitionGeometry('cone', + '2D', + angles, + det_num, + det_w, + dist_source_center=SourceOrig, + dist_center_detector=OrigDetec) +else: + NotImplemented + +# Set up Operator object combining the ImageGeometry and AcquisitionGeometry +# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU. +Aop = AstraProjectorSimple(ig, ag, 'gpu') + +# Forward and backprojection are available as methods direct and adjoint. Here +# generate test data b and do simple backprojection to obtain z. +b = Aop.direct(Phantom) +z = Aop.adjoint(b) + +plt.imshow(b.array) +plt.title('Simulated data') +plt.show() + +plt.imshow(z.array) +plt.title('Backprojected data') +plt.show() + +# Using the test data b, different reconstruction methods can now be set up as +# demonstrated in the rest of this file. 
In general all methods need an initial +# guess and some algorithm options to be set: +x_init = ImageData(np.zeros(x.shape),geometry=ig) +opt = {'tol': 1e-4, 'iter': 1000} + +# First a CGLS reconstruction can be done: +x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b, opt) + +plt.imshow(x_CGLS.array) +plt.title('CGLS') +plt.colorbar() +plt.show() + +plt.semilogy(criter_CGLS) +plt.title('CGLS criterion') +plt.show() + +# A SIRT unconstrained reconstruction can be done: similarly: +x_SIRT, it_SIRT, timing_SIRT, criter_SIRT = SIRT(x_init, Aop, b, opt) + +plt.imshow(x_SIRT.array) +plt.title('SIRT unconstrained') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT) +plt.title('SIRT unconstrained criterion') +plt.show() + +# A SIRT nonnegativity constrained reconstruction can be done using the +# additional input "constraint" set to a box indicator function with 0 as the +# lower bound and the default upper bound of infinity: +x_SIRT0, it_SIRT0, timing_SIRT0, criter_SIRT0 = SIRT(x_init, Aop, b, opt, + constraint=IndicatorBox(lower=0)) + +plt.imshow(x_SIRT0.array) +plt.title('SIRT nonneg') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT0) +plt.title('SIRT nonneg criterion') +plt.show() + +# A SIRT reconstruction with box constraints on [0,1] can also be done: +x_SIRT01, it_SIRT01, timing_SIRT01, criter_SIRT01 = SIRT(x_init, Aop, b, opt, + constraint=IndicatorBox(lower=0,upper=1)) + +plt.imshow(x_SIRT01.array) +plt.title('SIRT box(0,1)') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT01) +plt.title('SIRT box(0,1) criterion') +plt.show() + +# The indicator function can also be used with the FISTA algorithm to do +# least squares with nonnegativity constraint. 
+ +# Create least squares object instance with projector, test data and a constant +# coefficient of 0.5: +f = Norm2sq(Aop,b,c=0.5) + +# Run FISTA for least squares without constraints +x_fista, it, timing, criter = FISTA(x_init, f, None,opt) + +plt.imshow(x_fista.array) +plt.title('FISTA Least squares') +plt.show() + +plt.semilogy(criter) +plt.title('FISTA Least squares criterion') +plt.show() + +# Run FISTA for least squares with nonnegativity constraint +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, IndicatorBox(lower=0),opt) + +plt.imshow(x_fista0.array) +plt.title('FISTA Least squares nonneg') +plt.show() + +plt.semilogy(criter0) +plt.title('FISTA Least squares nonneg criterion') +plt.show() + +# Run FISTA for least squares with box constraint [0,1] +x_fista01, it01, timing01, criter01 = FISTA(x_init, f, IndicatorBox(lower=0,upper=1),opt) + +plt.imshow(x_fista01.array) +plt.title('FISTA Least squares box(0,1)') +plt.show() + +plt.semilogy(criter01) +plt.title('FISTA Least squares box(0,1) criterion') +plt.show() \ No newline at end of file -- cgit v1.2.3 From d00a41c8c6b95e9a56f6af221ebc2d23d9f8db9d Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Sat, 29 Sep 2018 23:52:58 +0100 Subject: Memhandle (#136) * added inplace algebra correctly * readded object to AcquisitionGeometry and ImageGeometry * Add more memory management * added unary and binary operations to DataContainer * Added unary and binary operations added also unittest * added unittest * added unit tests for binary operators * bugfixes and remove prints * dafault initialisation behaviour not to deep_copy * FISTA to use the efficient memory handling additions * add calls with input out=None Starts to address #134 * add methods with arg out=None * add test for memhandle * major bugfix after removal of create_image_data * fixed FISTA memhandle * some bugfix in FISTA and funcs ops * debug in progress * Fixed several bugs in memory optimised code Added methods allocate_adjoint and allocate_direct 
to Operator. * test with cvx and adjustments * fixed unittest, need to fix inline algebra * fixed inline algebra definition * initial cvxpy unittest * adds cvx demo as unit test closes #150 check equality of cvx calculated with respect to FISTA and FBDP --- Wrappers/Python/ccpi/framework.py | 244 +++++++++---- Wrappers/Python/ccpi/optimisation/algs.py | 78 +++-- Wrappers/Python/ccpi/optimisation/funcs.py | 139 +++++++- Wrappers/Python/ccpi/optimisation/ops.py | 114 +++++- Wrappers/Python/conda-recipe/meta.yaml | 4 + Wrappers/Python/conda-recipe/run_test.py | 536 ++++++++++++++++++++++++++++- Wrappers/Python/wip/demo_compare_cvx.py | 25 +- Wrappers/Python/wip/demo_memhandle.py | 193 +++++++++++ 8 files changed, 1200 insertions(+), 133 deletions(-) create mode 100755 Wrappers/Python/wip/demo_memhandle.py diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 0c2432f..a34ca37 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -33,6 +33,15 @@ def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] +def message(cls, msg, *args): + msg = "{0}: " + msg + for i in range(len(args)): + msg += " {%d}" %(i+1) + args = list(args) + args.insert(0, cls.__name__ ) + + return msg.format(*args ) + class ImageGeometry(object): @@ -211,7 +220,7 @@ class DataContainer(object): if type(array) == numpy.ndarray: if deep_copy: - self.array = array[:] + self.array = array.copy() else: self.array = array else: @@ -335,12 +344,17 @@ class DataContainer(object): def fill(self, array, **dimension): '''fills the internal numpy array with the one provided''' if dimension == {}: - if numpy.shape(array) != numpy.shape(self.array): - raise ValueError('Cannot fill with the provided array.' 
+ \ - 'Expecting {0} got {1}'.format( - numpy.shape(self.array), - numpy.shape(array))) - self.array = array[:] + if issubclass(type(array), DataContainer) or\ + issubclass(type(array), numpy.ndarray): + if array.shape != self.shape: + raise ValueError('Cannot fill with the provided array.' + \ + 'Expecting {0} got {1}'.format( + self.shape,array.shape)) + if issubclass(type(array), DataContainer): + numpy.copyto(self.array, array.array) + else: + #self.array[:] = array + numpy.copyto(self.array, array) else: command = 'self.array[' @@ -360,8 +374,10 @@ class DataContainer(object): def check_dimensions(self, other): return self.shape == other.shape - - def __add__(self, other): + + ## algebra + + def __add__(self, other , *args, out=None, **kwargs): if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -407,7 +423,6 @@ class DataContainer(object): return self.__div__(other) def __div__(self, other): - print ("calling __div__") if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() / other.as_array() @@ -470,40 +485,6 @@ class DataContainer(object): type(other))) # __mul__ - - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - - def abs(self): - out = numpy.abs(self.as_array() ) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - def maximum(self,otherscalar): - out = numpy.maximum(self.as_array(),otherscalar) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - def minimum(self,otherscalar): - out = numpy.minimum(self.as_array(),otherscalar) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - def sign(self): - out = numpy.sign(self.as_array() ) - return type(self)(out, - 
deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - # reverse operand def __radd__(self, other): return self + other @@ -530,7 +511,7 @@ class DataContainer(object): return type(self)(fother ** self.array , dimension_labels=self.dimension_labels, geometry=self.geometry) - elif issubclass(other, DataContainer): + elif issubclass(type(other), DataContainer): if self.check_dimensions(other): return type(self)(other.as_array() ** self.array , dimension_labels=self.dimension_labels, @@ -539,27 +520,59 @@ class DataContainer(object): raise ValueError('Dimensions do not match') # __rpow__ - def sum(self): - return self.as_array().sum() - # in-place arithmetic operators: # (+=, -=, *=, /= , //=, + # must return self + def __iadd__(self, other): - return self + other + if isinstance(other, (int, float)) : + #print ("__iadd__", self.array.shape) + numpy.add(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.add(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __iadd__ def __imul__(self, other): - return self * other + if isinstance(other, (int, float)) : + #print ("__imul__", self.array.shape) + #print ("type(self)", type(self)) + #print ("self.array", self.array, other) + arr = self.as_array() + #print ("arr", arr) + numpy.multiply(arr, other, out=arr) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.multiply(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __imul__ def __isub__(self, other): - return self - other + if isinstance(other, (int, float)) : + numpy.subtract(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.subtract(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return 
self # __isub__ def __idiv__(self, other): - print ("call __idiv__") - return self / other + if isinstance(other, (int, float)) : + numpy.divide(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.divide(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __idiv__ def __str__ (self, representation=False): @@ -578,9 +591,6 @@ class DataContainer(object): dimension_labels=self.dimension_labels, deep_copy=True, geometry=self.geometry ) - def copy(self): - '''alias of clone''' - return self.clone() def get_data_axes_order(self,new_order=None): '''returns the axes label of self as a list @@ -617,13 +627,127 @@ class DataContainer(object): .format(len(self.shape),len(new_order))) - - + def copy(self): + '''alias of clone''' + return self.clone() + + ## binary operations + + def pixel_wise_binary(self,pwop, x2 , *args, out=None, **kwargs): + if out is None: + if isinstance(x2, (int, float, complex)): + out = pwop(self.as_array() , x2 , *args, **kwargs ) + elif issubclass(type(x2) , DataContainer): + out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + + + elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): + if self.check_dimensions(out) and self.check_dimensions(x2): + pwop(self.as_array(), x2.as_array(), *args, out=out.as_array(), **kwargs ) + return type(self)(out.as_array(), + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): + if self.check_dimensions(out): + pwop(self.as_array(), x2, *args, out=out.as_array(), **kwargs ) + return type(self)(out.as_array(), + 
deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), x2 , *args, out=out, **kwargs) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def add(self, other , *args, out=None, **kwargs): + return self.pixel_wise_binary(numpy.add, other, *args, out=out, **kwargs) + + def subtract(self, other , *args, out=None, **kwargs): + return self.pixel_wise_binary(numpy.subtract, other, *args, out=out, **kwargs) + + def multiply(self, other , *args, out=None, **kwargs): + return self.pixel_wise_binary(numpy.multiply, other, *args, out=out, **kwargs) + + def divide(self, other , *args, out=None, **kwargs): + return self.pixel_wise_binary(numpy.divide, other, *args, out=out, **kwargs) + + def power(self, other , *args, out=None, **kwargs): + return self.pixel_wise_binary(numpy.power, other, *args, out=out, **kwargs) + + def maximum(self,x2, out=None): + return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out) + + ## unary operations + + def pixel_wise_unary(self,pwop, *args, out=None, **kwargs): + if out is None: + out = pwop(self.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + elif issubclass(type(out), DataContainer): + if self.check_dimensions(out): + pwop(self.as_array(), *args, out=out.as_array(), **kwargs ) + return type(self)(out.as_array(), + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif 
issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), *args, out=out, **kwargs) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def abs(self, out=None): + return self.pixel_wise_unary(numpy.abs, out=out) + + def sign(self, out=None): + #out = numpy.sign(self.as_array() ) + #return type(self)(out, + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + return self.pixel_wise_unary(numpy.sign , out=out) + + def sqrt(self, out=None): + return self.pixel_wise_unary(numpy.sqrt, out=out) + + #def __abs__(self): + # operation = FM.OPERATION.ABS + # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) + # __abs__ + + ## reductions + def sum(self): + return self.as_array().sum() + + class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' def __init__(self, array = None, - deep_copy=True, + deep_copy=False, dimension_labels=None, **kwargs): @@ -1131,4 +1255,4 @@ if __name__ == '__main__': pixel_num_h=5 , channels=2) sino = AcquisitionData(geometry=sgeometry) sino2 = sino.clone() - \ No newline at end of file + diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 570df4b..4a6a383 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -43,22 +43,22 @@ def FISTA(x_init, f=None, g=None, opt=None): # algorithmic parameters if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] + opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} + max_iter = opt['iter'] if 'iter' 
in opt.keys() else 1000 + tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + + # initialization - x_old = x_init - y = x_init; + if memopt: + y = x_init.clone() + x_old = x_init.clone() + x = x_init.clone() + u = x_init.clone() + else: + x_old = x_init + y = x_init; timing = numpy.zeros(max_iter) criter = numpy.zeros(max_iter) @@ -66,22 +66,44 @@ def FISTA(x_init, f=None, g=None, opt=None): invL = 1/f.L t_old = 1 + + c = f(x_init) + g(x_init) # algorithm loop for it in range(0, max_iter): time0 = time.time() - - u = y - invL*f.grad(y) - - x = g.prox(u,invL) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - y = x + (t_old-1)/t*(x-x_old) - - x_old = x - t_old = t + if memopt: + # u = y - invL*f.grad(y) + # store the result in x_old + f.gradient(y, out=u) + u.__imul__( -invL ) + u.__iadd__( y ) + # x = g.prox(u,invL) + g.proximal(u, invL, out=x) + + t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) + + # y = x + (t_old-1)/t*(x-x_old) + x.subtract(x_old, out=y) + y.__imul__ ((t_old-1)/t) + y.__iadd__( x ) + + x_old.fill(x) + t_old = t + + + else: + u = y - invL*f.grad(y) + + x = g.prox(u,invL) + + t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) + + y = x + (t_old-1)/t*(x-x_old) + + x_old = x.copy() + t_old = t # time and criterion timing[it] = time.time() - time0 @@ -93,7 +115,7 @@ def FISTA(x_init, f=None, g=None, opt=None): #print(it, 'out of', 10, 'iterations', end='\r'); - criter = criter[0:it+1]; + #criter = criter[0:it+1]; timing = numpy.cumsum(timing[0:it+1]); return x, it, timing, criter @@ -127,6 +149,7 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): opt[ke] = 1e-4 tol = opt['tol'] max_iter = opt['iter'] + memopt = opt['memopts'] if 'memopts' in opt.keys() else False # step-sizes tau = 2 / (g.L + 2); @@ -141,6 +164,9 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): timing = numpy.zeros(max_iter) criter = numpy.zeros(max_iter) + + + # algorithm loop for it in range(0, max_iter): diff --git 
a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 4a0a139..490d742 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -19,16 +19,31 @@ from ccpi.optimisation.ops import Identity, FiniteDiff2D import numpy +from ccpi.framework import DataContainer +def isSizeCorrect(data1 ,data2): + if issubclass(type(data1), DataContainer) and \ + issubclass(type(data2), DataContainer): + # check dimensionality + if data1.check_dimensions(data2): + return True + elif issubclass(type(data1) , numpy.ndarray) and \ + issubclass(type(data2) , numpy.ndarray): + return data1.shape == data2.shape + else: + raise ValueError("{0}: getting two incompatible types: {1} {2}"\ + .format('Function', type(data1), type(data2))) + return False + class Function(object): def __init__(self): self.op = Identity() - def __call__(self,x): return 0 - def grad(self, x): return 0 - def prox(self, x, tau): return x - def gradient(self, x): return self.grad(x) - def proximal(self, x, tau): return self.prox(x, tau) + def __call__(self,x, out=None): return 0 + def grad(self, x): return 0 + def prox(self, x, tau): return x + def gradient(self, x, out=None): return self.grad(x) + def proximal(self, x, tau, out=None): return self.prox(x, tau) class Norm2(Function): @@ -39,10 +54,25 @@ class Norm2(Function): self.gamma = gamma; self.direction = direction; - def __call__(self, x): + def __call__(self, x, out=None): - xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, + if out is None: + xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, keepdims=True)) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + arr = out.as_array() + numpy.square(x.as_array(), out=arr) + xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = 
numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + p = numpy.sum(self.gamma*xx) return p @@ -55,6 +85,30 @@ class Norm2(Function): p = x.as_array() * xx return type(x)(p,geometry=x.geometry) + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x,tau) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + numpy.square(x.as_array(), out = out.as_array()) + xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, + keepdims=True )) + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + x.multiply(xx, out= out.as_array()) + + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + x.multiply(xx, out= out) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + + class TV2D(Norm2): @@ -81,10 +135,16 @@ class Norm2sq(Function): ''' - def __init__(self,A,b,c=1.0): + def __init__(self,A,b,c=1.0,memopt=False): self.A = A # Should be an operator, default identity self.b = b # Default zero DataSet? self.c = c # Default 1. + self.memopt = memopt + if memopt: + #self.direct_placehold = A.adjoint(b) + self.direct_placehold = A.allocate_direct() + self.adjoint_placehold = A.allocate_adjoint() + # Compute the Lipschitz parameter from the operator. # Initialise to None instead and only call when needed. 
@@ -93,11 +153,31 @@ class Norm2sq(Function): def grad(self,x): #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) + return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) def __call__(self,x): #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) + #if out is None: + # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) + #else: + y = self.A.direct(x) + y.__isub__(self.b) + y.__imul__(y) + return y.sum() * self.c + + def gradient(self, x, out = None): + if self.memopt: + #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) + + self.A.direct(x, out=self.adjoint_placehold) + self.adjoint_placehold.__isub__( self.b ) + self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold) + self.direct_placehold.__imul__(2.0 * self.c) + # can this be avoided? + out.fill(self.direct_placehold) + else: + return self.grad(x) + class ZeroFun(Function): @@ -111,20 +191,33 @@ class ZeroFun(Function): return 0 def prox(self,x,tau): - return x + return x.copy() + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + out.fill(x) # A more interesting example, least squares plus 1-norm minimization. 
# Define class to represent 1-norm including prox function class Norm1(Function): def __init__(self,gamma): - # Do nothing self.gamma = gamma self.L = 1 + self.sign_x = None super(Norm1, self).__init__() - def __call__(self,x): - return self.gamma*(x.abs().sum()) + def __call__(self,x,out=None): + if out is None: + return self.gamma*(x.abs().sum()) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + x.abs(out=out) + return out.sum() * self.gamma def prox(self,x,tau): return (x.abs() - tau*self.gamma).maximum(0) * x.sign() @@ -153,3 +246,21 @@ class IndicatorBox(Function): def prox(self,x,tau=None): return (x.maximum(self.lower)).minimum(self.upper) + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + #(x.abs() - tau*self.gamma).maximum(0) * x.sign() + x.abs(out = out) + out.__isub__(tau*self.gamma) + out.maximum(0, out=out) + if self.sign_x is None or not x.shape == self.sign_x.shape: + self.sign_x = x.sign() + else: + x.sign(out=self.sign_x) + + out.__imul__( self.sign_x ) + diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 668b07e..c6775a8 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -19,55 +19,106 @@ import numpy from scipy.sparse.linalg import svds +from ccpi.framework import DataContainer +from ccpi.framework import AcquisitionData +from ccpi.framework import ImageData +from ccpi.framework import ImageGeometry +from ccpi.framework import AcquisitionGeometry # Maybe operators need to know what types they take as inputs/outputs # to not just use generic DataContainer class Operator(object): - def direct(self,x): + def direct(self,x, out=None): return x - def adjoint(self,x): + def adjoint(self,x, out=None): return x def size(self): # To be defined for 
specific class raise NotImplementedError def get_max_sing_val(self): raise NotImplementedError + def allocate_direct(self): + raise NotImplementedError + def allocate_adjoint(self): + raise NotImplementedError class Identity(Operator): def __init__(self): self.s1 = 1.0 super(Identity, self).__init__() - def direct(self,x): - return x + def direct(self,x,out=None): + if out is None: + return x.copy() + else: + out.fill(x) - def adjoint(self,x): - return x + def adjoint(self,x, out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def size(self): + return NotImplemented + + def get_max_sing_val(self): + return self.s1 + +class TomoIdentity(Operator): + def __init__(self, geometry, **kwargs): + self.s1 = 1.0 + self.geometry = geometry + super(TomoIdentity, self).__init__() + + def direct(self,x,out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def adjoint(self,x, out=None): + if out is None: + return x.copy() + else: + out.fill(x) def size(self): return NotImplemented def get_max_sing_val(self): return self.s1 + def allocate_direct(self): + if issubclass(type(self.geometry), ImageGeometry): + return ImageData(geometry=self.geometry) + elif issubclass(type(self.geometry), AcquisitionGeometry): + return AcquisitionData(geometry=self.geometry) + else: + raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry)) + def allocate_adjoint(self): + return self.allocate_direct() + + class FiniteDiff2D(Operator): def __init__(self): self.s1 = 8.0 super(FiniteDiff2D, self).__init__() - def direct(self,x): + def direct(self,x, out=None): '''Forward differences with Neumann BC.''' + # FIXME this seems to be working only with numpy arrays d1 = numpy.zeros_like(x.as_array()) d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1] d2 = numpy.zeros_like(x.as_array()) d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:] d = numpy.stack((d1,d2),0) - return type(x)(d,geometry=x.geometry) + return 
type(x)(d,False,geometry=x.geometry) - def adjoint(self,x): + def adjoint(self,x, out=None): '''Backward differences, Neumann BC.''' Nrows = x.get_dimension_size('horizontal_x') Ncols = x.get_dimension_size('horizontal_x') @@ -76,12 +127,16 @@ class FiniteDiff2D(Operator): Nchannels = x.get_dimension_size('channel') zer = numpy.zeros((Nrows,1)) xxx = x.as_array()[0,:,:-1] - h = numpy.concatenate((zer,xxx), 1) - numpy.concatenate((xxx,zer), 1) + # + h = numpy.concatenate((zer,xxx), 1) + h -= numpy.concatenate((xxx,zer), 1) zer = numpy.zeros((1,Ncols)) xxx = x.as_array()[1,:-1,:] - v = numpy.concatenate((zer,xxx), 0) - numpy.concatenate((xxx,zer), 0) - return type(x)(h + v,geometry=x.geometry) + # + v = numpy.concatenate((zer,xxx), 0) + v -= numpy.concatenate((xxx,zer), 0) + return type(x)(h + v, False, geometry=x.geometry) def size(self): return NotImplemented @@ -132,7 +187,7 @@ def PowerMethodNonsquareOld(op,numiters): def PowerMethodNonsquare(op,numiters): # Initialise random # Jakob's - #inputsize = op.size()[1] + inputsize , outputsize = op.size() #x0 = ImageContainer(numpy.random.randn(*inputsize) # Edo's #vg = ImageGeometry(voxel_num_x=inputsize[0], @@ -143,7 +198,10 @@ def PowerMethodNonsquare(op,numiters): #print (x0) #x0.fill(numpy.random.randn(*x0.shape)) - x0 = op.create_image_data() + + #x0 = op.create_image_data() + x0 = op.allocate_direct() + x0.fill(numpy.random.randn(*x0.shape)) s = numpy.zeros(numiters) # Loop @@ -162,11 +220,19 @@ class LinearOperatorMatrix(Operator): self.s1 = None # Largest singular value, initially unknown super(LinearOperatorMatrix, self).__init__() - def direct(self,x): - return type(x)(numpy.dot(self.A,x.as_array())) + def direct(self,x, out=None): + if out is None: + return type(x)(numpy.dot(self.A,x.as_array())) + else: + numpy.dot(self.A, x.as_array(), out=out.as_array()) + - def adjoint(self,x): - return type(x)(numpy.dot(self.A.transpose(),x.as_array())) + def adjoint(self,x, out=None): + if out is None: + return 
type(x)(numpy.dot(self.A.transpose(),x.as_array())) + else: + numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) + def size(self): return self.A.shape @@ -178,3 +244,15 @@ class LinearOperatorMatrix(Operator): return self.s1 else: return self.s1 + def allocate_direct(self): + '''allocates the memory to hold the result of adjoint''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((N_A,1)) + return DataContainer(out) + def allocate_adjoint(self): + '''allocate the memory to hold the result of direct''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((M_A,1)) + return DataContainer(out) diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 8575b8a..1b3d910 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -20,6 +20,10 @@ requirements: - numpy - scipy - matplotlib + run: + - python + - numpy + - cvxpy about: home: http://www.ccpi.ac.uk diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index 6a20d05..7df5c07 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -1,8 +1,33 @@ import unittest import numpy -from ccpi.framework import DataContainer, ImageData, AcquisitionData, \ - ImageGeometry, AcquisitionGeometry +import numpy as np +from ccpi.framework import DataContainer +from ccpi.framework import ImageData +from ccpi.framework import AcquisitionData +from ccpi.framework import ImageGeometry +from ccpi.framework import AcquisitionGeometry import sys +from timeit import default_timer as timer +from ccpi.optimisation.algs import FISTA +from ccpi.optimisation.algs import FBPD +from ccpi.optimisation.funcs import Norm2sq +from ccpi.optimisation.funcs import ZeroFun +from ccpi.optimisation.funcs import Norm1 +from ccpi.optimisation.funcs import TV2D + +from ccpi.optimisation.ops import 
LinearOperatorMatrix +from ccpi.optimisation.ops import TomoIdentity + +from cvxpy import * + + +def aid(x): + # This function returns the memory + # block address of an array. + return x.__array_interface__['data'][0] + +def dt (steps): + return steps[-1] - steps[-2] class TestDataContainer(unittest.TestCase): @@ -21,6 +46,282 @@ class TestDataContainer(unittest.TestCase): self.assertEqual(sys.getrefcount(a),3) self.assertEqual(ds.dimension_labels , {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'}) + def testGb_creation_nocopy(self): + X,Y,Z = 512,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + print("test clone") + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + #print("a refcount " , sys.getrefcount(a)) + self.assertEqual(sys.getrefcount(a),3) + ds1 = ds.copy() + self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array())) + ds1 = ds.clone() + self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array())) + + + def testInlineAlgebra(self): + print ("Test Inline Algebra") + X,Y,Z = 1024,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + print(t0) + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + #ds.__iadd__( 2 ) + ds += 2 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],3.) + #ds.__isub__( 2 ) + ds -= 2 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],1.) + #ds.__imul__( 2 ) + ds *= 2 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],2.) + #ds.__idiv__( 2 ) + ds /= 2 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],1.) + + ds1 = ds.copy() + #ds1.__iadd__( 1 ) + ds1 += 1 + #ds.__iadd__( ds1 ) + ds += ds1 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],3.) 
+ #ds.__isub__( ds1 ) + ds -= ds1 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],1.) + #ds.__imul__( ds1 ) + ds *= ds1 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],2.) + #ds.__idiv__( ds1 ) + ds /= ds1 + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],1.) + + + def test_unary_operations(self): + print ("Test unary operations") + X,Y,Z = 1024,512,512 + steps = [timer()] + a = -numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + print(t0) + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + + ds.sign(out=ds) + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],-1.) + + ds.abs(out=ds) + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],1.) + + ds.__imul__( 2 ) + ds.sqrt(out=ds) + steps.append(timer()) + print(dt(steps)) + self.assertEqual(ds.as_array()[0][0][0],numpy.sqrt(2., dtype='float32')) + + + + def test_binary_operations(self): + self.binary_add() + self.binary_subtract() + self.binary_multiply() + self.binary_divide() + + def binary_add(self): + print ("Test binary add") + X,Y,Z = 512,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + ds1 = ds.copy() + + steps.append(timer()) + ds.add(ds1, out=ds) + steps.append(timer()) + t1 = dt(steps) + print("ds.add(ds1, out=ds)",dt(steps)) + steps.append(timer()) + ds2 = ds.add(ds1) + steps.append(timer()) + t2 = dt(steps) + print("ds2 = ds.add(ds1)",dt(steps)) + + self.assertLess(t1,t2) + self.assertEqual(ds.as_array()[0][0][0] , 2.) 
+ + ds0 = ds + ds0.add(2,out=ds0) + steps.append(timer()) + print ("ds0.add(2,out=ds0)", dt(steps), 3 , ds0.as_array()[0][0][0]) + self.assertEqual(4., ds0.as_array()[0][0][0]) + + dt1 = dt(steps) + ds3 = ds0.add(2) + steps.append(timer()) + print ("ds3 = ds0.add(2)", dt(steps), 5 , ds3.as_array()[0][0][0]) + dt2 = dt(steps) + self.assertLess(dt1,dt2) + + def binary_subtract(self): + print ("Test binary subtract") + X,Y,Z = 512,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + ds1 = ds.copy() + + steps.append(timer()) + ds.subtract(ds1, out=ds) + steps.append(timer()) + t1 = dt(steps) + print("ds.subtract(ds1, out=ds)",dt(steps)) + self.assertEqual(0., ds.as_array()[0][0][0]) + + steps.append(timer()) + ds2 = ds.subtract(ds1) + self.assertEqual(-1., ds2.as_array()[0][0][0]) + + steps.append(timer()) + t2 = dt(steps) + print("ds2 = ds.subtract(ds1)",dt(steps)) + + self.assertLess(t1,t2) + + del ds1 + ds0 = ds.copy() + steps.append(timer()) + ds0.subtract(2,out=ds0) + #ds0.__isub__( 2 ) + steps.append(timer()) + print ("ds0.subtract(2,out=ds0)", dt(steps), -2. , ds0.as_array()[0][0][0]) + self.assertEqual(-2., ds0.as_array()[0][0][0]) + + dt1 = dt(steps) + ds3 = ds0.subtract(2) + steps.append(timer()) + print ("ds3 = ds0.subtract(2)", dt(steps), 0. 
, ds3.as_array()[0][0][0]) + dt2 = dt(steps) + self.assertLess(dt1,dt2) + self.assertEqual(-2., ds0.as_array()[0][0][0]) + self.assertEqual(-4., ds3.as_array()[0][0][0]) + + def binary_multiply(self): + print ("Test binary multiply") + X,Y,Z = 1024,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + ds1 = ds.copy() + + steps.append(timer()) + ds.multiply(ds1, out=ds) + steps.append(timer()) + t1 = dt(steps) + print("ds.multiply(ds1, out=ds)",dt(steps)) + steps.append(timer()) + ds2 = ds.multiply(ds1) + steps.append(timer()) + t2 = dt(steps) + print("ds2 = ds.multiply(ds1)",dt(steps)) + + self.assertLess(t1,t2) + + ds0 = ds + ds0.multiply(2,out=ds0) + steps.append(timer()) + print ("ds0.multiply(2,out=ds0)", dt(steps), 2. , ds0.as_array()[0][0][0]) + self.assertEqual(2., ds0.as_array()[0][0][0]) + + dt1 = dt(steps) + ds3 = ds0.multiply(2) + steps.append(timer()) + print ("ds3 = ds0.multiply(2)", dt(steps), 4. , ds3.as_array()[0][0][0]) + dt2 = dt(steps) + self.assertLess(dt1,dt2) + self.assertEqual(4., ds3.as_array()[0][0][0]) + self.assertEqual(2., ds.as_array()[0][0][0]) + + def binary_divide(self): + print ("Test binary divide") + X,Y,Z = 1024,512,512 + steps = [timer()] + a = numpy.ones((X,Y,Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y','Z']) + ds1 = ds.copy() + + steps.append(timer()) + ds.divide(ds1, out=ds) + steps.append(timer()) + t1 = dt(steps) + print("ds.divide(ds1, out=ds)",dt(steps)) + steps.append(timer()) + ds2 = ds.divide(ds1) + steps.append(timer()) + t2 = dt(steps) + print("ds2 = ds.divide(ds1)",dt(steps)) + + self.assertLess(t1,t2) + self.assertEqual(ds.as_array()[0][0][0] , 1.) 
+ + ds0 = ds + ds0.divide(2,out=ds0) + steps.append(timer()) + print ("ds0.divide(2,out=ds0)", dt(steps), 0.5 , ds0.as_array()[0][0][0]) + self.assertEqual(0.5, ds0.as_array()[0][0][0]) + + dt1 = dt(steps) + ds3 = ds0.divide(2) + steps.append(timer()) + print ("ds3 = ds0.divide(2)", dt(steps), 0.25 , ds3.as_array()[0][0][0]) + dt2 = dt(steps) + self.assertLess(dt1,dt2) + self.assertEqual(.25, ds3.as_array()[0][0][0]) + self.assertEqual(.5, ds.as_array()[0][0][0]) + + def test_creation_copy(self): shape = (2,3,4,5) size = shape[0] @@ -147,6 +448,235 @@ class TestDataContainer(unittest.TestCase): res = False print (err) self.assertTrue(res) + + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print (err) + self.assertTrue(res) + +class TestAlgorithms(unittest.TestCase): + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print (err) + self.assertTrue(res) + + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print (err) + self.assertTrue(res) + def test_FISTA(self): + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0],1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt':True} + # Create object instances with the test data A and b. + f = Norm2sq(A,b,c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n,1))) + + f.grad(x_init) + + # Run FISTA for least squares plus zero function. 
+ x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt) + + # Print solution and final objective/criterion value for comparison + print("FISTA least squares plus zero function solution and objective value:") + print(x_fista0.array) + print(criter0[-1]) + + # Compare to CVXPY + + # Construct the problem. + x0 = Variable(n) + objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) ) + prob0 = Problem(objective0) + + # The optimal objective is returned by prob.solve(). + result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus zero function solution and objective value:") + print(x0.value) + print(objective0.value) + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fista0.array),x0.value,6) + # Create 1-norm object instance + g1 = Norm1(lam) + + g1(x_init) + g1.prox(x_init,0.02) + + # Combine with least squares and solve using generic FISTA implementation + x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt) + + # Print for comparison + print("FISTA least squares plus 1-norm solution and objective value:") + print(x_fista1) + print(criter1[-1]) + + # Compare to CVXPY + + # Construct the problem. + x1 = Variable(n) + objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) + prob1 = Problem(objective1) + + # The optimal objective is returned by prob.solve(). 
+ result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1.value) + print(objective1.value) + + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fista1.array),x1.value,6) + + # Now try another algorithm FBPD for same problem: + x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) + print(x_fbpd1) + print(criterfbpd1[-1]) + + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fbpd1.array),x1.value,6) + # Plot criterion curve to see both FISTA and FBPD converge to same value. + # Note that FISTA is very efficient for 1-norm minimization so it beats + # FBPD in this test by a lot. But FBPD can handle a larger class of problems + # than FISTA can. + + + # Now try 1-norm and TV denoising with FBPD, first 1-norm. + + # Set up phantom size NxN by creating ImageGeometry, initialising the + # ImageData object with this geometry and empty array and finally put some + # data into its array, and display as image. 
+ N = 64 + ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) + Phantom = ImageData(geometry=ig) + + x = Phantom.as_array() + x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 + x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + + + # Identity operator for denoising + I = TomoIdentity(ig) + + # Data and add noise + y = I.direct(Phantom) + y.array = y.array + 0.1*np.random.randn(N, N) + + + # Data fidelity term + f_denoise = Norm2sq(I,y,c=0.5,memopt=True) + + # 1-norm regulariser + lam1_denoise = 1.0 + g1_denoise = Norm1(lam1_denoise) + + # Initial guess + x_init_denoise = ImageData(np.zeros((N,N))) + + # Combine with least squares and solve using generic FISTA implementation + x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) + + print(x_fista1_denoise) + print(criter1_denoise[-1]) + + + # Now denoise LS + 1-norm with FBPD + x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) + print(x_fbpd1_denoise) + print(criterfbpd1_denoise[-1]) + + + # Compare to CVXPY + + # Construct the problem. + x1_denoise = Variable(N**2,1) + objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) ) + prob1_denoise = Problem(objective1_denoise) + + # The optimal objective is returned by prob.solve(). 
+ result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1_denoise.value) + print(objective1_denoise.value) + self.assertNumpyArrayAlmostEqual( + x_fista1_denoise.array.flatten(),x1_denoise.value,5) + + self.assertNumpyArrayAlmostEqual( + x_fbpd1_denoise.array.flatten(),x1_denoise.value,5) + x1_cvx = x1_denoise.value + x1_cvx.shape = (N,N) + + + # Now TV with FBPD + lam_tv = 0.1 + gtv = TV2D(lam_tv) + gtv(gtv.op.direct(x_init_denoise)) + + opt_tv = {'tol': 1e-4, 'iter': 10000} + + x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv) + print(x_fbpdtv_denoise) + print(criterfbpdtv_denoise[-1]) + + + + # Compare to CVXPY + + # Construct the problem. + xtv_denoise = Variable((N,N)) + objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) ) + probtv_denoise = Problem(objectivetv_denoise) + + # The optimal objective is returned by prob.solve(). 
+ resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(xtv_denoise.value) + print(objectivetv_denoise.value) + + self.assertNumpyArrayAlmostEqual( + x_fbpdtv_denoise.as_array(),xtv_denoise.value,1) + # ============================================================================= # def test_upper(self): # self.assertEqual('foo'.upper(), 'FOO') @@ -164,4 +694,4 @@ class TestDataContainer(unittest.TestCase): # ============================================================================= if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py index cbfe50e..2df11c1 100644 --- a/Wrappers/Python/wip/demo_compare_cvx.py +++ b/Wrappers/Python/wip/demo_compare_cvx.py @@ -3,7 +3,7 @@ from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataCo from ccpi.optimisation.algs import FISTA, FBPD, CGLS from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D -from ccpi.optimisation.ops import LinearOperatorMatrix, Identity +from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity # Requires CVXPY, see http://www.cvxpy.org/ # CVXPY can be installed in anaconda using @@ -33,9 +33,9 @@ b = DataContainer(bmat) # Regularization parameter lam = 10 - +opt = {'memopt':True} # Create object instances with the test data A and b. -f = Norm2sq(A,b,c=0.5) +f = Norm2sq(A,b,c=0.5, memopt=True) g0 = ZeroFun() # Initial guess @@ -44,7 +44,7 @@ x_init = DataContainer(np.zeros((n,1))) f.grad(x_init) # Run FISTA for least squares plus zero function. 
-x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0) +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt) # Print solution and final objective/criterion value for comparison print("FISTA least squares plus zero function solution and objective value:") @@ -56,7 +56,7 @@ if use_cvxpy: # Construct the problem. x0 = Variable(n) - objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat) ) + objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) ) prob0 = Problem(objective0) # The optimal objective is returned by prob.solve(). @@ -83,7 +83,7 @@ g1(x_init) g1.prox(x_init,0.02) # Combine with least squares and solve using generic FISTA implementation -x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1) +x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt) # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") @@ -95,7 +95,7 @@ if use_cvxpy: # Construct the problem. x1 = Variable(n) - objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat) + lam*norm(x1,1) ) + objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) prob1 = Problem(objective1) # The optimal objective is returned by prob.solve(). 
@@ -147,7 +147,7 @@ plt.title('Phantom image') plt.show() # Identity operator for denoising -I = Identity() +I = TomoIdentity(ig) # Data and add noise y = I.direct(Phantom) @@ -158,7 +158,7 @@ plt.title('Noisy image') plt.show() # Data fidelity term -f_denoise = Norm2sq(I,y,c=0.5) +f_denoise = Norm2sq(I,y,c=0.5,memopt=True) # 1-norm regulariser lam1_denoise = 1.0 @@ -168,7 +168,7 @@ g1_denoise = Norm1(lam1_denoise) x_init_denoise = ImageData(np.zeros((N,N))) # Combine with least squares and solve using generic FISTA implementation -x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise) +x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) print(x_fista1_denoise) print(criter1_denoise[-1]) @@ -229,7 +229,8 @@ if use_cvxpy: # Compare to CVXPY # Construct the problem. - xtv_denoise = Variable(N,N) + xtv_denoise = Variable((N,N)) + print (xtv_denoise.shape) objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) ) probtv_denoise = Problem(objectivetv_denoise) @@ -247,4 +248,4 @@ plt.title('CVX TV') plt.show() plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV') -plt.loglog(criterfbpdtv_denoise, label='FBPD TV') \ No newline at end of file +plt.loglog(criterfbpdtv_denoise, label='FBPD TV') diff --git a/Wrappers/Python/wip/demo_memhandle.py b/Wrappers/Python/wip/demo_memhandle.py new file mode 100755 index 0000000..db48d73 --- /dev/null +++ b/Wrappers/Python/wip/demo_memhandle.py @@ -0,0 +1,193 @@ + +from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer +from ccpi.optimisation.algs import FISTA, FBPD, CGLS +from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D + +from ccpi.optimisation.ops import LinearOperatorMatrix, Identity +from ccpi.optimisation.ops import TomoIdentity + +# Requires CVXPY, see 
http://www.cvxpy.org/ +# CVXPY can be installed in anaconda using +# conda install -c cvxgrp cvxpy libgcc + + +import numpy as np +import matplotlib.pyplot as plt + +# Problem data. +m = 30 +n = 20 +np.random.seed(1) +Amat = np.random.randn(m, n) +A = LinearOperatorMatrix(Amat) +bmat = np.random.randn(m) +bmat.shape = (bmat.shape[0],1) + + + +# A = Identity() +# Change n to equal to m. + +b = DataContainer(bmat) + +# Regularization parameter +lam = 10 + +# Create object instances with the test data A and b. +f = Norm2sq(A,b,c=0.5, memopt=True) +g0 = ZeroFun() + +# Initial guess +x_init = DataContainer(np.zeros((n,1))) + +f.grad(x_init) +opt = {'memopt': True} +# Run FISTA for least squares plus zero function. +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0) +x_fista0_m, it0_m, timing0_m, criter0_m = FISTA(x_init, f, g0, opt=opt) + +iternum = [i for i in range(len(criter0))] +# Print solution and final objective/criterion value for comparison +print("FISTA least squares plus zero function solution and objective value:") +print(x_fista0.array) +print(criter0[-1]) + + +# Plot criterion curve to see FISTA converge to same value as CVX. 
+#iternum = np.arange(1,1001) +plt.figure() +plt.loglog(iternum,criter0,label='FISTA LS') +plt.loglog(iternum,criter0_m,label='FISTA LS memopt') +plt.legend() +plt.show() +#%% +# Create 1-norm object instance +g1 = Norm1(lam) + +g1(x_init) +g1.prox(x_init,0.02) + +# Combine with least squares and solve using generic FISTA implementation +x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1) +x_fista1_m, it1_m, timing1_m, criter1_m = FISTA(x_init, f, g1, opt=opt) +iternum = [i for i in range(len(criter1))] +# Print for comparison +print("FISTA least squares plus 1-norm solution and objective value:") +print(x_fista1) +print(criter1[-1]) + + +# Now try another algorithm FBPD for same problem: +x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) +iternum = [i for i in range(len(criterfbpd1))] +print(x_fbpd1) +print(criterfbpd1[-1]) + +# Plot criterion curve to see both FISTA and FBPD converge to same value. +# Note that FISTA is very efficient for 1-norm minimization so it beats +# FBPD in this test by a lot. But FBPD can handle a larger class of problems +# than FISTA can. +plt.figure() +plt.loglog(iternum,criter1,label='FISTA LS+1') +plt.loglog(iternum,criter1_m,label='FISTA LS+1 memopt') +plt.legend() +plt.show() + +plt.figure() +plt.loglog(iternum,criter1,label='FISTA LS+1') +plt.loglog(iternum,criterfbpd1,label='FBPD LS+1') +plt.legend() +plt.show() +#%% +# Now try 1-norm and TV denoising with FBPD, first 1-norm. + +# Set up phantom size NxN by creating ImageGeometry, initialising the +# ImageData object with this geometry and empty array and finally put some +# data into its array, and display as image. 
+N = 1000 +ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) +Phantom = ImageData(geometry=ig) + +x = Phantom.as_array() +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +plt.imshow(x) +plt.title('Phantom image') +plt.show() + +# Identity operator for denoising +I = TomoIdentity(ig) + +# Data and add noise +y = I.direct(Phantom) +y.array += 0.1*np.random.randn(N, N) + +plt.figure() +plt.imshow(y.array) +plt.title('Noisy image') +plt.show() + +# Data fidelity term +f_denoise = Norm2sq(I,y,c=0.5, memopt=True) + +# 1-norm regulariser +lam1_denoise = 1.0 +g1_denoise = Norm1(lam1_denoise) + +# Initial guess +x_init_denoise = ImageData(np.zeros((N,N))) +opt = {'memopt': False, 'iter' : 50} +# Combine with least squares and solve using generic FISTA implementation +print ("no memopt") +x_fista1_denoise, it1_denoise, timing1_denoise, \ + criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) +opt = {'memopt': True, 'iter' : 50} +print ("yes memopt") +x_fista1_denoise_m, it1_denoise_m, timing1_denoise_m, \ + criter1_denoise_m = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) + +print(x_fista1_denoise) +print(criter1_denoise[-1]) + +plt.figure() +plt.imshow(x_fista1_denoise.as_array()) +plt.title('FISTA LS+1') +plt.show() + +plt.figure() +plt.imshow(x_fista1_denoise_m.as_array()) +plt.title('FISTA LS+1 memopt') +plt.show() + +plt.figure() +plt.loglog(iternum,criter1_denoise,label='FISTA LS+1') +plt.loglog(iternum,criter1_denoise_m,label='FISTA LS+1 memopt') +plt.legend() +plt.show() +#%% +# Now denoise LS + 1-norm with FBPD +x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) +print(x_fbpd1_denoise) +print(criterfbpd1_denoise[-1]) + +plt.figure() +plt.imshow(x_fbpd1_denoise.as_array()) +plt.title('FBPD LS+1') +plt.show() + + +# Now TV with FBPD +lam_tv = 0.1 +gtv = TV2D(lam_tv) +gtv(gtv.op.direct(x_init_denoise)) 
+ +opt_tv = {'tol': 1e-4, 'iter': 10000} + +x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv) +print(x_fbpdtv_denoise) +print(criterfbpdtv_denoise[-1]) + +plt.imshow(x_fbpdtv_denoise.as_array()) +plt.title('FBPD TV') +plt.show() -- cgit v1.2.3 From 1e9c45fc518cca8f40139ed6a4de299375a1c7ce Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 17 Oct 2018 22:16:22 +0100 Subject: closes #151 (#152) --- Wrappers/Python/ccpi/framework.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index a34ca37..7249d64 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -377,7 +377,7 @@ class DataContainer(object): ## algebra - def __add__(self, other , *args, out=None, **kwargs): + def __add__(self, other , out=None, *args, **kwargs): if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -633,7 +633,7 @@ class DataContainer(object): ## binary operations - def pixel_wise_binary(self,pwop, x2 , *args, out=None, **kwargs): + def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): if out is None: if isinstance(x2, (int, float, complex)): out = pwop(self.as_array() , x2 , *args, **kwargs ) @@ -673,19 +673,19 @@ class DataContainer(object): else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - def add(self, other , *args, out=None, **kwargs): + def add(self, other , out=None, *args, **kwargs): return self.pixel_wise_binary(numpy.add, other, *args, out=out, **kwargs) - def subtract(self, other , *args, out=None, **kwargs): + def subtract(self, other, out=None , *args, **kwargs): return self.pixel_wise_binary(numpy.subtract, other, *args, out=out, **kwargs) - def multiply(self, other , *args, out=None, **kwargs): + def multiply(self, other , 
out=None, *args, **kwargs): return self.pixel_wise_binary(numpy.multiply, other, *args, out=out, **kwargs) - def divide(self, other , *args, out=None, **kwargs): + def divide(self, other , out=None ,*args, **kwargs): return self.pixel_wise_binary(numpy.divide, other, *args, out=out, **kwargs) - def power(self, other , *args, out=None, **kwargs): + def power(self, other , out=None, *args, **kwargs): return self.pixel_wise_binary(numpy.power, other, *args, out=out, **kwargs) def maximum(self,x2, out=None): @@ -693,7 +693,7 @@ class DataContainer(object): ## unary operations - def pixel_wise_unary(self,pwop, *args, out=None, **kwargs): + def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): if out is None: out = pwop(self.as_array() , *args, **kwargs ) return type(self)(out, -- cgit v1.2.3 From 2d09420d881427f96ed951f6a020b1980fc1860f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 18 Oct 2018 17:05:44 +0100 Subject: removed default behaviour of Function as Identity (#155) addresses this loophole https://github.com/CCPPETMR/Hackathon-SIRF/issues/12#issuecomment-430944064 --- Wrappers/Python/ccpi/optimisation/funcs.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 490d742..c105236 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -38,12 +38,12 @@ def isSizeCorrect(data1 ,data2): class Function(object): def __init__(self): - self.op = Identity() - def __call__(self,x, out=None): return 0 - def grad(self, x): return 0 - def prox(self, x, tau): return x - def gradient(self, x, out=None): return self.grad(x) - def proximal(self, x, tau, out=None): return self.prox(x, tau) + pass + def __call__(self,x, out=None): raise NotImplementedError + def grad(self, x): raise NotImplementedError + def prox(self, x, tau): raise NotImplementedError + def gradient(self, x, out=None): raise 
NotImplementedError + def proximal(self, x, tau, out=None): raise NotImplementedError class Norm2(Function): -- cgit v1.2.3 From 11cbe477fc6efca9aab33f85dd1210bb17572be1 Mon Sep 17 00:00:00 2001 From: jakobsj Date: Mon, 22 Oct 2018 11:20:12 +0100 Subject: Added working demo of colourbay data load and recon. (#132) * Added working demo of colourbay data load and recon. * Added IMAT white-beam demo loading summed fits files * corrections to normalization and log with zeroes in flats * IMAT data multichannel script started * script to reconstruct multi-channel imat data updated * some updates to demo --- Wrappers/Python/wip/demo_colourbay.py | 137 ++++++++++++++++++++ Wrappers/Python/wip/demo_imat_multichan_RGLTK.py | 151 +++++++++++++++++++++++ Wrappers/Python/wip/demo_imat_whitebeam.py | 138 +++++++++++++++++++++ 3 files changed, 426 insertions(+) create mode 100644 Wrappers/Python/wip/demo_colourbay.py create mode 100644 Wrappers/Python/wip/demo_imat_multichan_RGLTK.py create mode 100644 Wrappers/Python/wip/demo_imat_whitebeam.py diff --git a/Wrappers/Python/wip/demo_colourbay.py b/Wrappers/Python/wip/demo_colourbay.py new file mode 100644 index 0000000..5dbf2e1 --- /dev/null +++ b/Wrappers/Python/wip/demo_colourbay.py @@ -0,0 +1,137 @@ +# This script demonstrates how to load a mat-file with UoM colour-bay data +# into the CIL optimisation framework and run (simple) multichannel +# reconstruction methods. + +# All third-party imports. +import numpy +from scipy.io import loadmat +import matplotlib.pyplot as plt + +# All own imports. +from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData +from ccpi.astra.ops import AstraProjectorMC +from ccpi.optimisation.algs import CGLS, FISTA +from ccpi.optimisation.funcs import Norm2sq, Norm1 + +# Load full data and permute to expected ordering. Change path as necessary. +# The loaded X has dims 80x60x80x150, which is pix x angle x pix x channel. 
+# Permute (numpy.transpose) puts into our default ordering which is +# (channel, angle, vertical, horizontal). + +pathname = '/media/jakob/050d8d45-fab3-4285-935f-260e6c5f162c1/Data/ColourBay/spectral_data_sets/CarbonPd/' +filename = 'carbonPd_full_sinogram_stripes_removed.mat' + +X = loadmat(pathname + filename) +X = numpy.transpose(X['SS'],(3,1,2,0)) + +# Store geometric variables for reuse +num_channels = X.shape[0] +num_pixels_h = X.shape[3] +num_pixels_v = X.shape[2] +num_angles = X.shape[1] + +# Display a single projection in a single channel +plt.imshow(X[100,5,:,:]) +plt.title('Example of a projection image in one channel' ) +plt.show() + +# Set angles to use +angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False) + +# Define full 3D acquisition geometry and data container. +# Geometric info is taken from the txt-file in the same dir as the mat-file +ag = AcquisitionGeometry('cone', + '3D', + angles, + pixel_num_h=num_pixels_h, + pixel_size_h=0.25, + pixel_num_v=num_pixels_v, + pixel_size_v=0.25, + dist_source_center=233.0, + dist_center_detector=245.0, + channels=num_channels) +data = AcquisitionData(X, geometry=ag) + +# Reduce to central slice by extracting relevant parameters from data and its +# geometry. Perhaps create function to extract central slice automatically? +data2d = data.subset(vertical=40) +ag2d = AcquisitionGeometry('cone', + '2D', + ag.angles, + pixel_num_h=ag.pixel_num_h, + pixel_size_h=ag.pixel_size_h, + pixel_num_v=1, + pixel_size_v=ag.pixel_size_h, + dist_source_center=ag.dist_source_center, + dist_center_detector=ag.dist_center_detector, + channels=ag.channels) +data2d.geometry = ag2d + +# Set up 2D Image Geometry. +# First need the geometric magnification to scale the voxel size relative +# to the detector pixel size. 
+mag = (ag.dist_source_center + ag.dist_center_detector)/ag.dist_source_center +ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h, + voxel_num_y=ag2d.pixel_num_h, + voxel_size_x=ag2d.pixel_size_h/mag, + voxel_size_y=ag2d.pixel_size_h/mag, + channels=X.shape[0]) + +# Create GPU multichannel projector/backprojector operator with ASTRA. +Aall = AstraProjectorMC(ig2d,ag2d,'gpu') + +# Compute and simple backprojction and display one channel as image. +Xbp = Aall.adjoint(data2d) +plt.imshow(Xbp.subset(channel=100).array) +plt.show() + +# Set initial guess ImageData with zeros for algorithms, and algorithm options. +x_init = ImageData(numpy.zeros((num_channels,num_pixels_v,num_pixels_h)), + geometry=ig2d, + dimension_labels=['channel','horizontal_y','horizontal_x']) +opt_CGLS = {'tol': 1e-4, 'iter': 5} + +# Run CGLS algorithm and display one channel. +x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aall, data2d, opt_CGLS) + +plt.imshow(x_CGLS.subset(channel=100).array) +plt.title('CGLS') +plt.show() + +plt.semilogy(criter_CGLS) +plt.title('CGLS Criterion vs iterations') +plt.show() + +# Create least squares object instance with projector, test data and a constant +# coefficient of 0.5. Note it is least squares over all channels. +f = Norm2sq(Aall,data2d,c=0.5) + +# Options for FISTA algorithm. +opt = {'tol': 1e-4, 'iter': 100} + +# Run FISTA for least squares without regularization and display one channel +# reconstruction as image. +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, None, opt) + +plt.imshow(x_fista0.subset(channel=100).array) +plt.title('FISTA LS') +plt.show() + +plt.semilogy(criter0) +plt.title('FISTA LS Criterion vs iterations') +plt.show() + +# Set up 1-norm regularisation (over all channels), solve with FISTA, and +# display one channel of reconstruction. 
+lam = 0.1 +g0 = Norm1(lam) + +x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g0, opt) + +plt.imshow(x_fista1.subset(channel=100).array) +plt.title('FISTA LS+1') +plt.show() + +plt.semilogy(criter1) +plt.title('FISTA LS+1 Criterion vs iterations') +plt.show() \ No newline at end of file diff --git a/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py b/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py new file mode 100644 index 0000000..0ec116f --- /dev/null +++ b/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py @@ -0,0 +1,151 @@ +# This script demonstrates how to load IMAT fits data +# into the CIL optimisation framework and run reconstruction methods. +# +# Demo to reconstruct energy-discretized channels of IMAT data + +# needs dxchange: conda install -c conda-forge dxchange +# needs astropy: conda install astropy + + +# All third-party imports. +import numpy as np +import matplotlib.pyplot as plt +from dxchange.reader import read_fits +from astropy.io import fits + +# All own imports. 
+from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData, DataContainer +from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple +from ccpi.optimisation.algs import CGLS, FISTA +from ccpi.optimisation.funcs import Norm2sq, Norm1 +from ccpi.plugins.regularisers import FGP_TV + +# set main parameters here +n = 512 +totalAngles = 250 # total number of projection angles +# spectral discretization parameter +num_average = 145 # channel discretization frequency (total number of averaged channels) +numChannels = 2970 # 2970 +totChannels = round(numChannels/num_average) # the resulting number of channels +Projections_stack = np.zeros((num_average,n,n),dtype='uint16') +ProjAngleChannels = np.zeros((totalAngles,totChannels,n,n),dtype='float32') + +######################################################################### +print ("Loading the data...") +MainPath = '/media/algol/336F96987817D4B4/DATA/IMAT_DATA/' # path to data +pathname0 = '{!s}{!s}'.format(MainPath,'PSI_DATA/DATA/Sample/') +counterFileName = 4675 +# A main loop over all available angles +for ll in range(0,totalAngles,1): + pathnameData = '{!s}{!s}{!s}/'.format(pathname0,'angle',str(ll)) + filenameCurr = '{!s}{!s}{!s}'.format('IMAT0000',str(counterFileName),'_Tomo_test_000_') + counterT = 0 + # loop over reduced channels (discretized) + for i in range(0,totChannels,1): + sumCount = 0 + # loop over actual channels to obtain averaged one + for j in range(0,num_average,1): + if counterT < 10: + outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0000',str(counterT)) + if ((counterT >= 10) & (counterT < 100)): + outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'000',str(counterT)) + if ((counterT >= 100) & (counterT < 1000)): + outfile = '{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'00',str(counterT)) + if ((counterT >= 1000) & (counterT < 10000)): + outfile = 
'{!s}{!s}{!s}{!s}.fits'.format(pathnameData,filenameCurr,'0',str(counterT)) + try: + Projections_stack[j,:,:] = read_fits(outfile) + except: + print("Fits is corrupted, skipping no.", counterT) + sumCount -= 1 + counterT += 1 + sumCount += 1 + AverageProj=np.sum(Projections_stack,axis=0)/sumCount # averaged projection over "num_average" channels + ProjAngleChannels[ll,i,:,:] = AverageProj + print("Angle is processed", ll) + counterFileName += 1 +######################################################################### + +flat1 = read_fits('{!s}{!s}{!s}'.format(MainPath,'PSI_DATA/DATA/','OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits')) +nonzero = flat1 > 0 +# Apply flat field and take negative log +for ll in range(0,totalAngles,1): + for i in range(0,totChannels,1): + ProjAngleChannels[ll,i,nonzero] = ProjAngleChannels[ll,i,nonzero]/flat1[nonzero] + +eqzero = ProjAngleChannels == 0 +ProjAngleChannels[eqzero] = 1 +ProjAngleChannels_NormLog = -np.log(ProjAngleChannels) # normalised and neg-log data + +# extact sinogram over energy channels +selectedVertical_slice = 256 +sino_all_channels = ProjAngleChannels_NormLog[:,:,:,selectedVertical_slice] +# Set angles to use +angles = np.linspace(-np.pi,np.pi,totalAngles,endpoint=False) + +# set the geometry +ig = ImageGeometry(n,n) +ag = AcquisitionGeometry('parallel', + '2D', + angles, + n,1) +Aop = AstraProjectorSimple(ig, ag, 'gpu') + + +# loop to reconstruct energy channels +REC_chan = np.zeros((totChannels, n, n), 'float32') +for i in range(0,totChannels,1): + sino_channel = sino_all_channels[:,i,:] # extract a sinogram for i-th channel + + print ("Initial guess") + x_init = ImageData(geometry=ig) + + # Create least squares object instance with projector and data. 
+ print ("Create least squares object instance with projector and data.") + f = Norm2sq(Aop,DataContainer(sino_channel),c=0.5) + + print ("Run FISTA-TV for least squares") + lamtv = 5 + opt = {'tol': 1e-4, 'iter': 200} + g_fgp = FGP_TV(lambdaReg = lamtv, + iterationsTV=50, + tolerance=1e-6, + methodTV=0, + nonnegativity=0, + printing=0, + device='gpu') + + x_fista_fgp, it1, timing1, criter_fgp = FISTA(x_init, f, g_fgp, opt) + REC_chan[i,:,:] = x_fista_fgp.array + """ + plt.figure() + plt.subplot(121) + plt.imshow(x_fista_fgp.array, vmin=0, vmax=0.05) + plt.title('FISTA FGP TV') + plt.subplot(122) + plt.semilogy(criter_fgp) + plt.show() + """ + """ + print ("Run CGLS for least squares") + opt = {'tol': 1e-4, 'iter': 20} + x_init = ImageData(geometry=ig) + x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, DataContainer(sino_channel), opt=opt) + + plt.figure() + plt.imshow(x_CGLS.array,vmin=0, vmax=0.05) + plt.title('CGLS') + plt.show() + """ +# Saving images into fits using astrapy if required +counter = 0 +filename = 'FISTA_TV_imat_slice' +for i in range(totChannels): + im = REC_chan[i,:,:] + add_val = np.min(im[:]) + im += abs(add_val) + im = im/np.max(im[:])*65535 + outfile = '{!s}_{!s}_{!s}.fits'.format(filename,str(selectedVertical_slice),str(counter)) + hdu = fits.PrimaryHDU(np.uint16(im)) + hdu.writeto(outfile, overwrite=True) + counter += 1 \ No newline at end of file diff --git a/Wrappers/Python/wip/demo_imat_whitebeam.py b/Wrappers/Python/wip/demo_imat_whitebeam.py new file mode 100644 index 0000000..482c1ae --- /dev/null +++ b/Wrappers/Python/wip/demo_imat_whitebeam.py @@ -0,0 +1,138 @@ +# This script demonstrates how to load IMAT fits data +# into the CIL optimisation framework and run reconstruction methods. +# +# This demo loads the summedImg files which are the non-spectral images +# resulting from summing projections over all spectral channels. 
+ +# needs dxchange: conda install -c conda-forge dxchange +# needs astropy: conda install astropy + + +# All third-party imports. +import numpy +from scipy.io import loadmat +import matplotlib.pyplot as plt +from dxchange.reader import read_fits + +# All own imports. +from ccpi.framework import AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData +from ccpi.astra.ops import AstraProjectorSimple, AstraProjector3DSimple +from ccpi.optimisation.algs import CGLS, FISTA +from ccpi.optimisation.funcs import Norm2sq, Norm1 + +# Load and display a couple of summed projection as examples +pathname0 = '/media/algol/HD-LXU3/DATA_DANIIL/PSI_DATA/DATA/Sample/angle0/' +filename0 = 'IMAT00004675_Tomo_test_000_SummedImg.fits' + +data0 = read_fits(pathname0 + filename0) + +pathname10 = '/media/algol/HD-LXU3/DATA_DANIIL/PSI_DATA/DATA/Sample/angle10/' +filename10 = 'IMAT00004685_Tomo_test_000_SummedImg.fits' + +data10 = read_fits(pathname10 + filename10) + +# Load a flat field (more are available, should we average over them?) +flat1 = read_fits('/media/algol/HD-LXU3/DATA_DANIIL/PSI_DATA/DATA/OpenBeam_aft1/IMAT00004932_Tomo_test_000_SummedImg.fits') + +# Apply flat field and display after flat-field correction and negative log +data0_rel = numpy.zeros(numpy.shape(flat1), dtype = float) +nonzero = flat1 > 0 +data0_rel[nonzero] = data0[nonzero] / flat1[nonzero] +data10_rel = numpy.zeros(numpy.shape(flat1), dtype = float) +data10_rel[nonzero] = data10[nonzero] / flat1[nonzero] + +plt.imshow(data0_rel) +plt.colorbar() +plt.show() + +plt.imshow(-numpy.log(data0_rel)) +plt.colorbar() +plt.show() + +plt.imshow(data10_rel) +plt.colorbar() +plt.show() + +plt.imshow(-numpy.log(data10_rel)) +plt.colorbar() +plt.show() + +# Set up for loading all summed images at 250 angles. 
+pathname = '/media/algol/HD-LXU3/DATA_DANIIL/PSI_DATA/DATA/Sample/angle{}/' +filename = 'IMAT0000{}_Tomo_test_000_SummedImg.fits' + +# Dimensions +num_angles = 250 +imsize = 512 + +# Initialise array +data = numpy.zeros((num_angles,imsize,imsize)) + +# Load only 0-249, as 250 is at repetition of zero degrees just like image 0 +for i in range(0,250): + curimfile = (pathname + filename).format(i, i+4675) + data[i,:,:] = read_fits(curimfile) + +# Apply flat field and take negative log +nonzero = flat1 > 0 +for i in range(0,250): + data[i,nonzero] = data[i,nonzero]/flat1[nonzero] + +eqzero = data == 0 +data[eqzero] = 1 + +data_rel = -numpy.log(data) + +# Permute order to get: angles, vertical, horizontal, as default in framework. +data_rel = numpy.transpose(data_rel,(0,2,1)) + +# Set angles to use +angles = numpy.linspace(-numpy.pi,numpy.pi,num_angles,endpoint=False) + +# Create 3D acquisition geometry and acquisition data +ag = AcquisitionGeometry('parallel', + '3D', + angles, + pixel_num_h=imsize, + pixel_num_v=imsize) +b = AcquisitionData(data_rel, geometry=ag) + +# Reduce to single (noncentral) slice by extracting relevant parameters from data and its +# geometry. Perhaps create function to extract central slice automatically? +b2d = b.subset(vertical=128) +ag2d = AcquisitionGeometry('parallel', + '2D', + ag.angles, + pixel_num_h=ag.pixel_num_h) +b2d.geometry = ag2d + +# Create 2D image geometry +ig2d = ImageGeometry(voxel_num_x=ag2d.pixel_num_h, + voxel_num_y=ag2d.pixel_num_h) + +# Create GPU projector/backprojector operator with ASTRA. +Aop = AstraProjectorSimple(ig2d,ag2d,'gpu') + +# Demonstrate operator is working by applying simple backprojection. +z = Aop.adjoint(b2d) +plt.imshow(z.array) +plt.title('Simple backprojection') +plt.colorbar() +plt.show() + +# Set initial guess ImageData with zeros for algorithms, and algorithm options. 
+x_init = ImageData(numpy.zeros((imsize,imsize)), + geometry=ig2d) +opt_CGLS = {'tol': 1e-4, 'iter': 20} + +# Run CGLS algorithm and display reconstruction. +x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b2d, opt_CGLS) + +plt.imshow(x_CGLS.array) +plt.title('CGLS') +plt.colorbar() +plt.show() + +plt.semilogy(criter_CGLS) +plt.title('CGLS Criterion vs iterations') +plt.show() \ No newline at end of file -- cgit v1.2.3 From 8584140e4db56fe998f9780dbc8d75a9aa3a72fe Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 2 Nov 2018 14:06:48 +0000 Subject: Build fix (#159) closes #148 closes #156 closes #157 closes #158 * split run_test for algorithms * fix FBPD adds operator as input argument changes naming of variables to explicit * implement proximal for ZeroFun and Norm1 proximal method must be correctly implemented. lead to error #158 * fix unit test and cvx demo --- Wrappers/Python/ccpi/optimisation/algs.py | 34 +++++----- Wrappers/Python/ccpi/optimisation/funcs.py | 33 +++++++++- Wrappers/Python/conda-recipe/bld.bat | 4 -- Wrappers/Python/conda-recipe/build.sh | 4 -- Wrappers/Python/conda-recipe/meta.yaml | 10 +-- Wrappers/Python/conda-recipe/run_test.py | 100 +++++++++++++++++++++++++++-- Wrappers/Python/setup.py | 6 +- Wrappers/Python/wip/demo_compare_cvx.py | 29 +++++++-- 8 files changed, 173 insertions(+), 47 deletions(-) diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 4a6a383..24ed6e9 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -21,6 +21,7 @@ import numpy import time from ccpi.optimisation.funcs import Function +from ccpi.optimisation.funcs import ZeroFun from ccpi.framework import ImageData, AcquisitionData def FISTA(x_init, f=None, g=None, opt=None): @@ -38,8 +39,8 @@ def FISTA(x_init, f=None, g=None, opt=None): opt: additional algorithm ''' # default inputs - if f is None: f = Function() - if g is None: g = Function() + if 
f is None: f = ZeroFun() + if g is None: g = ZeroFun() # algorithmic parameters if opt is None: @@ -120,7 +121,8 @@ def FISTA(x_init, f=None, g=None, opt=None): return x, it, timing, criter -def FBPD(x_init, f=None, g=None, h=None, opt=None): +def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ + regulariser=None, opt=None): '''FBPD Algorithm Parameters: @@ -131,9 +133,9 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): opt: additional algorithm ''' # default inputs - if f is None: f = Function() - if g is None: g = Function() - if h is None: h = Function() + if constraint is None: constraint = ZeroFun() + if data_fidelity is None: data_fidelity = ZeroFun() + if regulariser is None: regulariser = ZeroFun() # algorithmic parameters if opt is None: @@ -152,14 +154,14 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): memopt = opt['memopts'] if 'memopts' in opt.keys() else False # step-sizes - tau = 2 / (g.L + 2); - sigma = (1/tau - g.L/2) / h.L; + tau = 2 / (data_fidelity.L + 2); + sigma = (1/tau - data_fidelity.L/2) / regulariser.L; inv_sigma = 1/sigma # initialization x = x_init - y = h.op.direct(x); + y = operator.direct(x); timing = numpy.zeros(max_iter) criter = numpy.zeros(max_iter) @@ -174,23 +176,23 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): # primal forward-backward step x_old = x; - x = x - tau * ( g.grad(x) + h.op.adjoint(y) ); - x = f.prox(x, tau); + x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) ); + x = constraint.prox(x, tau); # dual forward-backward step - y = y + sigma * h.op.direct(2*x - x_old); - y = y - sigma * h.prox(inv_sigma*y, inv_sigma); + y = y + sigma * operator.direct(2*x - x_old); + y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma); # time and criterion timing[it] = time.time() - t - criter[it] = f(x) + g(x) + h(h.op.direct(x)); + criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x)) # stopping rule #if np.linalg.norm(x - x_old) < tol * 
np.linalg.norm(x_old) and it > 10: # break - criter = criter[0:it+1]; - timing = numpy.cumsum(timing[0:it+1]); + criter = criter[0:it+1] + timing = numpy.cumsum(timing[0:it+1]) return x, it, timing, criter diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index c105236..0ed77ae 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -193,12 +193,20 @@ class ZeroFun(Function): def prox(self,x,tau): return x.copy() + def proximal(self, x, tau, out=None): if out is None: - return self.prox(x, tau) + return self.prox(x,tau) else: - out.fill(x) - + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + out.fill(x) + + elif issubclass(type(out) , numpy.ndarray): + out[:] = x + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) # A more interesting example, least squares plus 1-norm minimization. # Define class to represent 1-norm including prox function class Norm1(Function): @@ -221,6 +229,25 @@ class Norm1(Function): def prox(self,x,tau): return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x,tau) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + v = (x.abs() - tau*self.gamma).maximum(0) + x.sign(out=out) + out *= v + #out.fill(self.prox(x,tau)) + elif issubclass(type(out) , numpy.ndarray): + v = (x.abs() - tau*self.gamma).maximum(0) + out[:] = x.sign() + out *= v + #out[:] = self.prox(x,tau) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + # Box constraints indicator function. Calling returns 0 if argument is within # the box. The prox operator is projection onto the box. 
Only implements one diff --git a/Wrappers/Python/conda-recipe/bld.bat b/Wrappers/Python/conda-recipe/bld.bat index 4b4c7f5..d317f54 100644 --- a/Wrappers/Python/conda-recipe/bld.bat +++ b/Wrappers/Python/conda-recipe/bld.bat @@ -1,7 +1,3 @@ -IF NOT DEFINED CIL_VERSION ( -ECHO CIL_VERSION Not Defined. -exit 1 -) ROBOCOPY /E "%RECIPE_DIR%\.." "%SRC_DIR%" diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh index 5dd97b0..2e68519 100644 --- a/Wrappers/Python/conda-recipe/build.sh +++ b/Wrappers/Python/conda-recipe/build.sh @@ -1,7 +1,3 @@ -if [ -z "$CIL_VERSION" ]; then - echo "Need to set CIL_VERSION" - exit 1 -fi mkdir ${SRC_DIR}/ccpi cp -r "${RECIPE_DIR}/../../../" ${SRC_DIR}/ccpi diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 1b3d910..4b19a68 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,13 +1,13 @@ package: name: ccpi-framework - version: {{ environ['CIL_VERSION'] }} + version: 0.11.0 build: preserve_egg_dir: False - script_env: - - CIL_VERSION -# number: 0 +#script_env: +# - CIL_VERSION + number: 0 requirements: build: @@ -24,6 +24,8 @@ requirements: - python - numpy - cvxpy + - scipy + - matplotlib about: home: http://www.ccpi.ac.uk diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index 7df5c07..ce09808 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -17,6 +17,7 @@ from ccpi.optimisation.funcs import TV2D from ccpi.optimisation.ops import LinearOperatorMatrix from ccpi.optimisation.ops import TomoIdentity +from ccpi.optimisation.ops import Identity from cvxpy import * @@ -48,6 +49,7 @@ class TestDataContainer(unittest.TestCase): def testGb_creation_nocopy(self): X,Y,Z = 512,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -66,6 +68,7 @@ class 
TestDataContainer(unittest.TestCase): def testInlineAlgebra(self): print ("Test Inline Algebra") X,Y,Z = 1024,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -122,6 +125,7 @@ class TestDataContainer(unittest.TestCase): def test_unary_operations(self): print ("Test unary operations") X,Y,Z = 1024,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = -numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -157,6 +161,7 @@ class TestDataContainer(unittest.TestCase): def binary_add(self): print ("Test binary add") X,Y,Z = 512,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -243,6 +248,7 @@ class TestDataContainer(unittest.TestCase): def binary_multiply(self): print ("Test binary multiply") X,Y,Z = 1024,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -283,6 +289,7 @@ class TestDataContainer(unittest.TestCase): def binary_divide(self): print ("Test binary divide") X,Y,Z = 1024,512,512 + X,Y,Z = 256,512,512 steps = [timer()] a = numpy.ones((X,Y,Z), dtype='float32') steps.append(timer()) @@ -530,6 +537,33 @@ class TestAlgorithms(unittest.TestCase): print(objective0.value) self.assertNumpyArrayAlmostEqual( numpy.squeeze(x_fista0.array),x0.value,6) + def test_FISTA_Norm1(self): + + opt = {'memopt':True} + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0],1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt':True} + # Create object instances with the test data A and b. 
+ f = Norm2sq(A,b,c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n,1))) + # Create 1-norm object instance g1 = Norm1(lam) @@ -541,7 +575,7 @@ class TestAlgorithms(unittest.TestCase): # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") - print(x_fista1) + print(x_fista1.as_array().squeeze()) print(criter1[-1]) # Compare to CVXPY @@ -563,8 +597,56 @@ class TestAlgorithms(unittest.TestCase): self.assertNumpyArrayAlmostEqual( numpy.squeeze(x_fista1.array),x1.value,6) + def test_FBPD_Norm1(self): + + opt = {'memopt':True} + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0],1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt':True} + # Create object instances with the test data A and b. + f = Norm2sq(A,b,c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n,1))) + + # Create 1-norm object instance + g1 = Norm1(lam) + + + # Compare to CVXPY + + # Construct the problem. + x1 = Variable(n) + objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) + prob1 = Problem(objective1) + + # The optimal objective is returned by prob.solve(). 
+ result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1.value) + print(objective1.value) + # Now try another algorithm FBPD for same problem: - x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) + x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, + Identity(), None, f, g1) print(x_fbpd1) print(criterfbpd1[-1]) @@ -581,6 +663,8 @@ class TestAlgorithms(unittest.TestCase): # Set up phantom size NxN by creating ImageGeometry, initialising the # ImageData object with this geometry and empty array and finally put some # data into its array, and display as image. + def test_FISTA_denoise(self): + opt = {'memopt':True} N = 64 ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) Phantom = ImageData(geometry=ig) @@ -609,14 +693,18 @@ class TestAlgorithms(unittest.TestCase): x_init_denoise = ImageData(np.zeros((N,N))) # Combine with least squares and solve using generic FISTA implementation - x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) + x_fista1_denoise, it1_denoise, timing1_denoise, \ + criter1_denoise = \ + FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) print(x_fista1_denoise) print(criter1_denoise[-1]) # Now denoise LS + 1-norm with FBPD - x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) + x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\ + criterfbpd1_denoise = \ + FBPD(x_init_denoise, I, None, f_denoise, g1_denoise) print(x_fbpd1_denoise) print(criterfbpd1_denoise[-1]) @@ -652,7 +740,9 @@ class TestAlgorithms(unittest.TestCase): opt_tv = {'tol': 1e-4, 'iter': 10000} - x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, 
None, f_denoise, gtv,opt=opt_tv) + x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\ + criterfbpdtv_denoise = \ + FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv) print(x_fbpdtv_denoise) print(criterfbpdtv_denoise[-1]) diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index b4fabbd..dfd5bdd 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -23,11 +23,7 @@ import os import sys - -cil_version=os.environ['CIL_VERSION'] -if cil_version == '': - print("Please set the environmental variable CIL_VERSION") - sys.exit(1) +cil_version='0.11.1' setup( name="ccpi-framework", diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py index 2df11c1..ad79fa5 100644 --- a/Wrappers/Python/wip/demo_compare_cvx.py +++ b/Wrappers/Python/wip/demo_compare_cvx.py @@ -4,6 +4,7 @@ from ccpi.optimisation.algs import FISTA, FBPD, CGLS from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity +from ccpi.optimisation.ops import Identity # Requires CVXPY, see http://www.cvxpy.org/ # CVXPY can be installed in anaconda using @@ -80,8 +81,22 @@ plt.show() g1 = Norm1(lam) g1(x_init) -g1.prox(x_init,0.02) - +x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1))) +x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1))) +v = g1.prox(x_rand,0.02) +#vv = g1.prox(x_rand2,0.02) +vv = v.copy() +vv *= 0 +print (">>>>>>>>>>vv" , vv.as_array()) +vv.fill(v) +print (">>>>>>>>>>fill" , vv.as_array()) +g1.proximal(x_rand, 0.02, out=vv) +print (">>>>>>>>>>v" , v.as_array()) +print (">>>>>>>>>>gradient" , vv.as_array()) + +print (">>>>>>>>>>" , (v-vv).as_array()) +import sys +#sys.exit(0) # Combine with least squares and solve using generic FISTA implementation x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt) @@ -108,7 +123,7 @@ if use_cvxpy: print(objective1.value) # Now try another algorithm FBPD for same problem: 
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) +x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1) print(x_fbpd1) print(criterfbpd1[-1]) @@ -178,7 +193,8 @@ plt.title('FISTA LS+1') plt.show() # Now denoise LS + 1-norm with FBPD -x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) +x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \ + criterfbpd1_denoise = FBPD(x_init_denoise, I, None, f_denoise, g1_denoise) print(x_fbpd1_denoise) print(criterfbpd1_denoise[-1]) @@ -217,7 +233,8 @@ gtv(gtv.op.direct(x_init_denoise)) opt_tv = {'tol': 1e-4, 'iter': 10000} -x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv) +x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \ + criterfbpdtv_denoise = FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv) print(x_fbpdtv_denoise) print(criterfbpdtv_denoise[-1]) @@ -230,7 +247,7 @@ if use_cvxpy: # Construct the problem. 
xtv_denoise = Variable((N,N)) - print (xtv_denoise.shape) + #print (xtv_denoise.value.shape) objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) ) probtv_denoise = Problem(objectivetv_denoise) -- cgit v1.2.3 From 0ee2f1929906cbec457e4d95f9c68d68cbb52da9 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 2 Nov 2018 16:37:09 +0000 Subject: added build variants (#160) fixed unit test for python2.7 --- Wrappers/Python/conda-recipe/conda_build_config.yaml | 4 ++++ Wrappers/Python/conda-recipe/run_test.py | 7 +++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 Wrappers/Python/conda-recipe/conda_build_config.yaml diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml new file mode 100644 index 0000000..5dd08f5 --- /dev/null +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -0,0 +1,4 @@ +python: + - 2.7 # [not win] + - 3.5 + - 3.6 diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index ce09808..c1dc59b 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -670,8 +670,11 @@ class TestAlgorithms(unittest.TestCase): Phantom = ImageData(geometry=ig) x = Phantom.as_array() - x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 - x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + + x[int(round(N/4)):int(round(3*N/4)), + int(round(N/4)):int(round(3*N/4))] = 0.5 + x[int(round(N/8)):int(round(7*N/8)), + int(round(3*N/8)):int(round(5*N/8))] = 1 # Identity operator for denoising -- cgit v1.2.3 From 3f5c27f14cf921246ee55582f42bf5322f3cd590 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 7 Nov 2018 14:11:44 +0000 Subject: Test norm (#162) * enable win build with unittest. 
skip cvx * unittest on funcs * fixed Norm1 and function * test chaining * a few fixes for unittest * investigating Norm2 * restored Norm2 TODO: rewrite it and test * removed comment --- Wrappers/Python/ccpi/framework.py | 96 +-- Wrappers/Python/ccpi/optimisation/funcs.py | 30 +- Wrappers/Python/ccpi/optimisation/ops.py | 6 +- Wrappers/Python/conda-recipe/meta.yaml | 5 +- Wrappers/Python/conda-recipe/run_test.py | 1012 +++++++++++++++------------- Wrappers/Python/wip/demo_compare_cvx.py | 62 +- 6 files changed, 673 insertions(+), 538 deletions(-) diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 7249d64..4d74d2b 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -627,8 +627,8 @@ class DataContainer(object): .format(len(self.shape),len(new_order))) - def copy(self): - '''alias of clone''' + def copy(self): + '''alias of clone''' return self.clone() ## binary operations @@ -647,49 +647,51 @@ class DataContainer(object): elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): if self.check_dimensions(out) and self.check_dimensions(x2): - pwop(self.as_array(), x2.as_array(), *args, out=out.as_array(), **kwargs ) - return type(self)(out.as_array(), - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) + pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs ) + #return type(self)(out.as_array(), + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + return out else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): if self.check_dimensions(out): - pwop(self.as_array(), x2, *args, out=out.as_array(), **kwargs ) - return type(self)(out.as_array(), - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) + pwop(self.as_array(), 
x2, out=out.as_array(), *args, **kwargs ) + #return type(self)(out.as_array(), + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + return out else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), numpy.ndarray): if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), x2 , *args, out=out, **kwargs) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) + pwop(self.as_array(), x2 , out=out, *args, **kwargs) + #return type(self)(out, + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) def add(self, other , out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.add, other, *args, out=out, **kwargs) + return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) def subtract(self, other, out=None , *args, **kwargs): - return self.pixel_wise_binary(numpy.subtract, other, *args, out=out, **kwargs) + return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) def multiply(self, other , out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.multiply, other, *args, out=out, **kwargs) + return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) def divide(self, other , out=None ,*args, **kwargs): - return self.pixel_wise_binary(numpy.divide, other, *args, out=out, **kwargs) + return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) def power(self, other , out=None, *args, **kwargs): - return self.pixel_wise_binary(numpy.power, other, *args, out=out, **kwargs) + return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) - def maximum(self,x2, out=None): - return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out) + def 
maximum(self,x2, out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) ## unary operations @@ -702,36 +704,36 @@ class DataContainer(object): geometry=self.geometry) elif issubclass(type(out), DataContainer): if self.check_dimensions(out): - pwop(self.as_array(), *args, out=out.as_array(), **kwargs ) - return type(self)(out.as_array(), - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) + pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) + #return type(self)(out.as_array(), + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), numpy.ndarray): if self.array.shape == out.shape and self.array.dtype == out.dtype: - pwop(self.as_array(), *args, out=out, **kwargs) - return type(self)(out, - deep_copy=False, - dimension_labels=self.dimension_labels, - geometry=self.geometry) + pwop(self.as_array(), out=out, *args, **kwargs) + #return type(self)(out, + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) - def abs(self, out=None): - return self.pixel_wise_unary(numpy.abs, out=out) + def abs(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) - def sign(self, out=None): + def sign(self, out=None, *args, **kwargs): #out = numpy.sign(self.as_array() ) #return type(self)(out, # deep_copy=False, # dimension_labels=self.dimension_labels, # geometry=self.geometry) - return self.pixel_wise_unary(numpy.sign , out=out) + return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) - def sqrt(self, out=None): - return self.pixel_wise_unary(numpy.sqrt, out=out) + def sqrt(self, out=None, *args, **kwargs): + return 
self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) #def __abs__(self): # operation = FM.OPERATION.ABS @@ -739,9 +741,9 @@ class DataContainer(object): # __abs__ ## reductions - def sum(self): - return self.as_array().sum() - + def sum(self, out=None, *args, **kwargs): + return self.as_array().sum(*args, **kwargs) + #return self.pixel_wise_unary(numpy.sum, out=out, *args, **kwargs) class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' @@ -805,7 +807,7 @@ class ImageData(DataContainer): raise ValueError('Please pass either a DataContainer, ' +\ 'a numpy array or a geometry') else: - if type(array) == DataContainer: + if issubclass(type(array) , DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ array.number_of_dimensions == 3 or \ @@ -817,7 +819,7 @@ class ImageData(DataContainer): # array.dimension_labels, **kwargs) super(ImageData, self).__init__(array.as_array(), deep_copy, array.dimension_labels, **kwargs) - elif type(array) == numpy.ndarray: + elif issubclass(type(array) , numpy.ndarray): if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): raise ValueError( 'Number of dimensions are not 2 or 3 or 4 : {0}'\ @@ -914,7 +916,7 @@ class AcquisitionData(DataContainer): dimension_labels, **kwargs) else: - if type(array) == DataContainer: + if issubclass(type(array) ,DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ array.number_of_dimensions == 3 or \ @@ -926,7 +928,7 @@ class AcquisitionData(DataContainer): # array.dimension_labels, **kwargs) super(AcquisitionData, self).__init__(array.as_array(), deep_copy, array.dimension_labels, **kwargs) - elif type(array) == numpy.ndarray: + elif issubclass(type(array) ,numpy.ndarray): if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): raise ValueError( 'Number of dimensions are not 2 or 3 or 4 : {0}'\ diff --git 
a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 0ed77ae..db00e9f 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -45,6 +45,7 @@ class Function(object): def gradient(self, x, out=None): raise NotImplementedError def proximal(self, x, tau, out=None): raise NotImplementedError + class Norm2(Function): def __init__(self, @@ -108,7 +109,6 @@ class Norm2(Function): else: raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - class TV2D(Norm2): @@ -193,20 +193,21 @@ class ZeroFun(Function): def prox(self,x,tau): return x.copy() - def proximal(self, x, tau, out=None): if out is None: - return self.prox(x,tau) + return self.prox(x, tau) else: if isSizeCorrect(out, x): - # check dimensionality - if issubclass(type(out), DataContainer): - out.fill(x) - - elif issubclass(type(out) , numpy.ndarray): - out[:] = x - else: - raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + # check dimensionality + if issubclass(type(out), DataContainer): + out.fill(x) + + elif issubclass(type(out) , numpy.ndarray): + out[:] = x + else: + raise ValueError ('Wrong size: x{0} out{1}' + .format(x.shape,out.shape) ) + # A more interesting example, least squares plus 1-norm minimization. # Define class to represent 1-norm including prox function class Norm1(Function): @@ -229,11 +230,12 @@ class Norm1(Function): def prox(self,x,tau): return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + def proximal(self, x, tau, out=None): if out is None: - return self.prox(x,tau) + return self.prox(x, tau) else: - if isSizeCorrect(out, x): + if isSizeCorrect(x,out): # check dimensionality if issubclass(type(out), DataContainer): v = (x.abs() - tau*self.gamma).maximum(0) @@ -248,7 +250,6 @@ class Norm1(Function): else: raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) - # Box constraints indicator function. 
Calling returns 0 if argument is within # the box. The prox operator is projection onto the box. Only implements one # scalar lower and one upper as constraint on all elements. Should generalise @@ -290,4 +291,3 @@ class IndicatorBox(Function): x.sign(out=self.sign_x) out.__imul__( self.sign_x ) - diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index c6775a8..96f85d8 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -47,6 +47,7 @@ class Operator(object): class Identity(Operator): def __init__(self): self.s1 = 1.0 + self.L = 1 super(Identity, self).__init__() def direct(self,x,out=None): @@ -110,18 +111,19 @@ class FiniteDiff2D(Operator): def direct(self,x, out=None): '''Forward differences with Neumann BC.''' # FIXME this seems to be working only with numpy arrays + d1 = numpy.zeros_like(x.as_array()) d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1] d2 = numpy.zeros_like(x.as_array()) d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:] d = numpy.stack((d1,d2),0) - + #x.geometry.voxel_num_z = 2 return type(x)(d,False,geometry=x.geometry) def adjoint(self,x, out=None): '''Backward differences, Neumann BC.''' Nrows = x.get_dimension_size('horizontal_x') - Ncols = x.get_dimension_size('horizontal_x') + Ncols = x.get_dimension_size('horizontal_y') Nchannels = 1 if len(x.shape) == 4: Nchannels = x.get_dimension_size('channel') diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 4b19a68..327cdcb 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: ccpi-framework - version: 0.11.0 + version: 0.11.1 build: @@ -12,7 +12,6 @@ build: requirements: build: - python - - numpy - setuptools run: @@ -23,7 +22,7 @@ requirements: run: - python - numpy - - cvxpy + - cvxpy # [not win] - scipy - matplotlib diff --git a/Wrappers/Python/conda-recipe/run_test.py 
b/Wrappers/Python/conda-recipe/run_test.py index c1dc59b..d6bc340 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -14,12 +14,17 @@ from ccpi.optimisation.funcs import Norm2sq from ccpi.optimisation.funcs import ZeroFun from ccpi.optimisation.funcs import Norm1 from ccpi.optimisation.funcs import TV2D +from ccpi.optimisation.funcs import Norm2 from ccpi.optimisation.ops import LinearOperatorMatrix from ccpi.optimisation.ops import TomoIdentity from ccpi.optimisation.ops import Identity -from cvxpy import * +try: + from cvxpy import * + cvx_not_installable = False +except ImportError: + cvx_not_installable = True def aid(x): @@ -27,76 +32,99 @@ def aid(x): # block address of an array. return x.__array_interface__['data'][0] -def dt (steps): + +def dt(steps): return steps[-1] - steps[-2] + class TestDataContainer(unittest.TestCase): - + def create_simple_ImageData(self): + N = 64 + ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N) + Phantom = ImageData(geometry=ig) + + x = Phantom.as_array() + + x[int(round(N/4)):int(round(3*N/4)), + int(round(N/4)):int(round(3*N/4))] = 0.5 + x[int(round(N/8)):int(round(7*N/8)), + int(round(3*N/8)):int(round(5*N/8))] = 1 + + return (ig, Phantom) + + def create_DataContainer(self, X,Y,Z, value=1): + steps = [timer()] + a = value * numpy.ones((X, Y, Z), dtype='float32') + steps.append(timer()) + t0 = dt(steps) + #print("a refcount " , sys.getrefcount(a)) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) + return ds + def test_creation_nocopy(self): - shape = (2,3,4,5) + shape = (2, 3, 4, 5) size = shape[0] for i in range(1, len(shape)): size = size * shape[i] #print("a refcount " , sys.getrefcount(a)) - a = numpy.asarray([i for i in range( size )]) + a = numpy.asarray([i for i in range(size)]) #print("a refcount " , sys.getrefcount(a)) a = numpy.reshape(a, shape) #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) + ds = DataContainer(a, 
False, ['X', 'Y', 'Z', 'W']) #print("a refcount " , sys.getrefcount(a)) - self.assertEqual(sys.getrefcount(a),3) - self.assertEqual(ds.dimension_labels , {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'}) - + self.assertEqual(sys.getrefcount(a), 3) + self.assertEqual(ds.dimension_labels, {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'}) + def testGb_creation_nocopy(self): - X,Y,Z = 512,512,512 - X,Y,Z = 256,512,512 + X, Y, Z = 512, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) print("test clone") #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) #print("a refcount " , sys.getrefcount(a)) - self.assertEqual(sys.getrefcount(a),3) + self.assertEqual(sys.getrefcount(a), 3) ds1 = ds.copy() self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array())) ds1 = ds.clone() self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array())) - - + def testInlineAlgebra(self): - print ("Test Inline Algebra") - X,Y,Z = 1024,512,512 - X,Y,Z = 256,512,512 + print("Test Inline Algebra") + X, Y, Z = 1024, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) print(t0) #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) #ds.__iadd__( 2 ) ds += 2 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],3.) - #ds.__isub__( 2 ) + self.assertEqual(ds.as_array()[0][0][0], 3.) + #ds.__isub__( 2 ) ds -= 2 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],1.) + self.assertEqual(ds.as_array()[0][0][0], 1.) #ds.__imul__( 2 ) ds *= 2 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],2.) 
+ self.assertEqual(ds.as_array()[0][0][0], 2.) #ds.__idiv__( 2 ) ds /= 2 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],1.) - + self.assertEqual(ds.as_array()[0][0][0], 1.) + ds1 = ds.copy() #ds1.__iadd__( 1 ) ds1 += 1 @@ -104,367 +132,371 @@ class TestDataContainer(unittest.TestCase): ds += ds1 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],3.) + self.assertEqual(ds.as_array()[0][0][0], 3.) #ds.__isub__( ds1 ) ds -= ds1 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],1.) + self.assertEqual(ds.as_array()[0][0][0], 1.) #ds.__imul__( ds1 ) ds *= ds1 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],2.) + self.assertEqual(ds.as_array()[0][0][0], 2.) #ds.__idiv__( ds1 ) ds /= ds1 steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],1.) - - + self.assertEqual(ds.as_array()[0][0][0], 1.) + def test_unary_operations(self): - print ("Test unary operations") - X,Y,Z = 1024,512,512 - X,Y,Z = 256,512,512 + print("Test unary operations") + X, Y, Z = 1024, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = -numpy.ones((X,Y,Z), dtype='float32') + a = -numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) print(t0) #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) - + ds = DataContainer(a, False, ['X', 'Y', 'Z']) + ds.sign(out=ds) steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],-1.) - + self.assertEqual(ds.as_array()[0][0][0], -1.) + ds.abs(out=ds) steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],1.) - - ds.__imul__( 2 ) + self.assertEqual(ds.as_array()[0][0][0], 1.) 
+ + ds.__imul__(2) ds.sqrt(out=ds) steps.append(timer()) print(dt(steps)) - self.assertEqual(ds.as_array()[0][0][0],numpy.sqrt(2., dtype='float32')) - - - + self.assertEqual(ds.as_array()[0][0][0], + numpy.sqrt(2., dtype='float32')) + def test_binary_operations(self): self.binary_add() self.binary_subtract() self.binary_multiply() self.binary_divide() - + def binary_add(self): - print ("Test binary add") - X,Y,Z = 512,512,512 - X,Y,Z = 256,512,512 + print("Test binary add") + X, Y, Z = 512, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) - + #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) ds1 = ds.copy() - + steps.append(timer()) ds.add(ds1, out=ds) steps.append(timer()) t1 = dt(steps) - print("ds.add(ds1, out=ds)",dt(steps)) + print("ds.add(ds1, out=ds)", dt(steps)) steps.append(timer()) ds2 = ds.add(ds1) steps.append(timer()) t2 = dt(steps) - print("ds2 = ds.add(ds1)",dt(steps)) - - self.assertLess(t1,t2) - self.assertEqual(ds.as_array()[0][0][0] , 2.) - + print("ds2 = ds.add(ds1)", dt(steps)) + + self.assertLess(t1, t2) + self.assertEqual(ds.as_array()[0][0][0], 2.) 
+ ds0 = ds - ds0.add(2,out=ds0) + ds0.add(2, out=ds0) steps.append(timer()) - print ("ds0.add(2,out=ds0)", dt(steps), 3 , ds0.as_array()[0][0][0]) + print("ds0.add(2,out=ds0)", dt(steps), 3, ds0.as_array()[0][0][0]) self.assertEqual(4., ds0.as_array()[0][0][0]) - - dt1 = dt(steps) + + dt1 = dt(steps) ds3 = ds0.add(2) steps.append(timer()) - print ("ds3 = ds0.add(2)", dt(steps), 5 , ds3.as_array()[0][0][0]) + print("ds3 = ds0.add(2)", dt(steps), 5, ds3.as_array()[0][0][0]) dt2 = dt(steps) - self.assertLess(dt1,dt2) - + self.assertLess(dt1, dt2) + def binary_subtract(self): - print ("Test binary subtract") - X,Y,Z = 512,512,512 + print("Test binary subtract") + X, Y, Z = 512, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) - + #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) ds1 = ds.copy() - + steps.append(timer()) ds.subtract(ds1, out=ds) steps.append(timer()) t1 = dt(steps) - print("ds.subtract(ds1, out=ds)",dt(steps)) + print("ds.subtract(ds1, out=ds)", dt(steps)) self.assertEqual(0., ds.as_array()[0][0][0]) - + steps.append(timer()) ds2 = ds.subtract(ds1) self.assertEqual(-1., ds2.as_array()[0][0][0]) - + steps.append(timer()) t2 = dt(steps) - print("ds2 = ds.subtract(ds1)",dt(steps)) - - self.assertLess(t1,t2) - + print("ds2 = ds.subtract(ds1)", dt(steps)) + + self.assertLess(t1, t2) + del ds1 ds0 = ds.copy() steps.append(timer()) - ds0.subtract(2,out=ds0) + ds0.subtract(2, out=ds0) #ds0.__isub__( 2 ) steps.append(timer()) - print ("ds0.subtract(2,out=ds0)", dt(steps), -2. , ds0.as_array()[0][0][0]) + print("ds0.subtract(2,out=ds0)", dt( + steps), -2., ds0.as_array()[0][0][0]) self.assertEqual(-2., ds0.as_array()[0][0][0]) - - dt1 = dt(steps) + + dt1 = dt(steps) ds3 = ds0.subtract(2) steps.append(timer()) - print ("ds3 = ds0.subtract(2)", dt(steps), 0. 
, ds3.as_array()[0][0][0]) + print("ds3 = ds0.subtract(2)", dt(steps), 0., ds3.as_array()[0][0][0]) dt2 = dt(steps) - self.assertLess(dt1,dt2) + self.assertLess(dt1, dt2) self.assertEqual(-2., ds0.as_array()[0][0][0]) self.assertEqual(-4., ds3.as_array()[0][0][0]) - + def binary_multiply(self): - print ("Test binary multiply") - X,Y,Z = 1024,512,512 - X,Y,Z = 256,512,512 + print("Test binary multiply") + X, Y, Z = 1024, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) - + #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) ds1 = ds.copy() - + steps.append(timer()) ds.multiply(ds1, out=ds) steps.append(timer()) t1 = dt(steps) - print("ds.multiply(ds1, out=ds)",dt(steps)) + print("ds.multiply(ds1, out=ds)", dt(steps)) steps.append(timer()) ds2 = ds.multiply(ds1) steps.append(timer()) t2 = dt(steps) - print("ds2 = ds.multiply(ds1)",dt(steps)) - - self.assertLess(t1,t2) - + print("ds2 = ds.multiply(ds1)", dt(steps)) + + self.assertLess(t1, t2) + ds0 = ds - ds0.multiply(2,out=ds0) + ds0.multiply(2, out=ds0) steps.append(timer()) - print ("ds0.multiply(2,out=ds0)", dt(steps), 2. , ds0.as_array()[0][0][0]) + print("ds0.multiply(2,out=ds0)", dt( + steps), 2., ds0.as_array()[0][0][0]) self.assertEqual(2., ds0.as_array()[0][0][0]) - - dt1 = dt(steps) + + dt1 = dt(steps) ds3 = ds0.multiply(2) steps.append(timer()) - print ("ds3 = ds0.multiply(2)", dt(steps), 4. 
, ds3.as_array()[0][0][0]) + print("ds3 = ds0.multiply(2)", dt(steps), 4., ds3.as_array()[0][0][0]) dt2 = dt(steps) - self.assertLess(dt1,dt2) + self.assertLess(dt1, dt2) self.assertEqual(4., ds3.as_array()[0][0][0]) self.assertEqual(2., ds.as_array()[0][0][0]) - + def binary_divide(self): - print ("Test binary divide") - X,Y,Z = 1024,512,512 - X,Y,Z = 256,512,512 + print("Test binary divide") + X, Y, Z = 1024, 512, 512 + X, Y, Z = 256, 512, 512 steps = [timer()] - a = numpy.ones((X,Y,Z), dtype='float32') + a = numpy.ones((X, Y, Z), dtype='float32') steps.append(timer()) t0 = dt(steps) - + #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, False, ['X', 'Y','Z']) + ds = DataContainer(a, False, ['X', 'Y', 'Z']) ds1 = ds.copy() - + steps.append(timer()) ds.divide(ds1, out=ds) steps.append(timer()) t1 = dt(steps) - print("ds.divide(ds1, out=ds)",dt(steps)) + print("ds.divide(ds1, out=ds)", dt(steps)) steps.append(timer()) ds2 = ds.divide(ds1) steps.append(timer()) t2 = dt(steps) - print("ds2 = ds.divide(ds1)",dt(steps)) - - self.assertLess(t1,t2) - self.assertEqual(ds.as_array()[0][0][0] , 1.) - + print("ds2 = ds.divide(ds1)", dt(steps)) + + self.assertLess(t1, t2) + self.assertEqual(ds.as_array()[0][0][0], 1.) 
+ ds0 = ds - ds0.divide(2,out=ds0) + ds0.divide(2, out=ds0) steps.append(timer()) - print ("ds0.divide(2,out=ds0)", dt(steps), 0.5 , ds0.as_array()[0][0][0]) + print("ds0.divide(2,out=ds0)", dt(steps), 0.5, ds0.as_array()[0][0][0]) self.assertEqual(0.5, ds0.as_array()[0][0][0]) - - dt1 = dt(steps) + + dt1 = dt(steps) ds3 = ds0.divide(2) steps.append(timer()) - print ("ds3 = ds0.divide(2)", dt(steps), 0.25 , ds3.as_array()[0][0][0]) + print("ds3 = ds0.divide(2)", dt(steps), 0.25, ds3.as_array()[0][0][0]) dt2 = dt(steps) - self.assertLess(dt1,dt2) + self.assertLess(dt1, dt2) self.assertEqual(.25, ds3.as_array()[0][0][0]) self.assertEqual(.5, ds.as_array()[0][0][0]) - - + def test_creation_copy(self): - shape = (2,3,4,5) + shape = (2, 3, 4, 5) size = shape[0] for i in range(1, len(shape)): size = size * shape[i] #print("a refcount " , sys.getrefcount(a)) - a = numpy.asarray([i for i in range( size )]) + a = numpy.asarray([i for i in range(size)]) #print("a refcount " , sys.getrefcount(a)) a = numpy.reshape(a, shape) #print("a refcount " , sys.getrefcount(a)) - ds = DataContainer(a, True, ['X', 'Y','Z' ,'W']) + ds = DataContainer(a, True, ['X', 'Y', 'Z', 'W']) #print("a refcount " , sys.getrefcount(a)) - self.assertEqual(sys.getrefcount(a),2) - + self.assertEqual(sys.getrefcount(a), 2) + def test_subset(self): - shape = (4,3,2) + shape = (4, 3, 2) a = [i for i in range(2*3*4)] a = numpy.asarray(a) a = numpy.reshape(a, shape) - ds = DataContainer(a, True, ['X', 'Y','Z']) + ds = DataContainer(a, True, ['X', 'Y', 'Z']) sub = ds.subset(['X']) res = True try: numpy.testing.assert_array_equal(sub.as_array(), - numpy.asarray([0,6,12,18])) + numpy.asarray([0, 6, 12, 18])) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - + sub = ds.subset(['X'], Y=2, Z=0) res = True try: numpy.testing.assert_array_equal(sub.as_array(), - numpy.asarray([4,10,16,22])) + numpy.asarray([4, 10, 16, 22])) except AssertionError as err: res = False - print 
(err) + print(err) self.assertTrue(res) - - + sub = ds.subset(['Y']) try: numpy.testing.assert_array_equal( - sub.as_array(), numpy.asarray([0,2,4])) + sub.as_array(), numpy.asarray([0, 2, 4])) res = True except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - - + sub = ds.subset(['Z']) try: numpy.testing.assert_array_equal( - sub.as_array(), numpy.asarray([0,1])) + sub.as_array(), numpy.asarray([0, 1])) res = True except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) sub = ds.subset(['Z'], X=1, Y=2) try: numpy.testing.assert_array_equal( - sub.as_array(), numpy.asarray([10,11])) + sub.as_array(), numpy.asarray([10, 11])) res = True except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - + print(a) - sub = ds.subset(['X', 'Y'] , Z=1) + sub = ds.subset(['X', 'Y'], Z=1) res = True try: numpy.testing.assert_array_equal(sub.as_array(), - numpy.asarray([[ 1, 3, 5], - [ 7, 9, 11], - [13, 15, 17], - [19, 21, 23]])) + numpy.asarray([[1, 3, 5], + [7, 9, 11], + [13, 15, 17], + [19, 21, 23]])) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - + def test_ImageData(self): # create ImageData from geometry vgeometry = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2) vol = ImageData(geometry=vgeometry) - self.assertEqual(vol.shape , (2,3,4)) - + self.assertEqual(vol.shape, (2, 3, 4)) + vol1 = vol + 1 self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape)) - + vol1 = vol - 1 self.assertNumpyArrayEqual(vol1.as_array(), -numpy.ones(vol.shape)) - + vol1 = 2 * (vol + 1) self.assertNumpyArrayEqual(vol1.as_array(), 2 * numpy.ones(vol.shape)) - - vol1 = (vol + 1) / 2 - self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) / 2 ) - + + vol1 = (vol + 1) / 2 + self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) / 2) + vol1 = vol + 1 - self.assertEqual(vol1.sum() , 2*3*4) - vol1 = ( vol + 2 ) ** 2 - 
self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) * 4 ) - - - + self.assertEqual(vol1.sum(), 2*3*4) + vol1 = (vol + 2) ** 2 + self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) * 4) + + self.assertEqual(vol.number_of_dimensions, 3) + def test_AcquisitionData(self): - sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10), - geom_type='parallel', pixel_num_v=3, - pixel_num_h=5 , channels=2) + sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10), + geom_type='parallel', pixel_num_v=3, + pixel_num_h=5, channels=2) sino = AcquisitionData(geometry=sgeometry) - self.assertEqual(sino.shape , (2,10,3,5)) - - + self.assertEqual(sino.shape, (2, 10, 3, 5)) + def assertNumpyArrayEqual(self, first, second): res = True try: numpy.testing.assert_array_equal(first, second) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): res = True try: numpy.testing.assert_array_almost_equal(first, second, decimal) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) + def test_DataContainerChaining(self): + dc = self.create_DataContainer(256,256,256,1) + + dc.add(9,out=dc)\ + .subtract(1,out=dc) + self.assertEqual(1+9-1,dc.as_array().flatten()[0]) + + + class TestAlgorithms(unittest.TestCase): def assertNumpyArrayEqual(self, first, second): @@ -473,311 +505,372 @@ class TestAlgorithms(unittest.TestCase): numpy.testing.assert_array_equal(first, second) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): res = True try: numpy.testing.assert_array_almost_equal(first, second, decimal) except AssertionError as err: res = False - print (err) + print(err) self.assertTrue(res) - def test_FISTA(self): - # Problem data. 
- m = 30 - n = 20 - np.random.seed(1) - Amat = np.random.randn(m, n) - A = LinearOperatorMatrix(Amat) - bmat = np.random.randn(m) - bmat.shape = (bmat.shape[0],1) - - # A = Identity() - # Change n to equal to m. - - b = DataContainer(bmat) - - # Regularization parameter - lam = 10 - opt = {'memopt':True} - # Create object instances with the test data A and b. - f = Norm2sq(A,b,c=0.5, memopt=True) - g0 = ZeroFun() - - # Initial guess - x_init = DataContainer(np.zeros((n,1))) - - f.grad(x_init) - - # Run FISTA for least squares plus zero function. - x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt) - - # Print solution and final objective/criterion value for comparison - print("FISTA least squares plus zero function solution and objective value:") - print(x_fista0.array) - print(criter0[-1]) - - # Compare to CVXPY - - # Construct the problem. - x0 = Variable(n) - objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) ) - prob0 = Problem(objective0) - - # The optimal objective is returned by prob.solve(). - result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9) - - # The optimal solution for x is stored in x.value and optimal objective value - # is in result as well as in objective.value - print("CVXPY least squares plus zero function solution and objective value:") - print(x0.value) - print(objective0.value) - self.assertNumpyArrayAlmostEqual( - numpy.squeeze(x_fista0.array),x0.value,6) - def test_FISTA_Norm1(self): - - opt = {'memopt':True} - # Problem data. - m = 30 - n = 20 - np.random.seed(1) - Amat = np.random.randn(m, n) - A = LinearOperatorMatrix(Amat) - bmat = np.random.randn(m) - bmat.shape = (bmat.shape[0],1) - - # A = Identity() - # Change n to equal to m. - - b = DataContainer(bmat) - - # Regularization parameter - lam = 10 - opt = {'memopt':True} - # Create object instances with the test data A and b. 
- f = Norm2sq(A,b,c=0.5, memopt=True) - g0 = ZeroFun() - - # Initial guess - x_init = DataContainer(np.zeros((n,1))) - - # Create 1-norm object instance - g1 = Norm1(lam) - - g1(x_init) - g1.prox(x_init,0.02) - - # Combine with least squares and solve using generic FISTA implementation - x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt) - - # Print for comparison - print("FISTA least squares plus 1-norm solution and objective value:") - print(x_fista1.as_array().squeeze()) - print(criter1[-1]) - - # Compare to CVXPY - - # Construct the problem. - x1 = Variable(n) - objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) - prob1 = Problem(objective1) - - # The optimal objective is returned by prob.solve(). - result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9) - - # The optimal solution for x is stored in x.value and optimal objective value - # is in result as well as in objective.value - print("CVXPY least squares plus 1-norm solution and objective value:") - print(x1.value) - print(objective1.value) - - self.assertNumpyArrayAlmostEqual( - numpy.squeeze(x_fista1.array),x1.value,6) - - def test_FBPD_Norm1(self): - - opt = {'memopt':True} - # Problem data. - m = 30 - n = 20 - np.random.seed(1) - Amat = np.random.randn(m, n) - A = LinearOperatorMatrix(Amat) - bmat = np.random.randn(m) - bmat.shape = (bmat.shape[0],1) - - # A = Identity() - # Change n to equal to m. - - b = DataContainer(bmat) - - # Regularization parameter - lam = 10 - opt = {'memopt':True} - # Create object instances with the test data A and b. - f = Norm2sq(A,b,c=0.5, memopt=True) - g0 = ZeroFun() - - # Initial guess - x_init = DataContainer(np.zeros((n,1))) - - # Create 1-norm object instance - g1 = Norm1(lam) - - - # Compare to CVXPY - - # Construct the problem. - x1 = Variable(n) - objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) - prob1 = Problem(objective1) - - # The optimal objective is returned by prob.solve(). 
- result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9) - - # The optimal solution for x is stored in x.value and optimal objective value - # is in result as well as in objective.value - print("CVXPY least squares plus 1-norm solution and objective value:") - print(x1.value) - print(objective1.value) - - # Now try another algorithm FBPD for same problem: - x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, - Identity(), None, f, g1) - print(x_fbpd1) - print(criterfbpd1[-1]) - - self.assertNumpyArrayAlmostEqual( - numpy.squeeze(x_fbpd1.array),x1.value,6) + + def test_FISTA_cvx(self): + if not cvx_not_installable: + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0], 1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt': True} + # Create object instances with the test data A and b. + f = Norm2sq(A, b, c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n, 1))) + + f.grad(x_init) + + # Run FISTA for least squares plus zero function. + x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt) + + # Print solution and final objective/criterion value for comparison + print("FISTA least squares plus zero function solution and objective value:") + print(x_fista0.array) + print(criter0[-1]) + + # Compare to CVXPY + + # Construct the problem. + x0 = Variable(n) + objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0])) + prob0 = Problem(objective0) + + # The optimal objective is returned by prob.solve(). 
+ result0 = prob0.solve(verbose=False, solver=SCS, eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus zero function solution and objective value:") + print(x0.value) + print(objective0.value) + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fista0.array), x0.value, 6) + else: + self.assertTrue(cvx_not_installable) + + def test_FISTA_Norm1_cvx(self): + if not cvx_not_installable: + opt = {'memopt': True} + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0], 1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt': True} + # Create object instances with the test data A and b. + f = Norm2sq(A, b, c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n, 1))) + + # Create 1-norm object instance + g1 = Norm1(lam) + + g1(x_init) + g1.prox(x_init, 0.02) + + # Combine with least squares and solve using generic FISTA implementation + x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt) + + # Print for comparison + print("FISTA least squares plus 1-norm solution and objective value:") + print(x_fista1.as_array().squeeze()) + print(criter1[-1]) + + # Compare to CVXPY + + # Construct the problem. + x1 = Variable(n) + objective1 = Minimize( + 0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1)) + prob1 = Problem(objective1) + + # The optimal objective is returned by prob.solve(). 
+ result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1.value) + print(objective1.value) + + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fista1.array), x1.value, 6) + else: + self.assertTrue(cvx_not_installable) + + def test_FBPD_Norm1_cvx(self): + if not cvx_not_installable: + opt = {'memopt': True} + # Problem data. + m = 30 + n = 20 + np.random.seed(1) + Amat = np.random.randn(m, n) + A = LinearOperatorMatrix(Amat) + bmat = np.random.randn(m) + bmat.shape = (bmat.shape[0], 1) + + # A = Identity() + # Change n to equal to m. + + b = DataContainer(bmat) + + # Regularization parameter + lam = 10 + opt = {'memopt': True} + # Create object instances with the test data A and b. + f = Norm2sq(A, b, c=0.5, memopt=True) + g0 = ZeroFun() + + # Initial guess + x_init = DataContainer(np.zeros((n, 1))) + + # Create 1-norm object instance + g1 = Norm1(lam) + + # Compare to CVXPY + + # Construct the problem. + x1 = Variable(n) + objective1 = Minimize( + 0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1)) + prob1 = Problem(objective1) + + # The optimal objective is returned by prob.solve(). 
+ result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1.value) + print(objective1.value) + + # Now try another algorithm FBPD for same problem: + x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, + Identity(), None, f, g1) + print(x_fbpd1) + print(criterfbpd1[-1]) + + self.assertNumpyArrayAlmostEqual( + numpy.squeeze(x_fbpd1.array), x1.value, 6) + else: + self.assertTrue(cvx_not_installable) # Plot criterion curve to see both FISTA and FBPD converge to same value. # Note that FISTA is very efficient for 1-norm minimization so it beats - # FBPD in this test by a lot. But FBPD can handle a larger class of problems + # FBPD in this test by a lot. But FBPD can handle a larger class of problems # than FISTA can. - - + # Now try 1-norm and TV denoising with FBPD, first 1-norm. - - # Set up phantom size NxN by creating ImageGeometry, initialising the + + # Set up phantom size NxN by creating ImageGeometry, initialising the # ImageData object with this geometry and empty array and finally put some # data into its array, and display as image. 
- def test_FISTA_denoise(self): - opt = {'memopt':True} + def test_FISTA_denoise_cvx(self): + if not cvx_not_installable: + opt = {'memopt': True} + N = 64 + ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N) + Phantom = ImageData(geometry=ig) + + x = Phantom.as_array() + + x[int(round(N/4)):int(round(3*N/4)), + int(round(N/4)):int(round(3*N/4))] = 0.5 + x[int(round(N/8)):int(round(7*N/8)), + int(round(3*N/8)):int(round(5*N/8))] = 1 + + # Identity operator for denoising + I = TomoIdentity(ig) + + # Data and add noise + y = I.direct(Phantom) + y.array = y.array + 0.1*np.random.randn(N, N) + + # Data fidelity term + f_denoise = Norm2sq(I, y, c=0.5, memopt=True) + + # 1-norm regulariser + lam1_denoise = 1.0 + g1_denoise = Norm1(lam1_denoise) + + # Initial guess + x_init_denoise = ImageData(np.zeros((N, N))) + + # Combine with least squares and solve using generic FISTA implementation + x_fista1_denoise, it1_denoise, timing1_denoise, \ + criter1_denoise = \ + FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) + + print(x_fista1_denoise) + print(criter1_denoise[-1]) + + # Now denoise LS + 1-norm with FBPD + x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\ + criterfbpd1_denoise = \ + FBPD(x_init_denoise, I, None, f_denoise, g1_denoise) + print(x_fbpd1_denoise) + print(criterfbpd1_denoise[-1]) + + # Compare to CVXPY + + # Construct the problem. + x1_denoise = Variable(N**2, 1) + objective1_denoise = Minimize( + 0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise, 1)) + prob1_denoise = Problem(objective1_denoise) + + # The optimal objective is returned by prob.solve(). 
+ result1_denoise = prob1_denoise.solve( + verbose=False, solver=SCS, eps=1e-12) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(x1_denoise.value) + print(objective1_denoise.value) + self.assertNumpyArrayAlmostEqual( + x_fista1_denoise.array.flatten(), x1_denoise.value, 5) + + self.assertNumpyArrayAlmostEqual( + x_fbpd1_denoise.array.flatten(), x1_denoise.value, 5) + x1_cvx = x1_denoise.value + x1_cvx.shape = (N, N) + + # Now TV with FBPD + lam_tv = 0.1 + gtv = TV2D(lam_tv) + gtv(gtv.op.direct(x_init_denoise)) + + opt_tv = {'tol': 1e-4, 'iter': 10000} + + x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\ + criterfbpdtv_denoise = \ + FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv) + print(x_fbpdtv_denoise) + print(criterfbpdtv_denoise[-1]) + + # Compare to CVXPY + + # Construct the problem. + xtv_denoise = Variable((N, N)) + objectivetv_denoise = Minimize( + 0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise)) + probtv_denoise = Problem(objectivetv_denoise) + + # The optimal objective is returned by prob.solve(). 
+ resulttv_denoise = probtv_denoise.solve( + verbose=False, solver=SCS, eps=1e-12) + + # The optimal solution for x is stored in x.value and optimal objective value + # is in result as well as in objective.value + print("CVXPY least squares plus 1-norm solution and objective value:") + print(xtv_denoise.value) + print(objectivetv_denoise.value) + + self.assertNumpyArrayAlmostEqual( + x_fbpdtv_denoise.as_array(), xtv_denoise.value, 1) + + else: + self.assertTrue(cvx_not_installable) + + +class TestFunction(unittest.TestCase): + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + def create_simple_ImageData(self): N = 64 - ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) + ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N) Phantom = ImageData(geometry=ig) - + x = Phantom.as_array() - + x[int(round(N/4)):int(round(3*N/4)), int(round(N/4)):int(round(3*N/4))] = 0.5 x[int(round(N/8)):int(round(7*N/8)), int(round(3*N/8)):int(round(5*N/8))] = 1 - - - # Identity operator for denoising - I = TomoIdentity(ig) - - # Data and add noise - y = I.direct(Phantom) - y.array = y.array + 0.1*np.random.randn(N, N) - - - # Data fidelity term - f_denoise = Norm2sq(I,y,c=0.5,memopt=True) - - # 1-norm regulariser - lam1_denoise = 1.0 - g1_denoise = Norm1(lam1_denoise) - - # Initial guess - x_init_denoise = ImageData(np.zeros((N,N))) - - # Combine with least squares and solve using generic FISTA implementation - x_fista1_denoise, it1_denoise, timing1_denoise, \ - criter1_denoise = \ - FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) - - print(x_fista1_denoise) - print(criter1_denoise[-1]) - - - # Now denoise LS + 1-norm with FBPD - x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\ - criterfbpd1_denoise = \ - FBPD(x_init_denoise, I, None, f_denoise, g1_denoise) - print(x_fbpd1_denoise) - print(criterfbpd1_denoise[-1]) - - - # Compare 
to CVXPY - - # Construct the problem. - x1_denoise = Variable(N**2,1) - objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) ) - prob1_denoise = Problem(objective1_denoise) - - # The optimal objective is returned by prob.solve(). - result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12) - - # The optimal solution for x is stored in x.value and optimal objective value - # is in result as well as in objective.value - print("CVXPY least squares plus 1-norm solution and objective value:") - print(x1_denoise.value) - print(objective1_denoise.value) - self.assertNumpyArrayAlmostEqual( - x_fista1_denoise.array.flatten(),x1_denoise.value,5) - - self.assertNumpyArrayAlmostEqual( - x_fbpd1_denoise.array.flatten(),x1_denoise.value,5) - x1_cvx = x1_denoise.value - x1_cvx.shape = (N,N) - - - # Now TV with FBPD - lam_tv = 0.1 - gtv = TV2D(lam_tv) - gtv(gtv.op.direct(x_init_denoise)) - - opt_tv = {'tol': 1e-4, 'iter': 10000} - - x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\ - criterfbpdtv_denoise = \ - FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv) - print(x_fbpdtv_denoise) - print(criterfbpdtv_denoise[-1]) - - - - # Compare to CVXPY - - # Construct the problem. - xtv_denoise = Variable((N,N)) - objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) ) - probtv_denoise = Problem(objectivetv_denoise) - - # The optimal objective is returned by prob.solve(). 
- resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12) - - # The optimal solution for x is stored in x.value and optimal objective value - # is in result as well as in objective.value - print("CVXPY least squares plus 1-norm solution and objective value:") - print(xtv_denoise.value) - print(objectivetv_denoise.value) - - self.assertNumpyArrayAlmostEqual( - x_fbpdtv_denoise.as_array(),xtv_denoise.value,1) - + + return (ig, Phantom) + + def _test_Norm2(self): + print("test Norm2") + opt = {'memopt': True} + ig, Phantom = self.create_simple_ImageData() + x = Phantom.as_array() + print(Phantom) + print(Phantom.as_array()) + + norm2 = Norm2() + v1 = norm2(x) + v2 = norm2(Phantom) + self.assertEqual(v1, v2) + + p1 = norm2.prox(Phantom, 1) + print(p1) + p2 = norm2.prox(x, 1) + self.assertNumpyArrayEqual(p1.as_array(), p2) + + p3 = norm2.proximal(Phantom, 1) + p4 = norm2.proximal(x, 1) + self.assertNumpyArrayEqual(p3.as_array(), p2) + self.assertNumpyArrayEqual(p3.as_array(), p4) + + out = Phantom.copy() + p5 = norm2.proximal(Phantom, 1, out=out) + self.assertEqual(id(p5), id(out)) + self.assertNumpyArrayEqual(p5.as_array(), p3.as_array()) # ============================================================================= # def test_upper(self): # self.assertEqual('foo'.upper(), 'FOO') -# +# # def test_isupper(self): # self.assertTrue('FOO'.isupper()) # self.assertFalse('Foo'.isupper()) -# +# # def test_split(self): # s = 'hello world' # self.assertEqual(s.split(), ['hello', 'world']) @@ -786,5 +879,6 @@ class TestAlgorithms(unittest.TestCase): # s.split(2) # ============================================================================= + if __name__ == '__main__': unittest.main() diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py index ad79fa5..27b1c97 100644 --- a/Wrappers/Python/wip/demo_compare_cvx.py +++ b/Wrappers/Python/wip/demo_compare_cvx.py @@ -1,10 +1,11 @@ from ccpi.framework import ImageData, 
ImageGeometry, AcquisitionGeometry, DataContainer from ccpi.optimisation.algs import FISTA, FBPD, CGLS -from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D +from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2 from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity from ccpi.optimisation.ops import Identity +from ccpi.optimisation.ops import FiniteDiff2D # Requires CVXPY, see http://www.cvxpy.org/ # CVXPY can be installed in anaconda using @@ -172,6 +173,8 @@ plt.imshow(y.array) plt.title('Noisy image') plt.show() + +################### # Data fidelity term f_denoise = Norm2sq(I,y,c=0.5,memopt=True) @@ -188,9 +191,9 @@ x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_d print(x_fista1_denoise) print(criter1_denoise[-1]) -plt.imshow(x_fista1_denoise.as_array()) -plt.title('FISTA LS+1') -plt.show() +#plt.imshow(x_fista1_denoise.as_array()) +#plt.title('FISTA LS+1') +#plt.show() # Now denoise LS + 1-norm with FBPD x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \ @@ -198,9 +201,9 @@ x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \ print(x_fbpd1_denoise) print(criterfbpd1_denoise[-1]) -plt.imshow(x_fbpd1_denoise.as_array()) -plt.title('FBPD LS+1') -plt.show() +#plt.imshow(x_fbpd1_denoise.as_array()) +#plt.title('FBPD LS+1') +#plt.show() if use_cvxpy: # Compare to CVXPY @@ -222,25 +225,46 @@ if use_cvxpy: x1_cvx = x1_denoise.value x1_cvx.shape = (N,N) + + +#plt.imshow(x1_cvx) +#plt.title('CVX LS+1') +#plt.show() + +fig = plt.figure() +plt.subplot(1,4,1) +plt.imshow(y.array) +plt.title("LS+1") +plt.subplot(1,4,2) +plt.imshow(x_fista1_denoise.as_array()) +plt.title("fista") +plt.subplot(1,4,3) +plt.imshow(x_fbpd1_denoise.as_array()) +plt.title("fbpd") +plt.subplot(1,4,4) plt.imshow(x1_cvx) -plt.title('CVX LS+1') +plt.title("cvx") plt.show() -# Now TV with FBPD +############################################################## +# Now TV with FBPD and Norm2 lam_tv = 0.1 gtv = 
TV2D(lam_tv) -gtv(gtv.op.direct(x_init_denoise)) +norm2 = Norm2(lam_tv) +op = FiniteDiff2D() +#gtv(gtv.op.direct(x_init_denoise)) opt_tv = {'tol': 1e-4, 'iter': 10000} x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \ - criterfbpdtv_denoise = FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv) + criterfbpdtv_denoise = FBPD(x_init_denoise, op, None, \ + f_denoise, norm2 ,opt=opt_tv) print(x_fbpdtv_denoise) print(criterfbpdtv_denoise[-1]) plt.imshow(x_fbpdtv_denoise.as_array()) plt.title('FBPD TV') -plt.show() +#plt.show() if use_cvxpy: # Compare to CVXPY @@ -262,7 +286,21 @@ if use_cvxpy: plt.imshow(xtv_denoise.value) plt.title('CVX TV') +#plt.show() + +fig = plt.figure() +plt.subplot(1,3,1) +plt.imshow(y.array) +plt.title("TV2D") +plt.subplot(1,3,2) +plt.imshow(x_fbpdtv_denoise.as_array()) +plt.title("fbpd tv denoise") +plt.subplot(1,3,3) +plt.imshow(xtv_denoise.value) +plt.title("CVX tv") plt.show() + + plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV') plt.loglog(criterfbpdtv_denoise, label='FBPD TV') -- cgit v1.2.3 From 42d9e33488400dd8b232002ad862be861f5f7439 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 7 Nov 2018 14:21:46 +0000 Subject: increased version number (#163) --- Wrappers/Python/conda-recipe/meta.yaml | 2 +- Wrappers/Python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 327cdcb..45d25f9 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: ccpi-framework - version: 0.11.1 + version: 0.11.2 build: diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index dfd5bdd..ef579e9 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -23,7 +23,7 @@ import os import sys -cil_version='0.11.1' +cil_version='0.11.2' setup( name="ccpi-framework", -- cgit v1.2.3 From 
9b1d18b61e9f5f623c1d4f8c93b2895a4c57e4c0 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Sat, 10 Nov 2018 09:47:42 +0000 Subject: Nexus read slice (#144) * replaces in with explicit test closes #139 * add read of single slice or subset * get_image_keys * reader reads single slice * adds get_acquisition_data_slice and get_acquisition_data_subset it is possible to read in only a subset of the dataset. * added multifile_nexus.py * cleaned example * fix normaliser and reader * wip * wip for reader * added support for non standard?? nexus files updated example with normalisation. * fix camelcase * Added initial unittest for reader bugfix for python 2.7 * fixes for python 2.7 * remove duplicate test requirement * minimal unittest for get_acquisition_data_subset --- Wrappers/Python/ccpi/framework.py | 40 +--- Wrappers/Python/ccpi/io/reader.py | 228 +++++++++++++++++++++-- Wrappers/Python/ccpi/processors.py | 154 ++++++++++++++-- Wrappers/Python/conda-recipe/meta.yaml | 17 +- Wrappers/Python/conda-recipe/run_test.py | 85 +++++++++ Wrappers/Python/setup.py | 2 +- Wrappers/Python/wip/multifile_nexus.py | 307 +++++++++++++++++++++++++++++++ 7 files changed, 762 insertions(+), 71 deletions(-) create mode 100755 Wrappers/Python/wip/multifile_nexus.py diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index 4d74d2b..9938fb7 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -17,18 +17,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import absolute_import from __future__ import division -import abc +from __future__ import print_function +from __future__ import unicode_literals + import numpy import sys from datetime import timedelta, datetime import warnings -if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: - ABC = abc.ABC -else: - ABC = abc.ABCMeta('ABC', (), {}) - def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] @@ -376,7 +374,6 @@ class DataContainer(object): return self.shape == other.shape ## algebra - def __add__(self, other , out=None, *args, **kwargs): if issubclass(type(other), DataContainer): if self.check_dimensions(other): @@ -525,9 +522,9 @@ class DataContainer(object): # must return self + def __iadd__(self, other): if isinstance(other, (int, float)) : - #print ("__iadd__", self.array.shape) numpy.add(self.array, other, out=self.array) elif issubclass(type(other), DataContainer): if self.check_dimensions(other): @@ -539,11 +536,7 @@ class DataContainer(object): def __imul__(self, other): if isinstance(other, (int, float)) : - #print ("__imul__", self.array.shape) - #print ("type(self)", type(self)) - #print ("self.array", self.array, other) arr = self.as_array() - #print ("arr", arr) numpy.multiply(arr, other, out=arr) elif issubclass(type(other), DataContainer): if self.check_dimensions(other): @@ -627,13 +620,14 @@ class DataContainer(object): .format(len(self.shape),len(new_order))) - def copy(self): + def copy(self): '''alias of clone''' return self.clone() ## binary operations def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): + if out is None: if isinstance(x2, (int, float, complex)): out = pwop(self.as_array() , x2 , *args, **kwargs ) @@ -657,11 +651,8 @@ class DataContainer(object): raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): 
if self.check_dimensions(out): + pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs ) - #return type(self)(out.as_array(), - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) return out else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) @@ -694,7 +685,6 @@ class DataContainer(object): return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) ## unary operations - def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): if out is None: out = pwop(self.as_array() , *args, **kwargs ) @@ -705,19 +695,11 @@ class DataContainer(object): elif issubclass(type(out), DataContainer): if self.check_dimensions(out): pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) - #return type(self)(out.as_array(), - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) else: raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) elif issubclass(type(out), numpy.ndarray): if self.array.shape == out.shape and self.array.dtype == out.dtype: pwop(self.as_array(), out=out, *args, **kwargs) - #return type(self)(out, - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) else: raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) @@ -725,11 +707,6 @@ class DataContainer(object): return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) def sign(self, out=None, *args, **kwargs): - #out = numpy.sign(self.as_array() ) - #return type(self)(out, - # deep_copy=False, - # dimension_labels=self.dimension_labels, - # geometry=self.geometry) return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) def sqrt(self, out=None, *args, **kwargs): @@ -743,7 +720,6 @@ class DataContainer(object): ## reductions def sum(self, out=None, *args, **kwargs): return self.as_array().sum(*args, **kwargs) - #return 
self.pixel_wise_unary(numpy.sum, out=out, *args, **kwargs) class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' diff --git a/Wrappers/Python/ccpi/io/reader.py b/Wrappers/Python/ccpi/io/reader.py index b0b5414..856f5e0 100644 --- a/Wrappers/Python/ccpi/io/reader.py +++ b/Wrappers/Python/ccpi/io/reader.py @@ -22,6 +22,11 @@ This is a reader module with classes for loading 3D datasets. @author: Mr. Srikanth Nagella ''' +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + from ccpi.framework import AcquisitionGeometry from ccpi.framework import AcquisitionData import numpy as np @@ -53,7 +58,18 @@ class NexusReader(object): self.angles = None self.geometry = None self.filename = nexus_filename - + self.key_path = 'entry1/tomo_entry/instrument/detector/image_key' + self.data_path = 'entry1/tomo_entry/data/data' + self.angle_path = 'entry1/tomo_entry/data/rotation_angle' + + def get_image_keys(self): + try: + with NexusFile(self.filename,'r') as file: + return np.array(file[self.key_path]) + except KeyError as ke: + raise KeyError("get_image_keys: " , ke.args[0] , self.key_path) + + def load(self, dimensions=None, image_key_id=0): ''' This is generic loading function of flat field, dark field and projection data. 
@@ -64,10 +80,10 @@ class NexusReader(object): return try: with NexusFile(self.filename,'r') as file: - image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key']) + image_keys = np.array(file[self.key_path]) projections = None if dimensions == None: - projections = np.array(file['entry1/tomo_entry/data/data']) + projections = np.array(file[self.data_path]) result = projections[image_keys==image_key_id] return result else: @@ -76,8 +92,8 @@ class NexusReader(object): projection_indexes = index_array[0][dimensions[0]] new_dimensions = list(dimensions) new_dimensions[0]= projection_indexes - new_dimensions = tuple(new_dimensions) - result = np.array(file['entry1/tomo_entry/data/data'][new_dimensions]) + new_dimensions = tuple(new_dimensions) + result = np.array(file[self.data_path][new_dimensions]) return result except: print("Error reading nexus file") @@ -88,6 +104,12 @@ class NexusReader(object): Loads the projection data from the nexus file. returns: numpy array with projection data ''' + try: + if 0 not in self.get_image_keys(): + raise ValueError("Projections are not in the data. Data Path " , + self.data_path) + except KeyError as ke: + raise KeyError(ke.args[0] , self.data_path) return self.load(dimensions, 0) def load_flat(self, dimensions=None): @@ -95,6 +117,12 @@ class NexusReader(object): Loads the flat field data from the nexus file. returns: numpy array with flat field data ''' + try: + if 1 not in self.get_image_keys(): + raise ValueError("Flats are not in the data. Data Path " , + self.data_path) + except KeyError as ke: + raise KeyError(ke.args[0] , self.data_path) return self.load(dimensions, 1) def load_dark(self, dimensions=None): @@ -102,6 +130,12 @@ class NexusReader(object): Loads the Dark field data from the nexus file. returns: numpy array with dark field data ''' + try: + if 2 not in self.get_image_keys(): + raise ValueError("Darks are not in the data. 
Data Path " , + self.data_path) + except KeyError as ke: + raise KeyError(ke.args[0] , self.data_path) return self.load(dimensions, 2) def get_projection_angles(self): @@ -114,11 +148,11 @@ class NexusReader(object): return try: with NexusFile(self.filename,'r') as file: - angles = np.array(file['entry1/tomo_entry/data/rotation_angle'],np.float32) - image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key']) + angles = np.array(file[self.angle_path],np.float32) + image_keys = np.array(file[self.key_path]) return angles[image_keys==0] except: - print("Error reading nexus file") + print("get_projection_angles Error reading nexus file") raise @@ -132,8 +166,8 @@ class NexusReader(object): return try: with NexusFile(self.filename,'r') as file: - projections = file['entry1/tomo_entry/data/data'] - image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key']) + projections = file[self.data_path] + image_keys = np.array(file[self.key_path]) dims = list(projections.shape) dims[0] = dims[1] dims[1] = np.sum(image_keys==0) @@ -151,15 +185,25 @@ class NexusReader(object): if self.filename is None: return try: - with NexusFile(self.filename,'r') as file: - projections = file['entry1/tomo_entry/data/data'] - image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key']) + with NexusFile(self.filename,'r') as file: + try: + projections = file[self.data_path] + except KeyError as ke: + raise KeyError('Error: data path {0} not found\n{1}'\ + .format(self.data_path, + ke.args[0])) + #image_keys = np.array(file[self.key_path]) + image_keys = self.get_image_keys() dims = list(projections.shape) dims[0] = np.sum(image_keys==0) return tuple(dims) except: - print("Error reading nexus file") - raise + print("Warning: Error reading image_keys trying accessing data on " , self.data_path) + with NexusFile(self.filename,'r') as file: + dims = file[self.data_path].shape + return tuple(dims) + + def get_acquisition_data(self, 
dimensions=None): ''' @@ -179,6 +223,160 @@ class NexusReader(object): return AcquisitionData(data, geometry=geometry, dimension_labels=['angle','vertical','horizontal']) + def get_acquisition_data_subset(self, ymin=None, ymax=None): + ''' + This method load the acquisition data and given dimension and returns an AcquisitionData Object + ''' + if not h5pyAvailable: + raise Exception("Error: h5py is not installed") + if self.filename is None: + return + try: + + + with NexusFile(self.filename,'r') as file: + try: + dims = self.get_projection_dimensions() + except KeyError: + pass + dims = file[self.data_path].shape + if ymin is None and ymax is None: + data = np.array(file[self.data_path]) + else: + if ymin is None: + ymin = 0 + if ymax > dims[1]: + raise ValueError('ymax out of range') + data = np.array(file[self.data_path][:,:ymax,:]) + elif ymax is None: + ymax = dims[1] + if ymin < 0: + raise ValueError('ymin out of range') + data = np.array(file[self.data_path][:,ymin:,:]) + else: + if ymax > dims[1]: + raise ValueError('ymax out of range') + if ymin < 0: + raise ValueError('ymin out of range') + + data = np.array(file[self.data_path] + [: , ymin:ymax , :] ) + + except: + print("Error reading nexus file") + raise + + + try: + angles = self.get_projection_angles() + except KeyError as ke: + n = data.shape[0] + angles = np.linspace(0, n, n+1, dtype=np.float32) + + if ymax-ymin > 1: + + geometry = AcquisitionGeometry('parallel', '3D', + angles, + pixel_num_h = dims[2], + pixel_size_h = 1 , + pixel_num_v = ymax-ymin, + pixel_size_v = 1, + dist_source_center = None, + dist_center_detector = None, + channels = 1) + return AcquisitionData(data, False, geometry=geometry, + dimension_labels=['angle','vertical','horizontal']) + elif ymax-ymin == 1: + geometry = AcquisitionGeometry('parallel', '2D', + angles, + pixel_num_h = dims[2], + pixel_size_h = 1 , + dist_source_center = None, + dist_center_detector = None, + channels = 1) + return AcquisitionData(data.squeeze(), 
False, geometry=geometry, + dimension_labels=['angle','horizontal']) + def get_acquisition_data_slice(self, y_slice=0): + return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1) + def get_acquisition_data_whole(self): + with NexusFile(self.filename,'r') as file: + try: + dims = self.get_projection_dimensions() + except KeyError: + print ("Warning: ") + dims = file[self.data_path].shape + + ymin = 0 + ymax = dims[1] - 1 + + return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax) + + + + def list_file_content(self): + try: + with NexusFile(self.filename,'r') as file: + file.visit(print) + except: + print("Error reading nexus file") + raise + def get_acquisition_data_batch(self, bmin=None, bmax=None): + if not h5pyAvailable: + raise Exception("Error: h5py is not installed") + if self.filename is None: + return + try: + + + with NexusFile(self.filename,'r') as file: + try: + dims = self.get_projection_dimensions() + except KeyError: + dims = file[self.data_path].shape + if bmin is None or bmax is None: + raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits') + + if bmin >= 0 and bmin < bmax and bmax <= dims[0]: + data = np.array(file[self.data_path][bmin:bmax]) + else: + raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0])) + + except: + print("Error reading nexus file") + raise + + + try: + angles = self.get_projection_angles()[bmin:bmax] + except KeyError as ke: + n = data.shape[0] + angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax] + + if bmax-bmin > 1: + + geometry = AcquisitionGeometry('parallel', '3D', + angles, + pixel_num_h = dims[2], + pixel_size_h = 1 , + pixel_num_v = bmax-bmin, + pixel_size_v = 1, + dist_source_center = None, + dist_center_detector = None, + channels = 1) + return AcquisitionData(data, False, geometry=geometry, + dimension_labels=['angle','vertical','horizontal']) + elif bmax-bmin == 1: + geometry = 
AcquisitionGeometry('parallel', '2D', + angles, + pixel_num_h = dims[2], + pixel_size_h = 1 , + dist_source_center = None, + dist_center_detector = None, + channels = 1) + return AcquisitionData(data.squeeze(), False, geometry=geometry, + dimension_labels=['angle','horizontal']) + + class XTEKReader(object): ''' diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py index a2d0446..6a9057a 100755 --- a/Wrappers/Python/ccpi/processors.py +++ b/Wrappers/Python/ccpi/processors.py @@ -19,6 +19,7 @@ from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ AcquisitionGeometry, ImageGeometry, ImageData +from ccpi.reconstruction.parallelbeam import alg as pbalg import numpy from scipy import ndimage @@ -39,8 +40,8 @@ class Normalizer(DataProcessor): def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): kwargs = { - 'flat_field' : None, - 'dark_field' : None, + 'flat_field' : flat_field, + 'dark_field' : dark_field, # very small number. 
Used when there is a division by zero 'tolerance' : tolerance } @@ -53,7 +54,8 @@ class Normalizer(DataProcessor): self.set_dark_field(dark_field) def check_input(self, dataset): - if dataset.number_of_dimensions == 3: + if dataset.number_of_dimensions == 3 or\ + dataset.number_of_dimensions == 2: return True else: raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ @@ -65,7 +67,7 @@ class Normalizer(DataProcessor): raise ValueError('Dark Field should be 2D') elif len(numpy.shape(df)) == 2: self.dark_field = df - elif issubclass(type(df), DataSet): + elif issubclass(type(df), DataContainer): self.dark_field = self.set_dark_field(df.as_array()) def set_flat_field(self, df): @@ -74,7 +76,7 @@ class Normalizer(DataProcessor): raise ValueError('Flat Field should be 2D') elif len(numpy.shape(df)) == 2: self.flat_field = df - elif issubclass(type(df), DataSet): + elif issubclass(type(df), DataContainer): self.flat_field = self.set_flat_field(df.as_array()) @staticmethod @@ -86,22 +88,40 @@ class Normalizer(DataProcessor): c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0 return c + @staticmethod + def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): + '''returns the estimated relative error of the normalised projection + + n = (projection - dark) / (flat - dark) + Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) + ''' + a = (projection - dark) + b = (flat-dark) + df = delta_flat / flat + dd = delta_dark / dark + rel_norm_error = (b + a) / (b * a) * (df + dd) + return rel_norm_error + def process(self): projections = self.get_input() dark = self.dark_field flat = self.flat_field - if not (projections.shape[1:] == dark.shape and \ - projections.shape[1:] == flat.shape): - raise ValueError('Flats/Dark and projections size do not match.') - - - a = numpy.asarray( - [ Normalizer.normalize_projection( - projection, flat, dark, self.tolerance) \ - for projection in projections.as_array() ] - ) 
+ if projections.number_of_dimensions == 3: + if not (projections.shape[1:] == dark.shape and \ + projections.shape[1:] == flat.shape): + raise ValueError('Flats/Dark and projections size do not match.') + + + a = numpy.asarray( + [ Normalizer.normalize_projection( + projection, flat, dark, self.tolerance) \ + for projection in projections.as_array() ] + ) + elif projections.number_of_dimensions == 2: + a = Normalizer.normalize_projection(projections.as_array(), + flat, dark, self.tolerance) y = type(projections)( a , True, dimension_labels=projections.dimension_labels, geometry=projections.geometry) @@ -388,3 +408,107 @@ class CenterOfRotationFinder(DataProcessor): return cor + +class AcquisitionDataPadder(DataProcessor): + '''Normalization based on flat and dark + + This processor read in a AcquisitionData and normalises it based on + the instrument reading with and without incident photons or neutrons. + + Input: AcquisitionData + Parameter: 2D projection with flat field (or stack) + 2D projection with dark field (or stack) + Output: AcquisitionDataSetn + ''' + + def __init__(self, + center_of_rotation = None, + acquisition_geometry = None, + pad_value = 1e-5): + kwargs = { + 'acquisition_geometry' : acquisition_geometry, + 'center_of_rotation' : center_of_rotation, + 'pad_value' : pad_value + } + + super(AcquisitionDataPadder, self).__init__(**kwargs) + + def check_input(self, dataset): + if self.acquisition_geometry is None: + self.acquisition_geometry = dataset.geometry + if dataset.number_of_dimensions == 3: + return True + else: + raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ + .format(dataset.number_of_dimensions)) + + def process(self): + projections = self.get_input() + w = projections.get_dimension_size('horizontal') + delta = w - 2 * self.center_of_rotation + + padded_width = int ( + numpy.ceil(abs(delta)) + w + ) + delta_pix = padded_width - w + + voxel_per_pixel = 1 + geom = 
pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), + self.acquisition_geometry.angles, + self.center_of_rotation, + voxel_per_pixel ) + + padded_geometry = self.acquisition_geometry.clone() + + padded_geometry.pixel_num_h = geom['n_h'] + padded_geometry.pixel_num_v = geom['n_v'] + + delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h + delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v + + if delta_pix_h == 0: + delta_pix_h = delta_pix + padded_geometry.pixel_num_h = padded_width + #initialize a new AcquisitionData with values close to 0 + out = AcquisitionData(geometry=padded_geometry) + out = out + self.pad_value + + + #pad in the horizontal-vertical plane -> slice on angles + if delta > 0: + #pad left of middle + command = "out.array[" + for i in range(out.number_of_dimensions): + if out.dimension_labels[i] == 'horizontal': + value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) + command = command + str(value) + else: + if out.dimension_labels[i] == 'vertical' : + value = '{0}:'.format(delta_pix_v) + command = command + str(value) + else: + command = command + ":" + if i < out.number_of_dimensions -1: + command = command + ',' + command = command + '] = projections.array' + #print (command) + else: + #pad right of middle + command = "out.array[" + for i in range(out.number_of_dimensions): + if out.dimension_labels[i] == 'horizontal': + value = '{0}:{1}'.format(0, w) + command = command + str(value) + else: + if out.dimension_labels[i] == 'vertical' : + value = '{0}:'.format(delta_pix_v) + command = command + str(value) + else: + command = command + ":" + if i < out.number_of_dimensions -1: + command = command + ',' + command = command + '] = projections.array' + #print (command) + #cleaned = eval(command) + exec(command) + return out \ No newline at end of file diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 45d25f9..5de01c5 100644 --- 
a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: ccpi-framework - version: 0.11.2 + version: 0.11.3 build: @@ -9,6 +9,12 @@ build: # - CIL_VERSION number: 0 + +test: + requires: + - python-wget + - cvxpy # [not win] + requirements: build: - python @@ -19,13 +25,8 @@ requirements: - numpy - scipy - matplotlib - run: - - python - - numpy - - cvxpy # [not win] - - scipy - - matplotlib - + - h5py + about: home: http://www.ccpi.ac.uk license: Apache 2.0 License diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index d6bc340..5bf6538 100755 --- a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -20,6 +20,13 @@ from ccpi.optimisation.ops import LinearOperatorMatrix from ccpi.optimisation.ops import TomoIdentity from ccpi.optimisation.ops import Identity + +import numpy.testing +import wget +import os +from ccpi.io.reader import NexusReader + + try: from cvxpy import * cvx_not_installable = False @@ -879,6 +886,84 @@ class TestFunction(unittest.TestCase): # s.split(2) # ============================================================================= +class TestNexusReader(unittest.TestCase): + + def setUp(self): + wget.download('https://github.com/DiamondLightSource/Savu/raw/master/test_data/data/24737_fd.nxs') + self.filename = '24737_fd.nxs' + + def tearDown(self): + os.remove(self.filename) + + + def testGetDimensions(self): + nr = NexusReader(self.filename) + self.assertEqual(nr.get_sinogram_dimensions(), (135, 91, 160), "Sinogram dimensions are not correct") + + def testGetProjectionDimensions(self): + nr = NexusReader(self.filename) + self.assertEqual(nr.get_projection_dimensions(), (91,135,160), "Projection dimensions are not correct") + + def testLoadProjectionWithoutDimensions(self): + nr = NexusReader(self.filename) + projections = nr.load_projection() + self.assertEqual(projections.shape, (91,135,160), "Loaded 
projection data dimensions are not correct") + + def testLoadProjectionWithDimensions(self): + nr = NexusReader(self.filename) + projections = nr.load_projection((slice(0,1), slice(0,135), slice(0,160))) + self.assertEqual(projections.shape, (1,135,160), "Loaded projection data dimensions are not correct") + + def testLoadProjectionCompareSingle(self): + nr = NexusReader(self.filename) + projections_full = nr.load_projection() + projections_part = nr.load_projection((slice(0,1), slice(0,135), slice(0,160))) + numpy.testing.assert_array_equal(projections_part, projections_full[0:1,:,:]) + + def testLoadProjectionCompareMulti(self): + nr = NexusReader(self.filename) + projections_full = nr.load_projection() + projections_part = nr.load_projection((slice(0,3), slice(0,135), slice(0,160))) + numpy.testing.assert_array_equal(projections_part, projections_full[0:3,:,:]) + + def testLoadProjectionCompareRandom(self): + nr = NexusReader(self.filename) + projections_full = nr.load_projection() + projections_part = nr.load_projection((slice(1,8), slice(5,10), slice(8,20))) + numpy.testing.assert_array_equal(projections_part, projections_full[1:8,5:10,8:20]) + + def testLoadProjectionCompareFull(self): + nr = NexusReader(self.filename) + projections_full = nr.load_projection() + projections_part = nr.load_projection((slice(None,None), slice(None,None), slice(None,None))) + numpy.testing.assert_array_equal(projections_part, projections_full[:,:,:]) + + def testLoadFlatCompareFull(self): + nr = NexusReader(self.filename) + flats_full = nr.load_flat() + flats_part = nr.load_flat((slice(None,None), slice(None,None), slice(None,None))) + numpy.testing.assert_array_equal(flats_part, flats_full[:,:,:]) + + def testLoadDarkCompareFull(self): + nr = NexusReader(self.filename) + darks_full = nr.load_dark() + darks_part = nr.load_dark((slice(None,None), slice(None,None), slice(None,None))) + numpy.testing.assert_array_equal(darks_part, darks_full[:,:,:]) + + def 
testProjectionAngles(self): + nr = NexusReader(self.filename) + angles = nr.get_projection_angles() + self.assertEqual(angles.shape, (91,), "Loaded projection number of angles are not correct") + + def test_get_acquisition_data_subset(self): + nr = NexusReader(self.filename) + key = nr.get_image_keys() + sl = nr.get_acquisition_data_subset(0,10) + data = nr.get_acquisition_data().subset(['vertical','horizontal']) + + self.assertTrue(sl.shape , (10,data.shape[1])) + if __name__ == '__main__': unittest.main() + diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index ef579e9..b584344 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -23,7 +23,7 @@ import os import sys -cil_version='0.11.2' +cil_version='0.11.3' setup( name="ccpi-framework", diff --git a/Wrappers/Python/wip/multifile_nexus.py b/Wrappers/Python/wip/multifile_nexus.py new file mode 100755 index 0000000..d1ad438 --- /dev/null +++ b/Wrappers/Python/wip/multifile_nexus.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Aug 15 16:00:53 2018 + +@author: ofn77899 +""" + +import os +from ccpi.io.reader import NexusReader + +from sys import getsizeof + +import matplotlib.pyplot as plt + +from ccpi.framework import DataProcessor, DataContainer +from ccpi.processors import Normalizer +from ccpi.processors import CenterOfRotationFinder +import numpy + + +class averager(object): + def __init__(self): + self.reset() + + def reset(self): + self.N = 0 + self.avg = 0 + self.min = 0 + self.max = 0 + self.var = 0 + self.ske = 0 + self.kur = 0 + + def add_reading(self, val): + + if (self.N == 0): + self.avg = val; + self.min = val; + self.max = val; + elif (self.N == 1): + #//set min/max + self.max = val if val > self.max else self.max + self.min = val if val < self.min else self.min + + + thisavg = (self.avg + val)/2 + #// initial value is in avg + self.var = (self.avg - thisavg)*(self.avg-thisavg) + (val - thisavg) * (val-thisavg) + self.ske = self.var * (self.avg - 
thisavg) + self.kur = self.var * self.var + self.avg = thisavg + else: + self.max = val if val > self.max else self.max + self.min = val if val < self.min else self.min + + M = self.N + + #// b-factor =(_N + v_(N+1)) / (N+1) + #float b = (val -avg) / (M+1); + b = (val -self.avg) / (M+1) + + self.kur = self.kur + (M *(b*b*b*b) * (1+M*M*M))- (4* b * self.ske) + (6 * b *b * self.var * (M-1)) + + self.ske = self.ske + (M * b*b*b *(M-1)*(M+1)) - (3 * b * self.var * (M-1)) + + #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ; + self.var = self.var * ((M-1)/M) + (b * b * (M+1)) + + self.avg = self.avg * (M/(M+1)) + val / (M+1) + + self.N += 1 + + def stats(self, vector): + i = 0 + while i < vector.size: + self.add_reading(vector[i]) + i+=1 + +avg = averager() +a = numpy.linspace(0,39,40) +avg.stats(a) +print ("average" , avg.avg, a.mean()) +print ("variance" , avg.var, a.var()) +b = a - a.mean() +b *= b +b = numpy.sqrt(sum(b)/(a.size-1)) +print ("std" , numpy.sqrt(avg.var), b) +#%% + +class DataStatMoments(DataProcessor): + '''Normalization based on flat and dark + + This processor read in a AcquisitionData and normalises it based on + the instrument reading with and without incident photons or neutrons. 
+ + Input: AcquisitionData + Parameter: 2D projection with flat field (or stack) + 2D projection with dark field (or stack) + Output: AcquisitionDataSetn + ''' + + def __init__(self, axis, skewness=False, kurtosis=False, offset=0): + kwargs = { + 'axis' : axis, + 'skewness' : skewness, + 'kurtosis' : kurtosis, + 'offset' : offset, + } + #DataProcessor.__init__(self, **kwargs) + super(DataStatMoments, self).__init__(**kwargs) + + + def check_input(self, dataset): + #self.N = dataset.get_dimension_size(self.axis) + return True + + @staticmethod + def add_sample(dataset, N, axis, stats=None, offset=0): + #dataset = self.get_input() + if (N == 0): + # get the axis number along to calculate the stats + + + #axs = dataset.get_dimension_size(self.axis) + # create a placeholder for the output + if stats is None: + ax = dataset.get_dimension_axis(axis) + shape = [dataset.shape[i] for i in range(len(dataset.shape)) if i != ax] + # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg) + shape.insert(0, 4+2) + stats = numpy.zeros(shape) + + stats[0] = dataset.subset(**{axis:N-offset}).array[:] + + #avg = val + elif (N == 1): + # val + stats[5] = dataset.subset(**{axis:N-offset}).array + stats[4] = stats[0] + stats[5] + stats[4] /= 2 # thisavg + stats[5] -= stats[4] # (val - thisavg) + + #float thisavg = (avg + val)/2; + + #// initial value is in avg + #var = (avg - thisavg)*(avg-thisavg) + (val - thisavg) * (val-thisavg); + stats[1] = stats[5] * stats[5] + stats[5] * stats[5] + #skewness = var * (avg - thisavg); + stats[2] = stats[1] * stats[5] + #kurtosis = var * var; + stats[3] = stats[1] * stats[1] + #avg = thisavg; + stats[0] = stats[4] + else: + + #float M = (float)N; + M = N + #val + stats[4] = dataset.subset(**{axis:N-offset}).array + #// b-factor =(_N + v_(N+1)) / (N+1) + stats[5] = stats[4] - stats[0] + stats[5] /= (M+1) # b factor + #float b = (val -avg) / (M+1); + + #kurtosis = kurtosis + (M *(b*b*b*b) * (1+M*M*M))- (4* b * skewness) + (6 
* b *b * var * (M-1)); + #if self.kurtosis: + # stats[3] += (M * stats[5] * stats[5] * stats[5] * stats[5]) - \ + # (4 * stats[5] * stats[2]) + \ + # (6 * stats[5] * stats[5] * stats[1] * (M-1)) + + #skewness = skewness + (M * b*b*b *(M-1)*(M+1)) - (3 * b * var * (M-1)); + #if self.skewness: + # stats[2] = stats[2] + (M * stats[5]* stats[5] * stats[5] * (M-1)*(M-1) ) -\ + # 3 * stats[5] * stats[1] * (M-1) + #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ; + #var = var * ((M-1)/M) + (b * b * (M+1)); + stats[1] = ((M-1)/M) * stats[1] + (stats[5] * stats[5] * (M+1)) + + #avg = avg * (M/(M+1)) + val / (M+1) + stats[0] = stats[0] * (M/(M+1)) + stats[4] / (M+1) + + N += 1 + return stats , N + + + def process(self): + + data = self.get_input() + + #stats, i = add_sample(0) + N = data.get_dimension_size(self.axis) + ax = data.get_dimension_axis(self.axis) + stats = None + i = 0 + while i < N: + stats , i = DataStatMoments.add_sample(data, i, self.axis, stats, offset=self.offset) + self.offset += N + labels = ['StatisticalMoments'] + + labels += [data.dimension_labels[i] \ + for i in range(len(data.dimension_labels)) if i != ax] + y = DataContainer( stats[:4] , False, + dimension_labels=labels) + return y + +directory = r'E:\Documents\Dataset\CCPi\Nexus_test' +data_path="entry1/instrument/pco1_hw_hdf_nochunking/data" + +reader = NexusReader(os.path.join( os.path.abspath(directory) , '74331.nxs')) + +print ("read flat") +read_flat = NexusReader(os.path.join( os.path.abspath(directory) , '74240.nxs')) +read_flat.data_path = data_path +flatsslice = read_flat.get_acquisition_data_whole() +avg = DataStatMoments('angle') + +avg.set_input(flatsslice) +flats = avg.get_output() + +ave = averager() +ave.stats(flatsslice.array[:,0,0]) + +print ("avg" , ave.avg, flats.array[0][0][0]) +print ("var" , ave.var, flats.array[1][0][0]) + +print ("read dark") +read_dark = NexusReader(os.path.join( os.path.abspath(directory) , '74243.nxs')) +read_dark.data_path = data_path + +## 
darks are very many so we proceed in batches +total_size = read_dark.get_projection_dimensions()[0] +print ("total_size", total_size) + +batchsize = 40 +if batchsize > total_size: + batchlimits = [batchsize * (i+1) for i in range(int(total_size/batchsize))] + [total_size-1] +else: + batchlimits = [total_size] +#avg.N = 0 +avg.offset = 0 +N = 0 +for batch in range(len(batchlimits)): + print ("running batch " , batch) + bmax = batchlimits[batch] + bmin = bmax-batchsize + + darksslice = read_dark.get_acquisition_data_batch(bmin,bmax) + if batch == 0: + #create stats + ax = darksslice.get_dimension_axis('angle') + shape = [darksslice.shape[i] for i in range(len(darksslice.shape)) if i != ax] + # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg) + shape.insert(0, 4+2) + print ("create stats shape ", shape) + stats = numpy.zeros(shape) + print ("N" , N) + #avg.set_input(darksslice) + i = bmin + while i < bmax: + stats , i = DataStatMoments.add_sample(darksslice, i, 'angle', stats, bmin) + print ("{0}-{1}-{2}".format(bmin, i , bmax ) ) + +darks = stats +#%% + +fig = plt.subplot(2,2,1) +fig.imshow(flats.subset(StatisticalMoments=0).array) +fig = plt.subplot(2,2,2) +fig.imshow(numpy.sqrt(flats.subset(StatisticalMoments=1).array)) +fig = plt.subplot(2,2,3) +fig.imshow(darks[0]) +fig = plt.subplot(2,2,4) +fig.imshow(numpy.sqrt(darks[1])) +plt.show() + + +#%% +norm = Normalizer(flat_field=flats.array[0,200,:], dark_field=darks[0,200,:]) +#norm.set_flat_field(flats.array[0,200,:]) +#norm.set_dark_field(darks.array[0,200,:]) +norm.set_input(reader.get_acquisition_data_slice(200)) + +n = Normalizer.normalize_projection(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], 1e-5) +#dn_n= Normalizer.estimate_normalised_error(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], +# numpy.sqrt(flats.array[1,200,:]), numpy.sqrt(darks[1,200,:])) +#%% + + +#%% +fig = plt.subplot(2,1,1) + + +fig.imshow(norm.get_input().as_array()) 
+fig = plt.subplot(2,1,2) +fig.imshow(n) + +#fig = plt.subplot(3,1,3) +#fig.imshow(dn_n) + + +plt.show() + + + + + + -- cgit v1.2.3 From ccb9a65318aa5ca1b72edfa9acf349cd14ad3747 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 15 Nov 2018 15:37:16 +0000 Subject: Spdhg (#166) * replaces in with explicit test closes #139 * add read of single slice or subset * get_image_keys * reader reads single slice * adds get_acquisition_data_slice and get_acquisition_data_subset it is possible to read in only a subset of the dataset. * added multifile_nexus.py * cleaned example * fix normaliser and reader * wip * wip for reader * added support for non standard?? nexus files updated example with normalisation. * fix camelcase * Added initial unittest for reader bugfix for python 2.7 * fixes for python 2.7 * remove duplicate test requirement * minimal unittest for get_acquisition_data_subset * added SPDHG * work in progress --- Wrappers/Python/ccpi/optimisation/algs.py | 6 +- Wrappers/Python/ccpi/optimisation/ops.py | 14 +- Wrappers/Python/ccpi/optimisation/spdhg.py | 338 +++++++++++++++++++++++++++++ 3 files changed, 350 insertions(+), 8 deletions(-) create mode 100755 Wrappers/Python/ccpi/optimisation/spdhg.py diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 24ed6e9..6890e3b 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -22,7 +22,11 @@ import time from ccpi.optimisation.funcs import Function from ccpi.optimisation.funcs import ZeroFun -from ccpi.framework import ImageData, AcquisitionData +from ccpi.framework import ImageData +from ccpi.framework import AcquisitionData +from ccpi.framework.optimisation.spdhg import spdhg +from ccpi.framework.optimisation.spdhg import KullbackLeibler +from ccpi.framework.optimisation.spdhg import KullbackLeiblerConvexConjugate def FISTA(x_init, f=None, g=None, opt=None): '''Fast Iterative Shrinkage-Thresholding Algorithm diff 
--git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 96f85d8..450b084 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -186,10 +186,10 @@ def PowerMethodNonsquareOld(op,numiters): # return s, x0 -def PowerMethodNonsquare(op,numiters): +def PowerMethodNonsquare(op,numiters , x0=None): # Initialise random # Jakob's - inputsize , outputsize = op.size() + # inputsize , outputsize = op.size() #x0 = ImageContainer(numpy.random.randn(*inputsize) # Edo's #vg = ImageGeometry(voxel_num_x=inputsize[0], @@ -200,16 +200,16 @@ def PowerMethodNonsquare(op,numiters): #print (x0) #x0.fill(numpy.random.randn(*x0.shape)) - - #x0 = op.create_image_data() - x0 = op.allocate_direct() - x0.fill(numpy.random.randn(*x0.shape)) + if x0 is None: + #x0 = op.create_image_data() + x0 = op.allocate_direct() + x0.fill(numpy.random.randn(*x0.shape)) s = numpy.zeros(numiters) # Loop for it in numpy.arange(numiters): x1 = op.adjoint(op.direct(x0)) - x1norm = numpy.sqrt((x1**2).sum()) + x1norm = numpy.sqrt((x1*x1).sum()) #print ("x0 **********" ,x0) #print ("x1 **********" ,x1) s[it] = (x1*x0).sum() / (x0*x0).sum() diff --git a/Wrappers/Python/ccpi/optimisation/spdhg.py b/Wrappers/Python/ccpi/optimisation/spdhg.py new file mode 100755 index 0000000..263a7cd --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/spdhg.py @@ -0,0 +1,338 @@ +# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy + +from ccpi.optimisation.funcs import Function +from ccpi.framework import ImageData +from ccpi.framework import AcquisitionData + + +class spdhg(): + """Computes a saddle point with a stochastic PDHG. + + This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that + + (x*, y*) in arg min_x max_y sum_i=1^n - f*[i](y_i) + g(x) + + where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and + proper functionals. For this algorithm, they all may be non-smooth and no + strong convexity is assumed. + + Parameters + ---------- + f : list of functions + Functionals Y[i] -> IR_infty that all have a convex conjugate with a + proximal operator, i.e. + f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i]. + g : function + Functional X -> IR_infty that has a proximal operator, i.e. + g.prox(tau) : X -> X. + A : list of functions + Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint + x : primal variable, optional + By default equals 0. + y : dual variable, optional + Part of a product space. By default equals 0. + z : variable, optional + Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0. + tau : scalar / vector / matrix, optional + Step size for primal variable. Note that the proximal operator of g + has to be well-defined for this input. + sigma : scalar, optional + Scalar / vector / matrix used as step size for dual variable. Note that + the proximal operator related to f (see above) has to be well-defined + for this input. + prob : list of scalars, optional + Probabilities prob[i] that a subset i is selected in each iteration. + If fun_select is not given, then the sum of all probabilities must + equal 1. 
+ A_norms : list of scalars, optional + Norms of the operators in A. Can be used to determine the step sizes + tau and sigma and the probabilities prob. + fun_select : function, optional + Function that selects blocks at every iteration IN -> {1,...,n}. By + default this is serial sampling, fun_select(k) selects an index + i \in {1,...,n} with probability prob[i]. + + References + ---------- + [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, + *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling + and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808 + (2018) https://doi.org/10.1137/17M1134834 + + [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott, + A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a + stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII, + 58 (2017) http://doi.org/10.1117/12.2272946. + + [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster + PET Reconstruction with Non-Smooth Priors by Randomization and + Preconditioning*. 
(2018) ArXiv: http://arxiv.org/abs/1808.07150 + """ + + def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None, + prob=None, A_norms=None, fun_select=None): + # fun_select is optional and by default performs serial sampling + + if x is None: + x = A[0].allocate_direct(0) + + if y is None: + if z is not None: + raise ValueError('y and z have to be defaulted together') + + y = [Ai.allocate_adjoint(0) for Ai in A] + z = 0 * x.copy() + + else: + if z is None: + raise ValueError('y and z have to be defaulted together') + + if A_norms is not None: + if tau is not None or sigma is not None or prob is not None: + raise ValueError('Either A_norms or (tau, sigma, prob) must ' + 'be given') + + tau = 1 / sum(A_norms) + sigma = [1 / nA for nA in A_norms] + prob = [nA / sum(A_norms) for nA in A_norms] + + #uniform prob, needs different sigma and tau + #n = len(A) + #prob = [1./n] * n + + if fun_select is None: + if prob is None: + raise ValueError('prob was not determined') + + def fun_select(k): + return [int(numpy.random.choice(len(A), 1, p=prob))] + + self.iter = 0 + self.x = x + + self.y = y + self.z = z + + self.f = f + self.g = g + self.A = A + self.tau = tau + self.sigma = sigma + self.prob = prob + self.fun_select = fun_select + + # Initialize variables + self.z_relax = z.copy() + self.tmp = self.x.copy() + + def update(self): + # select block + selected = self.fun_select(self.iter) + + # update primal variable + #tmp = (self.x - self.tau * self.z_relax).as_array() + #self.x.fill(self.g.prox(tmp, self.tau)) + self.tmp = - self.tau * self.z_relax + self.tmp += self.x + self.x = self.g.prox(self.tmp, self.tau) + + # update dual variable and z, z_relax + self.z_relax = self.z.copy() + for i in selected: + # save old yi + y_old = self.y[i].copy() + + # y[i]= prox(tmp) + tmp = y_old + self.sigma[i] * self.A[i].direct(self.x) + self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i]) + + # update adjoint of dual variable + dz = self.A[i].adjoint(self.y[i] 
- y_old) + self.z += dz + + # compute extrapolation + self.z_relax += (1 + 1 / self.prob[i]) * dz + + self.iter += 1 + + +## Functions + +class KullbackLeibler(Function): + def __init__(self, data, background): + self.data = data + self.background = background + self.__offset = None + + def __call__(self, x): + """Return the KL-divergence in the point ``x``. + + If any component of ``x`` is non-positive, the value is positive + infinity. + + Needs one extra array of memory of the size of `prior`. + """ + + # define short variable names + y = self.data + r = self.background + + # Compute + # sum(x + r - y + y * log(y / (x + r))) + # = sum(x - y * log(x + r)) + self.offset + # Assume that + # x + r > 0 + + # sum the result up + obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset() + + if numpy.isnan(obj): + # In this case, some element was less than or equal to zero + return numpy.inf + else: + return obj + + @property + def convex_conj(self): + """The convex conjugate functional of the KL-functional.""" + return KullbackLeiblerConvexConjugate(self.data, self.background) + + def offset(self): + """The offset which is independent of the unknown.""" + + if self.__offset is None: + tmp = self.domain.element() + + # define short variable names + y = self.data + r = self.background + + tmp = self.domain.element(numpy.maximum(y, 1)) + tmp = r - y + y * numpy.log(tmp) + + # sum the result up + self.__offset = numpy.sum(tmp) + + return self.__offset + +# def __repr__(self): +# """to be added???""" +# """Return ``repr(self)``.""" + # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__, + ## self.domain, self.data, + # self.background) + + +class KullbackLeiblerConvexConjugate(Function): + """The convex conjugate of Kullback-Leibler divergence functional. + + Notes + ----- + The functional :math:`F^*` with prior :math:`g>0` is given by: + + .. 
math:: + F^*(x) + = + \\begin{cases} + \\sum_{i} \left( -g_i \ln(1 - x_i) \\right) + & \\text{if } x_i < 1 \\forall i + \\\\ + +\\infty & \\text{else} + \\end{cases} + + See Also + -------- + KullbackLeibler : convex conjugate functional + """ + + def __init__(self, data, background): + self.data = data + self.background = background + + def __call__(self, x): + y = self.data + r = self.background + + tmp = numpy.sum(- x * r - y * numpy.log(1 - x)) + + if numpy.isnan(tmp): + # In this case, some element was larger than or equal to one + return numpy.inf + else: + return tmp + + + def prox(self, x, tau, out=None): + # Let y = data, r = background, z = x + tau * r + # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y)) + # Currently it needs 3 extra copies of memory. + + if out is None: + out = x.copy() + + # define short variable names + try: # this should be standard SIRF/CIL mode + y = self.data.as_array() + r = self.background.as_array() + x = x.as_array() + + try: + taua = tau.as_array() + except: + taua = tau + + z = x + tau * r + + out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))) + + return out + + except: # e.g. 
for NumPy + y = self.data + r = self.background + + try: + taua = tau.as_array() + except: + taua = tau + + z = x + tau * r + + out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)) + + return out + + @property + def convex_conj(self): + return KullbackLeibler(self.data, self.background) + + +def mult(x, y): + try: + xa = x.as_array() + except: + xa = x + + out = y.clone() + out.fill(xa * y.as_array()) + + return out -- cgit v1.2.3 From b0654b0f0238102c743c3e91f22c422fb4cc542f Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Fri, 16 Nov 2018 17:16:03 +0000 Subject: fix import error --- Wrappers/Python/ccpi/optimisation/algs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index 6890e3b..a736277 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -24,9 +24,9 @@ from ccpi.optimisation.funcs import Function from ccpi.optimisation.funcs import ZeroFun from ccpi.framework import ImageData from ccpi.framework import AcquisitionData -from ccpi.framework.optimisation.spdhg import spdhg -from ccpi.framework.optimisation.spdhg import KullbackLeibler -from ccpi.framework.optimisation.spdhg import KullbackLeiblerConvexConjugate +from ccpi.optimisation.spdhg import spdhg +from ccpi.optimisation.spdhg import KullbackLeibler +from ccpi.optimisation.spdhg import KullbackLeiblerConvexConjugate def FISTA(x_init, f=None, g=None, opt=None): '''Fast Iterative Shrinkage-Thresholding Algorithm -- cgit v1.2.3 From cd29af33331efe8730616e4585fcc1ce69d2931e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 27 Nov 2018 10:56:33 +0000 Subject: changed title --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 06d30fe..49714e8 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# CCPi-PythonFramework +# CCPi-Framework Basic Python Framework for CIL This 
package aims at ensuring a longer life and easy extensibility of the CIL software. This package provides a common framework, hence the name, for the analysis of data in the CT pipeline and quick development of novel reconstruction algorithms. -- cgit v1.2.3 From 5fe09e04771ed62d05bd3154ea182d1b99c5d8ce Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Thu, 3 Jan 2019 13:23:19 +0000 Subject: UPDATE: CIL VERSION needs to be set --- Wrappers/Python/conda-recipe/bld.bat | 4 ++++ Wrappers/Python/conda-recipe/build.sh | 4 ++++ Wrappers/Python/conda-recipe/meta.yaml | 9 ++++----- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Wrappers/Python/conda-recipe/bld.bat b/Wrappers/Python/conda-recipe/bld.bat index d317f54..97a4e62 100644 --- a/Wrappers/Python/conda-recipe/bld.bat +++ b/Wrappers/Python/conda-recipe/bld.bat @@ -1,4 +1,8 @@ +IF NOT DEFINED CIL_VERSION ( +ECHO CIL_VERSION Not Defined. +exit 1 +) ROBOCOPY /E "%RECIPE_DIR%\.." "%SRC_DIR%" %PYTHON% setup.py build_py diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh index 2e68519..43e85d5 100644 --- a/Wrappers/Python/conda-recipe/build.sh +++ b/Wrappers/Python/conda-recipe/build.sh @@ -1,3 +1,7 @@ +if [ -z "$CIL_VERSION" ]; then + echo "Need to set CIL_VERSION" + exit 1 +fi mkdir ${SRC_DIR}/ccpi cp -r "${RECIPE_DIR}/../../../" ${SRC_DIR}/ccpi diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 5de01c5..fbfbafa 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,14 +1,13 @@ package: name: ccpi-framework - version: 0.11.3 + version: {{ environ['CIL_VERSION'] }} build: preserve_egg_dir: False -#script_env: -# - CIL_VERSION - number: 0 - + script_env: + - CIL_VERSION + #number: 0 test: requires: -- cgit v1.2.3 From 11744367fa0a36be738226f14898ac5e8816491e Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 8 Jan 2019 17:02:26 +0000 Subject: added L member to Function 
--- Wrappers/Python/ccpi/optimisation/funcs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index db00e9f..2266560 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -38,7 +38,7 @@ def isSizeCorrect(data1 ,data2): class Function(object): def __init__(self): - pass + self.L = None def __call__(self,x, out=None): raise NotImplementedError def grad(self, x): raise NotImplementedError def prox(self, x, tau): raise NotImplementedError -- cgit v1.2.3 From c324276a3fc2b0fec6c938691fb61c4b42442751 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Tue, 8 Jan 2019 17:06:55 +0000 Subject: Norm2sq does not fail if cannot calculate Lipschitz constant --- Wrappers/Python/ccpi/optimisation/funcs.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index 2266560..c7a6143 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -136,6 +136,8 @@ class Norm2sq(Function): ''' def __init__(self,A,b,c=1.0,memopt=False): + super(Norm2sq, self).__init__() + self.A = A # Should be an operator, default identity self.b = b # Default zero DataSet? self.c = c # Default 1. @@ -146,11 +148,13 @@ class Norm2sq(Function): self.adjoint_placehold = A.allocate_adjoint() - # Compute the Lipschitz parameter from the operator. - # Initialise to None instead and only call when needed. 
- self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) - super(Norm2sq, self).__init__() - + # Compute the Lipschitz parameter from the operator if possible + # Leave it initialised to None otherwise + try: + self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) + except AttributeError as ae: + pass + def grad(self,x): #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b ) return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) -- cgit v1.2.3 From ff4a9962d3701b30dc6709500ee3492788b6977b Mon Sep 17 00:00:00 2001 From: Kulhanek Date: Fri, 11 Jan 2019 13:32:17 +0100 Subject: UPDATE: jenkins build and variants --- .../Python/conda-recipe/conda_build_config.yaml | 3 ++ Wrappers/Python/conda-recipe/meta.yaml | 1 + build/jenkins-build.sh | 45 ++++++++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 build/jenkins-build.sh diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml index 5dd08f5..b7977f3 100644 --- a/Wrappers/Python/conda-recipe/conda_build_config.yaml +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -2,3 +2,6 @@ python: - 2.7 # [not win] - 3.5 - 3.6 +numpy: + - 1.12 + - 1.15 diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index fbfbafa..840590f 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -17,6 +17,7 @@ test: requirements: build: - python + - numpy {{ numpy }} - setuptools run: diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh new file mode 100644 index 0000000..e45a330 --- /dev/null +++ b/build/jenkins-build.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +if [[ -n ${CIL_VERSION} ]] +then + echo Using defined version: $CIL_VERSION +else + export CIL_VERSION=0.10.4 + echo Defining version: $CIL_VERSION +fi +# Script to builds source code in Jenkins environment +# module try-load conda + +# install miniconda if the module is not present +if hash conda 
2>/dev/null; then + echo using conda +else + if [ ! -f Miniconda3-latest-Linux-x86_64.sh ]; then + wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + chmod +x Miniconda3-latest-Linux-x86_64.sh + fi + ./Miniconda3-latest-Linux-x86_64.sh -u -b -p . + PATH=$PATH:./bin +fi + +# presume that git clone is done before this script is launched, if not, uncomment +# git clone https://github.com/vais-ral/CCPi-Regularisation-Toolkit +conda install -y conda-build +#export CIL_VERSION=0.10.2 +#cd CCPi-Regularisation-Toolkit # already there by jenkins +# need to call first build +conda build Wrappers/Python/conda-recipe +# then need to call the same with --output +#- otherwise no build is done :-(, just fake file names are generated +export REG_FILES=`conda build Wrappers/Python/conda-recipe --output` +# REG_FILES variable should contain output files +echo files created: $REG_FILES +#upload to anaconda +if [[ -n ${CCPI_CONDA_TOKEN} ]] +then + conda install anaconda-client + while read -r outfile; do + anaconda -v -t ${CCPI_CONDA_TOKEN} upload $outfile --force --label dev + done <<< "$REG_FILES" +else + echo CCPI_CONDA_TOKEN not defined, will not upload to anaconda. 
+fi -- cgit v1.2.3 From 92bdb9f29baf03f6140e9b6269ed1d8c7c42520b Mon Sep 17 00:00:00 2001 From: Kulhanek Date: Fri, 11 Jan 2019 13:34:23 +0100 Subject: UPDATE: channels conda-roge and ccp --- build/jenkins-build.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh index e45a330..31b81d2 100644 --- a/build/jenkins-build.sh +++ b/build/jenkins-build.sh @@ -24,12 +24,10 @@ fi # presume that git clone is done before this script is launched, if not, uncomment # git clone https://github.com/vais-ral/CCPi-Regularisation-Toolkit conda install -y conda-build -#export CIL_VERSION=0.10.2 -#cd CCPi-Regularisation-Toolkit # already there by jenkins + # need to call first build -conda build Wrappers/Python/conda-recipe +conda build Wrappers/Python/conda-recipe -c conda-forge -c ccpi # then need to call the same with --output -#- otherwise no build is done :-(, just fake file names are generated export REG_FILES=`conda build Wrappers/Python/conda-recipe --output` # REG_FILES variable should contain output files echo files created: $REG_FILES -- cgit v1.2.3 From ba9994e131dae1e1c09f156df12327f2ef10beb4 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 11 Jan 2019 15:44:30 +0000 Subject: UPDATE: CIL VERSION needs to be set (#173) * UPDATE: CIL VERSION needs to be set * UPDATE: jenkins build and variants * UPDATE: channels conda-forge and ccpi --- Wrappers/Python/conda-recipe/bld.bat | 4 ++ Wrappers/Python/conda-recipe/build.sh | 4 ++ .../Python/conda-recipe/conda_build_config.yaml | 3 ++ Wrappers/Python/conda-recipe/meta.yaml | 10 ++--- build/jenkins-build.sh | 43 ++++++++++++++++++++++ 5 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 build/jenkins-build.sh diff --git a/Wrappers/Python/conda-recipe/bld.bat b/Wrappers/Python/conda-recipe/bld.bat index d317f54..97a4e62 100644 --- a/Wrappers/Python/conda-recipe/bld.bat +++ b/Wrappers/Python/conda-recipe/bld.bat @@ -1,4 +1,8 @@ +IF NOT 
DEFINED CIL_VERSION ( +ECHO CIL_VERSION Not Defined. +exit 1 +) ROBOCOPY /E "%RECIPE_DIR%\.." "%SRC_DIR%" %PYTHON% setup.py build_py diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh index 2e68519..43e85d5 100644 --- a/Wrappers/Python/conda-recipe/build.sh +++ b/Wrappers/Python/conda-recipe/build.sh @@ -1,3 +1,7 @@ +if [ -z "$CIL_VERSION" ]; then + echo "Need to set CIL_VERSION" + exit 1 +fi mkdir ${SRC_DIR}/ccpi cp -r "${RECIPE_DIR}/../../../" ${SRC_DIR}/ccpi diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml index 5dd08f5..b7977f3 100644 --- a/Wrappers/Python/conda-recipe/conda_build_config.yaml +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -2,3 +2,6 @@ python: - 2.7 # [not win] - 3.5 - 3.6 +numpy: + - 1.12 + - 1.15 diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 5de01c5..840590f 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -1,14 +1,13 @@ package: name: ccpi-framework - version: 0.11.3 + version: {{ environ['CIL_VERSION'] }} build: preserve_egg_dir: False -#script_env: -# - CIL_VERSION - number: 0 - + script_env: + - CIL_VERSION + #number: 0 test: requires: @@ -18,6 +17,7 @@ test: requirements: build: - python + - numpy {{ numpy }} - setuptools run: diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh new file mode 100644 index 0000000..31b81d2 --- /dev/null +++ b/build/jenkins-build.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +if [[ -n ${CIL_VERSION} ]] +then + echo Using defined version: $CIL_VERSION +else + export CIL_VERSION=0.10.4 + echo Defining version: $CIL_VERSION +fi +# Script to builds source code in Jenkins environment +# module try-load conda + +# install miniconda if the module is not present +if hash conda 2>/dev/null; then + echo using conda +else + if [ ! 
-f Miniconda3-latest-Linux-x86_64.sh ]; then + wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh + chmod +x Miniconda3-latest-Linux-x86_64.sh + fi + ./Miniconda3-latest-Linux-x86_64.sh -u -b -p . + PATH=$PATH:./bin +fi + +# presume that git clone is done before this script is launched, if not, uncomment +# git clone https://github.com/vais-ral/CCPi-Regularisation-Toolkit +conda install -y conda-build + +# need to call first build +conda build Wrappers/Python/conda-recipe -c conda-forge -c ccpi +# then need to call the same with --output +export REG_FILES=`conda build Wrappers/Python/conda-recipe --output` +# REG_FILES variable should contain output files +echo files created: $REG_FILES +#upload to anaconda +if [[ -n ${CCPI_CONDA_TOKEN} ]] +then + conda install anaconda-client + while read -r outfile; do + anaconda -v -t ${CCPI_CONDA_TOKEN} upload $outfile --force --label dev + done <<< "$REG_FILES" +else + echo CCPI_CONDA_TOKEN not defined, will not upload to anaconda. 
+fi -- cgit v1.2.3 From 37624a19d3672c87ca75d2ac05c5d72955e789dc Mon Sep 17 00:00:00 2001 From: Kulhanek Date: Mon, 14 Jan 2019 13:51:15 +0100 Subject: update pin-compatible --- Wrappers/Python/conda-recipe/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 840590f..29a2b12 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -21,6 +21,7 @@ requirements: - setuptools run: + - {{ pin_compatible('numpy', max_pin='x.x') }} - python - numpy - scipy -- cgit v1.2.3 From 70321f2f61a2404330732c16c06ae8cbdc454985 Mon Sep 17 00:00:00 2001 From: Kulhanek Date: Mon, 14 Jan 2019 13:52:19 +0100 Subject: update --- Wrappers/Python/conda-recipe/meta.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index 29a2b12..1b7cae6 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -2,7 +2,6 @@ package: name: ccpi-framework version: {{ environ['CIL_VERSION'] }} - build: preserve_egg_dir: False script_env: -- cgit v1.2.3 From 5fc7abb8688dae095d0c8274ae5679c44a9f4149 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 12:04:10 +0000 Subject: Update jenkins-build.sh --- build/jenkins-build.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh index 31b81d2..f1b9ad3 100644 --- a/build/jenkins-build.sh +++ b/build/jenkins-build.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash + +# define CIL_VERSION if not defined by calling environment if [[ -n ${CIL_VERSION} ]] then echo Using defined version: $CIL_VERSION @@ -6,12 +8,10 @@ else export CIL_VERSION=0.10.4 echo Defining version: $CIL_VERSION fi -# Script to builds source code in Jenkins environment -# module try-load conda -# install miniconda if the module is not present +# install miniconda if it 
is not already present if hash conda 2>/dev/null; then - echo using conda + echo using preinstalled conda else if [ ! -f Miniconda3-latest-Linux-x86_64.sh ]; then wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -- cgit v1.2.3 From 1cb5c41410d2c0db25849290fe7eeb5e960f053d Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:01:43 +0000 Subject: Update jenkins-build.sh --- build/jenkins-build.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh index f1b9ad3..dcdf3da 100644 --- a/build/jenkins-build.sh +++ b/build/jenkins-build.sh @@ -1,8 +1,7 @@ #!/usr/bin/env bash # define CIL_VERSION if not defined by calling environment -if [[ -n ${CIL_VERSION} ]] -then +if [[ -n ${CIL_VERSION} ]]; then echo Using defined version: $CIL_VERSION else export CIL_VERSION=0.10.4 @@ -13,6 +12,7 @@ fi if hash conda 2>/dev/null; then echo using preinstalled conda else + echo installing miniconda if [ ! 
-f Miniconda3-latest-Linux-x86_64.sh ]; then wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh chmod +x Miniconda3-latest-Linux-x86_64.sh @@ -22,7 +22,7 @@ else fi # presume that git clone is done before this script is launched, if not, uncomment -# git clone https://github.com/vais-ral/CCPi-Regularisation-Toolkit +# git clone https://github.com/vais-ral/CCPi-Framework.git conda install -y conda-build # need to call first build -- cgit v1.2.3 From 4f28f9195c1c7b56f679410ae41a7627d4bb6382 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:08:21 +0000 Subject: Update conda_build_config.yaml --- Wrappers/Python/conda-recipe/conda_build_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml index b7977f3..7afbb0f 100644 --- a/Wrappers/Python/conda-recipe/conda_build_config.yaml +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -4,4 +4,4 @@ python: - 3.6 numpy: - 1.12 - - 1.15 + # doesn't build with - 1.15 -- cgit v1.2.3 From 10551e618a601480727798ddded8feeda844e327 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:19:17 +0000 Subject: Update conda_build_config.yaml --- Wrappers/Python/conda-recipe/conda_build_config.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml index 7afbb0f..0706479 100644 --- a/Wrappers/Python/conda-recipe/conda_build_config.yaml +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -3,5 +3,6 @@ python: - 3.5 - 3.6 numpy: - - 1.12 - # doesn't build with - 1.15 + # doesn't build with, cvxp requires >1.14 + #- 1.12 + - 1.15 -- cgit v1.2.3 From 1fe2b12b9f398ce045ebd7d319ea2d2fbd7f5e45 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:35:33 +0000 Subject: Update README.md --- 
README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 49714e8..d6a2d64 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # CCPi-Framework +Last commit: [![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework&build=6)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/CCPi-Framework/6/) Basic Python Framework for CIL This package aims at ensuring a longer life and easy extensibility of the CIL software. This package provides a common framework, hence the name, for the analysis of data in the CT pipeline and quick development of novel reconstruction algorithms. -- cgit v1.2.3 From 4dfec332eaf26ec3d015475e3f3dc6950b8c204f Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:36:03 +0000 Subject: Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d6a2d64..3c86f00 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # CCPi-Framework -Last commit: [![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework&build=6)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/CCPi-Framework/6/) +[![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework&build=6)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/CCPi-Framework/6/) + Basic Python Framework for CIL This package aims at ensuring a longer life and easy extensibility of the CIL software. This package provides a common framework, hence the name, for the analysis of data in the CT pipeline and quick development of novel reconstruction algorithms. 
-- cgit v1.2.3 From 38d1b956a72b858a996403e1305d16e24aca9c09 Mon Sep 17 00:00:00 2001 From: Tomas Kulhanek Date: Fri, 18 Jan 2019 13:37:35 +0000 Subject: Update README.md ADD: build status from jenkins embeddable build status plugin --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c86f00..c1133bd 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # CCPi-Framework -[![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework&build=6)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/CCPi-Framework/6/) +[![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/job/CCPi-Framework/) Basic Python Framework for CIL -- cgit v1.2.3 From a5bfbc29e1dca36cb36a1aaf44549cfd97efdb9d Mon Sep 17 00:00:00 2001 From: Kulhanek Date: Mon, 28 Jan 2019 12:17:51 +0000 Subject: UPDATE: using universal build --- build/jenkins-build.sh | 44 ++------------------------------------------ 1 file changed, 2 insertions(+), 42 deletions(-) diff --git a/build/jenkins-build.sh b/build/jenkins-build.sh index 31b81d2..009d43d 100644 --- a/build/jenkins-build.sh +++ b/build/jenkins-build.sh @@ -1,43 +1,3 @@ #!/usr/bin/env bash -if [[ -n ${CIL_VERSION} ]] -then - echo Using defined version: $CIL_VERSION -else - export CIL_VERSION=0.10.4 - echo Defining version: $CIL_VERSION -fi -# Script to builds source code in Jenkins environment -# module try-load conda - -# install miniconda if the module is not present -if hash conda 2>/dev/null; then - echo using conda -else - if [ ! -f Miniconda3-latest-Linux-x86_64.sh ]; then - wget -q https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - chmod +x Miniconda3-latest-Linux-x86_64.sh - fi - ./Miniconda3-latest-Linux-x86_64.sh -u -b -p . 
- PATH=$PATH:./bin -fi - -# presume that git clone is done before this script is launched, if not, uncomment -# git clone https://github.com/vais-ral/CCPi-Regularisation-Toolkit -conda install -y conda-build - -# need to call first build -conda build Wrappers/Python/conda-recipe -c conda-forge -c ccpi -# then need to call the same with --output -export REG_FILES=`conda build Wrappers/Python/conda-recipe --output` -# REG_FILES variable should contain output files -echo files created: $REG_FILES -#upload to anaconda -if [[ -n ${CCPI_CONDA_TOKEN} ]] -then - conda install anaconda-client - while read -r outfile; do - anaconda -v -t ${CCPI_CONDA_TOKEN} upload $outfile --force --label dev - done <<< "$REG_FILES" -else - echo CCPI_CONDA_TOKEN not defined, will not upload to anaconda. -fi +export CCPI_BUILD_ARGS="-c conda-forge -c ccpi" +bash <(curl -L https://raw.githubusercontent.com/vais-ral/CCPi-VirtualMachine/master/scripts/jenkins-build.sh) -- cgit v1.2.3 From 06232063fb183bdd67eb3cb153b8b62c3c511a6f Mon Sep 17 00:00:00 2001 From: vais-ral Date: Tue, 29 Jan 2019 09:35:53 +0000 Subject: Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c1133bd..bfbdbbd 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ + +| master build | pull request build | +|--------|-------------| +| [![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/job/CCPi-Framework/) | [![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework-dev)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/job/CCPi-Framework-dev/) | + # CCPi-Framework -[![Build Status](https://anvil.softeng-support.ac.uk/jenkins/buildStatus/icon?job=CILsingle/CCPi-Framework)](https://anvil.softeng-support.ac.uk/jenkins/job/CILsingle/job/CCPi-Framework/) Basic Python Framework for CIL 
-- cgit v1.2.3