diff options
author | jakobsj <jakobsj@users.noreply.github.com> | 2019-02-14 16:30:23 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-02-14 16:30:23 +0000 |
commit | bc728075361f0cd978a223e8f328cb11b2b99d60 (patch) | |
tree | 30a06a23e8f0f7e42548106f3b1dcb35b1266fe5 /Wrappers | |
parent | 94eb6d54fb38f04999b5e8b1d0b2b7b66309b80f (diff) | |
parent | 06232063fb183bdd67eb3cb153b8b62c3c511a6f (diff) | |
download | framework-bc728075361f0cd978a223e8f328cb11b2b99d60.tar.gz framework-bc728075361f0cd978a223e8f328cb11b2b99d60.tar.bz2 framework-bc728075361f0cd978a223e8f328cb11b2b99d60.tar.xz framework-bc728075361f0cd978a223e8f328cb11b2b99d60.zip |
Merge branch 'master' into colourbay_initial_demo
Diffstat (limited to 'Wrappers')
-rw-r--r-- | Wrappers/Python/ccpi/framework.py | 241 | ||||
-rw-r--r-- | Wrappers/Python/ccpi/io/reader.py | 228 | ||||
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/algs.py | 183 | ||||
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/funcs.py | 204 | ||||
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/ops.py | 124 | ||||
-rwxr-xr-x | Wrappers/Python/ccpi/optimisation/spdhg.py | 338 | ||||
-rwxr-xr-x | Wrappers/Python/ccpi/processors.py | 154 | ||||
-rw-r--r-- | Wrappers/Python/conda-recipe/bld.bat | 2 | ||||
-rw-r--r-- | Wrappers/Python/conda-recipe/build.sh | 2 | ||||
-rw-r--r-- | Wrappers/Python/conda-recipe/conda_build_config.yaml | 8 | ||||
-rw-r--r-- | Wrappers/Python/conda-recipe/meta.yaml | 15 | ||||
-rwxr-xr-x | Wrappers/Python/conda-recipe/run_test.py | 928 | ||||
-rw-r--r-- | Wrappers/Python/setup.py | 6 | ||||
-rw-r--r-- | Wrappers/Python/wip/demo_compare_cvx.py | 112 | ||||
-rw-r--r-- | Wrappers/Python/wip/demo_imat_multichan_RGLTK.py | 1 | ||||
-rwxr-xr-x | Wrappers/Python/wip/demo_memhandle.py | 193 | ||||
-rw-r--r-- | Wrappers/Python/wip/demo_test_sirt.py | 176 | ||||
-rwxr-xr-x | Wrappers/Python/wip/multifile_nexus.py | 307 |
18 files changed, 2941 insertions, 281 deletions
diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py index f4cd406..9938fb7 100644 --- a/Wrappers/Python/ccpi/framework.py +++ b/Wrappers/Python/ccpi/framework.py @@ -17,24 +17,31 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import from __future__ import division -import abc +from __future__ import print_function +from __future__ import unicode_literals + import numpy import sys from datetime import timedelta, datetime import warnings -if sys.version_info[0] >= 3 and sys.version_info[1] >= 4: - ABC = abc.ABC -else: - ABC = abc.ABCMeta('ABC', (), {}) - def find_key(dic, val): """return the key of dictionary dic given the value""" return [k for k, v in dic.items() if v == val][0] +def message(cls, msg, *args): + msg = "{0}: " + msg + for i in range(len(args)): + msg += " {%d}" %(i+1) + args = list(args) + args.insert(0, cls.__name__ ) + + return msg.format(*args ) + -class ImageGeometry: +class ImageGeometry(object): def __init__(self, voxel_num_x=0, @@ -105,7 +112,7 @@ class ImageGeometry: return repres -class AcquisitionGeometry: +class AcquisitionGeometry(object): def __init__(self, geom_type, @@ -211,7 +218,7 @@ class DataContainer(object): if type(array) == numpy.ndarray: if deep_copy: - self.array = array[:] + self.array = array.copy() else: self.array = array else: @@ -335,12 +342,17 @@ class DataContainer(object): def fill(self, array, **dimension): '''fills the internal numpy array with the one provided''' if dimension == {}: - if numpy.shape(array) != numpy.shape(self.array): - raise ValueError('Cannot fill with the provided array.' + \ - 'Expecting {0} got {1}'.format( - numpy.shape(self.array), - numpy.shape(array))) - self.array = array[:] + if issubclass(type(array), DataContainer) or\ + issubclass(type(array), numpy.ndarray): + if array.shape != self.shape: + raise ValueError('Cannot fill with the provided array.' 
+ \ + 'Expecting {0} got {1}'.format( + self.shape,array.shape)) + if issubclass(type(array), DataContainer): + numpy.copyto(self.array, array.array) + else: + #self.array[:] = array + numpy.copyto(self.array, array) else: command = 'self.array[' @@ -360,8 +372,9 @@ class DataContainer(object): def check_dimensions(self, other): return self.shape == other.shape - - def __add__(self, other): + + ## algebra + def __add__(self, other , out=None, *args, **kwargs): if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() + other.as_array() @@ -407,7 +420,6 @@ class DataContainer(object): return self.__div__(other) def __div__(self, other): - print ("calling __div__") if issubclass(type(other), DataContainer): if self.check_dimensions(other): out = self.as_array() / other.as_array() @@ -470,33 +482,6 @@ class DataContainer(object): type(other))) # __mul__ - - #def __abs__(self): - # operation = FM.OPERATION.ABS - # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) - # __abs__ - - def abs(self): - out = numpy.abs(self.as_array() ) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - def maximum(self,otherscalar): - out = numpy.maximum(self.as_array(),otherscalar) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - - def sign(self): - out = numpy.sign(self.as_array() ) - return type(self)(out, - deep_copy=True, - dimension_labels=self.dimension_labels, - geometry=self.geometry) - # reverse operand def __radd__(self, other): return self + other @@ -523,7 +508,7 @@ class DataContainer(object): return type(self)(fother ** self.array , dimension_labels=self.dimension_labels, geometry=self.geometry) - elif issubclass(other, DataContainer): + elif issubclass(type(other), DataContainer): if self.check_dimensions(other): return type(self)(other.as_array() ** self.array , 
dimension_labels=self.dimension_labels, @@ -532,27 +517,55 @@ class DataContainer(object): raise ValueError('Dimensions do not match') # __rpow__ - def sum(self): - return self.as_array().sum() - # in-place arithmetic operators: # (+=, -=, *=, /= , //=, + # must return self + + def __iadd__(self, other): - return self + other + if isinstance(other, (int, float)) : + numpy.add(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.add(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __iadd__ def __imul__(self, other): - return self * other + if isinstance(other, (int, float)) : + arr = self.as_array() + numpy.multiply(arr, other, out=arr) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.multiply(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __imul__ def __isub__(self, other): - return self - other + if isinstance(other, (int, float)) : + numpy.subtract(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.subtract(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __isub__ def __idiv__(self, other): - print ("call __idiv__") - return self / other + if isinstance(other, (int, float)) : + numpy.divide(self.array, other, out=self.array) + elif issubclass(type(other), DataContainer): + if self.check_dimensions(other): + numpy.divide(self.array, other.array, out=self.array) + else: + raise ValueError('Dimensions do not match') + return self # __idiv__ def __str__ (self, representation=False): @@ -607,13 +620,112 @@ class DataContainer(object): .format(len(self.shape),len(new_order))) - - + def copy(self): + '''alias of clone''' + return self.clone() + + ## binary operations + + def 
pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs): + + if out is None: + if isinstance(x2, (int, float, complex)): + out = pwop(self.as_array() , x2 , *args, **kwargs ) + elif issubclass(type(x2) , DataContainer): + out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + + + elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): + if self.check_dimensions(out) and self.check_dimensions(x2): + pwop(self.as_array(), x2.as_array(), out=out.as_array(), *args, **kwargs ) + #return type(self)(out.as_array(), + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + return out + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): + if self.check_dimensions(out): + + pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs ) + return out + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), x2 , out=out, *args, **kwargs) + #return type(self)(out, + # deep_copy=False, + # dimension_labels=self.dimension_labels, + # geometry=self.geometry) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def add(self, other , out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.add, other, out=out, *args, **kwargs) + + def subtract(self, other, out=None , *args, **kwargs): + return self.pixel_wise_binary(numpy.subtract, other, out=out, *args, **kwargs) + + def multiply(self, other , out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.multiply, other, out=out, *args, **kwargs) + + def divide(self, other 
, out=None ,*args, **kwargs): + return self.pixel_wise_binary(numpy.divide, other, out=out, *args, **kwargs) + + def power(self, other , out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.power, other, out=out, *args, **kwargs) + + def maximum(self,x2, out=None, *args, **kwargs): + return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs) + + ## unary operations + def pixel_wise_unary(self,pwop, out=None, *args, **kwargs): + if out is None: + out = pwop(self.as_array() , *args, **kwargs ) + return type(self)(out, + deep_copy=False, + dimension_labels=self.dimension_labels, + geometry=self.geometry) + elif issubclass(type(out), DataContainer): + if self.check_dimensions(out): + pwop(self.as_array(), out=out.as_array(), *args, **kwargs ) + else: + raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) + elif issubclass(type(out), numpy.ndarray): + if self.array.shape == out.shape and self.array.dtype == out.dtype: + pwop(self.as_array(), out=out, *args, **kwargs) + else: + raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out))) + + def abs(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs) + + def sign(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs) + + def sqrt(self, out=None, *args, **kwargs): + return self.pixel_wise_unary(numpy.sqrt, out=out, *args, **kwargs) + + #def __abs__(self): + # operation = FM.OPERATION.ABS + # return self.callFieldMath(operation, None, self.mask, self.maskOnValue) + # __abs__ + + ## reductions + def sum(self, out=None, *args, **kwargs): + return self.as_array().sum(*args, **kwargs) + class ImageData(DataContainer): '''DataContainer for holding 2D or 3D DataContainer''' def __init__(self, array = None, - deep_copy=True, + deep_copy=False, dimension_labels=None, **kwargs): @@ -671,7 +783,7 @@ class ImageData(DataContainer): raise 
ValueError('Please pass either a DataContainer, ' +\ 'a numpy array or a geometry') else: - if type(array) == DataContainer: + if issubclass(type(array) , DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ array.number_of_dimensions == 3 or \ @@ -683,7 +795,7 @@ class ImageData(DataContainer): # array.dimension_labels, **kwargs) super(ImageData, self).__init__(array.as_array(), deep_copy, array.dimension_labels, **kwargs) - elif type(array) == numpy.ndarray: + elif issubclass(type(array) , numpy.ndarray): if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): raise ValueError( 'Number of dimensions are not 2 or 3 or 4 : {0}'\ @@ -780,7 +892,7 @@ class AcquisitionData(DataContainer): dimension_labels, **kwargs) else: - if type(array) == DataContainer: + if issubclass(type(array) ,DataContainer): # if the array is a DataContainer get the info from there if not ( array.number_of_dimensions == 2 or \ array.number_of_dimensions == 3 or \ @@ -792,7 +904,7 @@ class AcquisitionData(DataContainer): # array.dimension_labels, **kwargs) super(AcquisitionData, self).__init__(array.as_array(), deep_copy, array.dimension_labels, **kwargs) - elif type(array) == numpy.ndarray: + elif issubclass(type(array) ,numpy.ndarray): if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): raise ValueError( 'Number of dimensions are not 2 or 3 or 4 : {0}'\ @@ -860,8 +972,9 @@ class DataProcessor(object): raise NotImplementedError('Implement basic checks for input DataContainer') def get_output(self): - if None in self.__dict__.values(): - raise ValueError('Not all parameters have been passed') + for k,v in self.__dict__.items(): + if v is None: + raise ValueError('Key {0} is None'.format(k)) shouldRun = False if self.runTime == -1: shouldRun = True @@ -1120,4 +1233,4 @@ if __name__ == '__main__': pixel_num_h=5 , channels=2) sino = AcquisitionData(geometry=sgeometry) sino2 = sino.clone() -
\ No newline at end of file + diff --git a/Wrappers/Python/ccpi/io/reader.py b/Wrappers/Python/ccpi/io/reader.py index b0b5414..856f5e0 100644 --- a/Wrappers/Python/ccpi/io/reader.py +++ b/Wrappers/Python/ccpi/io/reader.py @@ -22,6 +22,11 @@ This is a reader module with classes for loading 3D datasets. @author: Mr. Srikanth Nagella
'''
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
from ccpi.framework import AcquisitionGeometry
from ccpi.framework import AcquisitionData
import numpy as np
@@ -53,7 +58,18 @@ class NexusReader(object): self.angles = None
self.geometry = None
self.filename = nexus_filename
-
+ self.key_path = 'entry1/tomo_entry/instrument/detector/image_key'
+ self.data_path = 'entry1/tomo_entry/data/data'
+ self.angle_path = 'entry1/tomo_entry/data/rotation_angle'
+
+ def get_image_keys(self):
+ try:
+ with NexusFile(self.filename,'r') as file:
+ return np.array(file[self.key_path])
+ except KeyError as ke:
+ raise KeyError("get_image_keys: " , ke.args[0] , self.key_path)
+
+
def load(self, dimensions=None, image_key_id=0):
'''
This is generic loading function of flat field, dark field and projection data.
@@ -64,10 +80,10 @@ class NexusReader(object): return
try:
with NexusFile(self.filename,'r') as file:
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ image_keys = np.array(file[self.key_path])
projections = None
if dimensions == None:
- projections = np.array(file['entry1/tomo_entry/data/data'])
+ projections = np.array(file[self.data_path])
result = projections[image_keys==image_key_id]
return result
else:
@@ -76,8 +92,8 @@ class NexusReader(object): projection_indexes = index_array[0][dimensions[0]]
new_dimensions = list(dimensions)
new_dimensions[0]= projection_indexes
- new_dimensions = tuple(new_dimensions)
- result = np.array(file['entry1/tomo_entry/data/data'][new_dimensions])
+ new_dimensions = tuple(new_dimensions)
+ result = np.array(file[self.data_path][new_dimensions])
return result
except:
print("Error reading nexus file")
@@ -88,6 +104,12 @@ class NexusReader(object): Loads the projection data from the nexus file.
returns: numpy array with projection data
'''
+ try:
+ if 0 not in self.get_image_keys():
+ raise ValueError("Projections are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 0)
def load_flat(self, dimensions=None):
@@ -95,6 +117,12 @@ class NexusReader(object): Loads the flat field data from the nexus file.
returns: numpy array with flat field data
'''
+ try:
+ if 1 not in self.get_image_keys():
+ raise ValueError("Flats are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 1)
def load_dark(self, dimensions=None):
@@ -102,6 +130,12 @@ class NexusReader(object): Loads the Dark field data from the nexus file.
returns: numpy array with dark field data
'''
+ try:
+ if 2 not in self.get_image_keys():
+ raise ValueError("Darks are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 2)
def get_projection_angles(self):
@@ -114,11 +148,11 @@ class NexusReader(object): return
try:
with NexusFile(self.filename,'r') as file:
- angles = np.array(file['entry1/tomo_entry/data/rotation_angle'],np.float32)
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ angles = np.array(file[self.angle_path],np.float32)
+ image_keys = np.array(file[self.key_path])
return angles[image_keys==0]
except:
- print("Error reading nexus file")
+ print("get_projection_angles Error reading nexus file")
raise
@@ -132,8 +166,8 @@ class NexusReader(object): return
try:
with NexusFile(self.filename,'r') as file:
- projections = file['entry1/tomo_entry/data/data']
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ projections = file[self.data_path]
+ image_keys = np.array(file[self.key_path])
dims = list(projections.shape)
dims[0] = dims[1]
dims[1] = np.sum(image_keys==0)
@@ -151,15 +185,25 @@ class NexusReader(object): if self.filename is None:
return
try:
- with NexusFile(self.filename,'r') as file:
- projections = file['entry1/tomo_entry/data/data']
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ with NexusFile(self.filename,'r') as file:
+ try:
+ projections = file[self.data_path]
+ except KeyError as ke:
+ raise KeyError('Error: data path {0} not found\n{1}'\
+ .format(self.data_path,
+ ke.args[0]))
+ #image_keys = np.array(file[self.key_path])
+ image_keys = self.get_image_keys()
dims = list(projections.shape)
dims[0] = np.sum(image_keys==0)
return tuple(dims)
except:
- print("Error reading nexus file")
- raise
+ print("Warning: Error reading image_keys trying accessing data on " , self.data_path)
+ with NexusFile(self.filename,'r') as file:
+ dims = file[self.data_path].shape
+ return tuple(dims)
+
+
def get_acquisition_data(self, dimensions=None):
'''
@@ -179,6 +223,160 @@ class NexusReader(object): return AcquisitionData(data, geometry=geometry,
dimension_labels=['angle','vertical','horizontal'])
+ def get_acquisition_data_subset(self, ymin=None, ymax=None):
+ '''
+ This method load the acquisition data and given dimension and returns an AcquisitionData Object
+ '''
+ if not h5pyAvailable:
+ raise Exception("Error: h5py is not installed")
+ if self.filename is None:
+ return
+ try:
+
+
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ pass
+ dims = file[self.data_path].shape
+ if ymin is None and ymax is None:
+ data = np.array(file[self.data_path])
+ else:
+ if ymin is None:
+ ymin = 0
+ if ymax > dims[1]:
+ raise ValueError('ymax out of range')
+ data = np.array(file[self.data_path][:,:ymax,:])
+ elif ymax is None:
+ ymax = dims[1]
+ if ymin < 0:
+ raise ValueError('ymin out of range')
+ data = np.array(file[self.data_path][:,ymin:,:])
+ else:
+ if ymax > dims[1]:
+ raise ValueError('ymax out of range')
+ if ymin < 0:
+ raise ValueError('ymin out of range')
+
+ data = np.array(file[self.data_path]
+ [: , ymin:ymax , :] )
+
+ except:
+ print("Error reading nexus file")
+ raise
+
+
+ try:
+ angles = self.get_projection_angles()
+ except KeyError as ke:
+ n = data.shape[0]
+ angles = np.linspace(0, n, n+1, dtype=np.float32)
+
+ if ymax-ymin > 1:
+
+ geometry = AcquisitionGeometry('parallel', '3D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ pixel_num_v = ymax-ymin,
+ pixel_size_v = 1,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data, False, geometry=geometry,
+ dimension_labels=['angle','vertical','horizontal'])
+ elif ymax-ymin == 1:
+ geometry = AcquisitionGeometry('parallel', '2D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data.squeeze(), False, geometry=geometry,
+ dimension_labels=['angle','horizontal'])
+ def get_acquisition_data_slice(self, y_slice=0):
+ return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1)
+ def get_acquisition_data_whole(self):
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ print ("Warning: ")
+ dims = file[self.data_path].shape
+
+ ymin = 0
+ ymax = dims[1] - 1
+
+ return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax)
+
+
+
+ def list_file_content(self):
+ try:
+ with NexusFile(self.filename,'r') as file:
+ file.visit(print)
+ except:
+ print("Error reading nexus file")
+ raise
+ def get_acquisition_data_batch(self, bmin=None, bmax=None):
+ if not h5pyAvailable:
+ raise Exception("Error: h5py is not installed")
+ if self.filename is None:
+ return
+ try:
+
+
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ dims = file[self.data_path].shape
+ if bmin is None or bmax is None:
+ raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits')
+
+ if bmin >= 0 and bmin < bmax and bmax <= dims[0]:
+ data = np.array(file[self.data_path][bmin:bmax])
+ else:
+ raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0]))
+
+ except:
+ print("Error reading nexus file")
+ raise
+
+
+ try:
+ angles = self.get_projection_angles()[bmin:bmax]
+ except KeyError as ke:
+ n = data.shape[0]
+ angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax]
+
+ if bmax-bmin > 1:
+
+ geometry = AcquisitionGeometry('parallel', '3D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ pixel_num_v = bmax-bmin,
+ pixel_size_v = 1,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data, False, geometry=geometry,
+ dimension_labels=['angle','vertical','horizontal'])
+ elif bmax-bmin == 1:
+ geometry = AcquisitionGeometry('parallel', '2D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data.squeeze(), False, geometry=geometry,
+ dimension_labels=['angle','horizontal'])
+
+
class XTEKReader(object):
'''
diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py index a45100c..a736277 100755 --- a/Wrappers/Python/ccpi/optimisation/algs.py +++ b/Wrappers/Python/ccpi/optimisation/algs.py @@ -21,6 +21,12 @@ import numpy import time from ccpi.optimisation.funcs import Function +from ccpi.optimisation.funcs import ZeroFun +from ccpi.framework import ImageData +from ccpi.framework import AcquisitionData +from ccpi.optimisation.spdhg import spdhg +from ccpi.optimisation.spdhg import KullbackLeibler +from ccpi.optimisation.spdhg import KullbackLeiblerConvexConjugate def FISTA(x_init, f=None, g=None, opt=None): '''Fast Iterative Shrinkage-Thresholding Algorithm @@ -37,27 +43,27 @@ def FISTA(x_init, f=None, g=None, opt=None): opt: additional algorithm ''' # default inputs - if f is None: f = Function() - if g is None: g = Function() + if f is None: f = ZeroFun() + if g is None: g = ZeroFun() # algorithmic parameters if opt is None: - opt = {'tol': 1e-4, 'iter': 1000} - else: - try: - max_iter = opt['iter'] - except KeyError as ke: - opt[ke] = 1000 - try: - opt['tol'] = 1000 - except KeyError as ke: - opt[ke] = 1e-4 - tol = opt['tol'] - max_iter = opt['iter'] + opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} + max_iter = opt['iter'] if 'iter' in opt.keys() else 1000 + tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 + memopt = opt['memopt'] if 'memopt' in opt.keys() else False + + # initialization - x_old = x_init - y = x_init; + if memopt: + y = x_init.clone() + x_old = x_init.clone() + x = x_init.clone() + u = x_init.clone() + else: + x_old = x_init + y = x_init; timing = numpy.zeros(max_iter) criter = numpy.zeros(max_iter) @@ -65,22 +71,44 @@ def FISTA(x_init, f=None, g=None, opt=None): invL = 1/f.L t_old = 1 + + c = f(x_init) + g(x_init) # algorithm loop for it in range(0, max_iter): time0 = time.time() - - u = y - invL*f.grad(y) - - x = g.prox(u,invL) - - t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) - - y = x + 
(t_old-1)/t*(x-x_old) - - x_old = x - t_old = t + if memopt: + # u = y - invL*f.grad(y) + # store the result in x_old + f.gradient(y, out=u) + u.__imul__( -invL ) + u.__iadd__( y ) + # x = g.prox(u,invL) + g.proximal(u, invL, out=x) + + t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) + + # y = x + (t_old-1)/t*(x-x_old) + x.subtract(x_old, out=y) + y.__imul__ ((t_old-1)/t) + y.__iadd__( x ) + + x_old.fill(x) + t_old = t + + + else: + u = y - invL*f.grad(y) + + x = g.prox(u,invL) + + t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) + + y = x + (t_old-1)/t*(x-x_old) + + x_old = x.copy() + t_old = t # time and criterion timing[it] = time.time() - time0 @@ -92,12 +120,13 @@ def FISTA(x_init, f=None, g=None, opt=None): #print(it, 'out of', 10, 'iterations', end='\r'); - criter = criter[0:it+1]; + #criter = criter[0:it+1]; timing = numpy.cumsum(timing[0:it+1]); return x, it, timing, criter -def FBPD(x_init, f=None, g=None, h=None, opt=None): +def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ + regulariser=None, opt=None): '''FBPD Algorithm Parameters: @@ -108,9 +137,9 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): opt: additional algorithm ''' # default inputs - if f is None: f = Function() - if g is None: g = Function() - if h is None: h = Function() + if constraint is None: constraint = ZeroFun() + if data_fidelity is None: data_fidelity = ZeroFun() + if regulariser is None: regulariser = ZeroFun() # algorithmic parameters if opt is None: @@ -126,20 +155,24 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): opt[ke] = 1e-4 tol = opt['tol'] max_iter = opt['iter'] + memopt = opt['memopts'] if 'memopts' in opt.keys() else False # step-sizes - tau = 2 / (g.L + 2); - sigma = (1/tau - g.L/2) / h.L; + tau = 2 / (data_fidelity.L + 2); + sigma = (1/tau - data_fidelity.L/2) / regulariser.L; inv_sigma = 1/sigma # initialization x = x_init - y = h.op.direct(x); + y = operator.direct(x); timing = numpy.zeros(max_iter) criter = numpy.zeros(max_iter) + + + # 
algorithm loop for it in range(0, max_iter): @@ -147,23 +180,23 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None): # primal forward-backward step x_old = x; - x = x - tau * ( g.grad(x) + h.op.adjoint(y) ); - x = f.prox(x, tau); + x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) ); + x = constraint.prox(x, tau); # dual forward-backward step - y = y + sigma * h.op.direct(2*x - x_old); - y = y - sigma * h.prox(inv_sigma*y, inv_sigma); + y = y + sigma * operator.direct(2*x - x_old); + y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma); # time and criterion timing[it] = time.time() - t - criter[it] = f(x) + g(x) + h(h.op.direct(x)); + criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x)) # stopping rule #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: # break - criter = criter[0:it+1]; - timing = numpy.cumsum(timing[0:it+1]); + criter = criter[0:it+1] + timing = numpy.cumsum(timing[0:it+1]) return x, it, timing, criter @@ -191,8 +224,8 @@ def CGLS(x_init, operator , data , opt=None): tol = opt['tol'] max_iter = opt['iter'] - r = data.clone() - x = x_init.clone() + r = data.copy() + x = x_init.copy() d = operator.adjoint(r) @@ -222,4 +255,66 @@ def CGLS(x_init, operator , data , opt=None): criter[it] = (r**2).sum() return x, it, timing, criter + +def SIRT(x_init, operator , data , opt=None, constraint=None): + '''Simultaneous Iterative Reconstruction Technique + + Parameters: + x_init: initial guess + operator: operator for forward/backward projections + data: data to operate on + opt: additional algorithm + constraint: func of Indicator type specifying convex constraint. 
+ ''' + + if opt is None: + opt = {'tol': 1e-4, 'iter': 1000} + else: + try: + max_iter = opt['iter'] + except KeyError as ke: + opt[ke] = 1000 + try: + opt['tol'] = 1000 + except KeyError as ke: + opt[ke] = 1e-4 + tol = opt['tol'] + max_iter = opt['iter'] + + # Set default constraint to unconstrained + if constraint==None: + constraint = Function() + + x = x_init.clone() + + timing = numpy.zeros(max_iter) + criter = numpy.zeros(max_iter) + + # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0 + relax_par = 1.0 + + # Set up scaling matrices D and M. + im1 = ImageData(geometry=x_init.geometry) + im1.array[:] = 1.0 + M = 1/operator.direct(im1) + del im1 + aq1 = AcquisitionData(geometry=M.geometry) + aq1.array[:] = 1.0 + D = 1/operator.adjoint(aq1) + del aq1 + + # algorithm loop + for it in range(0, max_iter): + t = time.time() + r = data - operator.direct(x) + + x = constraint.prox(x + relax_par * (D*operator.adjoint(M*r)),None) + + timing[it] = time.time() - t + if it > 0: + criter[it-1] = (r**2).sum() + + r = data - operator.direct(x) + criter[it] = (r**2).sum() + return x, it, timing, criter diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py index f5463a3..c7a6143 100755 --- a/Wrappers/Python/ccpi/optimisation/funcs.py +++ b/Wrappers/Python/ccpi/optimisation/funcs.py @@ -19,14 +19,32 @@ from ccpi.optimisation.ops import Identity, FiniteDiff2D import numpy +from ccpi.framework import DataContainer +def isSizeCorrect(data1 ,data2): + if issubclass(type(data1), DataContainer) and \ + issubclass(type(data2), DataContainer): + # check dimensionality + if data1.check_dimensions(data2): + return True + elif issubclass(type(data1) , numpy.ndarray) and \ + issubclass(type(data2) , numpy.ndarray): + return data1.shape == data2.shape + else: + raise ValueError("{0}: getting two incompatible types: {1} {2}"\ + .format('Function', type(data1), type(data2))) + return False + class Function(object): def 
__init__(self): - self.op = Identity() - def __call__(self,x): return 0 - def grad(self,x): return 0 - def prox(self,x,tau): return x + self.L = None + def __call__(self,x, out=None): raise NotImplementedError + def grad(self, x): raise NotImplementedError + def prox(self, x, tau): raise NotImplementedError + def gradient(self, x, out=None): raise NotImplementedError + def proximal(self, x, tau, out=None): raise NotImplementedError + class Norm2(Function): @@ -37,10 +55,25 @@ class Norm2(Function): self.gamma = gamma; self.direction = direction; - def __call__(self, x): + def __call__(self, x, out=None): - xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, + if out is None: + xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, keepdims=True)) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + arr = out.as_array() + numpy.square(x.as_array(), out=arr) + xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + p = numpy.sum(self.gamma*xx) return p @@ -53,6 +86,29 @@ class Norm2(Function): p = x.as_array() * xx return type(x)(p,geometry=x.geometry) + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x,tau) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + numpy.square(x.as_array(), out = out.as_array()) + xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction, + keepdims=True )) + xx = numpy.maximum(0, 1 - tau*self.gamma / xx) + x.multiply(xx, out= out.as_array()) + + + elif issubclass(type(out) , numpy.ndarray): + numpy.square(x.as_array(), out=out) + xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) + + xx = numpy.maximum(0, 1 - tau*self.gamma 
/ xx) + x.multiply(xx, out= out) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + class TV2D(Norm2): @@ -79,23 +135,53 @@ class Norm2sq(Function): ''' - def __init__(self,A,b,c=1.0): + def __init__(self,A,b,c=1.0,memopt=False): + super(Norm2sq, self).__init__() + self.A = A # Should be an operator, default identity self.b = b # Default zero DataSet? self.c = c # Default 1. + self.memopt = memopt + if memopt: + #self.direct_placehold = A.adjoint(b) + self.direct_placehold = A.allocate_direct() + self.adjoint_placehold = A.allocate_adjoint() + + + # Compute the Lipschitz parameter from the operator if possible + # Leave it initialised to None otherwise + try: + self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) + except AttributeError as ae: + pass - # Compute the Lipschitz parameter from the operator. - # Initialise to None instead and only call when needed. - self.L = 2.0*self.c*(self.A.get_max_sing_val()**2) - super(Norm2sq, self).__init__() - def grad(self,x): #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b ) - return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) + return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) def __call__(self,x): #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) - return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) + #if out is None: + # return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) + #else: + y = self.A.direct(x) + y.__isub__(self.b) + y.__imul__(y) + return y.sum() * self.c + + def gradient(self, x, out = None): + if self.memopt: + #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) + + self.A.direct(x, out=self.adjoint_placehold) + self.adjoint_placehold.__isub__( self.b ) + self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold) + self.direct_placehold.__imul__(2.0 * self.c) + # can this be avoided? 
+ out.fill(self.direct_placehold) + else: + return self.grad(x) + class ZeroFun(Function): @@ -109,21 +195,103 @@ class ZeroFun(Function): return 0 def prox(self,x,tau): - return x + return x.copy() + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if isSizeCorrect(out, x): + # check dimensionality + if issubclass(type(out), DataContainer): + out.fill(x) + + elif issubclass(type(out) , numpy.ndarray): + out[:] = x + else: + raise ValueError ('Wrong size: x{0} out{1}' + .format(x.shape,out.shape) ) # A more interesting example, least squares plus 1-norm minimization. # Define class to represent 1-norm including prox function class Norm1(Function): def __init__(self,gamma): - # Do nothing self.gamma = gamma self.L = 1 + self.sign_x = None super(Norm1, self).__init__() - def __call__(self,x): - return self.gamma*(x.abs().sum()) + def __call__(self,x,out=None): + if out is None: + return self.gamma*(x.abs().sum()) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + x.abs(out=out) + return out.sum() * self.gamma def prox(self,x,tau): return (x.abs() - tau*self.gamma).maximum(0) * x.sign() + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if isSizeCorrect(x,out): + # check dimensionality + if issubclass(type(out), DataContainer): + v = (x.abs() - tau*self.gamma).maximum(0) + x.sign(out=out) + out *= v + #out.fill(self.prox(x,tau)) + elif issubclass(type(out) , numpy.ndarray): + v = (x.abs() - tau*self.gamma).maximum(0) + out[:] = x.sign() + out *= v + #out[:] = self.prox(x,tau) + else: + raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) + +# Box constraints indicator function. Calling returns 0 if argument is within +# the box. The prox operator is projection onto the box. Only implements one +# scalar lower and one upper as constraint on all elements. 
Should generalise +# to vectors to allow different constraints one elements. +class IndicatorBox(Function): + + def __init__(self,lower=-numpy.inf,upper=numpy.inf): + # Do nothing + self.lower = lower + self.upper = upper + super(IndicatorBox, self).__init__() + + def __call__(self,x): + + if (numpy.all(x.array>=self.lower) and + numpy.all(x.array <= self.upper) ): + val = 0 + else: + val = numpy.inf + return val + + def prox(self,x,tau=None): + return (x.maximum(self.lower)).minimum(self.upper) + + def proximal(self, x, tau, out=None): + if out is None: + return self.prox(x, tau) + else: + if not x.shape == out.shape: + raise ValueError('Norm1 Incompatible size:', + x.shape, out.shape) + #(x.abs() - tau*self.gamma).maximum(0) * x.sign() + x.abs(out = out) + out.__isub__(tau*self.gamma) + out.maximum(0, out=out) + if self.sign_x is None or not x.shape == self.sign_x.shape: + self.sign_x = x.sign() + else: + x.sign(out=self.sign_x) + + out.__imul__( self.sign_x ) diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py index 668b07e..450b084 100755 --- a/Wrappers/Python/ccpi/optimisation/ops.py +++ b/Wrappers/Python/ccpi/optimisation/ops.py @@ -19,69 +19,126 @@ import numpy from scipy.sparse.linalg import svds +from ccpi.framework import DataContainer +from ccpi.framework import AcquisitionData +from ccpi.framework import ImageData +from ccpi.framework import ImageGeometry +from ccpi.framework import AcquisitionGeometry # Maybe operators need to know what types they take as inputs/outputs # to not just use generic DataContainer class Operator(object): - def direct(self,x): + def direct(self,x, out=None): return x - def adjoint(self,x): + def adjoint(self,x, out=None): return x def size(self): # To be defined for specific class raise NotImplementedError def get_max_sing_val(self): raise NotImplementedError + def allocate_direct(self): + raise NotImplementedError + def allocate_adjoint(self): + raise NotImplementedError class 
Identity(Operator): def __init__(self): self.s1 = 1.0 + self.L = 1 super(Identity, self).__init__() - def direct(self,x): - return x + def direct(self,x,out=None): + if out is None: + return x.copy() + else: + out.fill(x) - def adjoint(self,x): - return x + def adjoint(self,x, out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def size(self): + return NotImplemented + + def get_max_sing_val(self): + return self.s1 + +class TomoIdentity(Operator): + def __init__(self, geometry, **kwargs): + self.s1 = 1.0 + self.geometry = geometry + super(TomoIdentity, self).__init__() + + def direct(self,x,out=None): + if out is None: + return x.copy() + else: + out.fill(x) + + def adjoint(self,x, out=None): + if out is None: + return x.copy() + else: + out.fill(x) def size(self): return NotImplemented def get_max_sing_val(self): return self.s1 + def allocate_direct(self): + if issubclass(type(self.geometry), ImageGeometry): + return ImageData(geometry=self.geometry) + elif issubclass(type(self.geometry), AcquisitionGeometry): + return AcquisitionData(geometry=self.geometry) + else: + raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry)) + def allocate_adjoint(self): + return self.allocate_direct() + + class FiniteDiff2D(Operator): def __init__(self): self.s1 = 8.0 super(FiniteDiff2D, self).__init__() - def direct(self,x): + def direct(self,x, out=None): '''Forward differences with Neumann BC.''' + # FIXME this seems to be working only with numpy arrays + d1 = numpy.zeros_like(x.as_array()) d1[:,:-1] = x.as_array()[:,1:] - x.as_array()[:,:-1] d2 = numpy.zeros_like(x.as_array()) d2[:-1,:] = x.as_array()[1:,:] - x.as_array()[:-1,:] d = numpy.stack((d1,d2),0) - - return type(x)(d,geometry=x.geometry) + #x.geometry.voxel_num_z = 2 + return type(x)(d,False,geometry=x.geometry) - def adjoint(self,x): + def adjoint(self,x, out=None): '''Backward differences, Neumann BC.''' Nrows = 
x.get_dimension_size('horizontal_x') - Ncols = x.get_dimension_size('horizontal_x') + Ncols = x.get_dimension_size('horizontal_y') Nchannels = 1 if len(x.shape) == 4: Nchannels = x.get_dimension_size('channel') zer = numpy.zeros((Nrows,1)) xxx = x.as_array()[0,:,:-1] - h = numpy.concatenate((zer,xxx), 1) - numpy.concatenate((xxx,zer), 1) + # + h = numpy.concatenate((zer,xxx), 1) + h -= numpy.concatenate((xxx,zer), 1) zer = numpy.zeros((1,Ncols)) xxx = x.as_array()[1,:-1,:] - v = numpy.concatenate((zer,xxx), 0) - numpy.concatenate((xxx,zer), 0) - return type(x)(h + v,geometry=x.geometry) + # + v = numpy.concatenate((zer,xxx), 0) + v -= numpy.concatenate((xxx,zer), 0) + return type(x)(h + v, False, geometry=x.geometry) def size(self): return NotImplemented @@ -129,10 +186,10 @@ def PowerMethodNonsquareOld(op,numiters): # return s, x0 -def PowerMethodNonsquare(op,numiters): +def PowerMethodNonsquare(op,numiters , x0=None): # Initialise random # Jakob's - #inputsize = op.size()[1] + # inputsize , outputsize = op.size() #x0 = ImageContainer(numpy.random.randn(*inputsize) # Edo's #vg = ImageGeometry(voxel_num_x=inputsize[0], @@ -143,13 +200,16 @@ def PowerMethodNonsquare(op,numiters): #print (x0) #x0.fill(numpy.random.randn(*x0.shape)) - x0 = op.create_image_data() + if x0 is None: + #x0 = op.create_image_data() + x0 = op.allocate_direct() + x0.fill(numpy.random.randn(*x0.shape)) s = numpy.zeros(numiters) # Loop for it in numpy.arange(numiters): x1 = op.adjoint(op.direct(x0)) - x1norm = numpy.sqrt((x1**2).sum()) + x1norm = numpy.sqrt((x1*x1).sum()) #print ("x0 **********" ,x0) #print ("x1 **********" ,x1) s[it] = (x1*x0).sum() / (x0*x0).sum() @@ -162,11 +222,19 @@ class LinearOperatorMatrix(Operator): self.s1 = None # Largest singular value, initially unknown super(LinearOperatorMatrix, self).__init__() - def direct(self,x): - return type(x)(numpy.dot(self.A,x.as_array())) + def direct(self,x, out=None): + if out is None: + return type(x)(numpy.dot(self.A,x.as_array())) 
+ else: + numpy.dot(self.A, x.as_array(), out=out.as_array()) + - def adjoint(self,x): - return type(x)(numpy.dot(self.A.transpose(),x.as_array())) + def adjoint(self,x, out=None): + if out is None: + return type(x)(numpy.dot(self.A.transpose(),x.as_array())) + else: + numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) + def size(self): return self.A.shape @@ -178,3 +246,15 @@ class LinearOperatorMatrix(Operator): return self.s1 else: return self.s1 + def allocate_direct(self): + '''allocates the memory to hold the result of adjoint''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((N_A,1)) + return DataContainer(out) + def allocate_adjoint(self): + '''allocate the memory to hold the result of direct''' + #numpy.dot(self.A.transpose(),x.as_array()) + M_A, N_A = self.A.shape + out = numpy.zeros((M_A,1)) + return DataContainer(out) diff --git a/Wrappers/Python/ccpi/optimisation/spdhg.py b/Wrappers/Python/ccpi/optimisation/spdhg.py new file mode 100755 index 0000000..263a7cd --- /dev/null +++ b/Wrappers/Python/ccpi/optimisation/spdhg.py @@ -0,0 +1,338 @@ +# Copyright 2018 Matthias Ehrhardt, Edoardo Pasca
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy
+
+from ccpi.optimisation.funcs import Function
+from ccpi.framework import ImageData
+from ccpi.framework import AcquisitionData
+
+
class spdhg():
    r"""Computes a saddle point with a stochastic PDHG.

    This means, a solution (x*, y*), y* = (y*_1, ..., y*_n) such that

    (x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i> - f*[i](y_i) + g(x)

    where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
    proper functionals. For this algorithm, they all may be non-smooth and no
    strong convexity is assumed.

    Parameters
    ----------
    f : list of functions
        Functionals Y[i] -> IR_infty that all have a convex conjugate with a
        proximal operator, i.e.
        f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i].
    g : function
        Functional X -> IR_infty that has a proximal operator, i.e.
        g.prox(tau) : X -> X.
    A : list of functions
        Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
    x : primal variable, optional
        By default equals 0.
    y : dual variable, optional
        Part of a product space. By default equals 0.
    z : variable, optional
        Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
    tau : scalar / vector / matrix, optional
        Step size for primal variable. Note that the proximal operator of g
        has to be well-defined for this input.
    sigma : scalar, optional
        Scalar / vector / matrix used as step size for dual variable. Note that
        the proximal operator related to f (see above) has to be well-defined
        for this input.
    prob : list of scalars, optional
        Probabilities prob[i] that a subset i is selected in each iteration.
        If fun_select is not given, then the sum of all probabilities must
        equal 1.
    A_norms : list of scalars, optional
        Norms of the operators in A. Can be used to determine the step sizes
        tau and sigma and the probabilities prob.
    fun_select : function, optional
        Function that selects blocks at every iteration IN -> {1,...,n}. By
        default this is serial sampling, fun_select(k) selects an index
        i \in {1,...,n} with probability prob[i].

    References
    ----------
    [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
    *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
    and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808
    (2018) https://doi.org/10.1137/17M1134834

    [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
    A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
    stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
    58 (2017) http://doi.org/10.1117/12.2272946.

    [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster
    PET Reconstruction with Non-Smooth Priors by Randomization and
    Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150
    """

    def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None,
                 prob=None, A_norms=None, fun_select=None):
        # fun_select is optional and by default performs serial sampling

        if x is None:
            # NOTE(review): ccpi.optimisation.ops.Operator.allocate_direct
            # takes no argument -- confirm the operators passed here accept
            # the extra 0 (e.g. SIRF-style operators)
            x = A[0].allocate_direct(0)

        if y is None:
            if z is not None:
                raise ValueError('y and z have to be defaulted together')

            y = [Ai.allocate_adjoint(0) for Ai in A]
            z = 0 * x.copy()

        else:
            if z is None:
                raise ValueError('y and z have to be defaulted together')

        if A_norms is not None:
            if tau is not None or sigma is not None or prob is not None:
                raise ValueError('Either A_norms or (tau, sigma, prob) must '
                                 'be given')

            # default step sizes and sampling probabilities from [CERS2018];
            # sum(A_norms) hoisted so it is computed once
            norm_sum = sum(A_norms)
            tau = 1 / norm_sum
            sigma = [1 / nA for nA in A_norms]
            prob = [nA / norm_sum for nA in A_norms]

            #uniform prob, needs different sigma and tau
            #n = len(A)
            #prob = [1./n] * n

        if fun_select is None:
            if prob is None:
                raise ValueError('prob was not determined')

            # serial sampling: draw a single index i with probability prob[i]
            def fun_select(k):
                return [int(numpy.random.choice(len(A), 1, p=prob))]

        self.iter = 0
        self.x = x

        self.y = y
        self.z = z

        self.f = f
        self.g = g
        self.A = A
        self.tau = tau
        self.sigma = sigma
        self.prob = prob
        self.fun_select = fun_select

        # Initialize variables
        self.z_relax = z.copy()
        self.tmp = self.x.copy()

    def update(self):
        """Perform one iteration of the stochastic PDHG scheme."""
        # select block(s) for this iteration
        selected = self.fun_select(self.iter)

        # primal step: x = prox_{tau g}(x - tau * z_relax)
        self.tmp = - self.tau * self.z_relax
        self.tmp += self.x
        self.x = self.g.prox(self.tmp, self.tau)

        # dual step on the selected blocks, keeping z = A^* y in sync
        self.z_relax = self.z.copy()
        for i in selected:
            # save old yi
            y_old = self.y[i].copy()

            # y[i] = prox_{sigma[i] f*[i]}(y[i] + sigma[i] * A[i] x)
            tmp = y_old + self.sigma[i] * self.A[i].direct(self.x)
            self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i])

            # update adjoint of dual variable
            dz = self.A[i].adjoint(self.y[i] - y_old)
            self.z += dz

            # compute extrapolation: z_relax = z + (1 / prob[i]) * dz
            self.z_relax += (1 + 1 / self.prob[i]) * dz

        self.iter += 1
+
+
+## Functions
+
class KullbackLeibler(Function):
    """Kullback-Leibler divergence functional KL(data || x + background).

    Parameters
    ----------
    data : array-like
        Measured data ``y`` (assumed non-negative).
    background : array-like
        Known background ``r``; the functional assumes ``x + r > 0``.
    """

    def __init__(self, data, background):
        self.data = data
        self.background = background
        # cached x-independent part of the divergence, computed lazily
        self.__offset = None

    def __call__(self, x):
        """Return the KL-divergence in the point ``x``.

        If any components of ``x`` is non-positive, the value is positive
        infinity.

        Needs one extra array of memory of the size of `prior`.
        """

        # define short variable names
        y = self.data
        r = self.background

        # Compute
        #   sum(x + r - y + y * log(y / (x + r)))
        #   = sum(x - y * log(x + r)) + self.offset()
        # Assume that
        #   x + r > 0

        # sum the result up
        obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset()

        if numpy.isnan(obj):
            # In this case, some element was less than or equal to zero
            return numpy.inf
        else:
            return obj

    @property
    def convex_conj(self):
        """The convex conjugate functional of the KL-functional."""
        return KullbackLeiblerConvexConjugate(self.data, self.background)

    def offset(self):
        """Return sum(r - y + y*log(max(y, 1))), independent of the unknown.

        The value is computed once and cached.
        """
        if self.__offset is None:
            # define short variable names
            y = self.data
            r = self.background

            # BUG FIX: the original used self.domain.element(...) (leftover
            # ODL code); self.domain is never set, so calling offset() --
            # and hence __call__ -- raised AttributeError. Compute the same
            # quantity directly with numpy instead.
            tmp = r - y + y * numpy.log(numpy.maximum(y, 1))

            # sum the result up
            self.__offset = numpy.sum(tmp)

        return self.__offset

#    def __repr__(self):
#        """to be added???"""
#        """Return ``repr(self)``."""
    #        return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
    ##                                            self.domain, self.data,
    #                                            self.background)
+
+
class KullbackLeiblerConvexConjugate(Function):
    r"""The convex conjugate of Kullback-Leibler divergence functional.

    Notes
    -----
    The functional :math:`F^*` with prior :math:`g>0` is given by:

    .. math::
        F^*(x)
        =
        \begin{cases}
            \sum_{i} \left( -g_i \ln(1 - x_i) \right)
            & \text{if } x_i < 1 \forall i
            \\
            +\infty & \text{else}
        \end{cases}

    See Also
    --------
    KullbackLeibler : convex conjugate functional
    """

    def __init__(self, data, background):
        self.data = data
        self.background = background

    def __call__(self, x):
        # define short variable names
        y = self.data
        r = self.background

        tmp = numpy.sum(- x * r - y * numpy.log(1 - x))

        if numpy.isnan(tmp):
            # In this case, some element was larger than or equal to one
            return numpy.inf
        else:
            return tmp

    def prox(self, x, tau, out=None):
        """Proximal operator of the convex conjugate.

        Let y = data, r = background, z = x + tau * r and compute
        0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y)).
        Currently it needs 3 extra copies of memory.
        """
        if out is None:
            out = x.copy()

        # define short variable names
        try:  # this should be standard SIRF/CIL mode
            y = self.data.as_array()
            r = self.background.as_array()
            x = x.as_array()

            # tau may itself be a container; unwrap it to an array/scalar
            try:
                taua = tau.as_array()
            except AttributeError:
                taua = tau

            # BUG FIX: use the unwrapped step size taua here; the original
            # computed taua but then used the raw tau in this expression
            z = x + taua * r

            out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)))

            return out

        except AttributeError:  # e.g. for NumPy
            y = self.data
            r = self.background

            try:
                taua = tau.as_array()
            except AttributeError:
                taua = tau

            # same fix as above: taua, not tau
            z = x + taua * r

            out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))

            return out

    @property
    def convex_conj(self):
        return KullbackLeibler(self.data, self.background)
+
+
def mult(x, y):
    '''Elementwise product of x and y, returned in a clone of y.

    Parameters
    ----------
    x : DataContainer-like or array-like
        Left factor; unwrapped with as_array() when available.
    y : DataContainer-like
        Right factor; must provide clone(), fill() and as_array().

    Returns
    -------
    A clone of y holding x * y elementwise.
    '''
    # narrowed from a bare "except:": only fall back when x has no as_array
    try:
        xa = x.as_array()
    except AttributeError:
        xa = x

    out = y.clone()
    out.fill(xa * y.as_array())

    return out
diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py index a2d0446..6a9057a 100755 --- a/Wrappers/Python/ccpi/processors.py +++ b/Wrappers/Python/ccpi/processors.py @@ -19,6 +19,7 @@ from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\
AcquisitionGeometry, ImageGeometry, ImageData
+from ccpi.reconstruction.parallelbeam import alg as pbalg
import numpy
from scipy import ndimage
@@ -39,8 +40,8 @@ class Normalizer(DataProcessor): def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5):
kwargs = {
- 'flat_field' : None,
- 'dark_field' : None,
+ 'flat_field' : flat_field,
+ 'dark_field' : dark_field,
# very small number. Used when there is a division by zero
'tolerance' : tolerance
}
@@ -53,7 +54,8 @@ class Normalizer(DataProcessor): self.set_dark_field(dark_field)
def check_input(self, dataset):
- if dataset.number_of_dimensions == 3:
+ if dataset.number_of_dimensions == 3 or\
+ dataset.number_of_dimensions == 2:
return True
else:
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
@@ -65,7 +67,7 @@ class Normalizer(DataProcessor): raise ValueError('Dark Field should be 2D')
elif len(numpy.shape(df)) == 2:
self.dark_field = df
- elif issubclass(type(df), DataSet):
+ elif issubclass(type(df), DataContainer):
self.dark_field = self.set_dark_field(df.as_array())
def set_flat_field(self, df):
@@ -74,7 +76,7 @@ class Normalizer(DataProcessor): raise ValueError('Flat Field should be 2D')
elif len(numpy.shape(df)) == 2:
self.flat_field = df
- elif issubclass(type(df), DataSet):
+ elif issubclass(type(df), DataContainer):
self.flat_field = self.set_flat_field(df.as_array())
@staticmethod
@@ -86,22 +88,40 @@ class Normalizer(DataProcessor): c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0
return c
+ @staticmethod
def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark):
    '''Estimate the relative error of the normalised projection.

    For n = (projection - dark) / (flat - dark), first-order error
    propagation gives

        Dn/n = (flat-dark + projection-dark)
               / ((flat-dark)*(projection-dark)) * (Df/f + Dd/d)
    '''
    signal = projection - dark
    span = flat - dark
    relative_flat_error = delta_flat / flat
    relative_dark_error = delta_dark / dark
    combined = relative_flat_error + relative_dark_error
    return (span + signal) / (span * signal) * combined
+
def process(self):
projections = self.get_input()
dark = self.dark_field
flat = self.flat_field
- if not (projections.shape[1:] == dark.shape and \
- projections.shape[1:] == flat.shape):
- raise ValueError('Flats/Dark and projections size do not match.')
-
-
- a = numpy.asarray(
- [ Normalizer.normalize_projection(
- projection, flat, dark, self.tolerance) \
- for projection in projections.as_array() ]
- )
+ if projections.number_of_dimensions == 3:
+ if not (projections.shape[1:] == dark.shape and \
+ projections.shape[1:] == flat.shape):
+ raise ValueError('Flats/Dark and projections size do not match.')
+
+
+ a = numpy.asarray(
+ [ Normalizer.normalize_projection(
+ projection, flat, dark, self.tolerance) \
+ for projection in projections.as_array() ]
+ )
+ elif projections.number_of_dimensions == 2:
+ a = Normalizer.normalize_projection(projections.as_array(),
+ flat, dark, self.tolerance)
y = type(projections)( a , True,
dimension_labels=projections.dimension_labels,
geometry=projections.geometry)
@@ -388,3 +408,107 @@ class CenterOfRotationFinder(DataProcessor): return cor
+
+class AcquisitionDataPadder(DataProcessor):
+ '''Pad an AcquisitionData horizontally to centre the rotation axis
+
+ This processor reads in an AcquisitionData and pads it in the
+ horizontal (and, if needed, vertical) direction based on the given
+ centre of rotation, filling the new region with pad_value.
+
+ Input: AcquisitionData
+ Parameter: center_of_rotation, acquisition_geometry and
+ pad_value used to fill the padded region
+ Output: AcquisitionData
+ '''
+
+ def __init__(self,
+ center_of_rotation = None,
+ acquisition_geometry = None,
+ pad_value = 1e-5):
+ kwargs = {
+ 'acquisition_geometry' : acquisition_geometry,
+ 'center_of_rotation' : center_of_rotation,
+ 'pad_value' : pad_value
+ }
+
+ super(AcquisitionDataPadder, self).__init__(**kwargs)
+
+ def check_input(self, dataset):
+ if self.acquisition_geometry is None:
+ self.acquisition_geometry = dataset.geometry
+ if dataset.number_of_dimensions == 3:
+ return True
+ else:
+ raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+ .format(dataset.number_of_dimensions))
+
+ def process(self):
+ projections = self.get_input()
+ w = projections.get_dimension_size('horizontal')
+ delta = w - 2 * self.center_of_rotation
+
+ padded_width = int (
+ numpy.ceil(abs(delta)) + w
+ )
+ delta_pix = padded_width - w
+
+ voxel_per_pixel = 1
+ geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
+ self.acquisition_geometry.angles,
+ self.center_of_rotation,
+ voxel_per_pixel )
+
+ padded_geometry = self.acquisition_geometry.clone()
+
+ padded_geometry.pixel_num_h = geom['n_h']
+ padded_geometry.pixel_num_v = geom['n_v']
+
+ delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
+ delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
+
+ if delta_pix_h == 0:
+ delta_pix_h = delta_pix
+ padded_geometry.pixel_num_h = padded_width
+ #initialize a new AcquisitionData with values close to 0
+ out = AcquisitionData(geometry=padded_geometry)
+ out = out + self.pad_value
+
+
+ #pad in the horizontal-vertical plane -> slice on angles
+ if delta > 0:
+ #pad left of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ else:
+ #pad right of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(0, w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ #cleaned = eval(command)
+ exec(command)
+ return out
\ No newline at end of file diff --git a/Wrappers/Python/conda-recipe/bld.bat b/Wrappers/Python/conda-recipe/bld.bat index 4b4c7f5..97a4e62 100644 --- a/Wrappers/Python/conda-recipe/bld.bat +++ b/Wrappers/Python/conda-recipe/bld.bat @@ -1,8 +1,8 @@ + IF NOT DEFINED CIL_VERSION ( ECHO CIL_VERSION Not Defined. exit 1 ) - ROBOCOPY /E "%RECIPE_DIR%\.." "%SRC_DIR%" %PYTHON% setup.py build_py diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh index 5dd97b0..43e85d5 100644 --- a/Wrappers/Python/conda-recipe/build.sh +++ b/Wrappers/Python/conda-recipe/build.sh @@ -1,7 +1,7 @@ if [ -z "$CIL_VERSION" ]; then echo "Need to set CIL_VERSION" exit 1 -fi +fi mkdir ${SRC_DIR}/ccpi cp -r "${RECIPE_DIR}/../../../" ${SRC_DIR}/ccpi diff --git a/Wrappers/Python/conda-recipe/conda_build_config.yaml b/Wrappers/Python/conda-recipe/conda_build_config.yaml new file mode 100644 index 0000000..96a211f --- /dev/null +++ b/Wrappers/Python/conda-recipe/conda_build_config.yaml @@ -0,0 +1,8 @@ +python: + - 2.7 # [not win] + - 3.5 + - 3.6 +numpy: + # TODO investigage, as it doesn't currently build with cvxp, requires >1.14 + #- 1.12 + - 1.15 diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml index f72c980..1b7cae6 100644 --- a/Wrappers/Python/conda-recipe/meta.yaml +++ b/Wrappers/Python/conda-recipe/meta.yaml @@ -2,24 +2,31 @@ package: name: ccpi-framework version: {{ environ['CIL_VERSION'] }} - build: preserve_egg_dir: False script_env: - CIL_VERSION -# number: 0 + #number: 0 +test: + requires: + - python-wget + - cvxpy # [not win] + requirements: build: - python - - numpy + - numpy {{ numpy }} - setuptools run: + - {{ pin_compatible('numpy', max_pin='x.x') }} - python - numpy + - scipy - matplotlib - + - h5py + about: home: http://www.ccpi.ac.uk license: Apache 2.0 License diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py index 6a20d05..5bf6538 100755 --- 
a/Wrappers/Python/conda-recipe/run_test.py +++ b/Wrappers/Python/conda-recipe/run_test.py @@ -1,160 +1,883 @@ import unittest
import numpy
-from ccpi.framework import DataContainer, ImageData, AcquisitionData, \
- ImageGeometry, AcquisitionGeometry
+import numpy as np
+from ccpi.framework import DataContainer
+from ccpi.framework import ImageData
+from ccpi.framework import AcquisitionData
+from ccpi.framework import ImageGeometry
+from ccpi.framework import AcquisitionGeometry
import sys
+from timeit import default_timer as timer
+from ccpi.optimisation.algs import FISTA
+from ccpi.optimisation.algs import FBPD
+from ccpi.optimisation.funcs import Norm2sq
+from ccpi.optimisation.funcs import ZeroFun
+from ccpi.optimisation.funcs import Norm1
+from ccpi.optimisation.funcs import TV2D
+from ccpi.optimisation.funcs import Norm2
+
+from ccpi.optimisation.ops import LinearOperatorMatrix
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.ops import Identity
+
+
+import numpy.testing
+import wget
+import os
+from ccpi.io.reader import NexusReader
+
+
+try:
+ from cvxpy import *
+ cvx_not_installable = False
+except ImportError:
+ cvx_not_installable = True
+
+
+def aid(x):
+ # This function returns the memory
+ # block address of an array.
+ return x.__array_interface__['data'][0]
+
+
+def dt(steps):
+ return steps[-1] - steps[-2]
+
class TestDataContainer(unittest.TestCase):
-
+ def create_simple_ImageData(self):
+ N = 64
+ ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
+ Phantom = ImageData(geometry=ig)
+
+ x = Phantom.as_array()
+
+ x[int(round(N/4)):int(round(3*N/4)),
+ int(round(N/4)):int(round(3*N/4))] = 0.5
+ x[int(round(N/8)):int(round(7*N/8)),
+ int(round(3*N/8)):int(round(5*N/8))] = 1
+
+ return (ig, Phantom)
+
+ def create_DataContainer(self, X,Y,Z, value=1):
+ steps = [timer()]
+ a = value * numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ return ds
+
def test_creation_nocopy(self):
- shape = (2,3,4,5)
+ shape = (2, 3, 4, 5)
size = shape[0]
for i in range(1, len(shape)):
size = size * shape[i]
#print("a refcount " , sys.getrefcount(a))
- a = numpy.asarray([i for i in range( size )])
+ a = numpy.asarray([i for i in range(size)])
#print("a refcount " , sys.getrefcount(a))
a = numpy.reshape(a, shape)
#print("a refcount " , sys.getrefcount(a))
- ds = DataContainer(a, False, ['X', 'Y','Z' ,'W'])
+ ds = DataContainer(a, False, ['X', 'Y', 'Z', 'W'])
#print("a refcount " , sys.getrefcount(a))
- self.assertEqual(sys.getrefcount(a),3)
- self.assertEqual(ds.dimension_labels , {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'})
-
+ self.assertEqual(sys.getrefcount(a), 3)
+ self.assertEqual(ds.dimension_labels, {0: 'X', 1: 'Y', 2: 'Z', 3: 'W'})
+
+ def testGb_creation_nocopy(self):
+ X, Y, Z = 512, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print("test clone")
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ #print("a refcount " , sys.getrefcount(a))
+ self.assertEqual(sys.getrefcount(a), 3)
+ ds1 = ds.copy()
+ self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array()))
+ ds1 = ds.clone()
+ self.assertNotEqual(aid(ds.as_array()), aid(ds1.as_array()))
+
+ def testInlineAlgebra(self):
+ print("Test Inline Algebra")
+ X, Y, Z = 1024, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print(t0)
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ #ds.__iadd__( 2 )
+ ds += 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 3.)
+ #ds.__isub__( 2 )
+ ds -= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+ #ds.__imul__( 2 )
+ ds *= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 2.)
+ #ds.__idiv__( 2 )
+ ds /= 2
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+
+ ds1 = ds.copy()
+ #ds1.__iadd__( 1 )
+ ds1 += 1
+ #ds.__iadd__( ds1 )
+ ds += ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 3.)
+ #ds.__isub__( ds1 )
+ ds -= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+ #ds.__imul__( ds1 )
+ ds *= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 2.)
+ #ds.__idiv__( ds1 )
+ ds /= ds1
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+
+ def test_unary_operations(self):
+ print("Test unary operations")
+ X, Y, Z = 1024, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = -numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+ print(t0)
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+
+ ds.sign(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], -1.)
+
+ ds.abs(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+
+ ds.__imul__(2)
+ ds.sqrt(out=ds)
+ steps.append(timer())
+ print(dt(steps))
+ self.assertEqual(ds.as_array()[0][0][0],
+ numpy.sqrt(2., dtype='float32'))
+
+ def test_binary_operations(self):
+ self.binary_add()
+ self.binary_subtract()
+ self.binary_multiply()
+ self.binary_divide()
+
+ def binary_add(self):
+ print("Test binary add")
+ X, Y, Z = 512, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.add(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.add(ds1, out=ds)", dt(steps))
+ steps.append(timer())
+ ds2 = ds.add(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.add(ds1)", dt(steps))
+
+ self.assertLess(t1, t2)
+ self.assertEqual(ds.as_array()[0][0][0], 2.)
+
+ ds0 = ds
+ ds0.add(2, out=ds0)
+ steps.append(timer())
+ print("ds0.add(2,out=ds0)", dt(steps), 3, ds0.as_array()[0][0][0])
+ self.assertEqual(4., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.add(2)
+ steps.append(timer())
+ print("ds3 = ds0.add(2)", dt(steps), 5, ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1, dt2)
+
+ def binary_subtract(self):
+ print("Test binary subtract")
+ X, Y, Z = 512, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.subtract(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.subtract(ds1, out=ds)", dt(steps))
+ self.assertEqual(0., ds.as_array()[0][0][0])
+
+ steps.append(timer())
+ ds2 = ds.subtract(ds1)
+ self.assertEqual(-1., ds2.as_array()[0][0][0])
+
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.subtract(ds1)", dt(steps))
+
+ self.assertLess(t1, t2)
+
+ del ds1
+ ds0 = ds.copy()
+ steps.append(timer())
+ ds0.subtract(2, out=ds0)
+ #ds0.__isub__( 2 )
+ steps.append(timer())
+ print("ds0.subtract(2,out=ds0)", dt(
+ steps), -2., ds0.as_array()[0][0][0])
+ self.assertEqual(-2., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.subtract(2)
+ steps.append(timer())
+ print("ds3 = ds0.subtract(2)", dt(steps), 0., ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1, dt2)
+ self.assertEqual(-2., ds0.as_array()[0][0][0])
+ self.assertEqual(-4., ds3.as_array()[0][0][0])
+
+ def binary_multiply(self):
+ print("Test binary multiply")
+ X, Y, Z = 1024, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.multiply(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.multiply(ds1, out=ds)", dt(steps))
+ steps.append(timer())
+ ds2 = ds.multiply(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.multiply(ds1)", dt(steps))
+
+ self.assertLess(t1, t2)
+
+ ds0 = ds
+ ds0.multiply(2, out=ds0)
+ steps.append(timer())
+ print("ds0.multiply(2,out=ds0)", dt(
+ steps), 2., ds0.as_array()[0][0][0])
+ self.assertEqual(2., ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.multiply(2)
+ steps.append(timer())
+ print("ds3 = ds0.multiply(2)", dt(steps), 4., ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1, dt2)
+ self.assertEqual(4., ds3.as_array()[0][0][0])
+ self.assertEqual(2., ds.as_array()[0][0][0])
+
+ def binary_divide(self):
+ print("Test binary divide")
+ X, Y, Z = 1024, 512, 512
+ X, Y, Z = 256, 512, 512
+ steps = [timer()]
+ a = numpy.ones((X, Y, Z), dtype='float32')
+ steps.append(timer())
+ t0 = dt(steps)
+
+ #print("a refcount " , sys.getrefcount(a))
+ ds = DataContainer(a, False, ['X', 'Y', 'Z'])
+ ds1 = ds.copy()
+
+ steps.append(timer())
+ ds.divide(ds1, out=ds)
+ steps.append(timer())
+ t1 = dt(steps)
+ print("ds.divide(ds1, out=ds)", dt(steps))
+ steps.append(timer())
+ ds2 = ds.divide(ds1)
+ steps.append(timer())
+ t2 = dt(steps)
+ print("ds2 = ds.divide(ds1)", dt(steps))
+
+ self.assertLess(t1, t2)
+ self.assertEqual(ds.as_array()[0][0][0], 1.)
+
+ ds0 = ds
+ ds0.divide(2, out=ds0)
+ steps.append(timer())
+ print("ds0.divide(2,out=ds0)", dt(steps), 0.5, ds0.as_array()[0][0][0])
+ self.assertEqual(0.5, ds0.as_array()[0][0][0])
+
+ dt1 = dt(steps)
+ ds3 = ds0.divide(2)
+ steps.append(timer())
+ print("ds3 = ds0.divide(2)", dt(steps), 0.25, ds3.as_array()[0][0][0])
+ dt2 = dt(steps)
+ self.assertLess(dt1, dt2)
+ self.assertEqual(.25, ds3.as_array()[0][0][0])
+ self.assertEqual(.5, ds.as_array()[0][0][0])
+
def test_creation_copy(self):
- shape = (2,3,4,5)
+ shape = (2, 3, 4, 5)
size = shape[0]
for i in range(1, len(shape)):
size = size * shape[i]
#print("a refcount " , sys.getrefcount(a))
- a = numpy.asarray([i for i in range( size )])
+ a = numpy.asarray([i for i in range(size)])
#print("a refcount " , sys.getrefcount(a))
a = numpy.reshape(a, shape)
#print("a refcount " , sys.getrefcount(a))
- ds = DataContainer(a, True, ['X', 'Y','Z' ,'W'])
+ ds = DataContainer(a, True, ['X', 'Y', 'Z', 'W'])
#print("a refcount " , sys.getrefcount(a))
- self.assertEqual(sys.getrefcount(a),2)
-
+ self.assertEqual(sys.getrefcount(a), 2)
+
def test_subset(self):
- shape = (4,3,2)
+ shape = (4, 3, 2)
a = [i for i in range(2*3*4)]
a = numpy.asarray(a)
a = numpy.reshape(a, shape)
- ds = DataContainer(a, True, ['X', 'Y','Z'])
+ ds = DataContainer(a, True, ['X', 'Y', 'Z'])
sub = ds.subset(['X'])
res = True
try:
numpy.testing.assert_array_equal(sub.as_array(),
- numpy.asarray([0,6,12,18]))
+ numpy.asarray([0, 6, 12, 18]))
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
-
+
sub = ds.subset(['X'], Y=2, Z=0)
res = True
try:
numpy.testing.assert_array_equal(sub.as_array(),
- numpy.asarray([4,10,16,22]))
+ numpy.asarray([4, 10, 16, 22]))
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
-
-
+
sub = ds.subset(['Y'])
try:
numpy.testing.assert_array_equal(
- sub.as_array(), numpy.asarray([0,2,4]))
+ sub.as_array(), numpy.asarray([0, 2, 4]))
res = True
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
-
-
+
sub = ds.subset(['Z'])
try:
numpy.testing.assert_array_equal(
- sub.as_array(), numpy.asarray([0,1]))
+ sub.as_array(), numpy.asarray([0, 1]))
res = True
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
sub = ds.subset(['Z'], X=1, Y=2)
try:
numpy.testing.assert_array_equal(
- sub.as_array(), numpy.asarray([10,11]))
+ sub.as_array(), numpy.asarray([10, 11]))
res = True
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
-
+
print(a)
- sub = ds.subset(['X', 'Y'] , Z=1)
+ sub = ds.subset(['X', 'Y'], Z=1)
res = True
try:
numpy.testing.assert_array_equal(sub.as_array(),
- numpy.asarray([[ 1, 3, 5],
- [ 7, 9, 11],
- [13, 15, 17],
- [19, 21, 23]]))
+ numpy.asarray([[1, 3, 5],
+ [7, 9, 11],
+ [13, 15, 17],
+ [19, 21, 23]]))
except AssertionError as err:
res = False
- print (err)
+ print(err)
self.assertTrue(res)
-
+
def test_ImageData(self):
# create ImageData from geometry
vgeometry = ImageGeometry(voxel_num_x=4, voxel_num_y=3, channels=2)
vol = ImageData(geometry=vgeometry)
- self.assertEqual(vol.shape , (2,3,4))
-
+ self.assertEqual(vol.shape, (2, 3, 4))
+
vol1 = vol + 1
self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape))
-
+
vol1 = vol - 1
self.assertNumpyArrayEqual(vol1.as_array(), -numpy.ones(vol.shape))
-
+
vol1 = 2 * (vol + 1)
self.assertNumpyArrayEqual(vol1.as_array(), 2 * numpy.ones(vol.shape))
-
- vol1 = (vol + 1) / 2
- self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) / 2 )
-
+
+ vol1 = (vol + 1) / 2
+ self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) / 2)
+
vol1 = vol + 1
- self.assertEqual(vol1.sum() , 2*3*4)
- vol1 = ( vol + 2 ) ** 2
- self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) * 4 )
-
-
-
+ self.assertEqual(vol1.sum(), 2*3*4)
+ vol1 = (vol + 2) ** 2
+ self.assertNumpyArrayEqual(vol1.as_array(), numpy.ones(vol.shape) * 4)
+
+ self.assertEqual(vol.number_of_dimensions, 3)
+
def test_AcquisitionData(self):
- sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10),
- geom_type='parallel', pixel_num_v=3,
- pixel_num_h=5 , channels=2)
+ sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=10),
+ geom_type='parallel', pixel_num_v=3,
+ pixel_num_h=5, channels=2)
sino = AcquisitionData(geometry=sgeometry)
- self.assertEqual(sino.shape , (2,10,3,5))
-
-
+ self.assertEqual(sino.shape, (2, 10, 3, 5))
+
+ def assertNumpyArrayEqual(self, first, second):
+ res = True
+ try:
+ numpy.testing.assert_array_equal(first, second)
+ except AssertionError as err:
+ res = False
+ print(err)
+ self.assertTrue(res)
+
+ def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
+ res = True
+ try:
+ numpy.testing.assert_array_almost_equal(first, second, decimal)
+ except AssertionError as err:
+ res = False
+ print(err)
+ self.assertTrue(res)
+ def test_DataContainerChaining(self):
+ dc = self.create_DataContainer(256,256,256,1)
+
+ dc.add(9,out=dc)\
+ .subtract(1,out=dc)
+ self.assertEqual(1+9-1,dc.as_array().flatten()[0])
+
+
+
+
+class TestAlgorithms(unittest.TestCase):
def assertNumpyArrayEqual(self, first, second):
res = True
try:
numpy.testing.assert_array_equal(first, second)
except AssertionError as err:
res = False
- print (err)
+ print(err)
+ self.assertTrue(res)
+
+ def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):
+ res = True
+ try:
+ numpy.testing.assert_array_almost_equal(first, second, decimal)
+ except AssertionError as err:
+ res = False
+ print(err)
self.assertTrue(res)
+
+ def test_FISTA_cvx(self):
+ if not cvx_not_installable:
+ # Problem data.
+ m = 30
+ n = 20
+ np.random.seed(1)
+ Amat = np.random.randn(m, n)
+ A = LinearOperatorMatrix(Amat)
+ bmat = np.random.randn(m)
+ bmat.shape = (bmat.shape[0], 1)
+
+ # A = Identity()
+ # Change n to equal to m.
+
+ b = DataContainer(bmat)
+
+ # Regularization parameter
+ lam = 10
+ opt = {'memopt': True}
+ # Create object instances with the test data A and b.
+ f = Norm2sq(A, b, c=0.5, memopt=True)
+ g0 = ZeroFun()
+
+ # Initial guess
+ x_init = DataContainer(np.zeros((n, 1)))
+
+ f.grad(x_init)
+
+ # Run FISTA for least squares plus zero function.
+ x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0, opt=opt)
+
+ # Print solution and final objective/criterion value for comparison
+ print("FISTA least squares plus zero function solution and objective value:")
+ print(x_fista0.array)
+ print(criter0[-1])
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ x0 = Variable(n)
+ objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]))
+ prob0 = Problem(objective0)
+
+ # The optimal objective is returned by prob.solve().
+ result0 = prob0.solve(verbose=False, solver=SCS, eps=1e-9)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus zero function solution and objective value:")
+ print(x0.value)
+ print(objective0.value)
+ self.assertNumpyArrayAlmostEqual(
+ numpy.squeeze(x_fista0.array), x0.value, 6)
+ else:
+ self.assertTrue(cvx_not_installable)
+
+ def test_FISTA_Norm1_cvx(self):
+ if not cvx_not_installable:
+ opt = {'memopt': True}
+ # Problem data.
+ m = 30
+ n = 20
+ np.random.seed(1)
+ Amat = np.random.randn(m, n)
+ A = LinearOperatorMatrix(Amat)
+ bmat = np.random.randn(m)
+ bmat.shape = (bmat.shape[0], 1)
+
+ # A = Identity()
+ # Change n to equal to m.
+
+ b = DataContainer(bmat)
+
+ # Regularization parameter
+ lam = 10
+ opt = {'memopt': True}
+ # Create object instances with the test data A and b.
+ f = Norm2sq(A, b, c=0.5, memopt=True)
+ g0 = ZeroFun()
+
+ # Initial guess
+ x_init = DataContainer(np.zeros((n, 1)))
+
+ # Create 1-norm object instance
+ g1 = Norm1(lam)
+
+ g1(x_init)
+ g1.prox(x_init, 0.02)
+
+ # Combine with least squares and solve using generic FISTA implementation
+ x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1, opt=opt)
+
+ # Print for comparison
+ print("FISTA least squares plus 1-norm solution and objective value:")
+ print(x_fista1.as_array().squeeze())
+ print(criter1[-1])
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ x1 = Variable(n)
+ objective1 = Minimize(
+ 0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1))
+ prob1 = Problem(objective1)
+
+ # The optimal objective is returned by prob.solve().
+ result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus 1-norm solution and objective value:")
+ print(x1.value)
+ print(objective1.value)
+
+ self.assertNumpyArrayAlmostEqual(
+ numpy.squeeze(x_fista1.array), x1.value, 6)
+ else:
+ self.assertTrue(cvx_not_installable)
+
+ def test_FBPD_Norm1_cvx(self):
+ if not cvx_not_installable:
+ opt = {'memopt': True}
+ # Problem data.
+ m = 30
+ n = 20
+ np.random.seed(1)
+ Amat = np.random.randn(m, n)
+ A = LinearOperatorMatrix(Amat)
+ bmat = np.random.randn(m)
+ bmat.shape = (bmat.shape[0], 1)
+
+ # A = Identity()
+ # Change n to equal to m.
+
+ b = DataContainer(bmat)
+
+ # Regularization parameter
+ lam = 10
+ opt = {'memopt': True}
+ # Create object instances with the test data A and b.
+ f = Norm2sq(A, b, c=0.5, memopt=True)
+ g0 = ZeroFun()
+
+ # Initial guess
+ x_init = DataContainer(np.zeros((n, 1)))
+
+ # Create 1-norm object instance
+ g1 = Norm1(lam)
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ x1 = Variable(n)
+ objective1 = Minimize(
+ 0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1, 1))
+ prob1 = Problem(objective1)
+
+ # The optimal objective is returned by prob.solve().
+ result1 = prob1.solve(verbose=False, solver=SCS, eps=1e-9)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus 1-norm solution and objective value:")
+ print(x1.value)
+ print(objective1.value)
+
+ # Now try another algorithm FBPD for same problem:
+ x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,
+ Identity(), None, f, g1)
+ print(x_fbpd1)
+ print(criterfbpd1[-1])
+
+ self.assertNumpyArrayAlmostEqual(
+ numpy.squeeze(x_fbpd1.array), x1.value, 6)
+ else:
+ self.assertTrue(cvx_not_installable)
+ # Plot criterion curve to see both FISTA and FBPD converge to same value.
+ # Note that FISTA is very efficient for 1-norm minimization so it beats
+ # FBPD in this test by a lot. But FBPD can handle a larger class of problems
+ # than FISTA can.
+
+ # Now try 1-norm and TV denoising with FBPD, first 1-norm.
+
+ # Set up phantom size NxN by creating ImageGeometry, initialising the
+ # ImageData object with this geometry and empty array and finally put some
+ # data into its array, and display as image.
+ def test_FISTA_denoise_cvx(self):
+ if not cvx_not_installable:
+ opt = {'memopt': True}
+ N = 64
+ ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
+ Phantom = ImageData(geometry=ig)
+
+ x = Phantom.as_array()
+
+ x[int(round(N/4)):int(round(3*N/4)),
+ int(round(N/4)):int(round(3*N/4))] = 0.5
+ x[int(round(N/8)):int(round(7*N/8)),
+ int(round(3*N/8)):int(round(5*N/8))] = 1
+
+ # Identity operator for denoising
+ I = TomoIdentity(ig)
+
+ # Data and add noise
+ y = I.direct(Phantom)
+ y.array = y.array + 0.1*np.random.randn(N, N)
+
+ # Data fidelity term
+ f_denoise = Norm2sq(I, y, c=0.5, memopt=True)
+
+ # 1-norm regulariser
+ lam1_denoise = 1.0
+ g1_denoise = Norm1(lam1_denoise)
+
+ # Initial guess
+ x_init_denoise = ImageData(np.zeros((N, N)))
+
+ # Combine with least squares and solve using generic FISTA implementation
+ x_fista1_denoise, it1_denoise, timing1_denoise, \
+ criter1_denoise = \
+ FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
+
+ print(x_fista1_denoise)
+ print(criter1_denoise[-1])
+
+ # Now denoise LS + 1-norm with FBPD
+ x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\
+ criterfbpd1_denoise = \
+ FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
+ print(x_fbpd1_denoise)
+ print(criterfbpd1_denoise[-1])
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ x1_denoise = Variable(N**2, 1)
+ objective1_denoise = Minimize(
+ 0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise, 1))
+ prob1_denoise = Problem(objective1_denoise)
+
+ # The optimal objective is returned by prob.solve().
+ result1_denoise = prob1_denoise.solve(
+ verbose=False, solver=SCS, eps=1e-12)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus 1-norm solution and objective value:")
+ print(x1_denoise.value)
+ print(objective1_denoise.value)
+ self.assertNumpyArrayAlmostEqual(
+ x_fista1_denoise.array.flatten(), x1_denoise.value, 5)
+
+ self.assertNumpyArrayAlmostEqual(
+ x_fbpd1_denoise.array.flatten(), x1_denoise.value, 5)
+ x1_cvx = x1_denoise.value
+ x1_cvx.shape = (N, N)
+
+ # Now TV with FBPD
+ lam_tv = 0.1
+ gtv = TV2D(lam_tv)
+ gtv(gtv.op.direct(x_init_denoise))
+
+ opt_tv = {'tol': 1e-4, 'iter': 10000}
+
+ x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\
+ criterfbpdtv_denoise = \
+ FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv, opt=opt_tv)
+ print(x_fbpdtv_denoise)
+ print(criterfbpdtv_denoise[-1])
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ xtv_denoise = Variable((N, N))
+ objectivetv_denoise = Minimize(
+ 0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise))
+ probtv_denoise = Problem(objectivetv_denoise)
+
+ # The optimal objective is returned by prob.solve().
+ resulttv_denoise = probtv_denoise.solve(
+ verbose=False, solver=SCS, eps=1e-12)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus 1-norm solution and objective value:")
+ print(xtv_denoise.value)
+ print(objectivetv_denoise.value)
+
+ self.assertNumpyArrayAlmostEqual(
+ x_fbpdtv_denoise.as_array(), xtv_denoise.value, 1)
+
+ else:
+ self.assertTrue(cvx_not_installable)
+
+
+class TestFunction(unittest.TestCase):
+ def assertNumpyArrayEqual(self, first, second):
+ res = True
+ try:
+ numpy.testing.assert_array_equal(first, second)
+ except AssertionError as err:
+ res = False
+ print(err)
+ self.assertTrue(res)
+
+ def create_simple_ImageData(self):
+ N = 64
+ ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
+ Phantom = ImageData(geometry=ig)
+
+ x = Phantom.as_array()
+
+ x[int(round(N/4)):int(round(3*N/4)),
+ int(round(N/4)):int(round(3*N/4))] = 0.5
+ x[int(round(N/8)):int(round(7*N/8)),
+ int(round(3*N/8)):int(round(5*N/8))] = 1
+
+ return (ig, Phantom)
+
+ def _test_Norm2(self):
+ print("test Norm2")
+ opt = {'memopt': True}
+ ig, Phantom = self.create_simple_ImageData()
+ x = Phantom.as_array()
+ print(Phantom)
+ print(Phantom.as_array())
+
+ norm2 = Norm2()
+ v1 = norm2(x)
+ v2 = norm2(Phantom)
+ self.assertEqual(v1, v2)
+
+ p1 = norm2.prox(Phantom, 1)
+ print(p1)
+ p2 = norm2.prox(x, 1)
+ self.assertNumpyArrayEqual(p1.as_array(), p2)
+
+ p3 = norm2.proximal(Phantom, 1)
+ p4 = norm2.proximal(x, 1)
+ self.assertNumpyArrayEqual(p3.as_array(), p2)
+ self.assertNumpyArrayEqual(p3.as_array(), p4)
+
+ out = Phantom.copy()
+ p5 = norm2.proximal(Phantom, 1, out=out)
+ self.assertEqual(id(p5), id(out))
+ self.assertNumpyArrayEqual(p5.as_array(), p3.as_array())
# =============================================================================
# def test_upper(self):
# self.assertEqual('foo'.upper(), 'FOO')
-#
+#
# def test_isupper(self):
# self.assertTrue('FOO'.isupper())
# self.assertFalse('Foo'.isupper())
-#
+#
# def test_split(self):
# s = 'hello world'
# self.assertEqual(s.split(), ['hello', 'world'])
@@ -163,5 +886,84 @@ class TestDataContainer(unittest.TestCase): # s.split(2)
# =============================================================================
+class TestNexusReader(unittest.TestCase):
+
+ def setUp(self):
+ wget.download('https://github.com/DiamondLightSource/Savu/raw/master/test_data/data/24737_fd.nxs')
+ self.filename = '24737_fd.nxs'
+
+ def tearDown(self):
+ os.remove(self.filename)
+
+
+ def testGetDimensions(self):
+ nr = NexusReader(self.filename)
+ self.assertEqual(nr.get_sinogram_dimensions(), (135, 91, 160), "Sinogram dimensions are not correct")
+
+ def testGetProjectionDimensions(self):
+ nr = NexusReader(self.filename)
+ self.assertEqual(nr.get_projection_dimensions(), (91,135,160), "Projection dimensions are not correct")
+
+ def testLoadProjectionWithoutDimensions(self):
+ nr = NexusReader(self.filename)
+ projections = nr.load_projection()
+ self.assertEqual(projections.shape, (91,135,160), "Loaded projection data dimensions are not correct")
+
+ def testLoadProjectionWithDimensions(self):
+ nr = NexusReader(self.filename)
+ projections = nr.load_projection((slice(0,1), slice(0,135), slice(0,160)))
+ self.assertEqual(projections.shape, (1,135,160), "Loaded projection data dimensions are not correct")
+
+ def testLoadProjectionCompareSingle(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(0,1), slice(0,135), slice(0,160)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[0:1,:,:])
+
+ def testLoadProjectionCompareMulti(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(0,3), slice(0,135), slice(0,160)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[0:3,:,:])
+
+ def testLoadProjectionCompareRandom(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(1,8), slice(5,10), slice(8,20)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[1:8,5:10,8:20])
+
+ def testLoadProjectionCompareFull(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[:,:,:])
+
+ def testLoadFlatCompareFull(self):
+ nr = NexusReader(self.filename)
+ flats_full = nr.load_flat()
+ flats_part = nr.load_flat((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(flats_part, flats_full[:,:,:])
+
+ def testLoadDarkCompareFull(self):
+ nr = NexusReader(self.filename)
+ darks_full = nr.load_dark()
+ darks_part = nr.load_dark((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(darks_part, darks_full[:,:,:])
+
+ def testProjectionAngles(self):
+ nr = NexusReader(self.filename)
+ angles = nr.get_projection_angles()
+ self.assertEqual(angles.shape, (91,), "Loaded projection number of angles are not correct")
+
+ def test_get_acquisition_data_subset(self):
+ nr = NexusReader(self.filename)
+ key = nr.get_image_keys()
+ sl = nr.get_acquisition_data_subset(0,10)
+ data = nr.get_acquisition_data().subset(['vertical','horizontal'])
+
+ self.assertTrue(sl.shape , (10,data.shape[1]))
+
+
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file + unittest.main()
+
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index b4fabbd..b584344 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -23,11 +23,7 @@ import os import sys - -cil_version=os.environ['CIL_VERSION'] -if cil_version == '': - print("Please set the environmental variable CIL_VERSION") - sys.exit(1) +cil_version='0.11.3' setup( name="ccpi-framework", diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py index cbfe50e..27b1c97 100644 --- a/Wrappers/Python/wip/demo_compare_cvx.py +++ b/Wrappers/Python/wip/demo_compare_cvx.py @@ -1,9 +1,11 @@ from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer from ccpi.optimisation.algs import FISTA, FBPD, CGLS -from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D +from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D, Norm2 -from ccpi.optimisation.ops import LinearOperatorMatrix, Identity +from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity +from ccpi.optimisation.ops import Identity +from ccpi.optimisation.ops import FiniteDiff2D # Requires CVXPY, see http://www.cvxpy.org/ # CVXPY can be installed in anaconda using @@ -33,9 +35,9 @@ b = DataContainer(bmat) # Regularization parameter lam = 10 - +opt = {'memopt':True} # Create object instances with the test data A and b. -f = Norm2sq(A,b,c=0.5) +f = Norm2sq(A,b,c=0.5, memopt=True) g0 = ZeroFun() # Initial guess @@ -44,7 +46,7 @@ x_init = DataContainer(np.zeros((n,1))) f.grad(x_init) # Run FISTA for least squares plus zero function. -x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0) +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0 , opt=opt) # Print solution and final objective/criterion value for comparison print("FISTA least squares plus zero function solution and objective value:") @@ -56,7 +58,7 @@ if use_cvxpy: # Construct the problem. 
x0 = Variable(n) - objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat) ) + objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat.T[0]) ) prob0 = Problem(objective0) # The optimal objective is returned by prob.solve(). @@ -80,10 +82,24 @@ plt.show() g1 = Norm1(lam) g1(x_init) -g1.prox(x_init,0.02) - +x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1))) +x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1))) +v = g1.prox(x_rand,0.02) +#vv = g1.prox(x_rand2,0.02) +vv = v.copy() +vv *= 0 +print (">>>>>>>>>>vv" , vv.as_array()) +vv.fill(v) +print (">>>>>>>>>>fill" , vv.as_array()) +g1.proximal(x_rand, 0.02, out=vv) +print (">>>>>>>>>>v" , v.as_array()) +print (">>>>>>>>>>gradient" , vv.as_array()) + +print (">>>>>>>>>>" , (v-vv).as_array()) +import sys +#sys.exit(0) # Combine with least squares and solve using generic FISTA implementation -x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1) +x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt) # Print for comparison print("FISTA least squares plus 1-norm solution and objective value:") @@ -95,7 +111,7 @@ if use_cvxpy: # Construct the problem. x1 = Variable(n) - objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat) + lam*norm(x1,1) ) + objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) ) prob1 = Problem(objective1) # The optimal objective is returned by prob.solve(). 
@@ -108,7 +124,7 @@ if use_cvxpy: print(objective1.value) # Now try another algorithm FBPD for same problem: -x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) +x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1) print(x_fbpd1) print(criterfbpd1[-1]) @@ -147,7 +163,7 @@ plt.title('Phantom image') plt.show() # Identity operator for denoising -I = Identity() +I = TomoIdentity(ig) # Data and add noise y = I.direct(Phantom) @@ -157,8 +173,10 @@ plt.imshow(y.array) plt.title('Noisy image') plt.show() + +################### # Data fidelity term -f_denoise = Norm2sq(I,y,c=0.5) +f_denoise = Norm2sq(I,y,c=0.5,memopt=True) # 1-norm regulariser lam1_denoise = 1.0 @@ -168,23 +186,24 @@ g1_denoise = Norm1(lam1_denoise) x_init_denoise = ImageData(np.zeros((N,N))) # Combine with least squares and solve using generic FISTA implementation -x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise) +x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) print(x_fista1_denoise) print(criter1_denoise[-1]) -plt.imshow(x_fista1_denoise.as_array()) -plt.title('FISTA LS+1') -plt.show() +#plt.imshow(x_fista1_denoise.as_array()) +#plt.title('FISTA LS+1') +#plt.show() # Now denoise LS + 1-norm with FBPD -x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) +x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \ + criterfbpd1_denoise = FBPD(x_init_denoise, I, None, f_denoise, g1_denoise) print(x_fbpd1_denoise) print(criterfbpd1_denoise[-1]) -plt.imshow(x_fbpd1_denoise.as_array()) -plt.title('FBPD LS+1') -plt.show() +#plt.imshow(x_fbpd1_denoise.as_array()) +#plt.title('FBPD LS+1') +#plt.show() if use_cvxpy: # Compare to CVXPY @@ -206,30 +225,53 @@ if use_cvxpy: x1_cvx = x1_denoise.value x1_cvx.shape = (N,N) + + +#plt.imshow(x1_cvx) +#plt.title('CVX 
LS+1') +#plt.show() + +fig = plt.figure() +plt.subplot(1,4,1) +plt.imshow(y.array) +plt.title("LS+1") +plt.subplot(1,4,2) +plt.imshow(x_fista1_denoise.as_array()) +plt.title("fista") +plt.subplot(1,4,3) +plt.imshow(x_fbpd1_denoise.as_array()) +plt.title("fbpd") +plt.subplot(1,4,4) plt.imshow(x1_cvx) -plt.title('CVX LS+1') +plt.title("cvx") plt.show() -# Now TV with FBPD +############################################################## +# Now TV with FBPD and Norm2 lam_tv = 0.1 gtv = TV2D(lam_tv) -gtv(gtv.op.direct(x_init_denoise)) +norm2 = Norm2(lam_tv) +op = FiniteDiff2D() +#gtv(gtv.op.direct(x_init_denoise)) opt_tv = {'tol': 1e-4, 'iter': 10000} -x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv) +x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \ + criterfbpdtv_denoise = FBPD(x_init_denoise, op, None, \ + f_denoise, norm2 ,opt=opt_tv) print(x_fbpdtv_denoise) print(criterfbpdtv_denoise[-1]) plt.imshow(x_fbpdtv_denoise.as_array()) plt.title('FBPD TV') -plt.show() +#plt.show() if use_cvxpy: # Compare to CVXPY # Construct the problem. - xtv_denoise = Variable(N,N) + xtv_denoise = Variable((N,N)) + #print (xtv_denoise.value.shape) objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) ) probtv_denoise = Problem(objectivetv_denoise) @@ -244,7 +286,21 @@ if use_cvxpy: plt.imshow(xtv_denoise.value) plt.title('CVX TV') +#plt.show() + +fig = plt.figure() +plt.subplot(1,3,1) +plt.imshow(y.array) +plt.title("TV2D") +plt.subplot(1,3,2) +plt.imshow(x_fbpdtv_denoise.as_array()) +plt.title("fbpd tv denoise") +plt.subplot(1,3,3) +plt.imshow(xtv_denoise.value) +plt.title("CVX tv") plt.show() + + plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV') -plt.loglog(criterfbpdtv_denoise, label='FBPD TV')
\ No newline at end of file +plt.loglog(criterfbpdtv_denoise, label='FBPD TV') diff --git a/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py b/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py index 59d634e..8370c78 100644 --- a/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py +++ b/Wrappers/Python/wip/demo_imat_multichan_RGLTK.py @@ -32,7 +32,6 @@ ProjAngleChannels = np.zeros((totalAngles,totChannels,n,n),dtype='float32') ######################################################################### print ("Loading the data...") -#MainPath = '/media/algol/336F96987817D4B4/DATA/IMAT_DATA/' # path to data MainPath = '/media/jakob/050d8d45-fab3-4285-935f-260e6c5f162c1/Data/neutrondata/' # path to data pathname0 = '{!s}{!s}'.format(MainPath,'PSI_phantom_IMAT/DATA/Sample/') counterFileName = 4675 diff --git a/Wrappers/Python/wip/demo_memhandle.py b/Wrappers/Python/wip/demo_memhandle.py new file mode 100755 index 0000000..db48d73 --- /dev/null +++ b/Wrappers/Python/wip/demo_memhandle.py @@ -0,0 +1,193 @@ + +from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer +from ccpi.optimisation.algs import FISTA, FBPD, CGLS +from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D + +from ccpi.optimisation.ops import LinearOperatorMatrix, Identity +from ccpi.optimisation.ops import TomoIdentity + +# Requires CVXPY, see http://www.cvxpy.org/ +# CVXPY can be installed in anaconda using +# conda install -c cvxgrp cvxpy libgcc + + +import numpy as np +import matplotlib.pyplot as plt + +# Problem data. +m = 30 +n = 20 +np.random.seed(1) +Amat = np.random.randn(m, n) +A = LinearOperatorMatrix(Amat) +bmat = np.random.randn(m) +bmat.shape = (bmat.shape[0],1) + + + +# A = Identity() +# Change n to equal to m. + +b = DataContainer(bmat) + +# Regularization parameter +lam = 10 + +# Create object instances with the test data A and b. 
+f = Norm2sq(A,b,c=0.5, memopt=True) +g0 = ZeroFun() + +# Initial guess +x_init = DataContainer(np.zeros((n,1))) + +f.grad(x_init) +opt = {'memopt': True} +# Run FISTA for least squares plus zero function. +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0) +x_fista0_m, it0_m, timing0_m, criter0_m = FISTA(x_init, f, g0, opt=opt) + +iternum = [i for i in range(len(criter0))] +# Print solution and final objective/criterion value for comparison +print("FISTA least squares plus zero function solution and objective value:") +print(x_fista0.array) +print(criter0[-1]) + + +# Plot criterion curve to see FISTA converge to same value as CVX. +#iternum = np.arange(1,1001) +plt.figure() +plt.loglog(iternum,criter0,label='FISTA LS') +plt.loglog(iternum,criter0_m,label='FISTA LS memopt') +plt.legend() +plt.show() +#%% +# Create 1-norm object instance +g1 = Norm1(lam) + +g1(x_init) +g1.prox(x_init,0.02) + +# Combine with least squares and solve using generic FISTA implementation +x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1) +x_fista1_m, it1_m, timing1_m, criter1_m = FISTA(x_init, f, g1, opt=opt) +iternum = [i for i in range(len(criter1))] +# Print for comparison +print("FISTA least squares plus 1-norm solution and objective value:") +print(x_fista1) +print(criter1[-1]) + + +# Now try another algorithm FBPD for same problem: +x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1) +iternum = [i for i in range(len(criterfbpd1))] +print(x_fbpd1) +print(criterfbpd1[-1]) + +# Plot criterion curve to see both FISTA and FBPD converge to same value. +# Note that FISTA is very efficient for 1-norm minimization so it beats +# FBPD in this test by a lot. But FBPD can handle a larger class of problems +# than FISTA can. 
+plt.figure() +plt.loglog(iternum,criter1,label='FISTA LS+1') +plt.loglog(iternum,criter1_m,label='FISTA LS+1 memopt') +plt.legend() +plt.show() + +plt.figure() +plt.loglog(iternum,criter1,label='FISTA LS+1') +plt.loglog(iternum,criterfbpd1,label='FBPD LS+1') +plt.legend() +plt.show() +#%% +# Now try 1-norm and TV denoising with FBPD, first 1-norm. + +# Set up phantom size NxN by creating ImageGeometry, initialising the +# ImageData object with this geometry and empty array and finally put some +# data into its array, and display as image. +N = 1000 +ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) +Phantom = ImageData(geometry=ig) + +x = Phantom.as_array() +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +plt.imshow(x) +plt.title('Phantom image') +plt.show() + +# Identity operator for denoising +I = TomoIdentity(ig) + +# Data and add noise +y = I.direct(Phantom) +y.array += 0.1*np.random.randn(N, N) + +plt.figure() +plt.imshow(y.array) +plt.title('Noisy image') +plt.show() + +# Data fidelity term +f_denoise = Norm2sq(I,y,c=0.5, memopt=True) + +# 1-norm regulariser +lam1_denoise = 1.0 +g1_denoise = Norm1(lam1_denoise) + +# Initial guess +x_init_denoise = ImageData(np.zeros((N,N))) +opt = {'memopt': False, 'iter' : 50} +# Combine with least squares and solve using generic FISTA implementation +print ("no memopt") +x_fista1_denoise, it1_denoise, timing1_denoise, \ + criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) +opt = {'memopt': True, 'iter' : 50} +print ("yes memopt") +x_fista1_denoise_m, it1_denoise_m, timing1_denoise_m, \ + criter1_denoise_m = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt) + +print(x_fista1_denoise) +print(criter1_denoise[-1]) + +plt.figure() +plt.imshow(x_fista1_denoise.as_array()) +plt.title('FISTA LS+1') +plt.show() + +plt.figure() +plt.imshow(x_fista1_denoise_m.as_array()) +plt.title('FISTA LS+1 memopt') +plt.show() + +plt.figure() 
+plt.loglog(iternum,criter1_denoise,label='FISTA LS+1') +plt.loglog(iternum,criter1_denoise_m,label='FISTA LS+1 memopt') +plt.legend() +plt.show() +#%% +# Now denoise LS + 1-norm with FBPD +x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise) +print(x_fbpd1_denoise) +print(criterfbpd1_denoise[-1]) + +plt.figure() +plt.imshow(x_fbpd1_denoise.as_array()) +plt.title('FBPD LS+1') +plt.show() + + +# Now TV with FBPD +lam_tv = 0.1 +gtv = TV2D(lam_tv) +gtv(gtv.op.direct(x_init_denoise)) + +opt_tv = {'tol': 1e-4, 'iter': 10000} + +x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv) +print(x_fbpdtv_denoise) +print(criterfbpdtv_denoise[-1]) + +plt.imshow(x_fbpdtv_denoise.as_array()) +plt.title('FBPD TV') +plt.show() diff --git a/Wrappers/Python/wip/demo_test_sirt.py b/Wrappers/Python/wip/demo_test_sirt.py new file mode 100644 index 0000000..6f5a44d --- /dev/null +++ b/Wrappers/Python/wip/demo_test_sirt.py @@ -0,0 +1,176 @@ +# This demo illustrates how to use the SIRT algorithm without and with +# nonnegativity and box constraints. The ASTRA 2D projectors are used. + +# First make all imports +from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, \ + AcquisitionData +from ccpi.optimisation.algs import FISTA, FBPD, CGLS, SIRT +from ccpi.optimisation.funcs import Norm2sq, Norm1, TV2D, IndicatorBox +from ccpi.astra.ops import AstraProjectorSimple + +import numpy as np +import matplotlib.pyplot as plt + +# Choose either a parallel-beam (1=parallel2D) or fan-beam (2=cone2D) test case +test_case = 1 + +# Set up phantom size NxN by creating ImageGeometry, initialising the +# ImageData object with this geometry and empty array and finally put some +# data into its array, and display as image. 
+N = 128 +ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N) +Phantom = ImageData(geometry=ig) + +x = Phantom.as_array() +x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 +x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 + +plt.imshow(x) +plt.title('Phantom image') +plt.show() + +# Set up AcquisitionGeometry object to hold the parameters of the measurement +# setup geometry: # Number of angles, the actual angles from 0 to +# pi for parallel beam and 0 to 2pi for fanbeam, set the width of a detector +# pixel relative to an object pixel, the number of detector pixels, and the +# source-origin and origin-detector distance (here the origin-detector distance +# set to 0 to simulate a "virtual detector" with same detector pixel size as +# object pixel size). +angles_num = 20 +det_w = 1.0 +det_num = N +SourceOrig = 200 +OrigDetec = 0 + +if test_case==1: + angles = np.linspace(0,np.pi,angles_num,endpoint=False) + ag = AcquisitionGeometry('parallel', + '2D', + angles, + det_num,det_w) +elif test_case==2: + angles = np.linspace(0,2*np.pi,angles_num,endpoint=False) + ag = AcquisitionGeometry('cone', + '2D', + angles, + det_num, + det_w, + dist_source_center=SourceOrig, + dist_center_detector=OrigDetec) +else: + NotImplemented + +# Set up Operator object combining the ImageGeometry and AcquisitionGeometry +# wrapping calls to ASTRA as well as specifying whether to use CPU or GPU. +Aop = AstraProjectorSimple(ig, ag, 'gpu') + +# Forward and backprojection are available as methods direct and adjoint. Here +# generate test data b and do simple backprojection to obtain z. +b = Aop.direct(Phantom) +z = Aop.adjoint(b) + +plt.imshow(b.array) +plt.title('Simulated data') +plt.show() + +plt.imshow(z.array) +plt.title('Backprojected data') +plt.show() + +# Using the test data b, different reconstruction methods can now be set up as +# demonstrated in the rest of this file. 
In general all methods need an initial +# guess and some algorithm options to be set: +x_init = ImageData(np.zeros(x.shape),geometry=ig) +opt = {'tol': 1e-4, 'iter': 1000} + +# First a CGLS reconstruction can be done: +x_CGLS, it_CGLS, timing_CGLS, criter_CGLS = CGLS(x_init, Aop, b, opt) + +plt.imshow(x_CGLS.array) +plt.title('CGLS') +plt.colorbar() +plt.show() + +plt.semilogy(criter_CGLS) +plt.title('CGLS criterion') +plt.show() + +# A SIRT unconstrained reconstruction can be done: similarly: +x_SIRT, it_SIRT, timing_SIRT, criter_SIRT = SIRT(x_init, Aop, b, opt) + +plt.imshow(x_SIRT.array) +plt.title('SIRT unconstrained') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT) +plt.title('SIRT unconstrained criterion') +plt.show() + +# A SIRT nonnegativity constrained reconstruction can be done using the +# additional input "constraint" set to a box indicator function with 0 as the +# lower bound and the default upper bound of infinity: +x_SIRT0, it_SIRT0, timing_SIRT0, criter_SIRT0 = SIRT(x_init, Aop, b, opt, + constraint=IndicatorBox(lower=0)) + +plt.imshow(x_SIRT0.array) +plt.title('SIRT nonneg') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT0) +plt.title('SIRT nonneg criterion') +plt.show() + +# A SIRT reconstruction with box constraints on [0,1] can also be done: +x_SIRT01, it_SIRT01, timing_SIRT01, criter_SIRT01 = SIRT(x_init, Aop, b, opt, + constraint=IndicatorBox(lower=0,upper=1)) + +plt.imshow(x_SIRT01.array) +plt.title('SIRT box(0,1)') +plt.colorbar() +plt.show() + +plt.semilogy(criter_SIRT01) +plt.title('SIRT box(0,1) criterion') +plt.show() + +# The indicator function can also be used with the FISTA algorithm to do +# least squares with nonnegativity constraint. 
+ +# Create least squares object instance with projector, test data and a constant +# coefficient of 0.5: +f = Norm2sq(Aop,b,c=0.5) + +# Run FISTA for least squares without constraints +x_fista, it, timing, criter = FISTA(x_init, f, None,opt) + +plt.imshow(x_fista.array) +plt.title('FISTA Least squares') +plt.show() + +plt.semilogy(criter) +plt.title('FISTA Least squares criterion') +plt.show() + +# Run FISTA for least squares with nonnegativity constraint +x_fista0, it0, timing0, criter0 = FISTA(x_init, f, IndicatorBox(lower=0),opt) + +plt.imshow(x_fista0.array) +plt.title('FISTA Least squares nonneg') +plt.show() + +plt.semilogy(criter0) +plt.title('FISTA Least squares nonneg criterion') +plt.show() + +# Run FISTA for least squares with box constraint [0,1] +x_fista01, it01, timing01, criter01 = FISTA(x_init, f, IndicatorBox(lower=0,upper=1),opt) + +plt.imshow(x_fista01.array) +plt.title('FISTA Least squares box(0,1)') +plt.show() + +plt.semilogy(criter01) +plt.title('FISTA Least squares box(0,1) criterion') +plt.show()
\ No newline at end of file diff --git a/Wrappers/Python/wip/multifile_nexus.py b/Wrappers/Python/wip/multifile_nexus.py new file mode 100755 index 0000000..d1ad438 --- /dev/null +++ b/Wrappers/Python/wip/multifile_nexus.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*-
+"""
+Created on Wed Aug 15 16:00:53 2018
+
+@author: ofn77899
+"""
+
+import os
+from ccpi.io.reader import NexusReader
+
+from sys import getsizeof
+
+import matplotlib.pyplot as plt
+
+from ccpi.framework import DataProcessor, DataContainer
+from ccpi.processors import Normalizer
+from ccpi.processors import CenterOfRotationFinder
+import numpy
+
+
+class averager(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.N = 0
+ self.avg = 0
+ self.min = 0
+ self.max = 0
+ self.var = 0
+ self.ske = 0
+ self.kur = 0
+
+ def add_reading(self, val):
+
+ if (self.N == 0):
+ self.avg = val;
+ self.min = val;
+ self.max = val;
+ elif (self.N == 1):
+ #//set min/max
+ self.max = val if val > self.max else self.max
+ self.min = val if val < self.min else self.min
+
+
+ thisavg = (self.avg + val)/2
+ #// initial value is in avg
+ self.var = (self.avg - thisavg)*(self.avg-thisavg) + (val - thisavg) * (val-thisavg)
+ self.ske = self.var * (self.avg - thisavg)
+ self.kur = self.var * self.var
+ self.avg = thisavg
+ else:
+ self.max = val if val > self.max else self.max
+ self.min = val if val < self.min else self.min
+
+ M = self.N
+
+ #// b-factor = (v_(N+1) - <v>_N) / (N+1)
+ #float b = (val - avg) / (M+1);
+ b = (val -self.avg) / (M+1)
+
+ self.kur = self.kur + (M *(b*b*b*b) * (1+M*M*M))- (4* b * self.ske) + (6 * b *b * self.var * (M-1))
+
+ self.ske = self.ske + (M * b*b*b *(M-1)*(M+1)) - (3 * b * self.var * (M-1))
+
+ #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
+ self.var = self.var * ((M-1)/M) + (b * b * (M+1))
+
+ self.avg = self.avg * (M/(M+1)) + val / (M+1)
+
+ self.N += 1
+
+ def stats(self, vector):
+ i = 0
+ while i < vector.size:
+ self.add_reading(vector[i])
+ i+=1
+
+avg = averager()
+a = numpy.linspace(0,39,40)
+avg.stats(a)
+print ("average" , avg.avg, a.mean())
+print ("variance" , avg.var, a.var())
+b = a - a.mean()
+b *= b
+b = numpy.sqrt(sum(b)/(a.size-1))
+print ("std" , numpy.sqrt(avg.var), b)
+#%%
+
+class DataStatMoments(DataProcessor):
+ '''Running statistical moments along one axis of a dataset
+
+ This processor reads in a DataContainer and accumulates, slice by
+ slice along the chosen axis, the running average, variance and
+ (optionally) skewness and kurtosis of the data.
+
+ Input: DataContainer (e.g. a stack of projections)
+ Parameter: axis along which to accumulate; skewness/kurtosis flags;
+ offset into the axis
+ Output: DataContainer holding the first four statistical moments
+ '''
+
+ def __init__(self, axis, skewness=False, kurtosis=False, offset=0):
+ kwargs = {
+ 'axis' : axis,
+ 'skewness' : skewness,
+ 'kurtosis' : kurtosis,
+ 'offset' : offset,
+ }
+ #DataProcessor.__init__(self, **kwargs)
+ super(DataStatMoments, self).__init__(**kwargs)
+
+
+ def check_input(self, dataset):
+ #self.N = dataset.get_dimension_size(self.axis)
+ return True
+
+ @staticmethod
+ def add_sample(dataset, N, axis, stats=None, offset=0):
+ #dataset = self.get_input()
+ if (N == 0):
+ # get the axis number along to calculate the stats
+
+
+ #axs = dataset.get_dimension_size(self.axis)
+ # create a placeholder for the output
+ if stats is None:
+ ax = dataset.get_dimension_axis(axis)
+ shape = [dataset.shape[i] for i in range(len(dataset.shape)) if i != ax]
+ # output has 4+2 slots: avg, variance, skewness, kurtosis, plus two scratch slots (last avg and (val-thisavg))
+ shape.insert(0, 4+2)
+ stats = numpy.zeros(shape)
+
+ stats[0] = dataset.subset(**{axis:N-offset}).array[:]
+
+ #avg = val
+ elif (N == 1):
+ # val
+ stats[5] = dataset.subset(**{axis:N-offset}).array
+ stats[4] = stats[0] + stats[5]
+ stats[4] /= 2 # thisavg
+ stats[5] -= stats[4] # (val - thisavg)
+
+ #float thisavg = (avg + val)/2;
+
+ #// initial value is in avg
+ #var = (avg - thisavg)*(avg-thisavg) + (val - thisavg) * (val-thisavg);
+ stats[1] = stats[5] * stats[5] + stats[5] * stats[5]
+ #skewness = var * (avg - thisavg);
+ stats[2] = stats[1] * stats[5]
+ #kurtosis = var * var;
+ stats[3] = stats[1] * stats[1]
+ #avg = thisavg;
+ stats[0] = stats[4]
+ else:
+
+ #float M = (float)N;
+ M = N
+ #val
+ stats[4] = dataset.subset(**{axis:N-offset}).array
+ #// b-factor = (v_(N+1) - <v>_N) / (N+1)
+ stats[5] = stats[4] - stats[0]
+ stats[5] /= (M+1) # b factor
+ #float b = (val -avg) / (M+1);
+
+ #kurtosis = kurtosis + (M *(b*b*b*b) * (1+M*M*M))- (4* b * skewness) + (6 * b *b * var * (M-1));
+ #if self.kurtosis:
+ # stats[3] += (M * stats[5] * stats[5] * stats[5] * stats[5]) - \
+ # (4 * stats[5] * stats[2]) + \
+ # (6 * stats[5] * stats[5] * stats[1] * (M-1))
+
+ #skewness = skewness + (M * b*b*b *(M-1)*(M+1)) - (3 * b * var * (M-1));
+ #if self.skewness:
+ # stats[2] = stats[2] + (M * stats[5]* stats[5] * stats[5] * (M-1)*(M-1) ) -\
+ # 3 * stats[5] * stats[1] * (M-1)
+ #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
+ #var = var * ((M-1)/M) + (b * b * (M+1));
+ stats[1] = ((M-1)/M) * stats[1] + (stats[5] * stats[5] * (M+1))
+
+ #avg = avg * (M/(M+1)) + val / (M+1)
+ stats[0] = stats[0] * (M/(M+1)) + stats[4] / (M+1)
+
+ N += 1
+ return stats , N
+
+
+ def process(self):
+
+ data = self.get_input()
+
+ #stats, i = add_sample(0)
+ N = data.get_dimension_size(self.axis)
+ ax = data.get_dimension_axis(self.axis)
+ stats = None
+ i = 0
+ while i < N:
+ stats , i = DataStatMoments.add_sample(data, i, self.axis, stats, offset=self.offset)
+ self.offset += N
+ labels = ['StatisticalMoments']
+
+ labels += [data.dimension_labels[i] \
+ for i in range(len(data.dimension_labels)) if i != ax]
+ y = DataContainer( stats[:4] , False,
+ dimension_labels=labels)
+ return y
+
+directory = r'E:\Documents\Dataset\CCPi\Nexus_test'
+data_path="entry1/instrument/pco1_hw_hdf_nochunking/data"
+
+reader = NexusReader(os.path.join( os.path.abspath(directory) , '74331.nxs'))
+
+print ("read flat")
+read_flat = NexusReader(os.path.join( os.path.abspath(directory) , '74240.nxs'))
+read_flat.data_path = data_path
+flatsslice = read_flat.get_acquisition_data_whole()
+avg = DataStatMoments('angle')
+
+avg.set_input(flatsslice)
+flats = avg.get_output()
+
+ave = averager()
+ave.stats(flatsslice.array[:,0,0])
+
+print ("avg" , ave.avg, flats.array[0][0][0])
+print ("var" , ave.var, flats.array[1][0][0])
+
+print ("read dark")
+read_dark = NexusReader(os.path.join( os.path.abspath(directory) , '74243.nxs'))
+read_dark.data_path = data_path
+
+## darks are very many so we proceed in batches
+total_size = read_dark.get_projection_dimensions()[0]
+print ("total_size", total_size)
+
+batchsize = 40
+if batchsize > total_size:
+ batchlimits = [batchsize * (i+1) for i in range(int(total_size/batchsize))] + [total_size-1]
+else:
+ batchlimits = [total_size]
+#avg.N = 0
+avg.offset = 0
+N = 0
+for batch in range(len(batchlimits)):
+ print ("running batch " , batch)
+ bmax = batchlimits[batch]
+ bmin = bmax-batchsize
+
+ darksslice = read_dark.get_acquisition_data_batch(bmin,bmax)
+ if batch == 0:
+ #create stats
+ ax = darksslice.get_dimension_axis('angle')
+ shape = [darksslice.shape[i] for i in range(len(darksslice.shape)) if i != ax]
+ # output has 4+2 slots: avg, variance, skewness, kurtosis, plus two scratch slots (last avg and (val-thisavg))
+ shape.insert(0, 4+2)
+ print ("create stats shape ", shape)
+ stats = numpy.zeros(shape)
+ print ("N" , N)
+ #avg.set_input(darksslice)
+ i = bmin
+ while i < bmax:
+ stats , i = DataStatMoments.add_sample(darksslice, i, 'angle', stats, bmin)
+ print ("{0}-{1}-{2}".format(bmin, i , bmax ) )
+
+darks = stats
+#%%
+
+fig = plt.subplot(2,2,1)
+fig.imshow(flats.subset(StatisticalMoments=0).array)
+fig = plt.subplot(2,2,2)
+fig.imshow(numpy.sqrt(flats.subset(StatisticalMoments=1).array))
+fig = plt.subplot(2,2,3)
+fig.imshow(darks[0])
+fig = plt.subplot(2,2,4)
+fig.imshow(numpy.sqrt(darks[1]))
+plt.show()
+
+
+#%%
+norm = Normalizer(flat_field=flats.array[0,200,:], dark_field=darks[0,200,:])
+#norm.set_flat_field(flats.array[0,200,:])
+#norm.set_dark_field(darks.array[0,200,:])
+norm.set_input(reader.get_acquisition_data_slice(200))
+
+n = Normalizer.normalize_projection(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], 1e-5)
+#dn_n= Normalizer.estimate_normalised_error(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:],
+# numpy.sqrt(flats.array[1,200,:]), numpy.sqrt(darks[1,200,:]))
+#%%
+
+
+#%%
+fig = plt.subplot(2,1,1)
+
+
+fig.imshow(norm.get_input().as_array())
+fig = plt.subplot(2,1,2)
+fig.imshow(n)
+
+#fig = plt.subplot(3,1,3)
+#fig.imshow(dn_n)
+
+
+plt.show()
+
+
+
+
+
+
|