author      Edoardo Pasca <edo.paskino@gmail.com>    2018-11-10 09:47:42 +0000
committer   GitHub <noreply@github.com>              2018-11-10 09:47:42 +0000
commit      9b1d18b61e9f5f623c1d4f8c93b2895a4c57e4c0 (patch)
tree        862b6f969393260c4191ea1f7a28395da1c34037 /Wrappers
parent      42d9e33488400dd8b232002ad862be861f5f7439 (diff)
Nexus read slice (#144)
* replaces 'in' with an explicit test
closes #139
* add read of single slice or subset
* get_image_keys
* reader reads single slice
* adds get_acquisition_data_slice and get_acquisition_data_subset
It is now possible to read in only a subset of the dataset (see the usage sketch after this list).
* added multifile_nexus.py
* cleaned example
* fix normaliser and reader
* wip
* wip for reader
* added support for non-standard NeXus files
updated the example with normalisation
* fix camelcase
* Added initial unittest for reader
bugfix for python 2.7
* fixes for python 2.7
* remove duplicate test requirement
* minimal unittest for get_acquisition_data_subset
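
For orientation, a minimal usage sketch of the reader API added in this PR; the file name is a placeholder, and the image-key convention (0 = projection, 1 = flat field, 2 = dark field) is the one assumed by the reader code in this diff.

# Sketch: reading slices and subsets with the new NexusReader methods
from ccpi.io.reader import NexusReader

reader = NexusReader('mydata.nxs')                    # placeholder file name
keys = reader.get_image_keys()                        # numpy array of image keys, one per frame
single = reader.get_acquisition_data_slice(100)       # one vertical slice -> 2D AcquisitionData
subset = reader.get_acquisition_data_subset(0, 10)    # vertical rows [0, 10) -> 3D AcquisitionData
whole = reader.get_acquisition_data()                 # full dataset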
Diffstat (limited to 'Wrappers')
-rw-r--r--   Wrappers/Python/ccpi/framework.py          |  40
-rw-r--r--   Wrappers/Python/ccpi/io/reader.py          | 228
-rwxr-xr-x   Wrappers/Python/ccpi/processors.py         | 154
-rw-r--r--   Wrappers/Python/conda-recipe/meta.yaml     |  17
-rwxr-xr-x   Wrappers/Python/conda-recipe/run_test.py   |  85
-rw-r--r--   Wrappers/Python/setup.py                   |   2
-rwxr-xr-x   Wrappers/Python/wip/multifile_nexus.py     | 307
7 files changed, 762 insertions, 71 deletions
diff --git a/Wrappers/Python/ccpi/framework.py b/Wrappers/Python/ccpi/framework.py
index 4d74d2b..9938fb7 100644
--- a/Wrappers/Python/ccpi/framework.py
+++ b/Wrappers/Python/ccpi/framework.py
@@ -17,18 +17,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import absolute_import
from __future__ import division
-import abc
+from __future__ import print_function
+from __future__ import unicode_literals
+
import numpy
import sys
from datetime import timedelta, datetime
import warnings
-if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
- ABC = abc.ABC
-else:
- ABC = abc.ABCMeta('ABC', (), {})
-
def find_key(dic, val):
"""return the key of dictionary dic given the value"""
return [k for k, v in dic.items() if v == val][0]
@@ -376,7 +374,6 @@ class DataContainer(object):
return self.shape == other.shape
## algebra
-
def __add__(self, other , out=None, *args, **kwargs):
if issubclass(type(other), DataContainer):
if self.check_dimensions(other):
@@ -525,9 +522,9 @@ class DataContainer(object):
# must return self
+
def __iadd__(self, other):
if isinstance(other, (int, float)) :
- #print ("__iadd__", self.array.shape)
numpy.add(self.array, other, out=self.array)
elif issubclass(type(other), DataContainer):
if self.check_dimensions(other):
@@ -539,11 +536,7 @@ class DataContainer(object):
def __imul__(self, other):
if isinstance(other, (int, float)) :
- #print ("__imul__", self.array.shape)
- #print ("type(self)", type(self))
- #print ("self.array", self.array, other)
arr = self.as_array()
- #print ("arr", arr)
numpy.multiply(arr, other, out=arr)
elif issubclass(type(other), DataContainer):
if self.check_dimensions(other):
@@ -627,13 +620,14 @@ class DataContainer(object):
.format(len(self.shape),len(new_order)))
- def copy(self):
+ def copy(self):
'''alias of clone'''
return self.clone()
## binary operations
def pixel_wise_binary(self,pwop, x2 , out=None, *args, **kwargs):
+
if out is None:
if isinstance(x2, (int, float, complex)):
out = pwop(self.as_array() , x2 , *args, **kwargs )
@@ -657,11 +651,8 @@ class DataContainer(object):
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)):
if self.check_dimensions(out):
+ pwop(self.as_array(), x2, out=out.as_array(), *args, **kwargs )
- #return type(self)(out.as_array(),
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
return out
else:
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
@@ -694,7 +685,6 @@ class DataContainer(object):
return self.pixel_wise_binary(numpy.maximum, x2=x2, out=out, *args, **kwargs)
## unary operations
-
def pixel_wise_unary(self,pwop, out=None, *args, **kwargs):
if out is None:
out = pwop(self.as_array() , *args, **kwargs )
@@ -705,19 +695,11 @@ class DataContainer(object):
elif issubclass(type(out), DataContainer):
if self.check_dimensions(out):
pwop(self.as_array(), out=out.as_array(), *args, **kwargs )
- #return type(self)(out.as_array(),
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
else:
raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape))
elif issubclass(type(out), numpy.ndarray):
if self.array.shape == out.shape and self.array.dtype == out.dtype:
pwop(self.as_array(), out=out, *args, **kwargs)
- #return type(self)(out,
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
else:
raise ValueError (message(type(self), "incompatible class:" , pwop.__name__, type(out)))
@@ -725,11 +707,6 @@ class DataContainer(object):
return self.pixel_wise_unary(numpy.abs, out=out, *args, **kwargs)
def sign(self, out=None, *args, **kwargs):
- #out = numpy.sign(self.as_array() )
- #return type(self)(out,
- # deep_copy=False,
- # dimension_labels=self.dimension_labels,
- # geometry=self.geometry)
return self.pixel_wise_unary(numpy.sign , out=out, *args, **kwargs)
def sqrt(self, out=None, *args, **kwargs):
@@ -743,7 +720,6 @@ class DataContainer(object):
## reductions
def sum(self, out=None, *args, **kwargs):
return self.as_array().sum(*args, **kwargs)
- #return self.pixel_wise_unary(numpy.sum, out=out, *args, **kwargs)
class ImageData(DataContainer):
'''DataContainer for holding 2D or 3D DataContainer'''
diff --git a/Wrappers/Python/ccpi/io/reader.py b/Wrappers/Python/ccpi/io/reader.py
index b0b5414..856f5e0 100644
--- a/Wrappers/Python/ccpi/io/reader.py
+++ b/Wrappers/Python/ccpi/io/reader.py
@@ -22,6 +22,11 @@
This is a reader module with classes for loading 3D datasets.
@author: Mr. Srikanth Nagella
'''
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
from ccpi.framework import AcquisitionGeometry
from ccpi.framework import AcquisitionData
import numpy as np
@@ -53,7 +58,18 @@ class NexusReader(object):
self.angles = None
self.geometry = None
self.filename = nexus_filename
-
+ self.key_path = 'entry1/tomo_entry/instrument/detector/image_key'
+ self.data_path = 'entry1/tomo_entry/data/data'
+ self.angle_path = 'entry1/tomo_entry/data/rotation_angle'
+
+ def get_image_keys(self):
+ try:
+ with NexusFile(self.filename,'r') as file:
+ return np.array(file[self.key_path])
+ except KeyError as ke:
+ raise KeyError("get_image_keys: " , ke.args[0] , self.key_path)
+
+
def load(self, dimensions=None, image_key_id=0):
'''
This is generic loading function of flat field, dark field and projection data.
@@ -64,10 +80,10 @@ class NexusReader(object):
return
try:
with NexusFile(self.filename,'r') as file:
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ image_keys = np.array(file[self.key_path])
projections = None
if dimensions == None:
- projections = np.array(file['entry1/tomo_entry/data/data'])
+ projections = np.array(file[self.data_path])
result = projections[image_keys==image_key_id]
return result
else:
@@ -76,8 +92,8 @@ class NexusReader(object):
projection_indexes = index_array[0][dimensions[0]]
new_dimensions = list(dimensions)
new_dimensions[0]= projection_indexes
- new_dimensions = tuple(new_dimensions)
- result = np.array(file['entry1/tomo_entry/data/data'][new_dimensions])
+ new_dimensions = tuple(new_dimensions)
+ result = np.array(file[self.data_path][new_dimensions])
return result
except:
print("Error reading nexus file")
@@ -88,6 +104,12 @@ class NexusReader(object):
Loads the projection data from the nexus file.
returns: numpy array with projection data
'''
+ try:
+ if 0 not in self.get_image_keys():
+ raise ValueError("Projections are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 0)
def load_flat(self, dimensions=None):
@@ -95,6 +117,12 @@ class NexusReader(object):
Loads the flat field data from the nexus file.
returns: numpy array with flat field data
'''
+ try:
+ if 1 not in self.get_image_keys():
+ raise ValueError("Flats are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 1)
def load_dark(self, dimensions=None):
@@ -102,6 +130,12 @@ class NexusReader(object):
Loads the Dark field data from the nexus file.
returns: numpy array with dark field data
'''
+ try:
+ if 2 not in self.get_image_keys():
+ raise ValueError("Darks are not in the data. Data Path " ,
+ self.data_path)
+ except KeyError as ke:
+ raise KeyError(ke.args[0] , self.data_path)
return self.load(dimensions, 2)
def get_projection_angles(self):
@@ -114,11 +148,11 @@ class NexusReader(object):
return
try:
with NexusFile(self.filename,'r') as file:
- angles = np.array(file['entry1/tomo_entry/data/rotation_angle'],np.float32)
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ angles = np.array(file[self.angle_path],np.float32)
+ image_keys = np.array(file[self.key_path])
return angles[image_keys==0]
except:
- print("Error reading nexus file")
+ print("get_projection_angles Error reading nexus file")
raise
@@ -132,8 +166,8 @@ class NexusReader(object):
return
try:
with NexusFile(self.filename,'r') as file:
- projections = file['entry1/tomo_entry/data/data']
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ projections = file[self.data_path]
+ image_keys = np.array(file[self.key_path])
dims = list(projections.shape)
dims[0] = dims[1]
dims[1] = np.sum(image_keys==0)
@@ -151,15 +185,25 @@ class NexusReader(object):
if self.filename is None:
return
try:
- with NexusFile(self.filename,'r') as file:
- projections = file['entry1/tomo_entry/data/data']
- image_keys = np.array(file['entry1/tomo_entry/instrument/detector/image_key'])
+ with NexusFile(self.filename,'r') as file:
+ try:
+ projections = file[self.data_path]
+ except KeyError as ke:
+ raise KeyError('Error: data path {0} not found\n{1}'\
+ .format(self.data_path,
+ ke.args[0]))
+ #image_keys = np.array(file[self.key_path])
+ image_keys = self.get_image_keys()
dims = list(projections.shape)
dims[0] = np.sum(image_keys==0)
return tuple(dims)
except:
- print("Error reading nexus file")
- raise
+ print("Warning: Error reading image_keys trying accessing data on " , self.data_path)
+ with NexusFile(self.filename,'r') as file:
+ dims = file[self.data_path].shape
+ return tuple(dims)
+
+
def get_acquisition_data(self, dimensions=None):
'''
@@ -179,6 +223,160 @@ class NexusReader(object):
return AcquisitionData(data, geometry=geometry,
dimension_labels=['angle','vertical','horizontal'])
+ def get_acquisition_data_subset(self, ymin=None, ymax=None):
+ '''
+ This method loads the acquisition data for the given vertical range (ymin, ymax) and returns an AcquisitionData object
+ '''
+ if not h5pyAvailable:
+ raise Exception("Error: h5py is not installed")
+ if self.filename is None:
+ return
+ try:
+
+
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ pass
+ dims = file[self.data_path].shape
+ if ymin is None and ymax is None:
+ data = np.array(file[self.data_path])
+ else:
+ if ymin is None:
+ ymin = 0
+ if ymax > dims[1]:
+ raise ValueError('ymax out of range')
+ data = np.array(file[self.data_path][:,:ymax,:])
+ elif ymax is None:
+ ymax = dims[1]
+ if ymin < 0:
+ raise ValueError('ymin out of range')
+ data = np.array(file[self.data_path][:,ymin:,:])
+ else:
+ if ymax > dims[1]:
+ raise ValueError('ymax out of range')
+ if ymin < 0:
+ raise ValueError('ymin out of range')
+
+ data = np.array(file[self.data_path]
+ [: , ymin:ymax , :] )
+
+ except:
+ print("Error reading nexus file")
+ raise
+
+
+ try:
+ angles = self.get_projection_angles()
+ except KeyError as ke:
+ n = data.shape[0]
+ angles = np.linspace(0, n, n+1, dtype=np.float32)
+
+ if ymax-ymin > 1:
+
+ geometry = AcquisitionGeometry('parallel', '3D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ pixel_num_v = ymax-ymin,
+ pixel_size_v = 1,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data, False, geometry=geometry,
+ dimension_labels=['angle','vertical','horizontal'])
+ elif ymax-ymin == 1:
+ geometry = AcquisitionGeometry('parallel', '2D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data.squeeze(), False, geometry=geometry,
+ dimension_labels=['angle','horizontal'])
+ def get_acquisition_data_slice(self, y_slice=0):
+ return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1)
+ def get_acquisition_data_whole(self):
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ print ("Warning: ")
+ dims = file[self.data_path].shape
+
+ ymin = 0
+ ymax = dims[1] - 1
+
+ return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax)
+
+
+
+ def list_file_content(self):
+ try:
+ with NexusFile(self.filename,'r') as file:
+ file.visit(print)
+ except:
+ print("Error reading nexus file")
+ raise
+ def get_acquisition_data_batch(self, bmin=None, bmax=None):
+ if not h5pyAvailable:
+ raise Exception("Error: h5py is not installed")
+ if self.filename is None:
+ return
+ try:
+
+
+ with NexusFile(self.filename,'r') as file:
+ try:
+ dims = self.get_projection_dimensions()
+ except KeyError:
+ dims = file[self.data_path].shape
+ if bmin is None or bmax is None:
+ raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits')
+
+ if bmin >= 0 and bmin < bmax and bmax <= dims[0]:
+ data = np.array(file[self.data_path][bmin:bmax])
+ else:
+ raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0]))
+
+ except:
+ print("Error reading nexus file")
+ raise
+
+
+ try:
+ angles = self.get_projection_angles()[bmin:bmax]
+ except KeyError as ke:
+ n = data.shape[0]
+ angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax]
+
+ if bmax-bmin > 1:
+
+ geometry = AcquisitionGeometry('parallel', '3D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ pixel_num_v = bmax-bmin,
+ pixel_size_v = 1,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data, False, geometry=geometry,
+ dimension_labels=['angle','vertical','horizontal'])
+ elif bmax-bmin == 1:
+ geometry = AcquisitionGeometry('parallel', '2D',
+ angles,
+ pixel_num_h = dims[2],
+ pixel_size_h = 1 ,
+ dist_source_center = None,
+ dist_center_detector = None,
+ channels = 1)
+ return AcquisitionData(data.squeeze(), False, geometry=geometry,
+ dimension_labels=['angle','horizontal'])
+
+
class XTEKReader(object):
'''
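
As an aside, the two partial-read methods added above slice along different axes; a short sketch (the file name is a placeholder):

# get_acquisition_data_subset selects a range of vertical detector rows,
# get_acquisition_data_batch selects a range along the fastest (projection) index.
reader = NexusReader('mydata.nxs')
rows = reader.get_acquisition_data_subset(ymin=100, ymax=110)   # data[:, 100:110, :]
batch = reader.get_acquisition_data_batch(bmin=0, bmax=40)      # data[0:40]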
diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py
index a2d0446..6a9057a 100755
--- a/Wrappers/Python/ccpi/processors.py
+++ b/Wrappers/Python/ccpi/processors.py
@@ -19,6 +19,7 @@
from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\
AcquisitionGeometry, ImageGeometry, ImageData
+from ccpi.reconstruction.parallelbeam import alg as pbalg
import numpy
from scipy import ndimage
@@ -39,8 +40,8 @@ class Normalizer(DataProcessor):
def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5):
kwargs = {
- 'flat_field' : None,
- 'dark_field' : None,
+ 'flat_field' : flat_field,
+ 'dark_field' : dark_field,
# very small number. Used when there is a division by zero
'tolerance' : tolerance
}
@@ -53,7 +54,8 @@ class Normalizer(DataProcessor):
self.set_dark_field(dark_field)
def check_input(self, dataset):
- if dataset.number_of_dimensions == 3:
+ if dataset.number_of_dimensions == 3 or\
+ dataset.number_of_dimensions == 2:
return True
else:
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
@@ -65,7 +67,7 @@ class Normalizer(DataProcessor):
raise ValueError('Dark Field should be 2D')
elif len(numpy.shape(df)) == 2:
self.dark_field = df
- elif issubclass(type(df), DataSet):
+ elif issubclass(type(df), DataContainer):
self.dark_field = self.set_dark_field(df.as_array())
def set_flat_field(self, df):
@@ -74,7 +76,7 @@ class Normalizer(DataProcessor):
raise ValueError('Flat Field should be 2D')
elif len(numpy.shape(df)) == 2:
self.flat_field = df
- elif issubclass(type(df), DataSet):
+ elif issubclass(type(df), DataContainer):
self.flat_field = self.set_flat_field(df.as_array())
@staticmethod
@@ -86,22 +88,40 @@ class Normalizer(DataProcessor):
c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0
return c
+ @staticmethod
+ def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark):
+ '''returns the estimated relative error of the normalised projection
+
+ n = (projection - dark) / (flat - dark)
+ Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d)
+ '''
+ a = (projection - dark)
+ b = (flat-dark)
+ df = delta_flat / flat
+ dd = delta_dark / dark
+ rel_norm_error = (b + a) / (b * a) * (df + dd)
+ return rel_norm_error
+
def process(self):
projections = self.get_input()
dark = self.dark_field
flat = self.flat_field
- if not (projections.shape[1:] == dark.shape and \
- projections.shape[1:] == flat.shape):
- raise ValueError('Flats/Dark and projections size do not match.')
-
-
- a = numpy.asarray(
- [ Normalizer.normalize_projection(
- projection, flat, dark, self.tolerance) \
- for projection in projections.as_array() ]
- )
+ if projections.number_of_dimensions == 3:
+ if not (projections.shape[1:] == dark.shape and \
+ projections.shape[1:] == flat.shape):
+ raise ValueError('Flats/Dark and projections size do not match.')
+
+
+ a = numpy.asarray(
+ [ Normalizer.normalize_projection(
+ projection, flat, dark, self.tolerance) \
+ for projection in projections.as_array() ]
+ )
+ elif projections.number_of_dimensions == 2:
+ a = Normalizer.normalize_projection(projections.as_array(),
+ flat, dark, self.tolerance)
y = type(projections)( a , True,
dimension_labels=projections.dimension_labels,
geometry=projections.geometry)
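
With the new 2D branch above, a single slice read with the updated NexusReader can be normalised directly; a minimal sketch following the pattern used in wip/multifile_nexus.py below (the file name, slice index and the 2D flat/dark arrays are placeholders):

# Sketch: normalise one slice; flat2d and dark2d are 2D numpy arrays for that slice
reader = NexusReader('mydata.nxs')
norm = Normalizer(flat_field=flat2d, dark_field=dark2d, tolerance=1e-5)
norm.set_input(reader.get_acquisition_data_slice(200))
normalised = norm.get_output()

# The new static helper estimates the relative error of the normalised projection
# from the flat/dark uncertainties delta_flat and delta_dark (see the docstring above).
rel_err = Normalizer.estimate_normalised_error(projection2d, flat2d, dark2d, delta_flat, delta_dark)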
@@ -388,3 +408,107 @@ class CenterOfRotationFinder(DataProcessor):
return cor
+
+class AcquisitionDataPadder(DataProcessor):
+ '''Pads AcquisitionData so that the centre of rotation lies at the centre of the padded horizontal axis
+
+ This processor reads in an AcquisitionData and pads it horizontally, filling the
+ new pixels with pad_value, according to the supplied center_of_rotation and
+ acquisition_geometry.
+
+ Input: AcquisitionData
+ Parameter: center_of_rotation, acquisition_geometry, pad_value
+ Output: padded AcquisitionData
+ '''
+
+ def __init__(self,
+ center_of_rotation = None,
+ acquisition_geometry = None,
+ pad_value = 1e-5):
+ kwargs = {
+ 'acquisition_geometry' : acquisition_geometry,
+ 'center_of_rotation' : center_of_rotation,
+ 'pad_value' : pad_value
+ }
+
+ super(AcquisitionDataPadder, self).__init__(**kwargs)
+
+ def check_input(self, dataset):
+ if self.acquisition_geometry is None:
+ self.acquisition_geometry = dataset.geometry
+ if dataset.number_of_dimensions == 3:
+ return True
+ else:
+ raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+ .format(dataset.number_of_dimensions))
+
+ def process(self):
+ projections = self.get_input()
+ w = projections.get_dimension_size('horizontal')
+ delta = w - 2 * self.center_of_rotation
+
+ padded_width = int (
+ numpy.ceil(abs(delta)) + w
+ )
+ delta_pix = padded_width - w
+
+ voxel_per_pixel = 1
+ geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
+ self.acquisition_geometry.angles,
+ self.center_of_rotation,
+ voxel_per_pixel )
+
+ padded_geometry = self.acquisition_geometry.clone()
+
+ padded_geometry.pixel_num_h = geom['n_h']
+ padded_geometry.pixel_num_v = geom['n_v']
+
+ delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
+ delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
+
+ if delta_pix_h == 0:
+ delta_pix_h = delta_pix
+ padded_geometry.pixel_num_h = padded_width
+ #initialize a new AcquisitionData with values close to 0
+ out = AcquisitionData(geometry=padded_geometry)
+ out = out + self.pad_value
+
+
+ #pad in the horizontal-vertical plane -> slice on angles
+ if delta > 0:
+ #pad left of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ else:
+ #pad right of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(0, w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ #cleaned = eval(command)
+ exec(command)
+ return out
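
The command string assembled and passed to exec() above could equivalently be written with a tuple of slice objects; a minimal sketch using the same variable names as in the method above:

# Same assignment as exec(command) above, without building a string
index = []
for i in range(out.number_of_dimensions):
    label = out.dimension_labels[i]
    if label == 'horizontal':
        index.append(slice(delta_pix_h, delta_pix_h + w) if delta > 0 else slice(0, w))
    elif label == 'vertical':
        index.append(slice(delta_pix_v, None))
    else:
        index.append(slice(None))
out.array[tuple(index)] = projections.array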
\ No newline at end of file
diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index 45d25f9..5de01c5 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -1,6 +1,6 @@
package:
name: ccpi-framework
- version: 0.11.2
+ version: 0.11.3
build:
@@ -9,6 +9,12 @@ build:
# - CIL_VERSION
number: 0
+
+test:
+ requires:
+ - python-wget
+ - cvxpy # [not win]
+
requirements:
build:
- python
@@ -19,13 +25,8 @@ requirements:
- numpy
- scipy
- matplotlib
- run:
- - python
- - numpy
- - cvxpy # [not win]
- - scipy
- - matplotlib
-
+ - h5py
+
about:
home: http://www.ccpi.ac.uk
license: Apache 2.0 License
diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py
index d6bc340..5bf6538 100755
--- a/Wrappers/Python/conda-recipe/run_test.py
+++ b/Wrappers/Python/conda-recipe/run_test.py
@@ -20,6 +20,13 @@ from ccpi.optimisation.ops import LinearOperatorMatrix
from ccpi.optimisation.ops import TomoIdentity
from ccpi.optimisation.ops import Identity
+
+import numpy.testing
+import wget
+import os
+from ccpi.io.reader import NexusReader
+
+
try:
from cvxpy import *
cvx_not_installable = False
@@ -879,6 +886,84 @@ class TestFunction(unittest.TestCase):
# s.split(2)
# =============================================================================
+class TestNexusReader(unittest.TestCase):
+
+ def setUp(self):
+ wget.download('https://github.com/DiamondLightSource/Savu/raw/master/test_data/data/24737_fd.nxs')
+ self.filename = '24737_fd.nxs'
+
+ def tearDown(self):
+ os.remove(self.filename)
+
+
+ def testGetDimensions(self):
+ nr = NexusReader(self.filename)
+ self.assertEqual(nr.get_sinogram_dimensions(), (135, 91, 160), "Sinogram dimensions are not correct")
+
+ def testGetProjectionDimensions(self):
+ nr = NexusReader(self.filename)
+ self.assertEqual(nr.get_projection_dimensions(), (91,135,160), "Projection dimensions are not correct")
+
+ def testLoadProjectionWithoutDimensions(self):
+ nr = NexusReader(self.filename)
+ projections = nr.load_projection()
+ self.assertEqual(projections.shape, (91,135,160), "Loaded projection data dimensions are not correct")
+
+ def testLoadProjectionWithDimensions(self):
+ nr = NexusReader(self.filename)
+ projections = nr.load_projection((slice(0,1), slice(0,135), slice(0,160)))
+ self.assertEqual(projections.shape, (1,135,160), "Loaded projection data dimensions are not correct")
+
+ def testLoadProjectionCompareSingle(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(0,1), slice(0,135), slice(0,160)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[0:1,:,:])
+
+ def testLoadProjectionCompareMulti(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(0,3), slice(0,135), slice(0,160)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[0:3,:,:])
+
+ def testLoadProjectionCompareRandom(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(1,8), slice(5,10), slice(8,20)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[1:8,5:10,8:20])
+
+ def testLoadProjectionCompareFull(self):
+ nr = NexusReader(self.filename)
+ projections_full = nr.load_projection()
+ projections_part = nr.load_projection((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(projections_part, projections_full[:,:,:])
+
+ def testLoadFlatCompareFull(self):
+ nr = NexusReader(self.filename)
+ flats_full = nr.load_flat()
+ flats_part = nr.load_flat((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(flats_part, flats_full[:,:,:])
+
+ def testLoadDarkCompareFull(self):
+ nr = NexusReader(self.filename)
+ darks_full = nr.load_dark()
+ darks_part = nr.load_dark((slice(None,None), slice(None,None), slice(None,None)))
+ numpy.testing.assert_array_equal(darks_part, darks_full[:,:,:])
+
+ def testProjectionAngles(self):
+ nr = NexusReader(self.filename)
+ angles = nr.get_projection_angles()
+ self.assertEqual(angles.shape, (91,), "Loaded projection number of angles are not correct")
+
+ def test_get_acquisition_data_subset(self):
+ nr = NexusReader(self.filename)
+ key = nr.get_image_keys()
+ sl = nr.get_acquisition_data_subset(0,10)
+ data = nr.get_acquisition_data().subset(['vertical','horizontal'])
+
+ self.assertTrue(sl.shape , (10,data.shape[1]))
+
if __name__ == '__main__':
unittest.main()
+
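
The new reader tests can also be run on their own; a sketch, assuming the test dependencies (wget, h5py) are installed and run_test.py is on the path:

# python -m unittest run_test.TestNexusReader -v
# (setUp downloads 24737_fd.nxs with wget; tearDown removes it)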
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index ef579e9..b584344 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -23,7 +23,7 @@
import os
import sys
-cil_version='0.11.2'
+cil_version='0.11.3'
setup(
name="ccpi-framework",
diff --git a/Wrappers/Python/wip/multifile_nexus.py b/Wrappers/Python/wip/multifile_nexus.py
new file mode 100755
index 0000000..d1ad438
--- /dev/null
+++ b/Wrappers/Python/wip/multifile_nexus.py
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Aug 15 16:00:53 2018
+
+@author: ofn77899
+"""
+
+import os
+from ccpi.io.reader import NexusReader
+
+from sys import getsizeof
+
+import matplotlib.pyplot as plt
+
+from ccpi.framework import DataProcessor, DataContainer
+from ccpi.processors import Normalizer
+from ccpi.processors import CenterOfRotationFinder
+import numpy
+
+
+class averager(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.N = 0
+ self.avg = 0
+ self.min = 0
+ self.max = 0
+ self.var = 0
+ self.ske = 0
+ self.kur = 0
+
+ def add_reading(self, val):
+
+ if (self.N == 0):
+ self.avg = val;
+ self.min = val;
+ self.max = val;
+ elif (self.N == 1):
+ #//set min/max
+ self.max = val if val > self.max else self.max
+ self.min = val if val < self.min else self.min
+
+
+ thisavg = (self.avg + val)/2
+ #// initial value is in avg
+ self.var = (self.avg - thisavg)*(self.avg-thisavg) + (val - thisavg) * (val-thisavg)
+ self.ske = self.var * (self.avg - thisavg)
+ self.kur = self.var * self.var
+ self.avg = thisavg
+ else:
+ self.max = val if val > self.max else self.max
+ self.min = val if val < self.min else self.min
+
+ M = self.N
+
+ #// b-factor =(<v>_N + v_(N+1)) / (N+1)
+ #float b = (val -avg) / (M+1);
+ b = (val -self.avg) / (M+1)
+
+ self.kur = self.kur + (M *(b*b*b*b) * (1+M*M*M))- (4* b * self.ske) + (6 * b *b * self.var * (M-1))
+
+ self.ske = self.ske + (M * b*b*b *(M-1)*(M+1)) - (3 * b * self.var * (M-1))
+
+ #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
+ self.var = self.var * ((M-1)/M) + (b * b * (M+1))
+
+ self.avg = self.avg * (M/(M+1)) + val / (M+1)
+
+ self.N += 1
+
+ def stats(self, vector):
+ i = 0
+ while i < vector.size:
+ self.add_reading(vector[i])
+ i+=1
+
+avg = averager()
+a = numpy.linspace(0,39,40)
+avg.stats(a)
+print ("average" , avg.avg, a.mean())
+print ("variance" , avg.var, a.var())
+b = a - a.mean()
+b *= b
+b = numpy.sqrt(sum(b)/(a.size-1))
+print ("std" , numpy.sqrt(avg.var), b)
+#%%
+
+class DataStatMoments(DataProcessor):
+ '''Computes running statistical moments along one axis of a DataContainer
+
+ This processor accumulates, sample by sample along the given axis, the mean and
+ variance (and optionally skewness and kurtosis) of the input data.
+
+ Input: AcquisitionData / DataContainer
+ Parameter: axis, skewness, kurtosis, offset
+ Output: DataContainer holding the statistical moments
+ '''
+
+ def __init__(self, axis, skewness=False, kurtosis=False, offset=0):
+ kwargs = {
+ 'axis' : axis,
+ 'skewness' : skewness,
+ 'kurtosis' : kurtosis,
+ 'offset' : offset,
+ }
+ #DataProcessor.__init__(self, **kwargs)
+ super(DataStatMoments, self).__init__(**kwargs)
+
+
+ def check_input(self, dataset):
+ #self.N = dataset.get_dimension_size(self.axis)
+ return True
+
+ @staticmethod
+ def add_sample(dataset, N, axis, stats=None, offset=0):
+ #dataset = self.get_input()
+ if (N == 0):
+ # get the axis number along to calculate the stats
+
+
+ #axs = dataset.get_dimension_size(self.axis)
+ # create a placeholder for the output
+ if stats is None:
+ ax = dataset.get_dimension_axis(axis)
+ shape = [dataset.shape[i] for i in range(len(dataset.shape)) if i != ax]
+ # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg)
+ shape.insert(0, 4+2)
+ stats = numpy.zeros(shape)
+
+ stats[0] = dataset.subset(**{axis:N-offset}).array[:]
+
+ #avg = val
+ elif (N == 1):
+ # val
+ stats[5] = dataset.subset(**{axis:N-offset}).array
+ stats[4] = stats[0] + stats[5]
+ stats[4] /= 2 # thisavg
+ stats[5] -= stats[4] # (val - thisavg)
+
+ #float thisavg = (avg + val)/2;
+
+ #// initial value is in avg
+ #var = (avg - thisavg)*(avg-thisavg) + (val - thisavg) * (val-thisavg);
+ stats[1] = stats[5] * stats[5] + stats[5] * stats[5]
+ #skewness = var * (avg - thisavg);
+ stats[2] = stats[1] * stats[5]
+ #kurtosis = var * var;
+ stats[3] = stats[1] * stats[1]
+ #avg = thisavg;
+ stats[0] = stats[4]
+ else:
+
+ #float M = (float)N;
+ M = N
+ #val
+ stats[4] = dataset.subset(**{axis:N-offset}).array
+ #// b-factor =(<v>_N + v_(N+1)) / (N+1)
+ stats[5] = stats[4] - stats[0]
+ stats[5] /= (M+1) # b factor
+ #float b = (val -avg) / (M+1);
+
+ #kurtosis = kurtosis + (M *(b*b*b*b) * (1+M*M*M))- (4* b * skewness) + (6 * b *b * var * (M-1));
+ #if self.kurtosis:
+ # stats[3] += (M * stats[5] * stats[5] * stats[5] * stats[5]) - \
+ # (4 * stats[5] * stats[2]) + \
+ # (6 * stats[5] * stats[5] * stats[1] * (M-1))
+
+ #skewness = skewness + (M * b*b*b *(M-1)*(M+1)) - (3 * b * var * (M-1));
+ #if self.skewness:
+ # stats[2] = stats[2] + (M * stats[5]* stats[5] * stats[5] * (M-1)*(M-1) ) -\
+ # 3 * stats[5] * stats[1] * (M-1)
+ #//var = var * ((M-1)/M) + ((val - avg)*(val - avg)/(M+1)) ;
+ #var = var * ((M-1)/M) + (b * b * (M+1));
+ stats[1] = ((M-1)/M) * stats[1] + (stats[5] * stats[5] * (M+1))
+
+ #avg = avg * (M/(M+1)) + val / (M+1)
+ stats[0] = stats[0] * (M/(M+1)) + stats[4] / (M+1)
+
+ N += 1
+ return stats , N
+
+
+ def process(self):
+
+ data = self.get_input()
+
+ #stats, i = add_sample(0)
+ N = data.get_dimension_size(self.axis)
+ ax = data.get_dimension_axis(self.axis)
+ stats = None
+ i = 0
+ while i < N:
+ stats , i = DataStatMoments.add_sample(data, i, self.axis, stats, offset=self.offset)
+ self.offset += N
+ labels = ['StatisticalMoments']
+
+ labels += [data.dimension_labels[i] \
+ for i in range(len(data.dimension_labels)) if i != ax]
+ y = DataContainer( stats[:4] , False,
+ dimension_labels=labels)
+ return y
+
+directory = r'E:\Documents\Dataset\CCPi\Nexus_test'
+data_path="entry1/instrument/pco1_hw_hdf_nochunking/data"
+
+reader = NexusReader(os.path.join( os.path.abspath(directory) , '74331.nxs'))
+
+print ("read flat")
+read_flat = NexusReader(os.path.join( os.path.abspath(directory) , '74240.nxs'))
+read_flat.data_path = data_path
+flatsslice = read_flat.get_acquisition_data_whole()
+avg = DataStatMoments('angle')
+
+avg.set_input(flatsslice)
+flats = avg.get_output()
+
+ave = averager()
+ave.stats(flatsslice.array[:,0,0])
+
+print ("avg" , ave.avg, flats.array[0][0][0])
+print ("var" , ave.var, flats.array[1][0][0])
+
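
For the first two moments, the running scheme can be compared against numpy on the same data; a minimal sketch assuming flatsslice as loaded above:

# stack has shape (angle, vertical, horizontal); reduce over the angle axis
stack = flatsslice.as_array()
print ("numpy mean", stack.mean(axis=0)[0][0])
print ("numpy var ", stack.var(axis=0, ddof=1)[0][0])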
+print ("read dark")
+read_dark = NexusReader(os.path.join( os.path.abspath(directory) , '74243.nxs'))
+read_dark.data_path = data_path
+
+## darks are very many so we proceed in batches
+total_size = read_dark.get_projection_dimensions()[0]
+print ("total_size", total_size)
+
+batchsize = 40
+if batchsize > total_size:
+ batchlimits = [batchsize * (i+1) for i in range(int(total_size/batchsize))] + [total_size-1]
+else:
+ batchlimits = [total_size]
+#avg.N = 0
+avg.offset = 0
+N = 0
+for batch in range(len(batchlimits)):
+ print ("running batch " , batch)
+ bmax = batchlimits[batch]
+ bmin = bmax-batchsize
+
+ darksslice = read_dark.get_acquisition_data_batch(bmin,bmax)
+ if batch == 0:
+ #create stats
+ ax = darksslice.get_dimension_axis('angle')
+ shape = [darksslice.shape[i] for i in range(len(darksslice.shape)) if i != ax]
+ # output has 4 components avg, std, skewness and kurtosis + last avg+ (val-thisavg)
+ shape.insert(0, 4+2)
+ print ("create stats shape ", shape)
+ stats = numpy.zeros(shape)
+ print ("N" , N)
+ #avg.set_input(darksslice)
+ i = bmin
+ while i < bmax:
+ stats , i = DataStatMoments.add_sample(darksslice, i, 'angle', stats, bmin)
+ print ("{0}-{1}-{2}".format(bmin, i , bmax ) )
+
+darks = stats
+#%%
+
+fig = plt.subplot(2,2,1)
+fig.imshow(flats.subset(StatisticalMoments=0).array)
+fig = plt.subplot(2,2,2)
+fig.imshow(numpy.sqrt(flats.subset(StatisticalMoments=1).array))
+fig = plt.subplot(2,2,3)
+fig.imshow(darks[0])
+fig = plt.subplot(2,2,4)
+fig.imshow(numpy.sqrt(darks[1]))
+plt.show()
+
+
+#%%
+norm = Normalizer(flat_field=flats.array[0,200,:], dark_field=darks[0,200,:])
+#norm.set_flat_field(flats.array[0,200,:])
+#norm.set_dark_field(darks.array[0,200,:])
+norm.set_input(reader.get_acquisition_data_slice(200))
+
+n = Normalizer.normalize_projection(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:], 1e-5)
+#dn_n= Normalizer.estimate_normalised_error(norm.get_input().as_array(), flats.array[0,200,:], darks[0,200,:],
+# numpy.sqrt(flats.array[1,200,:]), numpy.sqrt(darks[1,200,:]))
+#%%
+
+
+#%%
+fig = plt.subplot(2,1,1)
+
+
+fig.imshow(norm.get_input().as_array())
+fig = plt.subplot(2,1,2)
+fig.imshow(n)
+
+#fig = plt.subplot(3,1,3)
+#fig.imshow(dn_n)
+
+
+plt.show()
+
+
+
+
+
+