5 files changed, 380 insertions, 176 deletions
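
The PDHG.py hunk below collapses the memopt and non-memopt branches of update() into a single in-place (out=) code path. As a reference point, here is a minimal NumPy sketch of the iteration that update() performs; plain arrays and a dense matrix K stand in for the ccpi DataContainer and Operator types, and prox_f_conj / prox_g are hypothetical placeholders for f.proximal_conjugate and g.proximal, so this illustrates the scheme rather than the ccpi implementation.

import numpy as np

# Stand-ins for f.proximal_conjugate and g.proximal, chosen so the sketch runs:
# f = 0.5*||. - b||^2, whose dual prox is (y - sigma*b)/(1 + sigma), and g = 0,
# whose prox is the identity. Both are illustrations, not ccpi functions.
def prox_f_conj(y, sigma, b):
    return (y - sigma * b) / (1.0 + sigma)

def prox_g(x, tau):
    return x

def pdhg_step(K, b, x_old, y_old, xbar, sigma, tau, theta=1.0):
    # Dual step:   y = prox_{sigma f*}(y_old + sigma * K xbar)
    y = prox_f_conj(y_old + sigma * (K @ xbar), sigma, b)
    # Primal step: x = prox_{tau g}(x_old - tau * K^T y)
    x = prox_g(x_old - tau * (K.T @ y), tau)
    # Over-relaxation: xbar = x + theta * (x - x_old)
    xbar = x + theta * (x - x_old)
    return x, y, xbar

# Tiny usage example with random data and sigma*tau*||K||^2 = 1,
# the same step-size choice as the demo script at the end of this diff.
rng = np.random.default_rng(0)
K = rng.standard_normal((5, 3))
b = rng.standard_normal(5)
sigma = 1.0
tau = 1.0 / (sigma * np.linalg.norm(K, 2) ** 2)
x, y, xbar = np.zeros(3), np.zeros(5), np.zeros(3)
for _ in range(200):
    x, y, xbar = pdhg_step(K, b, x, y, xbar, sigma, tau)
print(x)  # approaches the least-squares solution of K x = b, since g = 0
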
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index ed95c3f..7604c8a 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -151,7 +151,7 @@ class Algorithm(object):
                          self.max_iteration, self.get_last_objective()) )
             else:
                 if callback is not None:
-                    callback(self.iteration, self.get_last_objective())
+                    callback(self.iteration, self.get_last_objective(), self.x)
             i += 1
             if i == iterations:
                 break
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 7631e29..98c9797 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -31,77 +31,62 @@ class PDHG(Algorithm):
            self.g is not None:
             print ("Calling from creator")
             self.set_up(self.f,
+                        self.g,
                         self.operator,
-                        self.g,
                         self.tau,
                         self.sigma)
 
     def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
         # algorithmic parameters
-
+        self.operator = operator
+        self.f = f
+        self.g = g
+        self.tau = tau
+        self.sigma = sigma
+        self.opt = opt
         if sigma is None and tau is None:
             raise ValueError('Need sigma*tau||K||^2<1')
 
         self.x_old = self.operator.domain_geometry().allocate()
-        self.y_old = self.operator.range_geometry().allocate()
+        self.x_tmp = self.x_old.copy()
+        self.x = self.x_old.copy()
+        self.y_old = self.operator.range_geometry().allocate()
+        self.y_tmp = self.y_old.copy()
+        self.y = self.y_old.copy()
+        self.xbar = self.x_old.copy()
-        self.x = self.x_old.copy()
-        self.y = self.y_old.copy()
-        if self.memopt:
-            self.y_tmp = self.y_old.copy()
-            self.x_tmp = self.x_old.copy()
-        #y = y_tmp
-
+        # relaxation parameter
         self.theta = 1
 
     def update(self):
-        if self.memopt:
-            # Gradient descent, Dual problem solution
-            # self.y_old += self.sigma * self.operator.direct(self.xbar)
-            self.operator.direct(self.xbar, out=self.y_tmp)
-            self.y_tmp *= self.sigma
-            self.y_old += self.y_tmp
-
-            #self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
-            self.f.proximal_conjugate(self.y_old, self.sigma, out=self.y)
-
-            # Gradient ascent, Primal problem solution
-            self.operator.adjoint(self.y, out=self.x_tmp)
-            self.x_tmp *= self.tau
-            self.x_old -= self.x_tmp
-
-            self.g.proximal(self.x_old, self.tau, out=self.x)
-
-            #Update
-            self.x.subtract(self.x_old, out=self.xbar)
-            self.xbar *= self.theta
-            self.xbar += self.x
-
-            self.x_old.fill(self.x)
-            self.y_old.fill(self.y)
+
+        # Gradient descent, Dual problem solution
+        self.operator.direct(self.xbar, out=self.y_tmp)
+        self.y_tmp *= self.sigma
+        self.y_tmp += self.y_old
-        else:
-            # Gradient descent, Dual problem solution
-            self.y_old += self.sigma * self.operator.direct(self.xbar)
-            self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
+        #self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
+        self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)
+
+        # Gradient ascent, Primal problem solution
+        self.operator.adjoint(self.y, out=self.x_tmp)
+        self.x_tmp *= -1*self.tau
+        self.x_tmp += self.x_old
-            # Gradient ascent, Primal problem solution
-            self.x_old -= self.tau * self.operator.adjoint(self.y)
-            self.x = self.g.proximal(self.x_old, self.tau)
+        self.g.proximal(self.x_tmp, self.tau, out=self.x)
-            #Update
-            #xbar = x + theta * (x - x_old)
-            self.xbar.fill(self.x)
-            self.xbar -= self.x_old
-            self.xbar *= self.theta
-            self.xbar += self.x
+        #Update
+        self.x.subtract(self.x_old, out=self.xbar)
+        self.xbar *= self.theta
+        self.xbar += self.x
 
-        self.x_old = self.x
-        self.y_old = self.y
+        self.x_old.fill(self.x)
+        self.y_old.fill(self.y)
 
     def update_objective(self):
         p1 = self.f(self.operator.direct(self.x)) + self.g(self.x)
@@ -149,61 +134,43 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
 
     for i in range(niter):
-
-        if not memopt:
-
-            y_tmp = y_old + sigma * operator.direct(xbar)
-            y = f.proximal_conjugate(y_tmp, sigma)
-
-            x_tmp = x_old - tau*operator.adjoint(y)
-            x = g.proximal(x_tmp, tau)
-
-            x.subtract(x_old, out=xbar)
-            xbar *= theta
-            xbar += x
-
-            x_old.fill(x)
-            y_old.fill(y)
-
-            if i%10==0:
-
-                p1 = f(operator.direct(x)) + g(x)
-                d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
-                primal.append(p1)
-                dual.append(d1)
-                pdgap.append(p1-d1)
-                print(p1, d1, p1-d1)
-
-        else:
-
+
+        if memopt:
             operator.direct(xbar, out = y_tmp)
             y_tmp *= sigma
-            y_tmp += y_old
-            f.proximal_conjugate(y_tmp, sigma, out=y)
-
+            y_tmp += y_old
+        else:
+            y_tmp = y_old + sigma * operator.direct(xbar)
+
+        f.proximal_conjugate(y_tmp, sigma, out=y)
+
+        if memopt:
             operator.adjoint(y, out = x_tmp)
             x_tmp *= -1*tau
             x_tmp += x_old
-
-            g.proximal(x_tmp, tau, out = x)
+        else:
+            x_tmp = x_old - tau*operator.adjoint(y)
 
-            x.subtract(x_old, out=xbar)
-            xbar *= theta
-            xbar += x
-
-            x_old.fill(x)
-            y_old.fill(y)
+        g.proximal(x_tmp, tau, out=x)
+
+        x.subtract(x_old, out=xbar)
+        xbar *= theta
+        xbar += x
+
+        x_old.fill(x)
+        y_old.fill(y)
+
+        if i%10==0:
 
-            if i%10==0:
-
-                p1 = f(operator.direct(x)) + g(x)
-                d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
-                primal.append(p1)
-                dual.append(d1)
-                pdgap.append(p1-d1)
-                print(p1, d1, p1-d1)
+            p1 = f(operator.direct(x)) + g(x)
+            d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )
+            primal.append(p1)
+            dual.append(d1)
+            pdgap.append(p1-d1)
+            print(p1, d1, p1-d1)
+
     t_end = time.time()
diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
index 22d21fd..c912b12 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
@@ -62,19 +62,7 @@ class KullbackLeibler(Function):
         if out is None:
             return 1 - self.b/(x + self.bnoise)
         else:
-#<<<<<<< HEAD
-#            self.b.divide(x+self.bnoise, out=out)
-#            out.subtract(1, out=out)
-#
-#    def convex_conjugate(self, x):
-#
-#        tmp = self.b/( 1 - x )
-#        ind = tmp.as_array()>0
-#
-#        sel
-#
-#        return (self.b * ( ImageData( numpy.log(tmp) ) - 1 ) - self.bnoise * (x - 1)).sum()
-#=======
+
             x.add(self.bnoise, out=out)
             self.b.divide(out, out=out)
             out.subtract(1, out=out)
@@ -119,20 +107,20 @@ class KullbackLeibler(Function):
             return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
         else:
-            z = x + tau * self.bnoise
-            res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
-            out.fill(res)
+#            z = x + tau * self.bnoise
+#            res = 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt())
+#            out.fill(res)
 
-#        else:
-#            z_m = x + tau * self.bnoise -1
-#            self.b.multiply(4*tau, out=out)
-#            z_m.multiply(z_m, out=z_m)
-#            out += z_m
-#            out.sqrt(out=out)
-#            z = z_m + 2
-#            z_m.sqrt(out=z_m)
-#            z_m += 2
-#            out *= -1
-#            out += z_m
+            z_m = x + tau * self.bnoise -1
+            self.b.multiply(4*tau, out=out)
+            z_m.multiply(z_m, out=z_m)
+            out += z_m
+            out.sqrt(out=out)
+            z_m.sqrt(out=z_m)
+            z_m += 2
+            out *= -1
+            out += z_m
+            out *= 0.5
 
     def __rmul__(self, scalar):
@@ -165,24 +153,4 @@ if __name__ == '__main__':
     print(f(x))
 
-#    numpy.random.seed(10)
-#
-#
-#    x = numpy.random.randint(-10, 10, size = (2,3))
-#    b = numpy.random.randint(1, 10, size = (2,3))
-#
-#    ind1 = x>0
-#
-#    res = x[ind1] - b * numpy.log(x[ind1])
-#
-##    ind = x>0
-#
-##    y = x[ind]
-#
-#
-#
-#
-#
-
-
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 5490782..294e696 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -93,30 +93,19 @@ class L2NormSquared(Function):
             if self.b is None:
                 return x/(1+2*tau)
             else:
+                tmp = x.subtract(self.b)
+                tmp /= (1+2*tau)
+                tmp += self.b
+                return tmp
+#                return (x-self.b)/(1+2*tau) + self.b
-#                tmp = x.subtract(self.b)
-#                tmp -= self.b
-#                tmp /= (1+2*tau)
-#                tmp += self.b
-#                return tmp
-                return (x-self.b)/(1+2*tau) + self.b
-
-#            if self.b is not None:
-#                out=x
-#            if self.b is not None:
-#                out -= self.b
-#            out /= (1+2*tau)
-#            if self.b is not None:
-#                out += self.b
-#            return out
         else:
-            out.fill(x)
             if self.b is not None:
-                out -= self.b
-            out /= (1+2*tau)
-            if self.b is not None:
-                out += self.b
-
+                x.subtract(self.b, out=out)
+                out /= (1+2*tau)
+                out += self.b
+            else:
+                x.divide((1+2*tau), out=out)
 
     def proximal_conjugate(self, x, tau, out=None):
@@ -288,17 +277,3 @@ if __name__ == '__main__':
 
     numpy.testing.assert_array_almost_equal(res1.as_array(), \
                                             res2.as_array(), decimal=4)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Wrappers/Python/demos/pdhg_TV_tomography2Dccpi.py b/Wrappers/Python/demos/pdhg_TV_tomography2Dccpi.py
new file mode 100644
index 0000000..70ca57a
--- /dev/null
+++ b/Wrappers/Python/demos/pdhg_TV_tomography2Dccpi.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 22 14:53:03 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer, \
+    AcquisitionGeometry, AcquisitionData
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG, PDHG_old
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+    MixedL21Norm, BlockFunction, ScaledFunction
+
+#from ccpi.astra.ops import AstraProjectorSimple
+from ccpi.plugins.ops import CCPiProjectorSimple
+#from skimage.util import random_noise
+from timeit import default_timer as timer
+
+#%%
+
+#%%###############################################################################
+# Create phantom for TV tomography
+
+#import os
+#import tomophantom
+#from tomophantom import TomoP2D
+#from tomophantom.supp.qualitymetrics import QualityTools
+
+#model = 1 # select a model number from the library
+#N = 150 # set dimension of the phantom
+## one can specify an exact path to the parameters file
+## path_library2D = '../../../PhantomLibrary/models/Phantom2DLibrary.dat'
+#path = os.path.dirname(tomophantom.__file__)
+#path_library2D = os.path.join(path, "Phantom2DLibrary.dat")
+##This will generate a N_size x N_size phantom (2D)
+#phantom_2D = TomoP2D.Model(model, N, path_library2D)
+#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+#data = ImageData(phantom_2D, geometry=ig)
+
+N = 75
+#x = np.zeros((N,N))
+
+vert = 4
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N, voxel_num_z=vert)
+data = ig.allocate()
+Phantom = data
+# Populate image data by looping over and filling slices
+i = 0
+while i < vert:
+    if vert > 1:
+        x = Phantom.subset(vertical=i).array
+    else:
+        x = Phantom.array
+    x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+    x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 0.98
+    if vert > 1 :
+        Phantom.fill(x, vertical=i)
+    i += 1
+
+
+#%%
+#detectors = N
+#angles = np.linspace(0,np.pi,100)
+#angles_num = 100
+angles_num = N
+det_w = 1.0
+det_num = N
+
+angles = np.linspace(0,np.pi,angles_num,endpoint=False,dtype=np.float32)*\
+             180/np.pi
+angles = np.linspace(-90.,90.,N, dtype=np.float32)
+# Inputs: Geometry, 2D or 3D, angles, horz detector pixel count,
+#         horz detector pixel size, vert detector pixel count,
+#         vert detector pixel size.
+ag = AcquisitionGeometry('parallel',
+                         '3D',
+                         angles,
+                         N,
+                         det_w,
+                         vert,
+                         det_w)
+
+from ccpi.reconstruction.parallelbeam import alg as pbalg
+#from ccpi.plugins.processors import setupCCPiGeometries
+def setupCCPiGeometries(ig, ag, counter):
+
+    #vg = ImageGeometry(voxel_num_x=voxel_num_x,voxel_num_y=voxel_num_y, voxel_num_z=voxel_num_z)
+    #Phantom_ccpi = ImageData(geometry=vg,
+    #                    dimension_labels=['horizontal_x','horizontal_y','vertical'])
+    ##.subset(['horizontal_x','horizontal_y','vertical'])
+    ## ask the ccpi code what dimensions it would like
+    Phantom_ccpi = ig.allocate(dimension_labels=[ImageGeometry.HORIZONTAL_X,
+                                                 ImageGeometry.HORIZONTAL_Y,
+                                                 ImageGeometry.VERTICAL])
+
+    voxel_per_pixel = 1
+    angles = ag.angles
+    geoms = pbalg.pb_setup_geometry_from_image(Phantom_ccpi.as_array(),
+                                               angles,
+                                               voxel_per_pixel )
+
+    pg = AcquisitionGeometry('parallel',
+                             '3D',
+                             angles,
+                             geoms['n_h'], 1.0,
+                             geoms['n_v'], 1.0 #2D in 3D is a slice 1 pixel thick
+                             )
+
+    center_of_rotation = Phantom_ccpi.get_dimension_size('horizontal_x') / 2
+    #ad = AcquisitionData(geometry=pg,dimension_labels=['angle','vertical','horizontal'])
+    ad = pg.allocate(dimension_labels=[AcquisitionGeometry.ANGLE,
+                                       AcquisitionGeometry.VERTICAL,
+                                       AcquisitionGeometry.HORIZONTAL])
+    geoms_i = pbalg.pb_setup_geometry_from_acquisition(ad.as_array(),
+                                                       angles,
+                                                       center_of_rotation,
+                                                       voxel_per_pixel )
+
+    counter+=1
+
+    if counter < 4:
+        print (geoms, geoms_i)
+        if (not ( geoms_i == geoms )):
+            print ("not equal and {} {} {}".format(counter, geoms['output_volume_z'], geoms_i['output_volume_z']))
+            X = max(geoms['output_volume_x'], geoms_i['output_volume_x'])
+            Y = max(geoms['output_volume_y'], geoms_i['output_volume_y'])
+            Z = max(geoms['output_volume_z'], geoms_i['output_volume_z'])
+            return setupCCPiGeometries(X,Y,Z,angles, counter)
+        else:
+            print ("happy now {} {} {}".format(counter, geoms['output_volume_z'], geoms_i['output_volume_z']))
+
+            return geoms
+    else:
+        return geoms_i
+
+
+
+#voxel_num_x, voxel_num_y, voxel_num_z, angles, counter
+print ("###############################################")
+print (ig)
+print (ag)
+g = setupCCPiGeometries(ig, ag, 0)
+print (g)
+print ("###############################################")
+print ("###############################################")
+#ag = AcquisitionGeometry('parallel','2D',angles, detectors)
+#Aop = AstraProjectorSimple(ig, ag, 'cpu')
+Aop = CCPiProjectorSimple(ig, ag, 'cpu')
+sin = Aop.direct(data)
+
+plt.imshow(sin.subset(vertical=0).as_array())
+plt.title('Sinogram')
+plt.colorbar()
+plt.show()
+
+
+#%%
+# Add Gaussian noise to the sinogram data
+np.random.seed(10)
+n1 = np.random.random(sin.shape)
+
+noisy_data = sin + ImageData(5*n1)
+
+plt.imshow(noisy_data.subset(vertical=0).as_array())
+plt.title('Noisy Sinogram')
+plt.colorbar()
+plt.show()
+
+
+#%% Works only with Composite Operator Structure of PDHG
+
+#ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+
+# Create operators
+op1 = Gradient(ig)
+op2 = Aop
+
+# Form Composite Operator
+operator = BlockOperator(op1, op2, shape=(2,1) )
+
+alpha = 50
+f = BlockFunction( alpha * MixedL21Norm(), \
+                   0.5 * L2NormSquared(b = noisy_data) )
+g = ZeroFunction()
+
+normK = Aop.norm()
+
+# Compute operator Norm
+normK = operator.norm()
+
+## Primal & dual stepsizes
+diag_precon = False
+
+if diag_precon:
+
+    def tau_sigma_precond(operator):
+
+        tau = 1/operator.sum_abs_row()
+        sigma = 1/ operator.sum_abs_col()
+
+        return tau, sigma
+
+    tau, sigma = tau_sigma_precond(operator)
+
+else:
+    sigma = 1
+    tau = 1/(sigma*normK**2)
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
+niter = 3
+opt = {'niter':niter}
+opt1 = {'niter':niter, 'memopt': True}
+
+
+
+pdhg1 = PDHG(f=f,g=g, operator=operator, tau=tau, sigma=sigma)
+pdhg1.max_iteration = 2000
+pdhg1.update_objective_interval = 10
+
+pdhg2 = PDHG(f=f,g=g, operator=operator, tau=tau, sigma=sigma, memopt=True)
+pdhg2.max_iteration = 2000
+pdhg2.update_objective_interval = 100
+
+t1_old = timer()
+resold, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+t2_old = timer()
+
+pdhg1.run(niter)
+print (sum(pdhg1.timing))
+res = pdhg1.get_output().subset(vertical=0)
+
+t3 = timer()
+#res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+pdhg2.run(niter)
+print (sum(pdhg2.timing))
+res1 = pdhg2.get_output().subset(vertical=0)
+t4 = timer()
+#
+print ("No memopt in {}s, memopt in {}/{}s old {}s".format(sum(pdhg1.timing),
+                                                           sum(pdhg2.timing), t4-t3, t2_old-t1_old))
+
+t1_old = timer()
+resold1, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+t2_old = timer()
+
+#%%
+plt.figure()
+
+plt.subplot(2,3,1)
+plt.imshow(res.as_array())
+plt.title('no memopt')
+plt.colorbar()
+
+plt.subplot(2,3,2)
+plt.imshow(res1.as_array())
+plt.title('memopt')
+plt.colorbar()
+
+plt.subplot(2,3,3)
+plt.imshow((res1 - resold1.subset(vertical=0)).abs().as_array())
+plt.title('diff')
+plt.colorbar()
+
+plt.subplot(2,3,4)
+plt.imshow(resold.subset(vertical=0).as_array())
+plt.title('old nomemopt')
+plt.colorbar()
+
+plt.subplot(2,3,5)
+plt.imshow(resold1.subset(vertical=0).as_array())
+plt.title('old memopt')
+plt.colorbar()
+
+plt.subplot(2,3,6)
+plt.imshow((resold1 - resold).subset(vertical=0).as_array())
+plt.title('diff old')
+plt.colorbar()
+
+#plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
+#plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
+#plt.legend()
+plt.show()
+#
+print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(sum(pdhg1.timing), t4 -t3))
+diff = (res1 - res).abs().as_array().max()
+#
+print(" Max of abs difference is {}".format(diff))
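
The Algorithm.py hunk at the top of this diff passes the current solution as a third argument to the run() callback. A minimal usage sketch, reusing pdhg1 and niter from the demo script above; the callback(iteration, objective, x) signature comes from the hunk, while the run(niter, callback=...) keyword is an assumption about the ccpi Algorithm interface.

# Hypothetical callback exploiting the new third argument (the current iterate).
# Per the hunk, the callback fires on the branch where the algorithm does not
# print its own progress line.
saved_iterates = []

def monitor(iteration, objective, solution):
    # 'solution' is the algorithm's current self.x; keep a copy every 10 iterations.
    if iteration % 10 == 0:
        saved_iterates.append(solution.copy())
        print(iteration, objective)

pdhg1.run(niter, callback=monitor)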