From 584df30253cc36920f17f6b0bc438c978c3d8462 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 15:24:35 +0100 Subject: working no optimisation --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 143 +++++++++++++++------ 1 file changed, 102 insertions(+), 41 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 5bf96cc..0b9921c 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -6,7 +6,7 @@ Created on Mon Feb 4 16:18:06 2019 @author: evangelos """ from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData +from ccpi.framework import ImageData, DataContainer import numpy as np import time from ccpi.optimisation.operators import BlockOperator @@ -23,6 +23,7 @@ class PDHG(Algorithm): self.g = kwargs.get('g', None) self.tau = kwargs.get('tau', None) self.sigma = kwargs.get('sigma', None) + self.memopt = kwargs.get('memopt', False) if self.f is not None and self.operator is not None and \ self.g is not None: @@ -76,11 +77,32 @@ class PDHG(Algorithm): #self.y = self.y_old def update_objective(self): - self.loss.append([self.f(self.operator.direct(self.x)) + self.g(self.x), - -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(- 1 * self.operator.adjoint(self.y))) - ]) - - + p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) + d1 = -(self.f.convex_conjugate(self.y) + self.g(-1*self.operator.adjoint(self.y))) + + self.loss.append([p1,d1,p1-d1]) + + +def assertBlockDataContainerEqual(container1, container2): + print ("assert Block Data Container Equal") + assert issubclass(container1.__class__, container2.__class__) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + +def assertNumpyArrayEqual(first, second): + res = True + try: + np.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + assert res def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): @@ -98,16 +120,19 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False - + if memopt: + print ("memopt") + else: + print("no memopt") x_old = operator.domain_geometry().allocate() y_old = operator.range_geometry().allocate() - xbar = x_old - x_tmp = x_old - x = x_old + xbar = x_old.copy() + x_tmp = x_old.copy() + x = x_old.copy() - y_tmp = y_old - y = y_tmp + y_tmp = y_old.copy() + y = y_tmp.copy() # relaxation parameter theta = 1 @@ -120,36 +145,72 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): for i in range(niter): - -# # Gradient descent, Dual problem solution -# y_tmp = y_old + sigma * operator.direct(xbar) - y_tmp = operator.direct(xbar) - y_tmp *= sigma - y_tmp +=y_old - - y = f.proximal_conjugate(y_tmp, sigma) + if memopt: + # # Gradient descent, Dual problem solution + # y_tmp = y_old + sigma * operator.direct(xbar) + #y_tmp = operator.direct(xbar) + operator.direct(xbar, out=y_tmp) + y_tmp *= sigma + y_tmp +=y_old + + y = f.proximal_conjugate(y_tmp, sigma) + 
#f.proximal_conjugate(y_tmp, sigma, out=y) + + # Gradient ascent, Primal problem solution + # x_tmp = x_old - tau * operator.adjoint(y) + + #x_tmp = operator.adjoint(y) + operator.adjoint(y, out=x_tmp) + x_tmp *=-tau + x_tmp +=x_old + + #x = g.proximal(x_tmp, tau) + g.proximal(x_tmp, tau, out=x) + + #Update + # xbar = x + theta * (x - x_old) + x.subtract(x_old, out=xbar) + xbar *= theta + xbar += x + + x_old.fill(x) + y_old.fill(y) + else: + + # # Gradient descent, Dual problem solution + y_tmp1 = y_old + sigma * operator.direct(xbar) + # y_tmp = operator.direct(xbar) + operator.direct(xbar, out=y_tmp) + y_tmp *= sigma + y_tmp +=y_old + #print ("y_tmp1 equale y_tmp?") + #assertBlockDataContainerEqual(y_tmp1, y_tmp) + + y = f.proximal_conjugate(y_tmp, sigma) + #f.proximal_conjugate(y_tmp, sigma, out=y) + #print ("y1 equale y?") + #assertBlockDataContainerEqual(y1, y) + # Gradient ascent, Primal problem solution + x_tmp1 = x_old - tau * operator.adjoint(y) + + # x_tmp = operator.adjoint(y) + operator.adjoint(y, out=x_tmp) + x_tmp *=-tau + x_tmp +=x_old + + assertNumpyArrayEqual(x_tmp.as_array(),x_tmp1.as_array()) - # Gradient ascent, Primal problem solution -# x_tmp = x_old - tau * operator.adjoint(y) - - x_tmp = operator.adjoint(y) - x_tmp *=-tau - x_tmp +=x_old - - x = g.proximal(x_tmp, tau) - - #Update -# xbar = x + theta * (x - x_old) - xbar = x - x_old - xbar *= theta - xbar += x - - x_old = x - y_old = y - -# operator.direct(xbar, out = y_tmp) -# y_tmp *= sigma -# y_tmp +=y_old + x = g.proximal(x_tmp, tau) + # g.proximal(x_tmp, tau, out=x) + + #Update + xbar = x + theta * (x - x_old) + # xbar = x - x_old + # xbar *= theta + # xbar += x + + x_old = x + y_old = y -- cgit v1.2.3 From d1cd883ce417ae08cfc7f853377f3e17fa55be01 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:36:25 +0100 Subject: set output image to 0 --- .../Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index 3d2a96b..db9f09d 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -54,7 +54,9 @@ class FiniteDiff(LinearOperator): out = np.zeros_like(x_asarr) fd_arr = out else: - fd_arr = out.as_array() + fd_arr = out.as_array() + # set the array to 0 + fd_arr[:] = 0 # if out is None: # out = self.gm_domain.allocate().as_array() @@ -70,7 +72,7 @@ class FiniteDiff(LinearOperator): np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) if self.bnd_cond == 'Neumann': - pass + pass elif self.bnd_cond == 'Periodic': np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) else: -- cgit v1.2.3 From c20f443cb80221260e284668fad2053798f9000a Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:37:20 +0100 Subject: fix out --- .../ccpi/optimisation/functions/BlockFunction.py | 26 +++++++++++++++------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py index 81c16cd..bbf1d29 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py @@ -52,15 +52,25 @@ class 
BlockFunction(Function): def proximal_conjugate(self, x, tau, out = None): '''proximal_conjugate does not take into account the BlockOperator''' - out = [None]*self.length - if isinstance(tau, Number): - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + if out is not None: + if isinstance(tau, Number): + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) + else: + for i in range(self.length): + self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) + else: - for i in range(self.length): - out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) - - return BlockDataContainer(*out) + + out = [None]*self.length + if isinstance(tau, Number): + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) + else: + for i in range(self.length): + out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) + + return BlockDataContainer(*out) def proximal(self, x, tau, out = None): '''proximal does not take into account the BlockOperator''' -- cgit v1.2.3 From 430618bd44aec77468900222b996e1d3c32dae03 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:38:19 +0100 Subject: wrong indentation --- Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py index f96c7a1..3c06641 100644 --- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py +++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py @@ -69,10 +69,10 @@ class L2NormSquared(Function): out *= 2 else: y = x - if self.b is not None: -# x.subtract(self.b, out=x) - y = x - self.b - return 2*y + if self.b is not None: + # x.subtract(self.b, out=x) + y = x - self.b + return 2*y def convex_conjugate(self, x, out=None): -- cgit v1.2.3 From da02e629dcd85b2dc1e06a4a8d8bff973fc70a88 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Wed, 10 Apr 2019 17:39:58 +0100 Subject: nothing really --- .../ccpi/optimisation/functions/MixedL21Norm.py | 28 +++++++++++++++------- .../ccpi/optimisation/functions/ScaledFunction.py | 4 +++- 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py index 4266e51..ed1d5e5 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py +++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py @@ -87,17 +87,29 @@ class MixedL21Norm(Function): res = BlockDataContainer(*frac) return res - -# tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha -# res = x.divide(ImageData(tmp2).maximum(1.0)) else: +# pass + - tmp = [ el*el for el in x] - res = (sum(tmp).sqrt()).maximum(1.0) - frac = [x[i]/res for i in range(x.shape[0])] - res = BlockDataContainer(*frac) +# # tmp2 = np.sqrt(x.as_array()[0]**2 + x.as_array()[1]**2 + 2*x.as_array()[2]**2)/self.alpha +# # res = x.divide(ImageData(tmp2).maximum(1.0)) +# if out is None: + + tmp = [ el*el for el in x] + res = (sum(tmp).sqrt()).maximum(1.0) + frac = [x[i]/res for i in range(x.shape[0])] + res = BlockDataContainer(*frac) - return res + return res + # else: + # tmp = [ el*el for el in x] + # res = 
(sum(tmp).sqrt()).maximum(1.0) + # #frac = [x[i]/res for i in range(x.shape[0])] + # for i in range(x.shape[0]): + # a = out.get_item(i) + # b = x.get_item(i) + # b /= res + # a.fill( b ) def __rmul__(self, scalar): return ScaledFunction(self, scalar) diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py index 046a4a6..9e2ba0c 100755 --- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py +++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py @@ -61,7 +61,9 @@ class ScaledFunction(object): if out is None: return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar) else: - out.fill(self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + out.fill(self.scalar*self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)) + #self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out) + #out *= self.scalar def grad(self, x): '''Alias of gradient(x,None)''' -- cgit v1.2.3 From 5c74019510f95599b87ba869a7b8efc71edcde23 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:01:14 +0100 Subject: fix fill method --- Wrappers/Python/ccpi/framework/BlockDataContainer.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py index 9664037..13663c2 100755 --- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py +++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py @@ -186,9 +186,14 @@ class BlockDataContainer(object): return self.clone() def clone(self): return type(self)(*[el.copy() for el in self.containers], shape=self.shape) - def fill(self, x): - for el,ot in zip(self.containers, x): - el.fill(ot) + def fill(self, other): + if isinstance (other, BlockDataContainer): + if not self.is_compatible(other): + raise ValueError('Incompatible containers') + for el,ot in zip(self.containers, other.containers): + el.fill(ot) + else: + return ValueError('Cannot fill with object provided {}'.format(type(other))) def __add__(self, other): return self.add( other ) -- cgit v1.2.3 From 3ff8a543fb4ef59179ce3490bc28b8f61bf979ac Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:02:02 +0100 Subject: fix PDHG optimised --- .../Python/ccpi/optimisation/algorithms/PDHG.py | 74 +++++++++++++++------- 1 file changed, 52 insertions(+), 22 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py index 0b9921c..086e322 100644 --- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py +++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py @@ -45,36 +45,66 @@ class PDHG(Algorithm): self.y_old = self.operator.range_geometry().allocate() self.xbar = self.x_old.copy() - #x_tmp = x_old + self.x = self.x_old.copy() self.y = self.y_old.copy() - #y_tmp = y_old + if self.memopt: + self.y_tmp = self.y_old.copy() + self.x_tmp = self.x_old.copy() #y = y_tmp # relaxation parameter self.theta = 1 def update(self): - # Gradient descent, Dual problem solution - self.y_old += self.sigma * self.operator.direct(self.xbar) - self.y = self.f.proximal_conjugate(self.y_old, self.sigma) - - # Gradient ascent, Primal problem solution - self.x_old -= self.tau * self.operator.adjoint(self.y) - self.x = self.g.proximal(self.x_old, self.tau) - - #Update - #xbar = x + theta * (x - 
x_old) - self.xbar.fill(self.x) - self.xbar -= self.x_old - self.xbar *= self.theta - self.xbar += self.x - -# self.x_old.fill(self.x) -# self.y_old.fill(self.y) - self.y_old = self.y.copy() - self.x_old = self.x.copy() - #self.y = self.y_old + if self.memopt: + # Gradient descent, Dual problem solution + # self.y_old += self.sigma * self.operator.direct(self.xbar) + self.operator.direct(self.xbar, out=self.y_tmp) + self.y_tmp *= self.sigma + self.y_old += self.y_tmp + + #self.y = self.f.proximal_conjugate(self.y_old, self.sigma) + self.f.proximal_conjugate(self.y_old, self.sigma, out=self.y) + + # Gradient ascent, Primal problem solution + self.operator.adjoint(self.y, out=self.x_tmp) + self.x_tmp *= self.tau + self.x_old -= self.x_tmp + + self.g.proximal(self.x_old, self.tau, out=self.x) + + #Update + self.x.subtract(self.x_old, out=self.xbar) + #self.xbar -= self.x_old + self.xbar *= self.theta + self.xbar += self.x + + self.x_old.fill(self.x) + self.y_old.fill(self.y) + #self.y_old = self.y.copy() + #self.x_old = self.x.copy() + else: + # Gradient descent, Dual problem solution + self.y_old += self.sigma * self.operator.direct(self.xbar) + self.y = self.f.proximal_conjugate(self.y_old, self.sigma) + + # Gradient ascent, Primal problem solution + self.x_old -= self.tau * self.operator.adjoint(self.y) + self.x = self.g.proximal(self.x_old, self.tau) + + #Update + #xbar = x + theta * (x - x_old) + self.xbar.fill(self.x) + self.xbar -= self.x_old + self.xbar *= self.theta + self.xbar += self.x + + self.x_old.fill(self.x) + self.y_old.fill(self.y) + #self.y_old = self.y.copy() + #self.x_old = self.x.copy() + #self.y = self.y_old def update_objective(self): p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) -- cgit v1.2.3 From 2415b5a334d3bdf87fec47ca3ec290a6602ee13c Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:03:45 +0100 Subject: set to zero the output variable on input if memopt --- .../Python/ccpi/optimisation/operators/GradientOperator.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py index 9c639df..d655653 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py @@ -65,19 +65,19 @@ class Gradient(LinearOperator): def adjoint(self, x, out=None): if out is not None: - tmp = self.gm_domain.allocate() for i in range(x.shape[0]): self.FD.direction=self.ind[i] self.FD.adjoint(x.get_item(i), out = tmp) -# FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp) - out+=tmp + if i == 0: + out.fill(tmp) + else: + out += tmp else: tmp = self.gm_domain.allocate() for i in range(x.shape[0]): self.FD.direction=self.ind[i] - tmp+=self.FD.adjoint(x.get_item(i)) -# tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i)) + tmp += self.FD.adjoint(x.get_item(i)) return tmp -- cgit v1.2.3 From 362e786b86a1ae8c7b1e88b45fc553e8f57b7dfa Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:04:48 +0100 Subject: output variable is required to contain zeros on start --- .../Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'Wrappers') diff --git 
a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py index db9f09d..954f022 100644 --- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py @@ -186,7 +186,9 @@ class FiniteDiff(LinearOperator): out = np.zeros_like(x_asarr) fd_arr = out else: - fd_arr = out.as_array() + #out *= 0 + fd_arr = out.as_array() + fd_arr[:] = 0 # if out is None: # out = self.gm_domain.allocate().as_array() -- cgit v1.2.3 From a7bb88da8e8d4e94a3dbeb04f95928cb7d1fbd48 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Thu, 11 Apr 2019 12:07:36 +0100 Subject: updated tests --- Wrappers/Python/test/test_BlockDataContainer.py | 51 ++++++++++- Wrappers/Python/test/test_Operator.py | 108 ++++++++++++++++++++---- Wrappers/Python/test/test_functions.py | 108 +++++++++++++++++++++++- Wrappers/Python/wip/pdhg_TV_denoising.py | 88 +++++++++++++------ 4 files changed, 304 insertions(+), 51 deletions(-) (limited to 'Wrappers') diff --git a/Wrappers/Python/test/test_BlockDataContainer.py b/Wrappers/Python/test/test_BlockDataContainer.py index 2ee0e94..2fca23c 100755 --- a/Wrappers/Python/test/test_BlockDataContainer.py +++ b/Wrappers/Python/test/test_BlockDataContainer.py @@ -14,7 +14,7 @@ from ccpi.optimisation.funcs import Norm2sq, Norm1 from ccpi.framework import ImageGeometry, AcquisitionGeometry from ccpi.framework import ImageData, AcquisitionData #from ccpi.optimisation.algorithms import GradientDescent -from ccpi.framework import BlockDataContainer +from ccpi.framework import BlockDataContainer, DataContainer #from ccpi.optimisation.Algorithms import CGLS import functools @@ -402,3 +402,52 @@ class TestBlockDataContainer(unittest.TestCase): c5 = d.get_item(0).power(2).sum() + def test_BlockDataContainer_fill(self): + print ("test block data container") + ig0 = ImageGeometry(2,3,4) + ig1 = ImageGeometry(2,3,5) + + data0 = ImageData(geometry=ig0) + data1 = ImageData(geometry=ig1) + 1 + + data2 = ImageData(geometry=ig0) + 2 + data3 = ImageData(geometry=ig1) + 3 + + cp0 = BlockDataContainer(data0,data1) + #cp1 = BlockDataContainer(data2,data3) + + cp2 = BlockDataContainer(data0+1, data1+1) + + data0.fill(data2) + self.assertNumpyArrayEqual(data0.as_array(), data2.as_array()) + data0 = ImageData(geometry=ig0) + + for el,ot in zip(cp0, cp2): + print (el.shape, ot.shape) + cp0.fill(cp2) + self.assertBlockDataContainerEqual(cp0, cp2) + + + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + diff --git a/Wrappers/Python/test/test_Operator.py b/Wrappers/Python/test/test_Operator.py index 6656d34..293fb43 100644 --- a/Wrappers/Python/test/test_Operator.py +++ b/Wrappers/Python/test/test_Operator.py @@ -2,7 +2,8 @@ import unittest #from ccpi.optimisation.operators 
import Operator from ccpi.optimisation.ops import TomoIdentity from ccpi.framework import ImageGeometry, ImageData, BlockDataContainer, DataContainer -from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator +from ccpi.optimisation.operators import BlockOperator, BlockScaledOperator,\ + FiniteDiff import numpy from timeit import default_timer as timer from ccpi.framework import ImageGeometry @@ -11,7 +12,43 @@ from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff def dt(steps): return steps[-1] - steps[-2] -class TestOperator(unittest.TestCase): +class CCPiTestClass(unittest.TestCase): + def assertBlockDataContainerEqual(self, container1, container2): + print ("assert Block Data Container Equal") + self.assertTrue(issubclass(container1.__class__, container2.__class__)) + for col in range(container1.shape[0]): + if issubclass(container1.get_item(col).__class__, DataContainer): + print ("Checking col ", col) + self.assertNumpyArrayEqual( + container1.get_item(col).as_array(), + container2.get_item(col).as_array() + ) + else: + self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col)) + + def assertNumpyArrayEqual(self, first, second): + res = True + try: + numpy.testing.assert_array_equal(first, second) + except AssertionError as err: + res = False + print(err) + self.assertTrue(res) + + def assertNumpyArrayAlmostEqual(self, first, second, decimal=6): + res = True + try: + numpy.testing.assert_array_almost_equal(first, second, decimal) + except AssertionError as err: + res = False + print(err) + print("expected " , second) + print("actual " , first) + + self.assertTrue(res) + + +class TestOperator(CCPiTestClass): def test_ScaledOperator(self): ig = ImageGeometry(10,20,30) img = ig.allocate() @@ -29,6 +66,40 @@ class TestOperator(unittest.TestCase): y = Id.direct(img) numpy.testing.assert_array_equal(y.as_array(), img.as_array()) + def test_FiniteDifference(self): + ## + N, M = 2, 3 + + ig = ImageGeometry(N, M) + Id = Identity(ig) + + FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') + u = FD.domain_geometry().allocate('random_int') + + + res = FD.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + FD.adjoint(u, out=res) + w = FD.adjoint(u) + + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + + res = Id.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + Id.adjoint(u, out=res) + w = Id.adjoint(u) + + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + self.assertNumpyArrayEqual(u.as_array(), w.as_array()) + + G = Gradient(ig) + + u = G.range_geometry().allocate(ImageGeometry.RANDOM_INT) + res = G.domain_geometry().allocate(ImageGeometry.RANDOM_INT) + G.adjoint(u, out=res) + w = G.adjoint(u) + self.assertNumpyArrayEqual(res.as_array(), w.as_array()) + + + class TestBlockOperator(unittest.TestCase): @@ -90,22 +161,23 @@ class TestBlockOperator(unittest.TestCase): print (z1.shape) print(z1[0][0].as_array()) print(res[0][0].as_array()) + self.assertBlockDataContainerEqual(z1, res) + # for col in range(z1.shape[0]): + # a = z1.get_item(col) + # b = res.get_item(col) + # if isinstance(a, BlockDataContainer): + # for col2 in range(a.shape[0]): + # self.assertNumpyArrayEqual( + # a.get_item(col2).as_array(), + # b.get_item(col2).as_array() + # ) + # else: + # self.assertNumpyArrayEqual( + # a.as_array(), + # b.as_array() + # ) + z1 = B.range_geometry().allocate(ImageGeometry.RANDOM_INT) - for col in range(z1.shape[0]): - a = z1.get_item(col) - b = res.get_item(col) - if isinstance(a, 
BlockDataContainer): - for col2 in range(a.shape[0]): - self.assertNumpyArrayEqual( - a.get_item(col2).as_array(), - b.get_item(col2).as_array() - ) - else: - self.assertNumpyArrayEqual( - a.as_array(), - b.as_array() - ) - z1 = B.direct(u) res1 = B.adjoint(z1) res2 = B.domain_geometry().allocate() B.adjoint(z1, out=res2) @@ -264,7 +336,7 @@ class TestBlockOperator(unittest.TestCase): u = ig.allocate('random_int') steps = [timer()] i = 0 - n = 25. + n = 2. t1 = t2 = 0 res = B.range_geometry().allocate() diff --git a/Wrappers/Python/test/test_functions.py b/Wrappers/Python/test/test_functions.py index 19cb65f..1891afd 100644 --- a/Wrappers/Python/test/test_functions.py +++ b/Wrappers/Python/test/test_functions.py @@ -65,6 +65,9 @@ class TestFunction(unittest.TestCase): a3 = 0.5 * d.squared_norm() + d.dot(noisy_data) self.assertEqual(a3, g.convex_conjugate(d)) #print( a3, g.convex_conjugate(d)) + + #test proximal conjugate + def test_L2NormSquared(self): # TESTS for L2 and scalar * L2 @@ -94,7 +97,7 @@ class TestFunction(unittest.TestCase): c2 = 1/4. * u.squared_norm() numpy.testing.assert_equal(c1, c2) - #check convex conjuagate with data + #check convex conjugate with data d1 = f1.convex_conjugate(u) d2 = (1./4.) * u.squared_norm() + (u*b).sum() numpy.testing.assert_equal(d1, d2) @@ -121,10 +124,9 @@ class TestFunction(unittest.TestCase): l1 = f1.proximal_conjugate(u, tau) l2 = (u - tau * b)/(1 + tau/2 ) numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) - - + # check scaled function properties - + # scalar scalar = 100 f_scaled_no_data = scalar * L2NormSquared() @@ -161,7 +163,105 @@ class TestFunction(unittest.TestCase): numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + def test_L2NormSquaredOut(self): + # TESTS for L2 and scalar * L2 + + M, N, K = 2,3,5 + ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) + u = ig.allocate(ImageGeometry.RANDOM_INT) + b = ig.allocate(ImageGeometry.RANDOM_INT) + + # check grad/call no data + f = L2NormSquared() + a1 = f.gradient(u) + a2 = a1 * 0. + f.gradient(u, out=a2) + numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) + #numpy.testing.assert_equal(f(u), u.squared_norm()) + + # check grad/call with data + f1 = L2NormSquared(b=b) + b1 = f1.gradient(u) + b2 = b1 * 0. + f1.gradient(u, out=b2) + + numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) + #numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) + + # check proximal no data + tau = 5 + e1 = f.proximal(u, tau) + e2 = e1 * 0. + f.proximal(u, tau, out=e2) + numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) + + # check proximal with data + tau = 5 + h1 = f1.proximal(u, tau) + h2 = h1 * 0. + f1.proximal(u, tau, out=h2) + numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4) + + # check proximal conjugate no data + tau = 0.2 + k1 = f.proximal_conjugate(u, tau) + k2 = k1 * 0. + f.proximal_conjugate(u, tau, out=k2) + + numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4) + + # check proximal conjugate with data + l1 = f1.proximal_conjugate(u, tau) + l2 = l1 * 0. 
+ f1.proximal_conjugate(u, tau, out=l2) + numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4) + + # check scaled function properties + + # scalar + scalar = 100 + f_scaled_no_data = scalar * L2NormSquared() + f_scaled_data = scalar * L2NormSquared(b=b) + + # grad + w = f_scaled_no_data.gradient(u) + ww = w * 0 + f_scaled_no_data.gradient(u, out=ww) + + numpy.testing.assert_array_almost_equal(w.as_array(), + ww.as_array(), decimal=4) + + # numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) + + # # conj + # numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ + # f.convex_conjugate(u/scalar) * scalar, decimal=4) + + # numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ + # scalar * f1.convex_conjugate(u/scalar), decimal=4) + + # # proximal + w = f_scaled_no_data.proximal(u, tau) + ww = w * 0 + f_scaled_no_data.proximal(u, tau, out=ww) + numpy.testing.assert_array_almost_equal(w.as_array(), \ + ww.as_array()) + + + # numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ + # f1.proximal(u, tau*scalar).as_array()) + + + # proximal conjugate + w = f_scaled_no_data.proximal_conjugate(u, tau) + ww = w * 0 + f_scaled_no_data.proximal_conjugate(u, tau, out=ww) + numpy.testing.assert_array_almost_equal(w.as_array(), \ + ww.as_array(), decimal=4) + # numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ + # ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4) + def test_Norm2sq_as_FunctionOperatorComposition(self): M, N, K = 2,3,5 ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py index d871ba0..f569fa7 100755 --- a/Wrappers/Python/wip/pdhg_TV_denoising.py +++ b/Wrappers/Python/wip/pdhg_TV_denoising.py @@ -19,12 +19,15 @@ from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \ from skimage.util import random_noise +from timeit import default_timer as timer +def dt(steps): + return steps[-1] - steps[-2] # ############################################################################ # Create phantom for TV denoising -N = 200 +N = 512 data = np.zeros((N,N)) data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5 data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1 @@ -36,8 +39,8 @@ ag = ig n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10) noisy_data = ImageData(n1) -plt.imshow(noisy_data.as_array()) -plt.show() +#plt.imshow(noisy_data.as_array()) +#plt.show() #%% @@ -45,7 +48,7 @@ plt.show() alpha = 2 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ") -method = '1' +method = '0' if method == '0': @@ -83,34 +86,63 @@ print ("normK", normK) sigma = 1 tau = 1/(sigma*normK**2) -opt = {'niter':2000} +# opt = {'niter':2000, 'memopt': True} -res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) +# res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) -plt.figure(figsize=(5,5)) -plt.imshow(res.as_array()) -plt.colorbar() -plt.show() -#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) -#pdhg.max_iteration = 2000 -#pdhg.update_objective_interval = 10 -# -#pdhg.run(2000) -# -# + +# opt = {'niter':2000, 'memopt': False} +# res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt) + +# 
plt.figure(figsize=(5,5)) +# plt.subplot(1,3,1) +# plt.imshow(res.as_array()) +# plt.title('memopt') +# plt.colorbar() +# plt.subplot(1,3,2) +# plt.imshow(res1.as_array()) +# plt.title('no memopt') +# plt.colorbar() +# plt.subplot(1,3,3) +# plt.imshow((res1 - res).abs().as_array()) +# plt.title('diff') +# plt.colorbar() +# plt.show() +pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma) +pdhg.max_iteration = 2000 +pdhg.update_objective_interval = 100 + + +pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True) +pdhgo.max_iteration = 2000 +pdhgo.update_objective_interval = 100 + +steps = [timer()] +pdhgo.run(200) +steps.append(timer()) +t1 = dt(steps) + +pdhg.run(200) +steps.append(timer()) +t2 = dt(steps) + +print ("Time difference {} {} {}".format(t1,t2,t2-t1)) +sol = pdhg.get_output().as_array() +#sol = result.as_array() # -#sol = pdhg.get_output().as_array() -##sol = result.as_array() -## -#fig = plt.figure() -#plt.subplot(1,2,1) -#plt.imshow(noisy_data.as_array()) -##plt.colorbar() -#plt.subplot(1,2,2) -#plt.imshow(sol) -##plt.colorbar() -#plt.show() +fig = plt.figure() +plt.subplot(1,3,1) +plt.imshow(noisy_data.as_array()) +plt.colorbar() +plt.subplot(1,3,2) +plt.imshow(sol) +plt.colorbar() +plt.subplot(1,3,3) +plt.imshow(pdhgo.get_output().as_array()) +plt.colorbar() + +plt.show() ## # ### -- cgit v1.2.3
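
A minimal standalone sketch of the in-place ("memopt") update pattern this series converges on. This is not ccpi/CIL code: it uses plain NumPy arrays in place of DataContainers, a toy problem with K = I, f(y) = 0.5*||y - b||^2 and g(x) = 0.5*alpha*||x||^2, and hypothetical helper functions (prox_f_conjugate, prox_g) whose closed forms hold only for that toy problem. The point it illustrates is the buffer discipline behind the commits above: every intermediate of the PDHG iteration is written into a preallocated array through an out= argument, and each reused buffer is fully overwritten (or would have to be zeroed first, as the FiniteDifferenceOperator and Gradient changes do) before anything accumulates into it.

# Toy, self-contained illustration of the in-place ("memopt") PDHG update
# pattern from the commits above.  NumPy only: the ccpi/CIL operator and
# function classes are replaced by hypothetical closed-form proximal maps for
#     min_x  f(K x) + g(x),   K = I,
#     f(y) = 0.5*||y - b||^2,   g(x) = 0.5*alpha*||x||^2.
import numpy as np

def prox_f_conjugate(y, sigma, b, out):
    # prox_{sigma f*}(y) = (y - sigma*b) / (1 + sigma), written into `out`
    np.subtract(y, sigma * b, out=out)
    out /= (1.0 + sigma)

def prox_g(x, tau, alpha, out):
    # prox_{tau g}(x) = x / (1 + tau*alpha), written into `out`
    np.divide(x, 1.0 + tau * alpha, out=out)

def pdhg_memopt(b, alpha=0.1, sigma=1.0, tau=1.0, theta=1.0, niter=200):
    # Preallocate every container once, exactly as the memopt branch does.
    x_old = np.zeros_like(b)
    y_old = np.zeros_like(b)
    x = np.zeros_like(b)
    y = np.zeros_like(b)
    x_tmp = np.zeros_like(b)
    y_tmp = np.zeros_like(b)
    xbar = x_old.copy()

    for _ in range(niter):
        # Dual step: y = prox_{sigma f*}(y_old + sigma*K(xbar)); K = I here,
        # so `y_tmp` is fully overwritten and never needs zeroing.
        np.copyto(y_tmp, xbar)
        y_tmp *= sigma
        y_tmp += y_old
        prox_f_conjugate(y_tmp, sigma, b, out=y)

        # Primal step: x = prox_{tau g}(x_old - tau*K^T(y)); K^T = I here.
        np.copyto(x_tmp, y)
        x_tmp *= -tau
        x_tmp += x_old
        prox_g(x_tmp, tau, alpha, out=x)

        # Relaxation: xbar = x + theta*(x - x_old), computed in place.
        np.subtract(x, x_old, out=xbar)
        xbar *= theta
        xbar += x

        # fill()-style update of the old iterates: no new allocations.
        np.copyto(x_old, x)
        np.copyto(y_old, y)
    return x

if __name__ == "__main__":
    b = np.random.rand(64, 64)
    x = pdhg_memopt(b)
    # With K = I the exact minimiser is b/(1 + alpha); the residual is small.
    print(np.max(np.abs(x - b / (1.0 + 0.1))))

With K = I every buffer here is completely overwritten by np.copyto, so no explicit zeroing is needed; an adjoint that accumulates into out block by block, as Gradient.adjoint does over its finite-difference components, has to fill out on the first block or zero it up front, which is exactly what the GradientOperator and FiniteDifferenceOperator commits in this series change.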