author     epapoutsellis <epapoutsellis@gmail.com>    2019-04-09 09:45:07 +0100
committer  epapoutsellis <epapoutsellis@gmail.com>    2019-04-09 09:45:07 +0100
commit     e7a0152d17e03df79db6cd3cd50d07202f9d185d
tree       929ede7f607be32df47724e0795bc0f6e15dc250  /Wrappers
parent     dd7a2438bdfafc5a2bbbb34a5e80336d12b5e86d
profile out
Diffstat (limited to 'Wrappers')
4 files changed, 171 insertions, 39 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index f25cdbf..e9bd801 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -125,19 +125,39 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
     for i in range(niter):
 
-        # Gradient descent, Dual problem solution
-        y_tmp = y_old + sigma * operator.direct(xbar)
+#        # Gradient descent, Dual problem solution
+#        y_tmp = y_old + sigma * operator.direct(xbar)
+
+        y_tmp = operator.direct(xbar)
+        y_tmp *= sigma
+        y_tmp +=y_old
+
         y = f.proximal_conjugate(y_tmp, sigma)
 
         # Gradient ascent, Primal problem solution
-        x_tmp = x_old - tau * operator.adjoint(y)
+#        x_tmp = x_old - tau * operator.adjoint(y)
+
+        x_tmp = operator.adjoint(y)
+        x_tmp *=-tau
+        x_tmp +=x_old
+
         x = g.proximal(x_tmp, tau)
 
         #Update
-        xbar = x + theta * (x - x_old)
+#        xbar = x + theta * (x - x_old)
+        xbar = x - x_old
+        xbar *= theta
+        xbar += x
 
         x_old = x
-        y_old = y
+        y_old = y
+
+#        operator.direct(xbar, out = y_tmp)
+#        y_tmp *= sigma
+#        y_tmp +=y_old
+
+
+
+
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 6c080bb..c6a7f95 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -114,20 +114,35 @@ class BlockOperator(Operator):
         BlockOperator work on BlockDataContainer, but they will work on DataContainers
         and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1)
         '''
+
         if not isinstance (x, BlockDataContainer):
             x_b = BlockDataContainer(x)
         else:
             x_b = x
         shape = self.get_output_shape(x_b.shape)
         res = []
-        for row in range(self.shape[0]):
-            for col in range(self.shape[1]):
-                if col == 0:
-                    prod = self.get_item(row,col).direct(x_b.get_item(col))
-                else:
-                    prod += self.get_item(row,col).direct(x_b.get_item(col))
-            res.append(prod)
-        return BlockDataContainer(*res, shape=shape)
+
+        if out is None:
+
+            for row in range(self.shape[0]):
+                for col in range(self.shape[1]):
+                    if col == 0:
+                        prod = self.get_item(row,col).direct(x_b.get_item(col))
+                    else:
+                        prod += self.get_item(row,col).direct(x_b.get_item(col))
+                res.append(prod)
+            return BlockDataContainer(*res, shape=shape)
+
+        else:
+
+            tmp = self.range_geometry().allocate()
+            for row in range(self.shape[0]):
+                for col in range(self.shape[1]):
+                    if col == 0:
+                        self.get_item(row,col).direct(x_b.get_item(col), out=tmp.get_item(col))
+                    else:
+                        self.get_item(row,col).direct(x_b.get_item(col), out=out)
+                        out+=tmp
 
     def adjoint(self, x, out=None):
         '''Adjoint operation for the BlockOperator
@@ -258,13 +273,11 @@ if __name__ == '__main__':
     from ccpi.framework import ImageGeometry
     from ccpi.optimisation.operators import Gradient, Identity, SparseFiniteDiff
-    from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer
-    from ccpi.optimisation.operators import Operator, LinearOperator
-
-
+
     M, N = 4, 3
     ig = ImageGeometry(M, N)
-    arr = ig.allocate('random_int')
+    arr = ig.allocate('random_int')
+
     G = Gradient(ig)
     Id = Identity(ig)
@@ -294,11 +307,19 @@ if __name__ == '__main__':
 
 #    ttt = B.sum_abs_col()
 #
-    numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4)
-    numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4)
-    numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4)
+    #TODO this is not working
+#    numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4)
+#    numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4)
+#    numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4)
+
+    u = ig.allocate('random_int')
+
+    z1 = B.direct(u)
+    res = B.range_geometry().allocate()
+    B.direct(u, out = res)
+
\ No newline at end of file
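Note: the PDHG change above replaces allocating expressions such as y_tmp = y_old + sigma * operator.direct(xbar) with in-place updates, so each iteration reuses buffers instead of creating temporaries; the BlockOperator change adds an out= path with the same intent. A minimal NumPy sketch of the in-place pattern (the operator K and all variable values are illustrative, not taken from the commit):

    import numpy as np

    # stand-in operator and iterates; not the ccpi API
    K = np.array([[1.0, 2.0],
                  [0.0, 1.0]])
    sigma, theta = 0.5, 1.0
    y_old = np.zeros(2)
    x_old = np.ones(2)
    xbar = x_old.copy()

    # allocating form:  y_tmp = y_old + sigma * (K @ xbar)
    # in-place form reuses one buffer instead of building two temporaries:
    y_tmp = K @ xbar      # single allocation for the operator output
    y_tmp *= sigma        # scaled in place
    y_tmp += y_old        # accumulated in place

    # the over-relaxation step xbar = x + theta * (x - x_old) follows the same idea:
    x = x_old - 0.1       # stand-in for the proximal step g.proximal(x_tmp, tau)
    xbar = x - x_old
    xbar *= theta
    xbar += x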
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 0faba22..3d2a96b 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -29,6 +29,7 @@ class FiniteDiff(LinearOperator):
         '''FIXME: domain and range should be geometries'''
         self.gm_domain = gm_domain
         self.gm_range = gm_range
+        self.direction = direction
         self.bnd_cond = bnd_cond
 
@@ -50,9 +51,16 @@ class FiniteDiff(LinearOperator):
         x_sz = len(x.shape)
 
         if out is None:
-            out = np.zeros(x.shape)
+            out = np.zeros_like(x_asarr)
+            fd_arr = out
+        else:
+            fd_arr = out.as_array()
 
-        fd_arr = out
+#        if out is None:
+#            out = self.gm_domain.allocate().as_array()
+#
+#        fd_arr = out.as_array()
+#        fd_arr = self.gm_domain.allocate().as_array()
 
         ######################## Direct for 2D  ###############################
         if x_sz == 2:
@@ -162,8 +170,9 @@ class FiniteDiff(LinearOperator):
         else:
             raise NotImplementedError
 
-        res = out/self.voxel_size
-        return type(x)(res)
+#        res = out #/self.voxel_size
+        return type(x)(out)
+
 
     def adjoint(self, x, out=None):
 
@@ -172,9 +181,17 @@
         x_sz = len(x.shape)
 
         if out is None:
-            out = np.zeros(x.shape)
+            out = np.zeros_like(x_asarr)
+            fd_arr = out
+        else:
+            fd_arr = out.as_array()
 
-        fd_arr = out
+#        if out is None:
+#            out = self.gm_domain.allocate().as_array()
+#            fd_arr = out
+#        else:
+#            fd_arr = out.as_array()
+##        fd_arr = self.gm_domain.allocate().as_array()
 
         ######################## Adjoint for 2D  ###############################
         if x_sz == 2:
@@ -299,8 +316,8 @@
         else:
             raise NotImplementedError
 
-        res = out/self.voxel_size
-        return type(x)(-res)
+        out *= -1 #/self.voxel_size
+        return type(x)(out)
 
     def range_geometry(self):
         '''Returns the range geometry'''
@@ -317,6 +334,29 @@
         return self.s1
+
+if __name__ == '__main__':
+
+    from ccpi.framework import ImageGeometry
+
+    N, M = 2, 3
+
+    ig = ImageGeometry(N, M)
+
+
+    FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann')
+    u = FD.domain_geometry().allocate('random_int')
+
+
+    res = FD.domain_geometry().allocate()
+    FD.direct(u, out=res)
+    print(res.as_array())
+#    z = FD.direct(u)
+
+#    print(z.as_array(), res.as_array())
+
+
+#    w = G.range_geometry().allocate('random_int')
\ No newline at end of file
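For reference, the behaviour FiniteDiff targets here (a forward difference paired with an adjoint that passes the inner-product test) can be checked independently of the ccpi classes and their sign or voxel-size conventions. A self-contained NumPy sketch, assuming a 1D forward difference whose last entry is zeroed (a Neumann-style end condition); the function names are illustrative only:

    import numpy as np

    def fwd_diff(x):
        # forward difference; the last entry is 0 (Neumann-style boundary)
        d = np.zeros_like(x)
        d[:-1] = x[1:] - x[:-1]
        return d

    def fwd_diff_adjoint(y):
        # exact transpose of fwd_diff, worked out from <D x, y> = <x, D^T y>
        a = np.zeros_like(y)
        a[0] = -y[0]
        a[1:-1] = y[:-2] - y[1:-1]
        a[-1] = y[-2]
        return a

    rng = np.random.default_rng(0)
    x = rng.standard_normal(5)
    y = rng.standard_normal(5)
    # adjoint (dot) test: both inner products should agree
    print(np.isclose(np.dot(fwd_diff(x), y), np.dot(x, fwd_diff_adjoint(y))))   # True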
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 52923af..54456cc 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -38,22 +38,47 @@ class Gradient(LinearOperator):
         else:
             raise ValueError('No channels to correlate')
 
-        self.bnd_cond = bnd_cond
+        self.bnd_cond = bnd_cond
+
+        # Call FiniteDiff operator
+
+        self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond)
 
     def direct(self, x, out=None):
 
-        tmp = self.gm_range.allocate()
-        for i in range(tmp.shape[0]):
-            tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x))
-        return tmp
+
+        if out is not None:
+            for i in range(self.gm_range.shape[0]):
+                self.FD.direction=self.ind[i]
+                self.FD.direct(x, out = out[i])
+#                FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x, out=out[i])
+            return out
+        else:
+            tmp = self.gm_range.allocate()
+            for i in range(tmp.shape[0]):
+                self.FD.direction=self.ind[i]
+                tmp.get_item(i).fill(self.FD.direct(x))
+#                tmp.get_item(i).fill(FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).direct(x))
+            return tmp
 
     def adjoint(self, x, out=None):
 
-        tmp = self.gm_domain.allocate()
-        for i in range(x.shape[0]):
-            tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i))
-        return tmp
+        if out is not None:
+
+            tmp = self.gm_domain.allocate()
+            for i in range(x.shape[0]):
+                self.FD.direction=self.ind[i]
+                self.FD.adjoint(x.get_item(i), out = tmp)
+#                FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i), out=tmp)
+                out-=tmp
+        else:
+            tmp = self.gm_domain.allocate()
+            for i in range(x.shape[0]):
+                self.FD.direction=self.ind[i]
+                tmp+=self.FD.adjoint(x.get_item(i))
+#                tmp+=FiniteDiff(self.gm_domain, direction = self.ind[i], bnd_cond = self.bnd_cond).adjoint(x.get_item(i))
+            return tmp
 
     def domain_geometry(self):
@@ -109,6 +134,7 @@ if __name__ == '__main__':
 
     from ccpi.optimisation.operators import Identity, BlockOperator
 
+
     M, N = 2, 3
     ig = ImageGeometry(M, N)
     arr = ig.allocate('random_int' )
@@ -179,7 +205,32 @@
     a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr))
 #
-    c1 = arr + a
+#    c1 = arr + a
 #    c2 = arr + a
 #    c2 = a1 + arr
+
+    from ccpi.framework import ImageGeometry
+#    from ccpi.optimisation.operators import Gradient
+#
+    N, M = 2, 3
+#
+    ig = ImageGeometry(N, M)
+#
+    G = Gradient(ig)
+#
+    u = G.domain_geometry().allocate('random_int')
+    w = G.range_geometry().allocate('random_int')
+#
+#
+    res = G.range_geometry().allocate()
+#
+    G.direct(u, out=res)
+    z = G.direct(u)
+#
+    print(res[0].as_array())
+    print(z[0].as_array())
+#
+##    LHS = (G.direct(u)*w).sum()
+##    RHS = (u * G.adjoint(w)).sum()
+
 #
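The test block at the end of the Gradient diff prints res[0] against z[0] to compare the out= path with the allocating path, and the commented LHS/RHS lines sketch the usual adjoint check. A generic NumPy version of that comparison pattern, with a made-up operator op_direct standing in for the ccpi operators (not their actual API):

    import numpy as np
    import numpy.testing

    def op_direct(x, out=None):
        # stand-in operator offering both interfaces: return a new array, or fill `out`
        if out is None:
            return 2.0 * x                # allocating path
        np.multiply(x, 2.0, out=out)      # in-place path
        return out

    u = np.arange(6, dtype=float).reshape(2, 3)
    z = op_direct(u)                      # analogue of z = G.direct(u)
    res = np.empty_like(u)                # analogue of res = G.range_geometry().allocate()
    op_direct(u, out=res)                 # analogue of G.direct(u, out=res)
    numpy.testing.assert_array_almost_equal(z, res, decimal=4)
    print(z)
    print(res)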