author | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-15 17:27:51 +0100
---|---|---
committer | epapoutsellis <epapoutsellis@gmail.com> | 2019-04-15 17:27:51 +0100
commit | c0bde2086da1b1160c956fafbbb666c256d3e4b9 (patch) |
tree | 209fdc78a6f2b0b005d3c788cc99f1caa137778d /Wrappers |
parent | 85171b24b4bddf18b82a4fd5fd6e2bfe16f0afbf (diff) |
fixing other method of PDHG denoising
Diffstat (limited to 'Wrappers')
7 files changed, 176 insertions, 195 deletions
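The first hunks below touch `PDHG_old` in PDHG.py: the two-step `xbar` update (`fill` then in-place subtract) becomes a single `x.subtract(x_old, out=xbar)`, and a commented-out primal-dual gap diagnostic is added in each branch. For orientation, here is a sketch of what that diagnostic computes, with `f`, `g`, `operator` following the CIL Function/Operator API used throughout this diff; note the second dual term calls `g` directly, mirroring the commented code verbatim, where the textbook gap would call `g.convex_conjugate`:

```python
# Sketch of the commented-out diagnostic added to PDHG_old below; not shipped code.
def pd_gap(f, g, operator, x, y):
    p1 = f(operator.direct(x)) + g(x)                            # primal objective
    d1 = -(f.convex_conjugate(y) + g(-1 * operator.adjoint(y)))  # dual objective, as written in the diff
    return p1, d1, p1 - d1                                       # gap shrinks toward 0 at a saddle point
```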
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 5e92767..d1b5351 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -158,14 +158,24 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
             x_old -= tau*operator.adjoint(y)
             x = g.proximal(x_old, tau)

-            xbar.fill(x)
-            xbar -= x_old
+            x.subtract(x_old, out=xbar)
             xbar *= theta
             xbar += x
-
+
             x_old.fill(x)
-            y_old.fill(y)
+            y_old.fill(y)
+
+
+#            if i%100==0:
+#
+#                p1 = f(operator.direct(x)) + g(x)
+#                d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+#                primal.append(p1)
+#                dual.append(d1)
+#                pdgap.append(p1-d1)
+#                print(p1, d1, p1-d1)
+
         else:

             operator.direct(xbar, out = y_tmp)
@@ -186,6 +196,15 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
             x_old.fill(x)
             y_old.fill(y)

+#            if i%100==0:
+#
+#                p1 = f(operator.direct(x)) + g(x)
+#                d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+#                primal.append(p1)
+#                dual.append(d1)
+#                pdgap.append(p1-d1)
+#                print(p1, d1, p1-d1)
+
     t_end = time.time()
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
index 38bc458..70511bb 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
@@ -54,15 +54,20 @@ class FunctionOperatorComposition(Function):

     def proximal(self, x, tau, out=None):

-        '''proximal does not take into account the Operator'''
-
-        return self.function.proximal(x, tau, out=out)
+        '''proximal does not take into account the Operator'''
+        if out is None:
+            return self.function.proximal(x, tau)
+        else:
+            self.function.proximal(x, tau, out=out)
+

     def proximal_conjugate(self, x, tau, out=None):

         ''' proximal conjugate does not take into account the Operator'''
-
-        return self.function.proximal_conjugate(x, tau, out=out)
+        if out is None:
+            return self.function.proximal_conjugate(x, tau)
+        else:
+            self.function.proximal_conjugate(x, tau, out=out)

     def gradient(self, x, out=None):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 2d0a00a..2ac11ee 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -34,6 +34,7 @@ class L2NormSquared(Function):

         super(L2NormSquared, self).__init__()
         self.b = kwargs.get('b',None)
+        self.L = 2

     def __call__(self, x):

@@ -247,7 +248,38 @@ if __name__ == '__main__':
     print(res_out.as_array())

     numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \
-                                            res_out.as_array(), decimal=4)
+                                            res_out.as_array(), decimal=4)
+
+
+    ig1 = ImageGeometry(2,3)
+
+    tau = 0.1
+
+    u = ig1.allocate('random_int')
+    b = ig1.allocate('random_int')
+
+    scalar = 0.5
+    f_scaled = scalar * L2NormSquared(b=b)
+    f_noscaled = L2NormSquared(b=b)
+
+
+    res1 = f_scaled.proximal(u, tau)
+    res2 = f_noscaled.proximal(u, tau*scalar)
+
+#    res2 = (u + tau*b)/(1+tau)
+
+    numpy.testing.assert_array_almost_equal(res1.as_array(), \
+                                            res2.as_array(), decimal=4)
+
+
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 3541bc2..7e6b6e7 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -54,9 +54,8 @@ class MixedL21Norm(Function):

         else:

-            #tmp = [ el**2 for el in x.containers ]
-            #res = sum(tmp).sqrt().sum()
-            res = x.pnorm()
+            tmp = [ el**2 for el in x.containers ]
+            res = sum(tmp).sqrt().sum()

         return res

@@ -109,7 +108,7 @@ class MixedL21Norm(Function):
#                x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out)
                 #TODO this is slow, why ???
-#                x.divide(x.norm().maximum(1.0), out=out)
+#                x.divide(x.pnorm().maximum(1.0), out=out)

     def __rmul__(self, scalar):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index cb85249..7caeab2 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -37,11 +37,14 @@ class ScaledFunction(object):
     '''
     def __init__(self, function, scalar):
         super(ScaledFunction, self).__init__()
-        self.L = None
+
         if not isinstance (scalar, Number):
             raise TypeError('expected scalar: got {}'.format(type(scalar)))
         self.scalar = scalar
         self.function = function
+
+        if self.function.L is not None:
+            self.L = self.scalar * self.function.L

     def __call__(self,x, out=None):
         '''Evaluates the function at x '''
@@ -64,7 +67,7 @@ class ScaledFunction(object):
         if out is None:
             return self.function.proximal(x, tau*self.scalar)
         else:
-            out.fill( self.function.proximal(x, tau*self.scalar) )
+            self.function.proximal(x, tau*self.scalar, out = out)

     def proximal_conjugate(self, x, tau, out = None):
         '''This returns the proximal operator for the function at x, tau
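The ScaledFunction changes above rest on two facts: for a scalar c > 0, prox_{tau*(c*f)}(x) = prox_{(tau*c)*f}(x), which is why `proximal` now forwards `tau*self.scalar` and passes `out` straight through instead of `out.fill(...)`; and the Lipschitz constant of the gradient scales linearly, which is why `__init__` now sets `self.L = self.scalar * self.function.L` (implicitly assuming a non-negative scalar). The test added to L2NormSquared.py above exercises exactly the prox identity; a standalone version of that check, using only APIs that appear in this diff:

```python
# Standalone version of the prox-scaling check added to L2NormSquared.py:
# for c > 0, prox_{tau*(c*f)}(x) == prox_{(tau*c)*f}(x), here with f = ||u - b||^2.
import numpy
from ccpi.framework import ImageGeometry
from ccpi.optimisation.functions import L2NormSquared

ig1 = ImageGeometry(2, 3)
u = ig1.allocate('random_int')
b = ig1.allocate('random_int')
tau, scalar = 0.1, 0.5

res1 = (scalar * L2NormSquared(b=b)).proximal(u, tau)   # ScaledFunction path
res2 = L2NormSquared(b=b).proximal(u, tau * scalar)     # rescaled step on the plain function

numpy.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(), decimal=4)
```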
diff --git a/Wrappers/Python/wip/fista_TV_denoising.py b/Wrappers/Python/wip/fista_TV_denoising.py
new file mode 100644
index 0000000..a9712fc
--- /dev/null
+++ b/Wrappers/Python/wip/fista_TV_denoising.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 22 14:53:03 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG, PDHG_old, FISTA
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+                      MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction
+
+from ccpi.optimisation.algs import FISTA
+
+from skimage.util import random_noise
+
+from timeit import default_timer as timer
+def dt(steps):
+    return steps[-1] - steps[-2]
+
+# Create phantom for TV denoising
+
+N = 100
+
+data = np.zeros((N,N))
+data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+ag = ig
+
+# Create noisy data. Add Gaussian noise
+n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10)
+noisy_data = ImageData(n1)
+
+
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy data')
+plt.show()
+
+# Regularisation Parameter
+alpha = 2
+
+operator = Gradient(ig)
+g = alpha * MixedL21Norm()
+f = 0.5 * L2NormSquared(b = noisy_data)
+
+x_init = ig.allocate()
+opt = {'niter':2000}
+
+
+x = FISTA(x_init, f, g, opt)
+
+#fista = FISTA()
+#fista.set_up(x_init, f, g, opt )
+#fista.max_iteration = 10
+#
+#fista.run(2000)
+#plt.figure(figsize=(15,15))
+#plt.subplot(3,1,1)
+#plt.imshow(fista.get_output().as_array())
+#plt.title('no memopt class')
+
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py
index e142d94..a5cd1bf 100755
--- a/Wrappers/Python/wip/pdhg_TV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising.py
@@ -23,11 +23,9 @@ from timeit import default_timer as timer
 def dt(steps):
     return steps[-1] - steps[-2]

-#%%
-
 # Create phantom for TV denoising

-N = 500
+N = 100

 data = np.zeros((N,N))
 data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
@@ -40,16 +38,16 @@ ag = ig
 n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10)
 noisy_data = ImageData(n1)

+
 plt.imshow(noisy_data.as_array())
+plt.title('Noisy data')
 plt.show()

-#%%
-
 # Regularisation Parameter
 alpha = 2

 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '0'
+method = '1'

 if method == '0':
@@ -74,12 +72,9 @@ else:
     #         No Composite                 #
     ###########################################################################
     operator = Gradient(ig)
-    f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
-    g = L2NormSquared(b = noisy_data)
-
-    ###########################################################################
-#%%
-
+    f = alpha * MixedL21Norm()
+    g = 0.5 * L2NormSquared(b = noisy_data)
+
 # Compute operator Norm
 normK = operator.norm()
@@ -94,180 +89,36 @@
 t1 = timer()
 res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
 t2 = timer()

+print(" Run memopt")

 t3 = timer()
 res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
 t4 = timer()
-#
-print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3))
-
-#
-#%%
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 100
-##
-#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-#pdhgo.max_iteration = 2000
-#pdhgo.update_objective_interval = 100
-##
-#steps = [timer()]
-#pdhgo.run(2000)
-#steps.append(timer())
-#t1 = dt(steps)
-##
-#pdhg.run(2000)
-#steps.append(timer())
-#t2 = dt(steps)
-#
-#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
-#res = pdhg.get_output()
-#res1 = pdhgo.get_output()

 #%%
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(res.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res1.as_array())
-#plt.title('memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((res1 - res).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-
-
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(pdhg.get_output().as_array())
-#plt.title('no memopt class')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((pdhg.get_output() - res).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-#
-#
-#
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(pdhgo.get_output().as_array())
-#plt.title('memopt class')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res1.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((pdhgo.get_output() - res1).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-
-
-
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
+plt.imshow(res.as_array())
+plt.title('no memopt')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(res1.as_array())
+plt.title('memopt')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow((res1 - res).abs().as_array())
+plt.title('diff')
+plt.colorbar()
+plt.show()
+#
+plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
+plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
+plt.legend()
+plt.show()
-
-
-# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
-# res = pdhg.get_output()
-# res1 = pdhgo.get_output()
-#
-# diff = (res-res1)
-# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max()))
-# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum()))
-#
-#
-# plt.figure(figsize=(5,5))
-# plt.subplot(1,3,1)
-# plt.imshow(res.as_array())
-# plt.colorbar()
-# #plt.show()
-#
-# #plt.figure(figsize=(5,5))
-# plt.subplot(1,3,2)
-# plt.imshow(res1.as_array())
-# plt.colorbar()
-
-#plt.show()
+print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3))
+diff = (res1 - res).abs().as_array().max()
+print(" Max of abs difference is {}".format(diff))
-#=======
-## opt = {'niter':2000, 'memopt': True}
-#
-## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-#
-#>>>>>>> origin/pdhg_fix
-#
-#
-## opt = {'niter':2000, 'memopt': False}
-## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-#
-## plt.figure(figsize=(5,5))
-## plt.subplot(1,3,1)
-## plt.imshow(res.as_array())
-## plt.title('memopt')
-## plt.colorbar()
-## plt.subplot(1,3,2)
-## plt.imshow(res1.as_array())
-## plt.title('no memopt')
-## plt.colorbar()
-## plt.subplot(1,3,3)
-## plt.imshow((res1 - res).abs().as_array())
-## plt.title('diff')
-## plt.colorbar()
-## plt.show()
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 100
-#
-#
-#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-#pdhgo.max_iteration = 2000
-#pdhgo.update_objective_interval = 100
-#
-#steps = [timer()]
-#pdhgo.run(200)
-#steps.append(timer())
-#t1 = dt(steps)
-#
-#pdhg.run(200)
-#steps.append(timer())
-#t2 = dt(steps)
-#
-#print ("Time difference {} {} {}".format(t1,t2,t2-t1))
-#sol = pdhg.get_output().as_array()
-##sol = result.as_array()
-##
-#fig = plt.figure()
-#plt.subplot(1,3,1)
-#plt.imshow(noisy_data.as_array())
-#plt.colorbar()
-#plt.subplot(1,3,2)
-#plt.imshow(sol)
-#plt.colorbar()
-#plt.subplot(1,3,3)
-#plt.imshow(pdhgo.get_output().as_array())
-#plt.colorbar()
-#
-#plt.show()
-###
-##
-####
-##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
-##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
-##plt.legend()
-##plt.show()
-#
-#
-##%%
-##
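For reference, the 'NotComposite' path (`method = '1'`) that pdhg_TV_denoising.py now defaults to solves min_u alpha*||grad(u)||_{2,1} + 0.5*||u - b||^2, with `f = alpha * MixedL21Norm()` coupled to the `Gradient` operator and `g = 0.5 * L2NormSquared(b=noisy_data)`. A minimal self-contained sketch of that run follows; the step sizes `sigma = 1`, `tau = 1/(sigma*normK**2)` are an assumption (the diff never shows those lines), everything else mirrors the script:

```python
# Minimal sketch of the method='1' TV-denoising run in pdhg_TV_denoising.py.
# Assumed: the sigma/tau choice below; the diff does not show how the script sets them.
import numpy as np
from skimage.util import random_noise
from ccpi.framework import ImageData, ImageGeometry
from ccpi.optimisation.operators import Gradient
from ccpi.optimisation.functions import MixedL21Norm, L2NormSquared
from ccpi.optimisation.algorithms import PDHG_old

# Piecewise-constant phantom plus Gaussian noise, as in the script
N = 100
data = np.zeros((N, N))
data[round(N/4):round(3*N/4), round(N/4):round(3*N/4)] = 0.5
data[round(N/8):round(7*N/8), round(3*N/8):round(5*N/8)] = 1
noisy_data = ImageData(random_noise(data, mode='gaussian', mean=0, var=0.05, seed=10))

ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)

alpha = 2                                # regularisation parameter
operator = Gradient(ig)
f = alpha * MixedL21Norm()               # coupled with Gradient inside PDHG_old
g = 0.5 * L2NormSquared(b=noisy_data)

normK = operator.norm()
sigma = 1.0                              # assumed step sizes: sigma * tau * ||K||^2 <= 1
tau = 1.0 / (sigma * normK ** 2)

opt = {'niter': 2000}
res, t, primal, dual, pdgap = PDHG_old(f, g, operator, tau=tau, sigma=sigma, opt=opt)
```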