-rw-r--r--   Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py             27
-rw-r--r--   Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py   39
-rw-r--r--   Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py     15
-rwxr-xr-x   Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py       3
-rwxr-xr-x   Wrappers/Python/wip/pdhg_TV_denoising.py                         75
-rw-r--r--   Wrappers/Python/wip/pdhg_TV_tomography2D.py                      40
-rw-r--r--   Wrappers/Python/wip/pdhg_tv_denoising_poisson.py                129
7 files changed, 239 insertions(+), 89 deletions(-)
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index d1b5351..bc080f8 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -152,29 +152,29 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
if not memopt:
- y_old += sigma * operator.direct(xbar)
- y = f.proximal_conjugate(y_old, sigma)
-
- x_old -= tau*operator.adjoint(y)
- x = g.proximal(x_old, tau)
-
+ y_tmp = y_old + sigma * operator.direct(xbar)
+ y = f.proximal_conjugate(y_tmp, sigma)
+
+ x_tmp = x_old - tau*operator.adjoint(y)
+ x = g.proximal(x_tmp, tau)
+
x.subtract(x_old, out=xbar)
xbar *= theta
xbar += x
-
+
x_old.fill(x)
- y_old.fill(y)
+ y_old.fill(y)
-# if i%100==0:
-#
+# if i%10==0:
+##
# p1 = f(operator.direct(x)) + g(x)
+## print(p1)
# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
# primal.append(p1)
# dual.append(d1)
# pdgap.append(p1-d1)
# print(p1, d1, p1-d1)
-
else:
@@ -196,9 +196,10 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
x_old.fill(x)
y_old.fill(y)
-# if i%100==0:
-#
+# if i%10==0:
+##
# p1 = f(operator.direct(x)) + g(x)
+## print(p1)
# d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
# primal.append(p1)
# dual.append(d1)
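For reference, the non-memopt branch above now builds explicit temporaries (y_tmp, x_tmp) instead of updating y_old/x_old in place. A minimal numpy sketch of one such PDHG iteration, with a plain matrix K standing in for the operator and hypothetical prox_f_conj/prox_g callables standing in for f.proximal_conjugate and g.proximal:

import numpy as np

def pdhg_step(x_old, y_old, xbar, K, prox_f_conj, prox_g, sigma, tau, theta=1.0):
    # dual update: y = prox_{sigma f*}(y_old + sigma * K xbar)
    y_tmp = y_old + sigma * (K @ xbar)
    y = prox_f_conj(y_tmp, sigma)
    # primal update: x = prox_{tau g}(x_old - tau * K^T y)
    x_tmp = x_old - tau * (K.T @ y)
    x = prox_g(x_tmp, tau)
    # over-relaxation: xbar = x + theta * (x - x_old)
    xbar_new = x + theta * (x - x_old)
    return x, y, xbar_new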
diff --git a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
index d4370a8..cc3356a 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/KullbackLeibler.py
@@ -20,8 +20,12 @@
import numpy
from ccpi.optimisation.functions import Function
from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
from ccpi.framework import ImageData, ImageGeometry
import functools
class KullbackLeibler(Function):
@@ -102,7 +106,6 @@ class KullbackLeibler(Function):
out *= 0.5
-
def proximal_conjugate(self, x, tau, out=None):
@@ -137,19 +140,39 @@ class KullbackLeibler(Function):
if __name__ == '__main__':
+
+ from ccpi.framework import ImageGeometry
+ import numpy
N, M = 2,3
ig = ImageGeometry(N, M)
- data = ImageData(numpy.random.randint(-10, 100, size=(M, N)))
- x = ImageData(numpy.random.randint(-10, 100, size=(M, N)))
-
- bnoise = ImageData(numpy.random.randint(-100, 100, size=(M, N)))
+ data = ImageData(numpy.random.randint(-10, 10, size=(M, N)))
+ x = ImageData(numpy.random.randint(-10, 10, size=(M, N)))
- f = KullbackLeibler(data, bnoise=bnoise)
- print(f.sum_value)
+ bnoise = ImageData(numpy.random.randint(-10, 10, size=(M, N)))
- print(f(x))
+ f = KullbackLeibler(data)
+ print(f(x))
+
+# numpy.random.seed(10)
+#
+#
+# x = numpy.random.randint(-10, 10, size = (2,3))
+# b = numpy.random.randint(1, 10, size = (2,3))
+#
+# ind1 = x>0
+#
+# res = x[ind1] - b * numpy.log(x[ind1])
+#
+## ind = x>0
+#
+## y = x[ind]
+#
+#
+#
+#
+#
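The commented-out experiment above evaluates x - b*log(x) on the positive entries only. A standalone numpy sketch of the Kullback-Leibler fidelity in that spirit (the exact masking convention and constant terms used inside KullbackLeibler are assumptions here):

import numpy as np

def kl_fidelity(x, b):
    # sum of b*log(b/x) + x - b over entries where both x and b are positive
    ind = (x > 0) & (b > 0)
    return np.sum(b[ind] * np.log(b[ind] / x[ind]) + x[ind] - b[ind])

np.random.seed(10)
x = np.random.randint(1, 10, size=(2, 3)).astype(float)
b = np.random.randint(1, 10, size=(2, 3)).astype(float)
print(kl_fidelity(x, b))   # non-negative, and zero when x == b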
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 2bb4ca7..3053a27 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -93,6 +93,7 @@ class L2NormSquared(Function):
if self.b is None:
return x/(1+2*tau)
else:
+
tmp = x.subtract(self.b)
#tmp -= self.b
tmp /= (1+2*tau)
@@ -287,17 +288,3 @@ if __name__ == '__main__':
numpy.testing.assert_array_almost_equal(res1.as_array(), \
res2.as_array(), decimal=4)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
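The proximal branch touched above returns x/(1+2*tau) when b is None and b + (x-b)/(1+2*tau) otherwise. A small framework-free numpy check of that closed form:

import numpy as np

def prox_l2_squared(x, tau, b=None):
    # minimiser of ||u - b||^2 + ||u - x||^2 / (2*tau), evaluated at the given x
    if b is None:
        return x / (1 + 2 * tau)
    return b + (x - b) / (1 + 2 * tau)

x, b, tau = np.random.rand(5), np.random.rand(5), 0.3
u = prox_l2_squared(x, tau, b)
# first-order optimality: 2*tau*(u - b) + (u - x) = 0
np.testing.assert_array_almost_equal(2 * tau * (u - b) + (u - x), np.zeros_like(x))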
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 7e6b6e7..2004e5f 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -96,7 +96,8 @@ class MixedL21Norm(Function):
tmp = [ el*el for el in x.containers]
res = sum(tmp).sqrt().maximum(1.0)
frac = [el/res for el in x.containers]
- return BlockDataContainer(*frac)
+ return BlockDataContainer(*frac)
+
#TODO this is slow, why???
# return x.divide(x.pnorm().maximum(1.0))
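The proximal_conjugate code above divides each block component by max(||x||_2, 1), i.e. a pointwise projection onto the unit l2 ball across components. A plain-numpy sketch with a list of arrays standing in for the BlockDataContainer:

import numpy as np

def project_unit_ball(components):
    # pointwise l2 norm across the components, clipped below at 1
    res = np.maximum(np.sqrt(sum(c * c for c in components)), 1.0)
    return [c / res for c in components]

gx, gy = np.random.randn(4, 4), np.random.randn(4, 4)
px, py = project_unit_ball([gx, gy])
assert np.all(np.sqrt(px**2 + py**2) <= 1.0 + 1e-12)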
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py
index a5cd1bf..0e7f628 100755
--- a/Wrappers/Python/wip/pdhg_TV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising.py
@@ -8,7 +8,8 @@ Created on Fri Feb 22 14:53:03 2019
from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
-import numpy as np
+import numpy as np
+import numpy
import matplotlib.pyplot as plt
from ccpi.optimisation.algorithms import PDHG, PDHG_old
@@ -20,10 +21,10 @@ from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
from skimage.util import random_noise
from timeit import default_timer as timer
-def dt(steps):
- return steps[-1] - steps[-2]
+#def dt(steps):
+# return steps[-1] - steps[-2]
-# Create phantom for TV denoising
+# Create phantom for TV Gaussian denoising
N = 100
@@ -89,13 +90,12 @@ t1 = timer()
res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
t2 = timer()
-print(" Run memopt")
t3 = timer()
res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
t4 = timer()
-
-#%%
+#
+##%%
plt.figure(figsize=(15,15))
plt.subplot(3,1,1)
plt.imshow(res.as_array())
@@ -115,10 +115,67 @@ plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
plt.legend()
plt.show()
-
+#
print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3))
diff = (res1 - res).abs().as_array().max()
-
+#
print(" Max of abs difference is {}".format(diff))
+#%% Check with CVX solution
+
+from ccpi.optimisation.operators import SparseFiniteDiff
+
+try:
+ from cvxpy import *
+ cvx_not_installable = True
+except ImportError:
+ cvx_not_installable = False
+
+
+if cvx_not_installable:
+
+ ##Construct problem
+ u = Variable(ig.shape)
+
+ DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
+ DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
+
+ # Define Total Variation as a regulariser
+ regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
+ fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
+
+ # choose solver
+ if 'MOSEK' in installed_solvers():
+ solver = MOSEK
+ else:
+ solver = SCS
+ obj = Minimize( regulariser + fidelity)
+ prob = Problem(obj)
+ result = prob.solve(verbose = True, solver = solver)
+
+ diff_cvx = numpy.abs( res.as_array() - u.value )
+
+ # Show result
+ plt.figure(figsize=(15,15))
+ plt.subplot(3,1,1)
+ plt.imshow(res.as_array())
+ plt.title('PDHG solution')
+ plt.colorbar()
+
+ plt.subplot(3,1,2)
+ plt.imshow(u.value)
+ plt.title('CVX solution')
+ plt.colorbar()
+
+ plt.subplot(3,1,3)
+ plt.imshow(diff_cvx)
+ plt.title('Difference')
+ plt.colorbar()
+
+ print('Primal Objective (CVX) {} '.format(obj.value))
+ print('Primal Objective (PDHG) {} '.format(primal[-1]))
+
+
+
+
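The CVX comparison above relies on SparseFiniteDiff(...).matrix() for the finite-difference matrices. A hedged scipy.sparse sketch of the kind of forward-difference matrix assumed there (Neumann boundary, i.e. zero difference at the last index; the row-major vec ordering below is also an assumption):

import numpy as np
import scipy.sparse as sp

def neumann_forward_diff(n):
    # forward differences u[i+1] - u[i], with the last row zeroed (Neumann boundary)
    D = sp.diags([-np.ones(n), np.ones(n - 1)], [0, 1], format='lil')
    D[-1, :] = 0
    return D.tocsr()

n = 4
Dy = sp.kron(neumann_forward_diff(n), sp.eye(n))   # differences along axis 0 of a row-major vec(u)
Dx = sp.kron(sp.eye(n), neumann_forward_diff(n))   # differences along axis 1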
diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
index 3fec34e..419b098 100644
--- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py
+++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
@@ -56,7 +56,7 @@ detectors = 150
angles = np.linspace(0,np.pi,100)
ag = AcquisitionGeometry('parallel','2D',angles, detectors)
-Aop = AstraProjectorSimple(ig, ag, 'gpu')
+Aop = AstraProjectorSimple(ig, ag, 'cpu')
sin = Aop.direct(data)
plt.imshow(sin.as_array())
@@ -134,23 +134,29 @@ t4 = timer()
#
print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3))
-
#%%
-#sol = pdhg.get_output().as_array()
-#fig = plt.figure()
-#plt.subplot(1,2,1)
-#plt.imshow(noisy_data.as_array())
-##plt.colorbar()
-#plt.subplot(1,2,2)
-#plt.imshow(sol)
-##plt.colorbar()
-#plt.show()
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
+plt.imshow(res.as_array())
+plt.title('no memopt')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(res1.as_array())
+plt.title('memopt')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow((res1 - res).abs().as_array())
+plt.title('diff')
+plt.colorbar()
+plt.show()
+#
+plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
+plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
+plt.legend()
+plt.show()
#
+print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3))
+diff = (res1 - res).abs().as_array().max()
#
-##%%
-#plt.plot(np.linspace(0,N,N), data.as_array()[int(N/2),:], label = 'GTruth')
-#plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
-#plt.legend()
-#plt.show()
-
+print(" Max of abs difference is {}".format(diff))
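For context, Aop.direct above computes a parallel-beam sinogram of the phantom. A rough, ASTRA-free illustration of the same operation using skimage's radon transform (the phantom and angle grid here are placeholders, not the script's exact data):

import numpy as np
from skimage.transform import radon

N = 150
phantom = np.zeros((N, N))
phantom[round(N/4):round(3*N/4), round(N/4):round(3*N/4)] = 0.5
phantom[round(N/8):round(3*N/8), round(3*N/8):round(5*N/8)] = 1.0

angles_deg = np.linspace(0., 180., 100, endpoint=False)    # radon expects degrees
sinogram = radon(phantom, theta=angles_deg, circle=False)  # one column per angle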
diff --git a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
index 9fad6f8..16e6b43 100644
--- a/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
+++ b/Wrappers/Python/wip/pdhg_tv_denoising_poisson.py
@@ -6,24 +6,26 @@ Created on Fri Feb 22 14:53:03 2019
@author: evangelos
"""
-from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
-
-import numpy as np
+import numpy as np
+import numpy
import matplotlib.pyplot as plt
+from ccpi.framework import ImageData, ImageGeometry
+
from ccpi.optimisation.algorithms import PDHG, PDHG_old
from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFun, KullbackLeibler, \
- MixedL21Norm, FunctionOperatorComposition, BlockFunction
+from ccpi.optimisation.functions import ZeroFunction, KullbackLeibler, \
+ MixedL21Norm, BlockFunction
from skimage.util import random_noise
+from timeit import default_timer as timer
# ############################################################################
-# Create phantom for TV denoising
+# Create phantom for TV Poisson denoising
N = 100
data = np.zeros((N,N))
@@ -41,13 +43,12 @@ plt.imshow(noisy_data.as_array())
plt.colorbar()
plt.show()
-#%%
-
# Regularisation Parameter
-alpha = 10
+alpha = 2
#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '1'
+
+method = '0'
if method == '0':
# Create operators
@@ -57,15 +58,11 @@ if method == '0':
# Form Composite Operator
operator = BlockOperator(op1, op2, shape=(2,1) )
- #### Create functions
-# f = FunctionComposition_new(operator, mixed_L12Norm(alpha), \
-# L2NormSq(0.5, b = noisy_data) )
-
f1 = alpha * MixedL21Norm()
- f2 = KullbackLeibler(b = noisy_data)
+ f2 = 0.5 * KullbackLeibler(noisy_data)
f = BlockFunction(f1, f2 )
- g = ZeroFun()
+ g = ZeroFunction()
else:
@@ -73,30 +70,108 @@ else:
# No Composite #
###########################################################################
operator = Gradient(ig)
- f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
- g = KullbackLeibler(noisy_data)
+ f = alpha * MixedL21Norm()
+ g = 0.5 * KullbackLeibler(noisy_data)
###########################################################################
-#%%
# Compute operator Norm
normK = operator.norm()
-print ("normK", normK)
-# Primal & dual stepsizes
-#sigma = 1
-#tau = 1/(sigma*normK**2)
-sigma = 1/normK
-tau = 1/normK
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
opt = {'niter':2000}
+opt1 = {'niter':2000, 'memopt': True}
+t1 = timer()
res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-
-plt.figure(figsize=(5,5))
+t2 = timer()
+
+t3 = timer()
+res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+t4 = timer()
+
+
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
plt.imshow(res.as_array())
+plt.title('no memopt')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(res1.as_array())
+plt.title('memopt')
plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow((res1 - res).abs().as_array())
+plt.title('diff')
+plt.colorbar()
+plt.show()
+#
+plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
+plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
+plt.legend()
plt.show()
+print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3))
+diff = (res1 - res).abs().as_array().max()
+
+print(" Max of abs difference is {}".format(diff))
+
+
+#%% Check with CVX solution
+
+from ccpi.optimisation.operators import SparseFiniteDiff
+
+try:
+ from cvxpy import *
+ cvx_not_installable = True
+except ImportError:
+ cvx_not_installable = False
+
+
+if cvx_not_installable:
+
+ ##Construct problem
+ u = Variable(ig.shape)
+
+ DY = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann')
+ DX = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann')
+
+ # Define Total Variation as a regulariser
+ regulariser = alpha * sum(norm(vstack([DX.matrix() * vec(u), DY.matrix() * vec(u)]), 2, axis = 0))
+ fidelity = 0.5 * sum_squares(u - noisy_data.as_array())
+
+
+ solver = SCS
+ obj = Minimize( regulariser + fidelity)
+ prob = Problem(obj)
+ result = prob.solve(verbose = True, solver = solver)
+
+ diff_cvx = numpy.abs( res.as_array() - u.value )
+
+ # Show result
+ plt.figure(figsize=(15,15))
+ plt.subplot(3,1,1)
+ plt.imshow(res.as_array())
+ plt.title('PDHG solution')
+ plt.colorbar()
+
+ plt.subplot(3,1,2)
+ plt.imshow(u.value)
+ plt.title('CVX solution')
+ plt.colorbar()
+
+ plt.subplot(3,1,3)
+ plt.imshow(diff_cvx)
+ plt.title('Difference')
+ plt.colorbar()
+
+ print('Primal Objective (CVX) {} '.format(obj.value))
+ print('Primal Objective (PDHG) {} '.format(primal[-1]))
+
+
+
#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
#pdhg.max_iteration = 2000
#pdhg.update_objective_interval = 10
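The step sizes in this script follow the usual PDHG rule sigma*tau*||K||^2 <= 1, here with sigma = 1 and tau = 1/(sigma*normK**2). A small numpy sketch of that rule, with a power iteration standing in for operator.norm() (the power iteration itself is an assumption, not the framework's implementation):

import numpy as np

def operator_norm(K, n_iter=100):
    # power iteration on K^T K; estimates the largest singular value of K
    x = np.random.randn(K.shape[1])
    for _ in range(n_iter):
        x = K.T @ (K @ x)
        x /= np.linalg.norm(x)
    return np.sqrt(np.linalg.norm(K.T @ (K @ x)))

K = np.random.randn(30, 20)
normK = operator_norm(K)
sigma = 1.0
tau = 1.0 / (sigma * normK ** 2)
assert sigma * tau * normK ** 2 <= 1.0 + 1e-10   # PDHG convergence condition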