author     jakobsj <jakobsj@users.noreply.github.com>    2018-05-08 12:11:28 +0100
committer  Edoardo Pasca <edo.paskino@gmail.com>         2018-05-08 13:11:28 +0200
commit     57d3daad99869a69bf14b8b4a3326630088da36a (patch)
tree       03e7b95ba16f4ffaec6322cdf838fa3879cbf039 /Wrappers
parent     b14e71bdc72f864897793dd3e60c02a31886d292 (diff)
Cvx (#118)
* add CVX test script
* Add L1 and TV CVX and denoising. TV FBPD bug.
* Fix TV2D bug and update cvx demo
Diffstat (limited to 'Wrappers')
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/funcs.py    2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/ops.py      4
-rw-r--r--  Wrappers/Python/wip/demo_compare_cvx.py     250
3 files changed, 253 insertions, 3 deletions
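
Note on the changes below: in ops.py, LinearOperatorMatrix.direct and adjoint now wrap
their result in type(x) rather than a hard-coded DataContainer, so container subclasses
keep their own type when passed through the operator; in funcs.py, the second argument
TV2D passes to the Norm2 base class is corrected from 2 to 0. A minimal sketch of the
ops.py effect (illustrative only, not part of the commit; it assumes the ccpi package
from this tree is installed):

    import numpy as np
    from ccpi.framework import DataContainer
    from ccpi.optimisation.ops import LinearOperatorMatrix

    # Small random forward operator, as in the demo below
    np.random.seed(1)
    A = LinearOperatorMatrix(np.random.randn(5, 3))
    x = DataContainer(np.zeros((3, 1)))

    y = A.direct(x)    # 5x1 result
    z = A.adjoint(y)   # 3x1 result

    # With this commit, both results carry the same container type as the input
    # instead of always being a plain DataContainer.
    assert type(y) is type(x)
    assert type(z) is type(x)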
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index d11d6c3..f5463a3 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -57,7 +57,7 @@ class Norm2(Function):
 
 class TV2D(Norm2):
     
     def __init__(self, gamma):
-        super(TV2D,self).__init__(gamma, 2)
+        super(TV2D,self).__init__(gamma, 0)
         self.op = FiniteDiff2D()
         self.L = self.op.get_max_sing_val()
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
index 26787f5..668b07e 100755
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ b/Wrappers/Python/ccpi/optimisation/ops.py
@@ -163,10 +163,10 @@ class LinearOperatorMatrix(Operator):
         super(LinearOperatorMatrix, self).__init__()
     
     def direct(self,x):
-        return DataContainer(numpy.dot(self.A,x.as_array()))
+        return type(x)(numpy.dot(self.A,x.as_array()))
     
     def adjoint(self,x):
-        return DataContainer(numpy.dot(self.A.transpose(),x.as_array()))
+        return type(x)(numpy.dot(self.A.transpose(),x.as_array()))
     
     def size(self):
         return self.A.shape
diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py
new file mode 100644
index 0000000..cbfe50e
--- /dev/null
+++ b/Wrappers/Python/wip/demo_compare_cvx.py
@@ -0,0 +1,250 @@
+
+from ccpi.framework import ImageData, ImageGeometry, AcquisitionGeometry, DataContainer
+from ccpi.optimisation.algs import FISTA, FBPD, CGLS
+from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D
+
+from ccpi.optimisation.ops import LinearOperatorMatrix, Identity
+
+# Requires CVXPY, see http://www.cvxpy.org/
+# CVXPY can be installed in anaconda using
+# conda install -c cvxgrp cvxpy libgcc
+
+# Whether to use or omit CVXPY
+use_cvxpy = True
+if use_cvxpy:
+    from cvxpy import *
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Problem data.
+m = 30
+n = 20
+np.random.seed(1)
+Amat = np.random.randn(m, n)
+A = LinearOperatorMatrix(Amat)
+bmat = np.random.randn(m)
+bmat.shape = (bmat.shape[0],1)
+
+# A = Identity()
+# Change n to equal to m.
+
+b = DataContainer(bmat)
+
+# Regularization parameter
+lam = 10
+
+# Create object instances with the test data A and b.
+f = Norm2sq(A,b,c=0.5)
+g0 = ZeroFun()
+
+# Initial guess
+x_init = DataContainer(np.zeros((n,1)))
+
+f.grad(x_init)
+
+# Run FISTA for least squares plus zero function.
+x_fista0, it0, timing0, criter0 = FISTA(x_init, f, g0)
+
+# Print solution and final objective/criterion value for comparison
+print("FISTA least squares plus zero function solution and objective value:")
+print(x_fista0.array)
+print(criter0[-1])
+
+if use_cvxpy:
+    # Compare to CVXPY
+
+    # Construct the problem.
+    x0 = Variable(n)
+    objective0 = Minimize(0.5*sum_squares(Amat*x0 - bmat) )
+    prob0 = Problem(objective0)
+
+    # The optimal objective is returned by prob.solve().
+    result0 = prob0.solve(verbose=False,solver=SCS,eps=1e-9)
+
+    # The optimal solution for x is stored in x.value and optimal objective value
+    # is in result as well as in objective.value
+    print("CVXPY least squares plus zero function solution and objective value:")
+    print(x0.value)
+    print(objective0.value)
+
+# Plot criterion curve to see FISTA converge to same value as CVX.
+iternum = np.arange(1,1001)
+plt.figure()
+plt.loglog(iternum[[0,-1]],[objective0.value, objective0.value], label='CVX LS')
+plt.loglog(iternum,criter0,label='FISTA LS')
+plt.legend()
+plt.show()
+
+# Create 1-norm object instance
+g1 = Norm1(lam)
+
+g1(x_init)
+g1.prox(x_init,0.02)
+
+# Combine with least squares and solve using generic FISTA implementation
+x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1)
+
+# Print for comparison
+print("FISTA least squares plus 1-norm solution and objective value:")
+print(x_fista1)
+print(criter1[-1])
+
+if use_cvxpy:
+    # Compare to CVXPY
+
+    # Construct the problem.
+    x1 = Variable(n)
+    objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat) + lam*norm(x1,1) )
+    prob1 = Problem(objective1)
+
+    # The optimal objective is returned by prob.solve().
+    result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
+
+    # The optimal solution for x is stored in x.value and optimal objective value
+    # is in result as well as in objective.value
+    print("CVXPY least squares plus 1-norm solution and objective value:")
+    print(x1.value)
+    print(objective1.value)
+
+# Now try another algorithm FBPD for same problem:
+x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
+print(x_fbpd1)
+print(criterfbpd1[-1])
+
+# Plot criterion curve to see both FISTA and FBPD converge to same value.
+# Note that FISTA is very efficient for 1-norm minimization so it beats
+# FBPD in this test by a lot. But FBPD can handle a larger class of problems
+# than FISTA can.
+plt.figure()
+plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
+plt.loglog(iternum,criter1,label='FISTA LS+1')
+plt.legend()
+plt.show()
+
+plt.figure()
+plt.loglog(iternum[[0,-1]],[objective1.value, objective1.value], label='CVX LS+1')
+plt.loglog(iternum,criter1,label='FISTA LS+1')
+plt.loglog(iternum,criterfbpd1,label='FBPD LS+1')
+plt.legend()
+plt.show()
+
+# Now try 1-norm and TV denoising with FBPD, first 1-norm.
+
+# Set up phantom size NxN by creating ImageGeometry, initialising the
+# ImageData object with this geometry and empty array and finally put some
+# data into its array, and display as image.
+N = 64
+ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
+Phantom = ImageData(geometry=ig)
+
+x = Phantom.as_array()
+x[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+x[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+plt.imshow(x)
+plt.title('Phantom image')
+plt.show()
+
+# Identity operator for denoising
+I = Identity()
+
+# Data and add noise
+y = I.direct(Phantom)
+y.array = y.array + 0.1*np.random.randn(N, N)
+
+plt.imshow(y.array)
+plt.title('Noisy image')
+plt.show()
+
+# Data fidelity term
+f_denoise = Norm2sq(I,y,c=0.5)
+
+# 1-norm regulariser
+lam1_denoise = 1.0
+g1_denoise = Norm1(lam1_denoise)
+
+# Initial guess
+x_init_denoise = ImageData(np.zeros((N,N)))
+
+# Combine with least squares and solve using generic FISTA implementation
+x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise)
+
+print(x_fista1_denoise)
+print(criter1_denoise[-1])
+
+plt.imshow(x_fista1_denoise.as_array())
+plt.title('FISTA LS+1')
+plt.show()
+
+# Now denoise LS + 1-norm with FBPD
+x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
+print(x_fbpd1_denoise)
+print(criterfbpd1_denoise[-1])
+
+plt.imshow(x_fbpd1_denoise.as_array())
+plt.title('FBPD LS+1')
+plt.show()
+
+if use_cvxpy:
+    # Compare to CVXPY
+
+    # Construct the problem.
+    x1_denoise = Variable(N**2,1)
+    objective1_denoise = Minimize(0.5*sum_squares(x1_denoise - y.array.flatten()) + lam1_denoise*norm(x1_denoise,1) )
+    prob1_denoise = Problem(objective1_denoise)
+
+    # The optimal objective is returned by prob.solve().
+    result1_denoise = prob1_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
+
+    # The optimal solution for x is stored in x.value and optimal objective value
+    # is in result as well as in objective.value
+    print("CVXPY least squares plus 1-norm solution and objective value:")
+    print(x1_denoise.value)
+    print(objective1_denoise.value)
+
+x1_cvx = x1_denoise.value
+x1_cvx.shape = (N,N)
+
+plt.imshow(x1_cvx)
+plt.title('CVX LS+1')
+plt.show()
+
+# Now TV with FBPD
+lam_tv = 0.1
+gtv = TV2D(lam_tv)
+gtv(gtv.op.direct(x_init_denoise))
+
+opt_tv = {'tol': 1e-4, 'iter': 10000}
+
+x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
+print(x_fbpdtv_denoise)
+print(criterfbpdtv_denoise[-1])
+
+plt.imshow(x_fbpdtv_denoise.as_array())
+plt.title('FBPD TV')
+plt.show()
+
+if use_cvxpy:
+    # Compare to CVXPY
+
+    # Construct the problem.
+    xtv_denoise = Variable(N,N)
+    objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
+    probtv_denoise = Problem(objectivetv_denoise)
+
+    # The optimal objective is returned by prob.solve().
+    resulttv_denoise = probtv_denoise.solve(verbose=False,solver=SCS,eps=1e-12)
+
+    # The optimal solution for x is stored in x.value and optimal objective value
+    # is in result as well as in objective.value
+    print("CVXPY least squares plus 1-norm solution and objective value:")
+    print(xtv_denoise.value)
+    print(objectivetv_denoise.value)
+
+plt.imshow(xtv_denoise.value)
+plt.title('CVX TV')
+plt.show()
+
+plt.loglog([0,opt_tv['iter']], [objectivetv_denoise.value,objectivetv_denoise.value], label='CVX TV')
+plt.loglog(criterfbpdtv_denoise, label='FBPD TV')
\ No newline at end of file
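
As a quick sanity check of the TV2D fix, the functional can be evaluated the same way the
demo does (a hedged sketch only, not part of the commit; it assumes the same ccpi imports
as demo_compare_cvx.py and does not verify the numerical value):

    import numpy as np
    from ccpi.framework import ImageData, ImageGeometry
    from ccpi.optimisation.funcs import TV2D

    # Small piecewise-constant test image, in the spirit of the demo phantom
    N = 16
    ig = ImageGeometry(voxel_num_x=N, voxel_num_y=N)
    u = ImageData(geometry=ig)
    u.as_array()[N//4:3*N//4, N//4:3*N//4] = 1.0

    # As in demo_compare_cvx.py, the functional is evaluated on the
    # finite-difference of the image; with the corrected __init__ argument
    # this should return a finite, non-negative total-variation value.
    gtv = TV2D(0.1)
    print(gtv(gtv.op.direct(u)))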