author     Edoardo Pasca <edo.paskino@gmail.com>      2018-11-02 14:06:48 +0000
committer  GitHub <noreply@github.com>                2018-11-02 14:06:48 +0000
commit     8584140e4db56fe998f9780dbc8d75a9aa3a72fe (patch)
tree       f4348bec38f73ab14f45667307e704d8e453c938 /Wrappers/Python
parent     11cbe477fc6efca9aab33f85dd1210bb17572be1 (diff)
Build fix (#159)
closes #148
closes #156
closes #157
closes #158
* split run_test into separate tests per algorithm
* fix FBPD
  adds the operator as an explicit input argument (see the call sketch below)
  renames the f/g/h arguments to the explicit constraint, data_fidelity and regulariser
* implement proximal for ZeroFun and Norm1
  the proximal method must be implemented correctly; the previous implementation led to error #158 (a standalone sketch of the two proximal maps follows below)
* fix the unit tests and the CVX comparison demo
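
For context, the FBPD interface change means the operator that used to be pulled from h.op is now passed explicitly, with the remaining roles named constraint, data_fidelity and regulariser. Below is a minimal sketch of the new call, condensed from the updated test_FBPD_Norm1 in run_test.py; it is not verbatim from this commit and assumes the CCPi imports and constructors behave as they do in this diff.

# Sketch only: new FBPD argument order is
# (x_init, operator, constraint, data_fidelity, regulariser, opt)
import numpy as np
from ccpi.framework import DataContainer
from ccpi.optimisation.algs import FBPD
from ccpi.optimisation.funcs import Norm2sq, Norm1
from ccpi.optimisation.ops import LinearOperatorMatrix, Identity

m, n = 30, 20
np.random.seed(1)
A = LinearOperatorMatrix(np.random.randn(m, n))   # forward model
b = DataContainer(np.random.randn(m).reshape((m, 1)))

f = Norm2sq(A, b, c=0.5)                 # smooth data-fidelity term
g1 = Norm1(10)                           # 1-norm regulariser
x_init = DataContainer(np.zeros((n, 1)))

# operator is now the second positional argument; None keeps the
# default ZeroFun() constraint
x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, Identity(), None, f, g1)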
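The proximal fix itself is mathematically simple: the proximal map of the zero function is the identity, and the proximal map of the 1-norm is soft-thresholding, which is what the new ZeroFun.proximal and Norm1.proximal compute. A standalone NumPy sketch of the same two operations (hypothetical helper names, not the CCPi API):

import numpy as np

def zerofun_proximal(x, tau):
    # prox of the zero function: argmin_u (1/(2*tau))*||u - x||^2 = x
    return x.copy()

def norm1_proximal(x, tau, gamma=1.0):
    # soft-thresholding: prox of gamma*||.||_1 with step size tau
    return np.maximum(np.abs(x) - tau * gamma, 0) * np.sign(x)

x = np.array([-3.0, -0.5, 0.0, 0.2, 2.0])
print(norm1_proximal(x, tau=1.0))   # entries shrunk towards zero by tau*gamma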
Diffstat (limited to 'Wrappers/Python')
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algs.py    34
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/funcs.py   33
-rw-r--r--  Wrappers/Python/conda-recipe/bld.bat           4
-rw-r--r--  Wrappers/Python/conda-recipe/build.sh          4
-rw-r--r--  Wrappers/Python/conda-recipe/meta.yaml        10
-rwxr-xr-x  Wrappers/Python/conda-recipe/run_test.py     100
-rw-r--r--  Wrappers/Python/setup.py                       6
-rw-r--r--  Wrappers/Python/wip/demo_compare_cvx.py       29
8 files changed, 173 insertions, 47 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py
index 4a6a383..24ed6e9 100755
--- a/Wrappers/Python/ccpi/optimisation/algs.py
+++ b/Wrappers/Python/ccpi/optimisation/algs.py
@@ -21,6 +21,7 @@ import numpy
import time
from ccpi.optimisation.funcs import Function
+from ccpi.optimisation.funcs import ZeroFun
from ccpi.framework import ImageData, AcquisitionData

def FISTA(x_init, f=None, g=None, opt=None):
@@ -38,8 +39,8 @@ def FISTA(x_init, f=None, g=None, opt=None):
    opt: additional algorithm
    '''
    # default inputs
-    if f is None: f = Function()
-    if g is None: g = Function()
+    if f is None: f = ZeroFun()
+    if g is None: g = ZeroFun()

    # algorithmic parameters
    if opt is None:
@@ -120,7 +121,8 @@ def FISTA(x_init, f=None, g=None, opt=None):

    return x, it, timing, criter

-def FBPD(x_init, f=None, g=None, h=None, opt=None):
+def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\
+         regulariser=None, opt=None):
    '''FBPD Algorithm

    Parameters:
@@ -131,9 +133,9 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None):
    opt: additional algorithm
    '''
    # default inputs
-    if f is None: f = Function()
-    if g is None: g = Function()
-    if h is None: h = Function()
+    if constraint is None: constraint = ZeroFun()
+    if data_fidelity is None: data_fidelity = ZeroFun()
+    if regulariser is None: regulariser = ZeroFun()

    # algorithmic parameters
    if opt is None:
@@ -152,14 +154,14 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None):
    memopt = opt['memopts'] if 'memopts' in opt.keys() else False

    # step-sizes
-    tau = 2 / (g.L + 2);
-    sigma = (1/tau - g.L/2) / h.L;
+    tau = 2 / (data_fidelity.L + 2);
+    sigma = (1/tau - data_fidelity.L/2) / regulariser.L;
    inv_sigma = 1/sigma

    # initialization
    x = x_init
-    y = h.op.direct(x);
+    y = operator.direct(x);

    timing = numpy.zeros(max_iter)
    criter = numpy.zeros(max_iter)
@@ -174,23 +176,23 @@ def FBPD(x_init, f=None, g=None, h=None, opt=None):

        # primal forward-backward step
        x_old = x;
-        x = x - tau * ( g.grad(x) + h.op.adjoint(y) );
-        x = f.prox(x, tau);
+        x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) );
+        x = constraint.prox(x, tau);

        # dual forward-backward step
-        y = y + sigma * h.op.direct(2*x - x_old);
-        y = y - sigma * h.prox(inv_sigma*y, inv_sigma);
+        y = y + sigma * operator.direct(2*x - x_old);
+        y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma);

        # time and criterion
        timing[it] = time.time() - t
-        criter[it] = f(x) + g(x) + h(h.op.direct(x));
+        criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x))

        # stopping rule
        #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10:
        #   break

-    criter = criter[0:it+1];
-    timing = numpy.cumsum(timing[0:it+1]);
+    criter = criter[0:it+1]
+    timing = numpy.cumsum(timing[0:it+1])

    return x, it, timing, criter

diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index c105236..0ed77ae 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -193,12 +193,20 @@ class ZeroFun(Function):

    def prox(self,x,tau):
        return x.copy()
+
    def proximal(self, x, tau, out=None):
        if out is None:
-            return self.prox(x, tau)
+            return self.prox(x,tau)
        else:
-            out.fill(x)
-
+            if isSizeCorrect(out, x):
+                # check dimensionality
+                if issubclass(type(out), DataContainer):
+                    out.fill(x)
+
+                elif issubclass(type(out) , numpy.ndarray):
+                    out[:] = x
+            else:
+                raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )

# A more interesting example, least squares plus 1-norm minimization.
# Define class to represent 1-norm including prox function
class Norm1(Function):
@@ -221,6 +229,25 @@ class Norm1(Function):

    def prox(self,x,tau):
        return (x.abs() - tau*self.gamma).maximum(0) * x.sign()
+
+    def proximal(self, x, tau, out=None):
+        if out is None:
+            return self.prox(x,tau)
+        else:
+            if isSizeCorrect(out, x):
+                # check dimensionality
+                if issubclass(type(out), DataContainer):
+                    v = (x.abs() - tau*self.gamma).maximum(0)
+                    x.sign(out=out)
+                    out *= v
+                    #out.fill(self.prox(x,tau))
+                elif issubclass(type(out) , numpy.ndarray):
+                    v = (x.abs() - tau*self.gamma).maximum(0)
+                    out[:] = x.sign()
+                    out *= v
+                    #out[:] = self.prox(x,tau)
+            else:
+                raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) )

# Box constraints indicator function. Calling returns 0 if argument is within
# the box. The prox operator is projection onto the box. Only implements one

diff --git a/Wrappers/Python/conda-recipe/bld.bat b/Wrappers/Python/conda-recipe/bld.bat
index 4b4c7f5..d317f54 100644
--- a/Wrappers/Python/conda-recipe/bld.bat
+++ b/Wrappers/Python/conda-recipe/bld.bat
@@ -1,7 +1,3 @@
-IF NOT DEFINED CIL_VERSION (
-ECHO CIL_VERSION Not Defined.
-exit 1
-)

ROBOCOPY /E "%RECIPE_DIR%\.." "%SRC_DIR%"

diff --git a/Wrappers/Python/conda-recipe/build.sh b/Wrappers/Python/conda-recipe/build.sh
index 5dd97b0..2e68519 100644
--- a/Wrappers/Python/conda-recipe/build.sh
+++ b/Wrappers/Python/conda-recipe/build.sh
@@ -1,7 +1,3 @@
-if [ -z "$CIL_VERSION" ]; then
-  echo "Need to set CIL_VERSION"
-  exit 1
-fi

mkdir ${SRC_DIR}/ccpi
cp -r "${RECIPE_DIR}/../../../" ${SRC_DIR}/ccpi

diff --git a/Wrappers/Python/conda-recipe/meta.yaml b/Wrappers/Python/conda-recipe/meta.yaml
index 1b3d910..4b19a68 100644
--- a/Wrappers/Python/conda-recipe/meta.yaml
+++ b/Wrappers/Python/conda-recipe/meta.yaml
@@ -1,13 +1,13 @@
package:
  name: ccpi-framework
-  version: {{ environ['CIL_VERSION'] }}
+  version: 0.11.0

build:
  preserve_egg_dir: False
-  script_env:
-    - CIL_VERSION
-# number: 0
+#script_env:
+#  - CIL_VERSION
+  number: 0

requirements:
  build:
@@ -24,6 +24,8 @@ requirements:
    - python
    - numpy
    - cvxpy
+    - scipy
+    - matplotlib

about:
  home: http://www.ccpi.ac.uk

diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py
index 7df5c07..ce09808 100755
--- a/Wrappers/Python/conda-recipe/run_test.py
+++ b/Wrappers/Python/conda-recipe/run_test.py
@@ -17,6 +17,7 @@ from ccpi.optimisation.funcs import TV2D
from ccpi.optimisation.ops import LinearOperatorMatrix
from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.ops import Identity
from cvxpy import *
@@ -48,6 +49,7 @@ class TestDataContainer(unittest.TestCase):
def testGb_creation_nocopy(self):
X,Y,Z = 512,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -66,6 +68,7 @@ class TestDataContainer(unittest.TestCase):
def testInlineAlgebra(self):
print ("Test Inline Algebra")
X,Y,Z = 1024,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -122,6 +125,7 @@ class TestDataContainer(unittest.TestCase):
def test_unary_operations(self):
print ("Test unary operations")
X,Y,Z = 1024,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = -numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -157,6 +161,7 @@ class TestDataContainer(unittest.TestCase):
def binary_add(self):
print ("Test binary add")
X,Y,Z = 512,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -243,6 +248,7 @@ class TestDataContainer(unittest.TestCase):
def binary_multiply(self):
print ("Test binary multiply")
X,Y,Z = 1024,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -283,6 +289,7 @@ class TestDataContainer(unittest.TestCase):
def binary_divide(self):
print ("Test binary divide")
X,Y,Z = 1024,512,512
+ X,Y,Z = 256,512,512
steps = [timer()]
a = numpy.ones((X,Y,Z), dtype='float32')
steps.append(timer())
@@ -530,6 +537,33 @@ class TestAlgorithms(unittest.TestCase):
print(objective0.value)
self.assertNumpyArrayAlmostEqual(
numpy.squeeze(x_fista0.array),x0.value,6)
+ def test_FISTA_Norm1(self):
+
+ opt = {'memopt':True}
+ # Problem data.
+ m = 30
+ n = 20
+ np.random.seed(1)
+ Amat = np.random.randn(m, n)
+ A = LinearOperatorMatrix(Amat)
+ bmat = np.random.randn(m)
+ bmat.shape = (bmat.shape[0],1)
+
+ # A = Identity()
+ # Change n to equal to m.
+
+ b = DataContainer(bmat)
+
+ # Regularization parameter
+ lam = 10
+ opt = {'memopt':True}
+ # Create object instances with the test data A and b.
+ f = Norm2sq(A,b,c=0.5, memopt=True)
+ g0 = ZeroFun()
+
+ # Initial guess
+ x_init = DataContainer(np.zeros((n,1)))
+
# Create 1-norm object instance
g1 = Norm1(lam)
@@ -541,7 +575,7 @@ class TestAlgorithms(unittest.TestCase):
# Print for comparison
print("FISTA least squares plus 1-norm solution and objective value:")
- print(x_fista1)
+ print(x_fista1.as_array().squeeze())
print(criter1[-1])
# Compare to CVXPY
@@ -563,8 +597,56 @@ class TestAlgorithms(unittest.TestCase):
self.assertNumpyArrayAlmostEqual(
numpy.squeeze(x_fista1.array),x1.value,6)
+ def test_FBPD_Norm1(self):
+
+ opt = {'memopt':True}
+ # Problem data.
+ m = 30
+ n = 20
+ np.random.seed(1)
+ Amat = np.random.randn(m, n)
+ A = LinearOperatorMatrix(Amat)
+ bmat = np.random.randn(m)
+ bmat.shape = (bmat.shape[0],1)
+
+ # A = Identity()
+ # Change n to equal to m.
+
+ b = DataContainer(bmat)
+
+ # Regularization parameter
+ lam = 10
+ opt = {'memopt':True}
+ # Create object instances with the test data A and b.
+ f = Norm2sq(A,b,c=0.5, memopt=True)
+ g0 = ZeroFun()
+
+ # Initial guess
+ x_init = DataContainer(np.zeros((n,1)))
+
+ # Create 1-norm object instance
+ g1 = Norm1(lam)
+
+
+ # Compare to CVXPY
+
+ # Construct the problem.
+ x1 = Variable(n)
+ objective1 = Minimize(0.5*sum_squares(Amat*x1 - bmat.T[0]) + lam*norm(x1,1) )
+ prob1 = Problem(objective1)
+
+ # The optimal objective is returned by prob.solve().
+ result1 = prob1.solve(verbose=False,solver=SCS,eps=1e-9)
+
+ # The optimal solution for x is stored in x.value and optimal objective value
+ # is in result as well as in objective.value
+ print("CVXPY least squares plus 1-norm solution and objective value:")
+ print(x1.value)
+ print(objective1.value)
+
# Now try another algorithm FBPD for same problem:
- x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
+ x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,
+ Identity(), None, f, g1)
print(x_fbpd1)
print(criterfbpd1[-1])
@@ -581,6 +663,8 @@ class TestAlgorithms(unittest.TestCase):
# Set up phantom size NxN by creating ImageGeometry, initialising the
# ImageData object with this geometry and empty array and finally put some
# data into its array, and display as image.
+ def test_FISTA_denoise(self):
+ opt = {'memopt':True}
N = 64
ig = ImageGeometry(voxel_num_x=N,voxel_num_y=N)
Phantom = ImageData(geometry=ig)
@@ -609,14 +693,18 @@ class TestAlgorithms(unittest.TestCase):
x_init_denoise = ImageData(np.zeros((N,N)))
# Combine with least squares and solve using generic FISTA implementation
- x_fista1_denoise, it1_denoise, timing1_denoise, criter1_denoise = FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
+ x_fista1_denoise, it1_denoise, timing1_denoise, \
+ criter1_denoise = \
+ FISTA(x_init_denoise, f_denoise, g1_denoise, opt=opt)
print(x_fista1_denoise)
print(criter1_denoise[-1])
# Now denoise LS + 1-norm with FBPD
- x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
+ x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise,\
+ criterfbpd1_denoise = \
+ FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
print(x_fbpd1_denoise)
print(criterfbpd1_denoise[-1])
@@ -652,7 +740,9 @@ class TestAlgorithms(unittest.TestCase):
opt_tv = {'tol': 1e-4, 'iter': 10000}
- x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
+ x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise,\
+ criterfbpdtv_denoise = \
+ FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv)
print(x_fbpdtv_denoise)
print(criterfbpdtv_denoise[-1])
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index b4fabbd..dfd5bdd 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -23,11 +23,7 @@ import os
import sys
-
-cil_version=os.environ['CIL_VERSION']
-if cil_version == '':
-    print("Please set the environmental variable CIL_VERSION")
-    sys.exit(1)
+cil_version='0.11.1'

setup(
    name="ccpi-framework",

diff --git a/Wrappers/Python/wip/demo_compare_cvx.py b/Wrappers/Python/wip/demo_compare_cvx.py
index 2df11c1..ad79fa5 100644
--- a/Wrappers/Python/wip/demo_compare_cvx.py
+++ b/Wrappers/Python/wip/demo_compare_cvx.py
@@ -4,6 +4,7 @@ from ccpi.optimisation.algs import FISTA, FBPD, CGLS
from ccpi.optimisation.funcs import Norm2sq, ZeroFun, Norm1, TV2D
from ccpi.optimisation.ops import LinearOperatorMatrix, TomoIdentity
+from ccpi.optimisation.ops import Identity

# Requires CVXPY, see http://www.cvxpy.org/
# CVXPY can be installed in anaconda using
@@ -80,8 +81,22 @@ plt.show()
g1 = Norm1(lam)

g1(x_init)
-g1.prox(x_init,0.02)
-
+x_rand = DataContainer(np.reshape(np.random.rand(n),(n,1)))
+x_rand2 = DataContainer(np.reshape(np.random.rand(n-1),(n-1,1)))
+v = g1.prox(x_rand,0.02)
+#vv = g1.prox(x_rand2,0.02)
+vv = v.copy()
+vv *= 0
+print (">>>>>>>>>>vv" , vv.as_array())
+vv.fill(v)
+print (">>>>>>>>>>fill" , vv.as_array())
+g1.proximal(x_rand, 0.02, out=vv)
+print (">>>>>>>>>>v" , v.as_array())
+print (">>>>>>>>>>gradient" , vv.as_array())
+
+print (">>>>>>>>>>" , (v-vv).as_array())
+import sys
+#sys.exit(0)
# Combine with least squares and solve using generic FISTA implementation
x_fista1, it1, timing1, criter1 = FISTA(x_init, f, g1,opt=opt)
@@ -108,7 +123,7 @@ if use_cvxpy:
    print(objective1.value)

# Now try another algorithm FBPD for same problem:
-x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init, None, f, g1)
+x_fbpd1, itfbpd1, timingfbpd1, criterfbpd1 = FBPD(x_init,Identity(), None, f, g1)
print(x_fbpd1)
print(criterfbpd1[-1])
@@ -178,7 +193,8 @@ plt.title('FISTA LS+1')
plt.show()

# Now denoise LS + 1-norm with FBPD
-x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, criterfbpd1_denoise = FBPD(x_init_denoise, None, f_denoise, g1_denoise)
+x_fbpd1_denoise, itfbpd1_denoise, timingfbpd1_denoise, \
+    criterfbpd1_denoise = FBPD(x_init_denoise, I, None, f_denoise, g1_denoise)
print(x_fbpd1_denoise)
print(criterfbpd1_denoise[-1])
@@ -217,7 +233,8 @@ gtv(gtv.op.direct(x_init_denoise))

opt_tv = {'tol': 1e-4, 'iter': 10000}

-x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, criterfbpdtv_denoise = FBPD(x_init_denoise, None, f_denoise, gtv,opt=opt_tv)
+x_fbpdtv_denoise, itfbpdtv_denoise, timingfbpdtv_denoise, \
+    criterfbpdtv_denoise = FBPD(x_init_denoise, gtv.op, None, f_denoise, gtv,opt=opt_tv)
print(x_fbpdtv_denoise)
print(criterfbpdtv_denoise[-1])
@@ -230,7 +247,7 @@ if use_cvxpy:
    # Construct the problem.
    xtv_denoise = Variable((N,N))
-    print (xtv_denoise.shape)
+    #print (xtv_denoise.value.shape)
    objectivetv_denoise = Minimize(0.5*sum_squares(xtv_denoise - y.array) + lam_tv*tv(xtv_denoise) )
    probtv_denoise = Problem(objectivetv_denoise)