 Wrappers/Python/ccpi/framework/__init__.py                               |  1
 Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py             | 20
 Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py  | 12
 Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py          | 15
 4 files changed, 22 insertions(+), 26 deletions(-)
diff --git a/Wrappers/Python/ccpi/framework/__init__.py b/Wrappers/Python/ccpi/framework/__init__.py
index 4683c21..66e2f56 100755
--- a/Wrappers/Python/ccpi/framework/__init__.py
+++ b/Wrappers/Python/ccpi/framework/__init__.py
@@ -22,3 +22,4 @@ from .framework import find_key, message
 from .framework import DataProcessor
 from .framework import AX, PixelByPixelDataProcessor, CastDataContainer
 from .BlockDataContainer import BlockDataContainer
+from .BlockGeometry import BlockGeometry
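
For context: the new export pairs BlockGeometry with the BlockDataContainer
already exported above. A minimal sketch of how the two are meant to relate
(illustrative only; the attribute and method names below are assumptions,
not taken from this commit):

    # Hypothetical sketch, not the implementation added by this commit.
    from ccpi.framework import BlockDataContainer

    class BlockGeometry(object):
        '''Groups component geometries so that one allocate() call can
        produce a matching BlockDataContainer, one block per geometry.'''
        def __init__(self, *geometries):
            self.geometries = list(geometries)

        def allocate(self):
            # assumes each component geometry can allocate its own
            # container, as the framework geometries do
            return BlockDataContainer(*[g.allocate() for g in self.geometries])
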
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 54c947a..9267565 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -52,51 +52,43 @@ class SimpleL2NormSq(Function):
 class L2NormSq(SimpleL2NormSq):
 
     def __init__(self, **kwargs):
-
         super(L2NormSq, self).__init__()
         self.b = kwargs.get('b', None)
-
+
     def __call__(self, x):
-
         if self.b is None:
             return SimpleL2NormSq.__call__(self, x)
         else:
             return SimpleL2NormSq.__call__(self, x - self.b)
 
     def gradient(self, x):
-
         if self.b is None:
             return 2 * x
         else:
             return 2 * (x - self.b)
-
+
     def convex_conjugate(self, x):
-
         ''' The convex conjugate corresponds to the simple functional, i.e.,
            f(x) = alpha * ||x - b||_{2}^{2}
         '''
-
         if self.b is None:
             return SimpleL2NormSq.convex_conjugate(self, x)
         else:
             return SimpleL2NormSq.convex_conjugate(self, x) + (self.b * x).sum()
-
+
     def proximal(self, x, tau):
-
+
         ''' The proximal operator corresponds to the simple functional, i.e.,
            f(x) = alpha * ||x - b||_{2}^{2}
 
            argmin_x { 0.5||x - u||^{2} + tau f(x) }
-        '''
-
+        '''
         if self.b is None:
             return SimpleL2NormSq.proximal(self, x, tau)
         else:
             return self.b + SimpleL2NormSq.proximal(self, x - self.b, tau)
-
     def proximal_conjugate(self, x, tau):
-
         ''' The proximal operator corresponds to the simple convex conjugate
            functional, i.e., f^{*}(x^{*})
 
            argmin_x { 0.5||x - u||^{2} + tau f(x) }
@@ -105,5 +97,3 @@ class L2NormSq(SimpleL2NormSq):
             return SimpleL2NormSq.proximal_conjugate(self, x, tau)
         else:
             return SimpleL2NormSq.proximal_conjugate(self, x - tau * self.b, tau)
-
-
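
The b-shifted branches above rely on the standard translation identities
prox_{tau f}(x) = b + prox_{tau g}(x - b) and (g(. - b))^*(x) = g^*(x) + <b, x>
for f(x) = g(x - b). A quick numeric sanity check of the proximal identity,
assuming SimpleL2NormSq implements g(v) = alpha * ||v||_{2}^{2} as its
docstrings suggest (standalone numpy; does not import the framework):

    import numpy as np

    alpha, tau = 0.7, 0.3
    b = np.array([1.0, -2.0, 0.5])
    x = np.array([0.2, 1.1, -0.4])

    # closed-form prox of g(v) = alpha*||v||^2:
    # argmin_v 0.5*||v - u||^2 + tau*alpha*||v||^2  =>  v = u / (1 + 2*tau*alpha)
    prox_g = lambda u: u / (1.0 + 2.0 * tau * alpha)

    # translation identity used by L2NormSq.proximal
    lhs = b + prox_g(x - b)

    # direct minimiser of 0.5*||v - x||^2 + tau*alpha*||v - b||^2, obtained
    # by setting the gradient (v - x) + 2*tau*alpha*(v - b) to zero
    rhs = (x + 2.0 * tau * alpha * b) / (1.0 + 2.0 * tau * alpha)

    assert np.allclose(lhs, rhs)
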
diff --git a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
index 16cd215..999975c 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/FiniteDifferenceOperator.py
@@ -24,9 +24,9 @@ class FiniteDiff(Operator):
     # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x']
     def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond='Neumann'):
-
+        ''''''
         super(FiniteDiff, self).__init__()
-
+        '''FIXME: domain and range should be geometries'''
         self.gm_domain = gm_domain
         self.gm_range = gm_range
         self.direction = direction
@@ -297,14 +297,16 @@ class FiniteDiff(Operator):
         res = out
         return res
 
-    def range_dim(self):
+    def range_geometry(self):
         return self.gm_range
 
-    def domain_dim(self):
+    def domain_geometry(self):
+        '''currently is a tuple'''
         return self.gm_domain
 
     def norm(self):
-        x0 = ImageData(np.random.random_sample(self.domain_dim()))
+        x0 = self.gm_domain.allocate()
+        x0 = np.random.random_sample(x0.shape)
         self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0)
         return self.s1
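
The reworked norm() still delegates to PowerMethodNonsquare with a random
starting point (note that x0 is rebound to a plain numpy array on the line
after allocate(), so the allocated container only supplies the shape). For
reference, a standalone sketch of the kind of power iteration such a helper
is assumed to perform -- power_method_norm is a hypothetical name, not the
framework's API:

    import numpy as np

    def power_method_norm(direct, adjoint, x0, iterations=100):
        # Power iteration on A^T A: its dominant eigenvalue is ||A||_2^2,
        # so the square root estimates the operator norm of A.
        x = x0 / np.linalg.norm(x0)
        s = 0.0
        for _ in range(iterations):
            y = adjoint(direct(x))
            s = np.linalg.norm(y)   # running estimate of lambda_max(A^T A)
            x = y / s
        return np.sqrt(s)

    # sanity check against numpy's exact spectral norm
    rng = np.random.default_rng(0)
    A = rng.standard_normal((40, 30))
    est = power_method_norm(lambda v: A @ v, lambda w: A.T @ w,
                            rng.standard_normal(30))
    assert abs(est - np.linalg.norm(A, 2)) < 1e-3 * np.linalg.norm(A, 2)
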
diff --git a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
index 3dcc1bd..2eb77ce 100644
--- a/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/GradientOperator.py
@@ -11,6 +11,7 @@ from ccpi.optimisation.ops import PowerMethodNonsquare
 from ccpi.framework import ImageData, BlockDataContainer
 import numpy as np
 from ccpi.optimisation.operators import FiniteDiff
+from ccpi.framework import BlockGeometry
 
 #%%
@@ -57,11 +58,12 @@ class Gradient(Operator):
     def alloc_range_dim(self):
         return ImageData(np.zeros(self.range_dim))
 
-    def domain_dim(self):
+    def domain_geometry(self):
         return self.gm_domain
 
-    def range_dim(self):
-        return self.gm_range
+    def range_geometry(self):
+        '''fix this'''
+        return BlockGeometry(self.gm_range, self.gm_range)
 
     def norm(self):
         # return np.sqrt(4*len(self.domainDim()))
@@ -83,10 +85,10 @@ if __name__ == '__main__':
     # DataContainer(np.random.randint(10, size=G.domain_dim()))]
 
     # domain_dim
-    print('Domain {}'.format(G.domain_dim()))
+    print('Domain {}'.format(G.domain_geometry()))
 
     # range_dim
-    print('Range {}'.format(G.range_dim()))
+    print('Range {}'.format(G.range_geometry()))
 
     # Direct
     z = G.direct(u)
@@ -104,7 +106,8 @@ if __name__ == '__main__':
     print(G.norm())
 
     # print(G.adjoint(G.direct(u)))
-
+
+
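
As the 'fix this' marker notes, range_geometry() currently hard-codes a
two-component BlockGeometry. A sketch of the dimension-aware version this
seems to be heading towards (illustrative only; it assumes the domain
geometry exposes the number of differentiation directions via its shape):

    # Hypothetical generalisation, not part of this commit.
    def range_geometry(self):
        # one copy of the component range geometry per finite-difference
        # direction, instead of the hard-coded pair above
        ndim = len(self.gm_domain.shape)
        return BlockGeometry(*([self.gm_range] * ndim))
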