| field | value | date |
|---|---|---|
| author | Edoardo Pasca <edo.paskino@gmail.com> | 2019-04-15 12:20:21 +0100 |
| committer | Edoardo Pasca <edo.paskino@gmail.com> | 2019-04-15 12:20:21 +0100 |
| commit | f17322dbb13d6ef03cc73646ef3f32deb11d7cda (patch) | |
| tree | de0e9f7b37dd7da830083a0a82caf878d4ca3000 | |
| parent | 1a5da33eb2c7bfde2224d634cb34d17b18d7cf72 (diff) | |
| parent | 1eba627e28552985642b9eaf77ba43bf41191566 (diff) | |
| download | framework-f17322dbb13d6ef03cc73646ef3f32deb11d7cda.tar.gz framework-f17322dbb13d6ef03cc73646ef3f32deb11d7cda.tar.bz2 framework-f17322dbb13d6ef03cc73646ef3f32deb11d7cda.tar.xz framework-f17322dbb13d6ef03cc73646ef3f32deb11d7cda.zip | |
Merge remote-tracking branch 'origin/composite_operator_datacontainer' into exception_geometry
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x | Wrappers/Python/ccpi/framework/BlockDataContainer.py | 50 |
| -rw-r--r-- | Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py | 63 |
| -rw-r--r-- | Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py | 19 |
| -rw-r--r-- | Wrappers/Python/ccpi/optimisation/functions/L1Norm.py | 141 |
| -rw-r--r-- | Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py | 23 |
| -rwxr-xr-x | Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py | 68 |
| -rwxr-xr-x | Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py (renamed from Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py) | 18 |
| -rw-r--r-- | Wrappers/Python/ccpi/optimisation/functions/__init__.py | 2 |
| -rwxr-xr-x | Wrappers/Python/wip/pdhg_TV_denoising.py | 17 |
| -rw-r--r-- | Wrappers/Python/wip/pdhg_TV_tomography2D.py | 4 |
11 files changed, 267 insertions, 172 deletions
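
The main API addition in this merge is the `pnorm` reduction on `BlockDataContainer`, which `MixedL21Norm` now uses in `__call__` and `proximal_conjugate`. A minimal sketch of its behaviour, mirroring the `__main__` test added by the commit itself (assumes this branch of the ccpi `framework` package is importable; the `pnorm(1)` behaviour is inferred from the implementation shown below and is not covered by the commit's test):

```python
from ccpi.framework import ImageGeometry, BlockGeometry
import numpy

N, M = 2, 3
ig = ImageGeometry(N, M)
BG = BlockGeometry(ig, ig)
U = BG.allocate('random_int')    # two-block container of random integers

# pnorm(2) is the pointwise 2-norm across the blocks, sqrt(U_0**2 + U_1**2),
# returned as a DataContainer; a scalar 2,1-norm still needs a final .sum().
z = numpy.sqrt(U[0].as_array()**2 + U[1].as_array()**2)
numpy.testing.assert_array_equal(z, U.pnorm(2).as_array())

# pnorm(1) sums the blockwise absolute values pointwise: |U_0| + |U_1|.
w = numpy.abs(U[0].as_array()) + numpy.abs(U[1].as_array())
numpy.testing.assert_array_equal(w, U.pnorm(1).as_array())
```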
diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
index afdf617..386cd87 100755
--- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -263,13 +263,26 @@ class BlockDataContainer(object):
         return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape)
     ## reductions
+
     def sum(self, *args, **kwargs):
         return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers])
+
     def squared_norm(self):
         y = numpy.asarray([el.squared_norm() for el in self.containers])
         return y.sum()
+
     def norm(self):
-        return numpy.sqrt(self.squared_norm())
+        return numpy.sqrt(self.squared_norm())
+
+    def pnorm(self, p=2):
+
+        if p==1:
+            return sum(self.abs())
+        elif p==2:
+            return sum([el*el for el in self.containers]).sqrt()
+        else:
+            return ValueError('Not implemented')
+
     def copy(self):
         '''alias of clone'''
         return self.clone()
@@ -428,4 +441,39 @@ class BlockDataContainer(object):
     def __itruediv__(self, other):
         '''Inline truedivision'''
         return self.__idiv__(other)
+
+
+
+
+
+if __name__ == '__main__':
+
+    from ccpi.framework import ImageGeometry, BlockGeometry
+    import numpy
+
+    N, M = 2, 3
+    ig = ImageGeometry(N, M)
+    BG = BlockGeometry(ig, ig)
+
+    U = BG.allocate('random_int')
+
+    print ("test sum BDC " )
+    w = U[0].as_array() + U[1].as_array()
+    w1 = sum(U).as_array()
+    numpy.testing.assert_array_equal(w, w1)
+
+    print ("test sum BDC " )
+    z = numpy.sqrt(U[0].as_array()**2 + U[1].as_array()**2)
+    z1 = sum(U**2).sqrt().as_array()
+    numpy.testing.assert_array_equal(z, z1)
+
+    z2 = U.pnorm(2)
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
index 8cce290..bf627a5 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/BlockFunction.py
@@ -6,36 +6,35 @@ Created on Fri Mar  8 10:01:31 2019
 @author: evangelos
 """
-import numpy as np
-
 from ccpi.optimisation.functions import Function
 from ccpi.framework import BlockDataContainer
 from numbers import Number

 class BlockFunction(Function):
-    '''A Block vector of Functions
+    '''BlockFunction acts as a separable sum function, i.e.,
-    .. math::
-
-      f = [f_1,f_2,f_3]
-      f([x_1,x_2,x_3]) = f_1(x_1) + f_2(x_2) + f_3(x_3)
+      f = [f_1,...,f_n]
+
+      f([x_1,...,x_n]) = f_1(x_1) + .... + f_n(x_n)
     '''
     def __init__(self, *functions):
-        '''Creator'''
+
         self.functions = functions
         self.length = len(self.functions)
         super(BlockFunction, self).__init__()

     def __call__(self, x):
-        '''evaluates the BlockFunction on the BlockDataContainer
+
+        '''Evaluates the BlockFunction at a BlockDataContainer x
         :param: x (BlockDataContainer): must have as many rows as self.length
         returns sum(f_i(x_i))
         '''
+
         if self.length != x.shape[0]:
             raise ValueError('BlockFunction and BlockDataContainer have incompatible size')
         t = 0
@@ -44,7 +43,12 @@ class BlockFunction(Function):
         return t

     def convex_conjugate(self, x):
-        '''Convex_conjugate does not take into account the BlockOperator'''
+
+        ''' Evaluate convex conjugate of BlockFunction at x
+
+        returns sum(f_i^{*}(x_i))
+
+        '''
         t = 0
         for i in range(x.shape[0]):
             t += self.functions[i].convex_conjugate(x.get_item(i))
@@ -52,7 +56,13 @@ class BlockFunction(Function):

     def proximal_conjugate(self, x, tau, out = None):
-        '''proximal_conjugate does not take into account the BlockOperator'''
+
+        ''' Evaluate Proximal Operator of tau * f(\cdot) at x
+
+        prox_{tau*f}(x) = sum_{i} prox_{tau*f_{i}}(x_{i})
+
+        '''
         if out is not None:
             if isinstance(tau, Number):
@@ -76,7 +86,14 @@ class BlockFunction(Function):

     def proximal(self, x, tau, out = None):
-        '''proximal does not take into account the BlockOperator'''
+
+        ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x
+
+        prox_{tau*f^{*}}(x) = sum_{i} prox_{tau*f^{*}_{i}}(x_{i})
+
+        '''
+
         out = [None]*self.length
         if isinstance(tau, Number):
             for i in range(self.length):
@@ -88,8 +105,19 @@ class BlockFunction(Function):
         return BlockDataContainer(*out)

     def gradient(self,x, out=None):
-        '''FIXME: gradient returns pass'''
-        pass
+
+        ''' Evaluate gradient of f at x: f'(x)
+
+        returns: BlockDataContainer [f_{1}'(x_{1}), ... , f_{n}'(x_{n})]
+
+        '''
+
+        out = [None]*self.length
+        for i in range(self.length):
+            out[i] = self.functions[i].gradient(x.get_item(i))
+
+        return BlockDataContainer(*out)

 if __name__ == '__main__':
@@ -100,6 +128,7 @@ if __name__ == '__main__':
     from ccpi.framework import ImageGeometry, BlockGeometry
     from ccpi.optimisation.operators import Gradient, Identity, BlockOperator
     import numpy
+    import numpy as np

     ig = ImageGeometry(M, N)
@@ -131,11 +160,7 @@ if __name__ == '__main__':
     numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \
                                             res_out[1].as_array(), decimal=4)
-
-
-
-
-
+

 ##########################################################################
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
index 34b7e35..38bc458 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
@@ -6,32 +6,47 @@ Created on Fri Mar  8 09:55:36 2019
 @author: evangelos
 """
-import numpy as np
-#from ccpi.optimisation.funcs import Function
 from ccpi.optimisation.functions import Function
 from ccpi.optimisation.functions import ScaledFunction

 class FunctionOperatorComposition(Function):
+    ''' Function composition with Operator, i.e., f(Ax)
+
+        A: operator
+        f: function
+
+    '''
+
     def __init__(self, operator, function):
+
         super(FunctionOperatorComposition, self).__init__()
         self.function = function
         self.operator = operator
         alpha = 1
+
         if isinstance (function, ScaledFunction):
             alpha = function.scalar
         self.L = 2 * alpha * operator.norm()**2

     def __call__(self, x):
+
+        ''' Evaluate FunctionOperatorComposition at x
+
+        returns f(Ax)
+
+        '''
         return self.function(self.operator.direct(x))

+    #TODO do not know if we need it
     def call_adjoint(self, x):
         return self.function(self.operator.adjoint(x))
+
     def convex_conjugate(self, x):
         ''' convex_conjugate does not take into account the Operator'''
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
index 163eefa..4e53f2c 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -16,11 +16,82 @@
 #   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
-"""
-Created on Wed Mar  6 19:42:34 2019
-@author: evangelos
-"""
+
+from ccpi.optimisation.functions import Function
+from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
+from ccpi.optimisation.operators import ShrinkageOperator
+
+
+class L1Norm(Function):
+
+    '''
+
+    Class: L1Norm
+
+    Cases:  a) f(x) = ||x||_{1}
+
+            b) f(x) = ||x - b||_{1}
+
+    '''
+
+    def __init__(self, **kwargs):
+
+        super(L1Norm, self).__init__()
+        self.b = kwargs.get('b',None)
+
+    def __call__(self, x):
+
+        ''' Evaluate L1Norm at x: f(x) '''
+
+        y = x
+        if self.b is not None:
+            y = x - self.b
+        return y.abs().sum()
+
+    def gradient(self,x):
+        #TODO implement subgradient???
+        return ValueError('Not Differentiable')
+
+    def convex_conjugate(self,x):
+        #TODO implement Indicator infty???
+
+        y = 0
+        if self.b is not None:
+            y = 0 + (self.b * x).sum()
+        return y
+
+    def proximal(self, x, tau, out=None):
+
+        # TODO implement shrinkage operator, we will need it later e.g SplitBregman
+
+        if out is None:
+            if self.b is not None:
+                return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
+            else:
+                return ShrinkageOperator.__call__(self, x, tau)
+        else:
+            if self.b is not None:
+                out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
+            else:
+                out.fill(ShrinkageOperator.__call__(self, x, tau))
+
+    def proximal_conjugate(self, x, tau, out=None):
+
+        if out is None:
+            if self.b is not None:
+                return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))
+            else:
+                return x.divide(x.abs().maximum(1.0))
+        else:
+            if self.b is not None:
+                out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)))
+            else:
+                out.fill(x.divide(x.abs().maximum(1.0)))
+
+    def __rmul__(self, scalar):
+        return ScaledFunction(self, scalar)
+

 #import numpy as np
 ##from ccpi.optimisation.funcs import Function
@@ -92,67 +163,7 @@ Created on Wed Mar  6 19:42:34 2019
 #
 ###############################################################################
-from ccpi.optimisation.functions import Function
-from ccpi.optimisation.functions.ScaledFunction import ScaledFunction
-from ccpi.optimisation.operators import ShrinkageOperator
-
-
-class L1Norm(Function):
-
-    def __init__(self, **kwargs):
-
-        super(L1Norm, self).__init__()
-        self.b = kwargs.get('b',None)
-
-    def __call__(self, x):
-
-        y = x
-        if self.b is not None:
-            y = x - self.b
-        return y.abs().sum()
-
-    def gradient(self,x):
-        #TODO implement subgradient???
-        return ValueError('Not Differentiable')
-
-    def convex_conjugate(self,x):
-        #TODO implement Indicator infty???
-
-        y = 0
-        if self.b is not None:
-            y = 0 + (self.b * x).sum()
-        return y
-
-    def proximal(self, x, tau, out=None):
-
-        # TODO implement shrinkage operator, we will need it later e.g SplitBregman
-
-        if out is None:
-            if self.b is not None:
-                return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
-            else:
-                return ShrinkageOperator.__call__(self, x, tau)
-        else:
-            if self.b is not None:
-                out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
-            else:
-                out.fill(ShrinkageOperator.__call__(self, x, tau))
-
-    def proximal_conjugate(self, x, tau, out=None):
-
-        if out is None:
-            if self.b is not None:
-                return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))
-            else:
-                return x.divide(x.abs().maximum(1.0))
-        else:
-            if self.b is not None:
-                out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)))
-            else:
-                out.fill(x.divide(x.abs().maximum(1.0)))
-
-    def __rmul__(self, scalar):
-        return ScaledFunction(self, scalar)
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 903dafa..7397cfb 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -17,19 +17,16 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
-import numpy
 from ccpi.optimisation.functions import Function
 from ccpi.optimisation.functions.ScaledFunction import ScaledFunction

 class L2NormSquared(Function):
-    '''
-
-    Class: L2NormSquared
-
-    Cases:  a) f(x) = ||x||^{2}
+    '''
+
+    Cases:  a) f(x) = \|x\|^{2}_{2}

-            b) f(x) = ||x - b||^{2}, b
+            b) f(x) = ||x - b||^{2}_{2}

     '''
@@ -50,9 +47,7 @@ class L2NormSquared(Function):
         except AttributeError as ae:
             # added for compatibility with SIRF
             return (y.norm()**2)
-
-
-
+
     def gradient(self, x, out=None):
         ''' Evaluate gradient of L2NormSquared at x: f'(x) '''
@@ -127,11 +122,10 @@ class L2NormSquared(Function):

     def __rmul__(self, scalar):
-        ''' Allows multiplication of L2NormSquared with a scalar
+        ''' Multiplication of L2NormSquared with a scalar

         Returns: ScaledFunction
-
-
+
         '''
         return ScaledFunction(self, scalar)
@@ -139,7 +133,8 @@ class L2NormSquared(Function):

 if __name__ == '__main__':
-
+    from ccpi.framework import ImageGeometry
+    import numpy

     # TESTS for L2 and scalar * L2
     M, N, K = 2,3,5
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 24c47f4..f524c5f 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -17,47 +17,48 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
-import numpy as np
 from ccpi.optimisation.functions import Function, ScaledFunction
-from ccpi.framework import DataContainer, ImageData, \
-                           ImageGeometry, BlockDataContainer
+from ccpi.framework import BlockDataContainer
+
 import functools

-############################   mixed_L1,2NORM FUNCTIONS   #####################
 class MixedL21Norm(Function):
+
+    '''
+        f(x) = ||x||_{2,1} = \sum |x|_{2}
+    '''
+
     def __init__(self, **kwargs):

         super(MixedL21Norm, self).__init__()
         self.SymTensor = kwargs.get('SymTensor',False)

-    def __call__(self, x, out=None):
+    def __call__(self, x):

-        ''' Evaluates L1,2Norm at point x
+        ''' Evaluates L2,1Norm at point x

             :param: x is a BlockDataContainer
         '''
         if not isinstance(x, BlockDataContainer):
-            raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))
+            raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))
+
         if self.SymTensor:
+            #TODO fix this case
             param = [1]*x.shape[0]
             param[-1] = 2
             tmp = [param[i]*(x[i] ** 2) for i in range(x.shape[0])]
-            res = sum(tmp).sqrt().sum()
-        else:
-
-#            tmp = [ x[i]**2 for i in range(x.shape[0])]
-            tmp = [ el**2 for el in x.containers ]
+            res = sum(tmp).sqrt().sum()
-#            print(x.containers)
-#            print(tmp)
-#            print(type(sum(tmp)))
-#            print(type(tmp))
-            res = sum(tmp).sqrt().sum()
-#            print(res)
-        return res
+        else:
+
+            #tmp = [ el**2 for el in x.containers ]
+            #res = sum(tmp).sqrt().sum()
+            res = x.pnorm()
+
+        return res

     def gradient(self, x, out=None):
         return ValueError('Not Differentiable')
@@ -93,19 +94,28 @@ class MixedL21Norm(Function):
         else:
             if out is None:
-                tmp = [ el*el for el in x.containers]
-                res = sum(tmp).sqrt().maximum(1.0)
-                frac = [el/res for el in x.containers]
-                res = BlockDataContainer(*frac)
-                return res
+#                tmp = [ el*el for el in x.containers]
+#                res = sum(tmp).sqrt().maximum(1.0)
+#                frac = [el/res for el in x.containers]
+#                res = BlockDataContainer(*frac)
+#                return res
+
+                return x.divide(x.pnorm().maximum(1.0))
             else:
-                res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 )
-                res = res1.sqrt().maximum(1.0)
-                x.divide(res, out=out)
-                #for i,el in enumerate(x.containers):
-                #    el.divide(res, out=out.get_item(i))
+
+#                res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 )
+#                res = res1.sqrt().maximum(1.0)
+#                x.divide(res, out=out)
+                x.divide(x.pnorm().maximum(1.0), out=out)

     def __rmul__(self, scalar):
+
+        ''' Multiplication of L2NormSquared with a scalar
+
+        Returns: ScaledFunction
+
+        '''
         return ScaledFunction(self, scalar)
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index 3fbb858..cb85249 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -20,6 +20,7 @@ from numbers import Number
 import numpy
 class ScaledFunction(object):
+
     '''ScaledFunction

     A class to represent the scalar multiplication of an Function with a scalar.
@@ -48,12 +49,22 @@ class ScaledFunction(object):
     def convex_conjugate(self, x):
         '''returns the convex_conjugate of the scaled function '''
-        # if out is None:
-        #     return self.scalar * self.function.convex_conjugate(x/self.scalar)
-        # else:
-        #     out.fill(self.function.convex_conjugate(x/self.scalar))
-        #     out *= self.scalar
         return self.scalar * self.function.convex_conjugate(x/self.scalar)
+
+    def gradient(self, x, out=None):
+        '''Returns the gradient of the function at x, if the function is differentiable'''
+        if out is None:
+            return self.scalar * self.function.gradient(x)
+        else:
+            out.fill( self.scalar * self.function.gradient(x) )
+
+    def proximal(self, x, tau, out=None):
+        '''This returns the proximal operator for the function at x, tau
+        '''
+        if out is None:
+            return self.function.proximal(x, tau*self.scalar)
+        else:
+            out.fill( self.function.proximal(x, tau*self.scalar) )

     def proximal_conjugate(self, x, tau, out = None):
         '''This returns the proximal operator for the function at x, tau
@@ -76,20 +87,7 @@ class ScaledFunction(object):
         versions of the CIL. Use proximal instead''', DeprecationWarning)
         return self.proximal(x, out=None)

-    def gradient(self, x, out=None):
-        '''Returns the gradient of the function at x, if the function is differentiable'''
-        if out is None:
-            return self.scalar * self.function.gradient(x)
-        else:
-            out.fill( self.scalar * self.function.gradient(x) )
-
-    def proximal(self, x, tau, out=None):
-        '''This returns the proximal operator for the function at x, tau
-        '''
-        if out is None:
-            return self.function.proximal(x, tau*self.scalar)
-        else:
-            out.fill( self.function.proximal(x, tau*self.scalar) )

 if __name__ == '__main__':
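
With `gradient` and `proximal` now implemented once on `ScaledFunction` (the duplicated copies above are removed), a scaled function `g = c * f` satisfies `g.gradient(x) = c * f.gradient(x)` and `prox_{tau*g}(x) = prox_{(tau*c)*f}(x)`. A quick check against `L2NormSquared`, a sketch assuming `ImageGeometry.allocate('random_int')` and `L2NormSquared.proximal` are available as elsewhere in this codebase (neither is shown in this diff):

```python
from ccpi.framework import ImageGeometry
from ccpi.optimisation.functions import L2NormSquared
import numpy

ig = ImageGeometry(4, 5)
x = ig.allocate('random_int')

f = L2NormSquared()      # f(x) = ||x||_{2}^{2}
g = 0.5 * f              # __rmul__ wraps f in a ScaledFunction

# ScaledFunction.gradient returns scalar * function.gradient(x)
numpy.testing.assert_array_almost_equal(
    g.gradient(x).as_array(), 0.5 * f.gradient(x).as_array())

# ScaledFunction.proximal delegates with a rescaled step: tau -> tau * scalar
tau = 2.0
numpy.testing.assert_array_almost_equal(
    g.proximal(x, tau).as_array(), f.proximal(x, tau * 0.5).as_array())
```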
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py b/Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py
index 6d21acb..cce519a 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/ZeroFun.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ZeroFunction.py
@@ -17,21 +17,24 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
-import numpy as np
-#from ccpi.optimisation.funcs import Function
 from ccpi.optimisation.functions import Function
-from ccpi.framework import DataContainer, ImageData
 from ccpi.framework import BlockDataContainer

-class ZeroFun(Function):
+class ZeroFunction(Function):
+
+    ''' ZeroFunction: f(x) = 0
+
+    '''

     def __init__(self):
-        super(ZeroFun, self).__init__()
+        super(ZeroFunction, self).__init__()

     def __call__(self,x):
         return 0

     def convex_conjugate(self, x):
+
         ''' This is the support function sup <x, x^{*}> which in fact is the
         indicator function for the set = {0}
         So 0 if x=0, or inf if x neq 0
@@ -56,8 +59,3 @@ class ZeroFun(Function):
             return 0
         else:
             return 0
-
-    def domain_geometry(self):
-        pass
-    def range_geometry(self):
-        pass
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
index 65e8848..a82ee3e 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/__init__.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/__init__.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-

 from .Function import Function
-from .ZeroFun import ZeroFun
+from .ZeroFunction import ZeroFunction
 from .L1Norm import L1Norm
 from .L2NormSquared import L2NormSquared
 from .ScaledFunction import ScaledFunction
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py
index 9bd5221..d885bca 100755
--- a/Wrappers/Python/wip/pdhg_TV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising.py
@@ -14,7 +14,7 @@ import matplotlib.pyplot as plt
 from ccpi.optimisation.algorithms import PDHG, PDHG_old

 from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
                       MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction

 from skimage.util import random_noise
@@ -25,8 +25,6 @@ def dt(steps):

 #%%
-
-# ############################################################################
 # Create phantom for TV denoising

 N = 200
@@ -68,7 +66,7 @@ if method == '0':
     f2 = 0.5 * L2NormSquared(b = noisy_data)
     f = BlockFunction(f1, f2)
-    g = ZeroFun()
+    g = ZeroFunction()

 else:
@@ -90,8 +88,8 @@ print ("normK", normK)
 sigma = 1
 tau = 1/(sigma*normK**2)

-opt = {'niter':1000}
-opt1 = {'niter':1000, 'memopt': True}
+opt = {'niter':2000}
+opt1 = {'niter':2000, 'memopt': True}

 #t1 = timer()
 #res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
@@ -140,14 +138,11 @@ plt.colorbar()

 plt.subplot(1,3,2)
 plt.imshow(res1.as_array())
 plt.colorbar()
+
 #plt.show()

-#plt.figure(figsize=(5,5))
-plt.subplot(1,3,3)
-plt.imshow(np.abs(res1.as_array()-res.as_array()))
-plt.colorbar()
-plt.show()
+

 #=======
 ## opt = {'niter':2000, 'memopt': True}
 #
diff --git a/Wrappers/Python/wip/pdhg_TV_tomography2D.py b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
index 159f2ea..e0868f7 100644
--- a/Wrappers/Python/wip/pdhg_TV_tomography2D.py
+++ b/Wrappers/Python/wip/pdhg_TV_tomography2D.py
@@ -16,7 +16,7 @@ import matplotlib.pyplot as plt
 from ccpi.optimisation.algorithms import PDHG, PDHG_old

 from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
-from ccpi.optimisation.functions import ZeroFun, L2NormSquared, \
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
                       MixedL21Norm, BlockFunction, ScaledFunction

 from ccpi.astra.ops import AstraProjectorSimple
@@ -90,7 +90,7 @@ operator = BlockOperator(op1, op2, shape=(2,1) )

 alpha = 50
 f = BlockFunction( alpha * MixedL21Norm(), \
                    0.5 * L2NormSquared(b = noisy_data) )
-g = ZeroFun()
+g = ZeroFunction()

 # Compute operator Norm
 normK = operator.norm()
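
After the rename, the wip demos build their PDHG problems with `ZeroFunction`. A condensed sketch of the TV-denoising setup from `wip/pdhg_TV_denoising.py` following this merge; the operator construction and the value of `alpha` are assumptions here (the script's full body is not shown in the diff), and `noisy_data` stands in for the noisy phantom:

```python
from ccpi.framework import ImageGeometry
from ccpi.optimisation.algorithms import PDHG_old
from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
                      MixedL21Norm, BlockFunction

N = 200
ig = ImageGeometry(N, N)
noisy_data = ig.allocate('random_int')   # stand-in for the noisy phantom

# K = [Gradient; Identity], f(y) = alpha*||y_1||_{2,1} + 0.5*||y_2 - b||_2^2, g = 0
op1 = Gradient(ig)
op2 = Identity(ig)
operator = BlockOperator(op1, op2, shape=(2, 1))

alpha = 2            # regularisation weight (placeholder value)
f = BlockFunction(alpha * MixedL21Norm(), 0.5 * L2NormSquared(b=noisy_data))
g = ZeroFunction()

# step sizes, as in the script
normK = operator.norm()
sigma = 1
tau = 1 / (sigma * normK**2)

opt = {'niter': 2000}
# res, total_time, primal, dual, pdgap = PDHG_old(f, g, operator,
#                                                 tau=tau, sigma=sigma, opt=opt)
```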
