From 107eb18c28255c4c8dbdf8245ffb85fe6f7535d6 Mon Sep 17 00:00:00 2001 From: Edoardo Pasca Date: Mon, 29 Jan 2018 13:36:11 +0000 Subject: renamed fista_module_gpu to gpu_regularizers.pyx Cleaned test_cpu_regularizers.py --- Wrappers/Python/setup.py | 3 +- Wrappers/Python/src/cpu_regularizers.pyx | 0 Wrappers/Python/src/fista_module_gpu.pyx | 315 ------------------ Wrappers/Python/src/gpu_regularizers.pyx | 315 ++++++++++++++++++ Wrappers/Python/test/test_cpu_regularizers.py | 445 ++++++++++++++++++++++++++ 5 files changed, 762 insertions(+), 316 deletions(-) create mode 100644 Wrappers/Python/src/cpu_regularizers.pyx delete mode 100644 Wrappers/Python/src/fista_module_gpu.pyx create mode 100644 Wrappers/Python/src/gpu_regularizers.pyx create mode 100644 Wrappers/Python/test/test_cpu_regularizers.py diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py index 951146a..00c93fc 100644 --- a/Wrappers/Python/setup.py +++ b/Wrappers/Python/setup.py @@ -60,7 +60,7 @@ setup( cmdclass = {'build_ext': build_ext}, ext_modules = [Extension("ccpi.filters.gpu_regularizers", sources=[ - os.path.join("." , "src", "fista_module_gpu.pyx" ), + os.path.join("." , "src", "gpu_regularizers.pyx" ), ], include_dirs=extra_include_dirs, library_dirs=extra_library_dirs, @@ -79,6 +79,7 @@ setup( cmdclass = {'build_ext': build_ext}, ext_modules = [Extension("ccpi.filters.cpu_regularizers", sources=[os.path.join("." , "src", "fista_module.cpp" ), + os.path.join("." , "src", "cpu_regularizers.pyx" ) # os.path.join("@CMAKE_SOURCE_DIR@" , "main_func" , "regularizers_CPU", "FGP_TV_core.c"), # os.path.join("@CMAKE_SOURCE_DIR@" , "main_func" , "regularizers_CPU", "SplitBregman_TV_core.c"), # os.path.join("@CMAKE_SOURCE_DIR@" , "main_func" , "regularizers_CPU", "LLT_model_core.c"), diff --git a/Wrappers/Python/src/cpu_regularizers.pyx b/Wrappers/Python/src/cpu_regularizers.pyx new file mode 100644 index 0000000..e69de29 diff --git a/Wrappers/Python/src/fista_module_gpu.pyx b/Wrappers/Python/src/fista_module_gpu.pyx deleted file mode 100644 index 7658e36..0000000 --- a/Wrappers/Python/src/fista_module_gpu.pyx +++ /dev/null @@ -1,315 +0,0 @@ -# distutils: language=c++ -""" -Copyright 2018 CCPi -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -Author: Edoardo Pasca -""" - -import cython -import numpy as np -cimport numpy as np - - -cdef extern void Diff4th_GPU_kernel(float* A, float* B, int N, int M, int Z, - float sigma, int iter, float tau, float lambdaf); -cdef extern void NLM_GPU_kernel(float *A, float* B, float *Eucl_Vec, - int N, int M, int Z, int dimension, - int SearchW, int SimilW, - int SearchW_real, float denh2, float lambdaf); -cdef extern float pad_crop(float *A, float *Ap, - int OldSizeX, int OldSizeY, int OldSizeZ, - int NewSizeX, int NewSizeY, int NewSizeZ, - int padXY, int switchpad_crop); - -def Diff4thHajiaboli(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter): - if inputData.ndim == 2: - return Diff4thHajiaboli2D(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter) - elif inputData.ndim == 3: - return Diff4thHajiaboli3D(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter) - -def Diff4thHajiaboli2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData, - float regularization_parameter, - int iterations, - float edge_preserving_parameter): - - cdef long dims[2] - dims[0] = inputData.shape[0] - dims[1] = inputData.shape[1] - N = dims[0] + 2; - M = dims[1] + 2; - - #time step is sufficiently small for an explicit methods - tau = 0.01 - - #A_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); - #B_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] A_L = \ - np.zeros([N,M], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B_L = \ - np.zeros([N,M], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B = \ - np.zeros([dims[0],dims[1]], dtype='float32') - #A = inputData - - # copy A to the bigger A_L with boundaries - #pragma omp parallel for shared(A_L, A) private(i,j) - cdef int i, j; - for i in range(N): - for j in range(M): - if (((i > 0) and (i < N-1)) and ((j > 0) and (j < M-1))): - #A_L[i*M+j] = A[(i-1)*(dims[1])+(j-1)] - A_L[i][j] = inputData[i-1][j-1] - - # Running CUDA code here - #Diff4th_GPU_kernel(A_L, B_L, N, M, Z, (float)sigma, iter, (float)tau, lambda); - Diff4th_GPU_kernel( - # A_L.data, B_L.data, - &A_L[0,0], &B_L[0,0], - N, M, 0, - edge_preserving_parameter, - iterations , - tau, - regularization_parameter); - # copy the processed B_L to a smaller B - for i in range(N): - for j in range(M): - if (((i > 0) and (i < N-1)) and ((j > 0) and (j < M-1))): - B[i-1][j-1] = B_L[i][j] - ##pragma omp parallel for shared(B_L, B) private(i,j) - #for (i=0; i < N; i++) { - # for (j=0; j < M; j++) { - # if (((i > 0) && (i < N-1)) && ((j > 0) && (j < M-1))) B[(i-1)*(dims[1])+(j-1)] = B_L[i*M+j]; - # }} - - return B - -def Diff4thHajiaboli3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData, - float regularization_parameter, - int iterations, - float edge_preserving_parameter): - - cdef long dims[3] - dims[0] = inputData.shape[0] - dims[1] = inputData.shape[1] - dims[2] = inputData.shape[2] - N = dims[0] + 2 - M = dims[1] + 2 - Z = dims[2] + 2 - - # Time Step is small for an explicit methods - tau = 0.0007; - - #A_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); - #B_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); - cdef np.ndarray[np.float32_t, ndim=3, mode="c"] A_L = \ - np.zeros([N,M,Z], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=3, mode="c"] B_L = \ - np.zeros([N,M,Z], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=3, 
mode="c"] B = \ - np.zeros([dims[0],dims[1],dims[2]], dtype='float32') - #A = inputData - - # copy A to the bigger A_L with boundaries - #pragma omp parallel for shared(A_L, A) private(i,j) - cdef int i, j, k; - for i in range(N): - for j in range(M): - for k in range(Z): - if (((i > 0) and (i < N-1)) and \ - ((j > 0) and (j < M-1)) and \ - ((k > 0) and (k < Z-1))): - A_L[i][j][k] = inputData[i-1][j-1][k-1]; - - # Running CUDA code here - #Diff4th_GPU_kernel(A_L, B_L, N, M, Z, (float)sigma, iter, (float)tau, lambda); - Diff4th_GPU_kernel( - # A_L.data, B_L.data, - &A_L[0,0,0], &B_L[0,0,0], - N, M, Z, - edge_preserving_parameter, - iterations , - tau, - regularization_parameter); - # copy the processed B_L to a smaller B - for i in range(N): - for j in range(M): - for k in range(Z): - if (((i > 0) and (i < N-1)) and \ - ((j > 0) and (j < M-1)) and \ - ((k > 0) and (k < Z-1))): - B[i-1][j-1][k-1] = B_L[i][j][k] - - - return B - -def NML(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter): - if inputData.ndim == 2: - return NML2D(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter) - elif inputData.ndim == 3: - return NML3D(inputData, - regularization_parameter, - iterations, - edge_preserving_parameter) - - #SearchW_real = (int) mxGetScalar(prhs[1]); /* the searching window ratio */ - #SimilW = (int) mxGetScalar(prhs[2]); /* the similarity window ratio */ - #h = (float) mxGetScalar(prhs[3]); /* parameter for the PB filtering function */ - #lambda = (float) mxGetScalar(prhs[4]); - -def NML2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData, - SearchW_real, - SimilW, - h, - lambdaf): - N = inputData.shape[0] - M = inputData.shape[1] - Z = 0 - numdims = inputData.ndim - - if h < 0: - raise ValueError('Parameter for the PB filtering function must be > 0') - - SearchW = SearchW_real + 2*SimilW; - - SearchW_full = 2*SearchW + 1; #/* the full searching window size */ - SimilW_full = 2*SimilW + 1; #/* the full similarity window size */ - h2 = h*h; - - padXY = SearchW + 2*SimilW; #/* padding sizes */ - newsizeX = N + 2*(padXY); #/* the X size of the padded array */ - newsizeY = M + 2*(padXY); #/* the Y size of the padded array */ - #newsizeZ = Z + 2*(padXY); #/* the Z size of the padded array */ - - #output - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B = \ - np.zeros([N,M], dtype='float32') - #/*allocating memory for the padded arrays */ - - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] Ap = \ - np.zeros([newsizeX, newsizeY], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=2, mode="c"] Bp = \ - np.zeros([newsizeX, newsizeY], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=1, mode="c"] Eucl_Vec = \ - np.zeros([SimilW_full*SimilW_full], dtype='float32') - - #/*Gaussian kernel */ - cdef int count, i_n, j_n; - cdef float val; - count = 0 - for i_n in range (-SimilW, SimilW +1): - for j_n in range(-SimilW, SimilW +1): - val = (float)(i_n*i_n + j_n*j_n)/(2*SimilW*SimilW) - Eucl_Vec[count] = np.exp(-val) - count = count + 1 - - #/*Perform padding of image A to the size of [newsizeX * newsizeY] */ - switchpad_crop = 0; # /*padding*/ - pad_crop(&inputData[0,0], &Ap[0,0], M, N, 0, newsizeY, newsizeX, 0, padXY, - switchpad_crop); - - #/* Do PB regularization with the padded array */ - NLM_GPU_kernel(&Ap[0,0], &Bp[0,0], &Eucl_Vec[0], newsizeY, newsizeX, 0, - numdims, SearchW, SimilW, SearchW_real, - h2, lambdaf); - - switchpad_crop = 1; #/*cropping*/ - pad_crop(&Bp[0,0], &B[0,0], M, N, 0, newsizeX, newsizeY, 0, padXY, - 
switchpad_crop) - - return B - -def NML3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData, - SearchW_real, - SimilW, - h, - lambdaf): - N = inputData.shape[0] - M = inputData.shape[1] - Z = inputData.shape[2] - numdims = inputData.ndim - - if h < 0: - raise ValueError('Parameter for the PB filtering function must be > 0') - - SearchW = SearchW_real + 2*SimilW; - - SearchW_full = 2*SearchW + 1; #/* the full searching window size */ - SimilW_full = 2*SimilW + 1; #/* the full similarity window size */ - h2 = h*h; - - padXY = SearchW + 2*SimilW; #/* padding sizes */ - newsizeX = N + 2*(padXY); #/* the X size of the padded array */ - newsizeY = M + 2*(padXY); #/* the Y size of the padded array */ - newsizeZ = Z + 2*(padXY); #/* the Z size of the padded array */ - - #output - cdef np.ndarray[np.float32_t, ndim=3, mode="c"] B = \ - np.zeros([N,M,Z], dtype='float32') - #/*allocating memory for the padded arrays */ - - cdef np.ndarray[np.float32_t, ndim=3, mode="c"] Ap = \ - np.zeros([newsizeX, newsizeY, newsizeZ], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=3, mode="c"] Bp = \ - np.zeros([newsizeX, newsizeY, newsizeZ], dtype='float32') - cdef np.ndarray[np.float32_t, ndim=1, mode="c"] Eucl_Vec = \ - np.zeros([SimilW_full*SimilW_full*SimilW_full], - dtype='float32') - - - #/*Gaussian kernel */ - cdef int count, i_n, j_n, k_n; - cdef float val; - count = 0 - for i_n in range (-SimilW, SimilW +1): - for j_n in range(-SimilW, SimilW +1): - for k_n in range(-SimilW, SimilW+1): - val = (i_n*i_n + j_n*j_n + k_n*k_n)/(2*SimilW*SimilW*SimilW) - Eucl_Vec[count] = np.exp(-val) - count = count + 1 - - #/*Perform padding of image A to the size of [newsizeX * newsizeY] */ - switchpad_crop = 0; # /*padding*/ - pad_crop(&inputData[0,0,0], &Ap[0,0,0], - M, N, Z, - newsizeY, newsizeX, newsizeZ, - padXY, - switchpad_crop); - - #/* Do PB regularization with the padded array */ - NLM_GPU_kernel(&Ap[0,0,0], &Bp[0,0,0], &Eucl_Vec[0], - newsizeY, newsizeX, newsizeZ, - numdims, SearchW, SimilW, SearchW_real, - h2, lambdaf); - - switchpad_crop = 1; #/*cropping*/ - pad_crop(&Bp[0,0,0], &B[0,0,0], - M, N, Z, - newsizeX, newsizeY, newsizeZ, - padXY, - switchpad_crop) - - return B diff --git a/Wrappers/Python/src/gpu_regularizers.pyx b/Wrappers/Python/src/gpu_regularizers.pyx new file mode 100644 index 0000000..7658e36 --- /dev/null +++ b/Wrappers/Python/src/gpu_regularizers.pyx @@ -0,0 +1,315 @@ +# distutils: language=c++ +""" +Copyright 2018 CCPi +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Author: Edoardo Pasca +""" + +import cython +import numpy as np +cimport numpy as np + + +cdef extern void Diff4th_GPU_kernel(float* A, float* B, int N, int M, int Z, + float sigma, int iter, float tau, float lambdaf); +cdef extern void NLM_GPU_kernel(float *A, float* B, float *Eucl_Vec, + int N, int M, int Z, int dimension, + int SearchW, int SimilW, + int SearchW_real, float denh2, float lambdaf); +cdef extern float pad_crop(float *A, float *Ap, + int OldSizeX, int OldSizeY, int OldSizeZ, + int NewSizeX, int NewSizeY, int NewSizeZ, + int padXY, int switchpad_crop); + +def Diff4thHajiaboli(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter): + if inputData.ndim == 2: + return Diff4thHajiaboli2D(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter) + elif inputData.ndim == 3: + return Diff4thHajiaboli3D(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter) + +def Diff4thHajiaboli2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData, + float regularization_parameter, + int iterations, + float edge_preserving_parameter): + + cdef long dims[2] + dims[0] = inputData.shape[0] + dims[1] = inputData.shape[1] + N = dims[0] + 2; + M = dims[1] + 2; + + #time step is sufficiently small for an explicit methods + tau = 0.01 + + #A_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); + #B_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] A_L = \ + np.zeros([N,M], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B_L = \ + np.zeros([N,M], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B = \ + np.zeros([dims[0],dims[1]], dtype='float32') + #A = inputData + + # copy A to the bigger A_L with boundaries + #pragma omp parallel for shared(A_L, A) private(i,j) + cdef int i, j; + for i in range(N): + for j in range(M): + if (((i > 0) and (i < N-1)) and ((j > 0) and (j < M-1))): + #A_L[i*M+j] = A[(i-1)*(dims[1])+(j-1)] + A_L[i][j] = inputData[i-1][j-1] + + # Running CUDA code here + #Diff4th_GPU_kernel(A_L, B_L, N, M, Z, (float)sigma, iter, (float)tau, lambda); + Diff4th_GPU_kernel( + # A_L.data, B_L.data, + &A_L[0,0], &B_L[0,0], + N, M, 0, + edge_preserving_parameter, + iterations , + tau, + regularization_parameter); + # copy the processed B_L to a smaller B + for i in range(N): + for j in range(M): + if (((i > 0) and (i < N-1)) and ((j > 0) and (j < M-1))): + B[i-1][j-1] = B_L[i][j] + ##pragma omp parallel for shared(B_L, B) private(i,j) + #for (i=0; i < N; i++) { + # for (j=0; j < M; j++) { + # if (((i > 0) && (i < N-1)) && ((j > 0) && (j < M-1))) B[(i-1)*(dims[1])+(j-1)] = B_L[i*M+j]; + # }} + + return B + +def Diff4thHajiaboli3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData, + float regularization_parameter, + int iterations, + float edge_preserving_parameter): + + cdef long dims[3] + dims[0] = inputData.shape[0] + dims[1] = inputData.shape[1] + dims[2] = inputData.shape[2] + N = dims[0] + 2 + M = dims[1] + 2 + Z = dims[2] + 2 + + # Time Step is small for an explicit methods + tau = 0.0007; + + #A_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); + #B_L = (float*)mxGetData(mxCreateNumericMatrix(N, M, mxSINGLE_CLASS, mxREAL)); + cdef np.ndarray[np.float32_t, ndim=3, mode="c"] A_L = \ + np.zeros([N,M,Z], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=3, mode="c"] B_L = \ + np.zeros([N,M,Z], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=3, 
mode="c"] B = \ + np.zeros([dims[0],dims[1],dims[2]], dtype='float32') + #A = inputData + + # copy A to the bigger A_L with boundaries + #pragma omp parallel for shared(A_L, A) private(i,j) + cdef int i, j, k; + for i in range(N): + for j in range(M): + for k in range(Z): + if (((i > 0) and (i < N-1)) and \ + ((j > 0) and (j < M-1)) and \ + ((k > 0) and (k < Z-1))): + A_L[i][j][k] = inputData[i-1][j-1][k-1]; + + # Running CUDA code here + #Diff4th_GPU_kernel(A_L, B_L, N, M, Z, (float)sigma, iter, (float)tau, lambda); + Diff4th_GPU_kernel( + # A_L.data, B_L.data, + &A_L[0,0,0], &B_L[0,0,0], + N, M, Z, + edge_preserving_parameter, + iterations , + tau, + regularization_parameter); + # copy the processed B_L to a smaller B + for i in range(N): + for j in range(M): + for k in range(Z): + if (((i > 0) and (i < N-1)) and \ + ((j > 0) and (j < M-1)) and \ + ((k > 0) and (k < Z-1))): + B[i-1][j-1][k-1] = B_L[i][j][k] + + + return B + +def NML(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter): + if inputData.ndim == 2: + return NML2D(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter) + elif inputData.ndim == 3: + return NML3D(inputData, + regularization_parameter, + iterations, + edge_preserving_parameter) + + #SearchW_real = (int) mxGetScalar(prhs[1]); /* the searching window ratio */ + #SimilW = (int) mxGetScalar(prhs[2]); /* the similarity window ratio */ + #h = (float) mxGetScalar(prhs[3]); /* parameter for the PB filtering function */ + #lambda = (float) mxGetScalar(prhs[4]); + +def NML2D(np.ndarray[np.float32_t, ndim=2, mode="c"] inputData, + SearchW_real, + SimilW, + h, + lambdaf): + N = inputData.shape[0] + M = inputData.shape[1] + Z = 0 + numdims = inputData.ndim + + if h < 0: + raise ValueError('Parameter for the PB filtering function must be > 0') + + SearchW = SearchW_real + 2*SimilW; + + SearchW_full = 2*SearchW + 1; #/* the full searching window size */ + SimilW_full = 2*SimilW + 1; #/* the full similarity window size */ + h2 = h*h; + + padXY = SearchW + 2*SimilW; #/* padding sizes */ + newsizeX = N + 2*(padXY); #/* the X size of the padded array */ + newsizeY = M + 2*(padXY); #/* the Y size of the padded array */ + #newsizeZ = Z + 2*(padXY); #/* the Z size of the padded array */ + + #output + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] B = \ + np.zeros([N,M], dtype='float32') + #/*allocating memory for the padded arrays */ + + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] Ap = \ + np.zeros([newsizeX, newsizeY], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=2, mode="c"] Bp = \ + np.zeros([newsizeX, newsizeY], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=1, mode="c"] Eucl_Vec = \ + np.zeros([SimilW_full*SimilW_full], dtype='float32') + + #/*Gaussian kernel */ + cdef int count, i_n, j_n; + cdef float val; + count = 0 + for i_n in range (-SimilW, SimilW +1): + for j_n in range(-SimilW, SimilW +1): + val = (float)(i_n*i_n + j_n*j_n)/(2*SimilW*SimilW) + Eucl_Vec[count] = np.exp(-val) + count = count + 1 + + #/*Perform padding of image A to the size of [newsizeX * newsizeY] */ + switchpad_crop = 0; # /*padding*/ + pad_crop(&inputData[0,0], &Ap[0,0], M, N, 0, newsizeY, newsizeX, 0, padXY, + switchpad_crop); + + #/* Do PB regularization with the padded array */ + NLM_GPU_kernel(&Ap[0,0], &Bp[0,0], &Eucl_Vec[0], newsizeY, newsizeX, 0, + numdims, SearchW, SimilW, SearchW_real, + h2, lambdaf); + + switchpad_crop = 1; #/*cropping*/ + pad_crop(&Bp[0,0], &B[0,0], M, N, 0, newsizeX, newsizeY, 0, padXY, + 
switchpad_crop) + + return B + +def NML3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData, + SearchW_real, + SimilW, + h, + lambdaf): + N = inputData.shape[0] + M = inputData.shape[1] + Z = inputData.shape[2] + numdims = inputData.ndim + + if h < 0: + raise ValueError('Parameter for the PB filtering function must be > 0') + + SearchW = SearchW_real + 2*SimilW; + + SearchW_full = 2*SearchW + 1; #/* the full searching window size */ + SimilW_full = 2*SimilW + 1; #/* the full similarity window size */ + h2 = h*h; + + padXY = SearchW + 2*SimilW; #/* padding sizes */ + newsizeX = N + 2*(padXY); #/* the X size of the padded array */ + newsizeY = M + 2*(padXY); #/* the Y size of the padded array */ + newsizeZ = Z + 2*(padXY); #/* the Z size of the padded array */ + + #output + cdef np.ndarray[np.float32_t, ndim=3, mode="c"] B = \ + np.zeros([N,M,Z], dtype='float32') + #/*allocating memory for the padded arrays */ + + cdef np.ndarray[np.float32_t, ndim=3, mode="c"] Ap = \ + np.zeros([newsizeX, newsizeY, newsizeZ], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=3, mode="c"] Bp = \ + np.zeros([newsizeX, newsizeY, newsizeZ], dtype='float32') + cdef np.ndarray[np.float32_t, ndim=1, mode="c"] Eucl_Vec = \ + np.zeros([SimilW_full*SimilW_full*SimilW_full], + dtype='float32') + + + #/*Gaussian kernel */ + cdef int count, i_n, j_n, k_n; + cdef float val; + count = 0 + for i_n in range (-SimilW, SimilW +1): + for j_n in range(-SimilW, SimilW +1): + for k_n in range(-SimilW, SimilW+1): + val = (i_n*i_n + j_n*j_n + k_n*k_n)/(2*SimilW*SimilW*SimilW) + Eucl_Vec[count] = np.exp(-val) + count = count + 1 + + #/*Perform padding of image A to the size of [newsizeX * newsizeY] */ + switchpad_crop = 0; # /*padding*/ + pad_crop(&inputData[0,0,0], &Ap[0,0,0], + M, N, Z, + newsizeY, newsizeX, newsizeZ, + padXY, + switchpad_crop); + + #/* Do PB regularization with the padded array */ + NLM_GPU_kernel(&Ap[0,0,0], &Bp[0,0,0], &Eucl_Vec[0], + newsizeY, newsizeX, newsizeZ, + numdims, SearchW, SimilW, SearchW_real, + h2, lambdaf); + + switchpad_crop = 1; #/*cropping*/ + pad_crop(&Bp[0,0,0], &B[0,0,0], + M, N, Z, + newsizeX, newsizeY, newsizeZ, + padXY, + switchpad_crop) + + return B diff --git a/Wrappers/Python/test/test_cpu_regularizers.py b/Wrappers/Python/test/test_cpu_regularizers.py new file mode 100644 index 0000000..ac595e3 --- /dev/null +++ b/Wrappers/Python/test/test_cpu_regularizers.py @@ -0,0 +1,445 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Aug 4 11:10:05 2017 + +@author: ofn77899 +""" + +#from ccpi.viewer.CILViewer2D import Converter +#import vtk + +import matplotlib.pyplot as plt +import numpy as np +import os +from enum import Enum +import timeit +#from PIL import Image +#from Regularizer import Regularizer +#from ccpi.filters.Regularizer import Regularizer +from ccpi.filters.cpu_regularizers import SplitBregman_TV , FGP_TV , LLT_model, \ + PatchBased_Regul , TGV_PD + +############################################################################### +#https://stackoverflow.com/questions/13875989/comparing-image-in-url-to-image-in-filesystem-in-python/13884956#13884956 +#NRMSE a normalization of the root of the mean squared error +#NRMSE is simply 1 - [RMSE / (maxval - minval)]. Where maxval is the maximum +# intensity from the two images being compared, and respectively the same for +# minval. RMSE is given by the square root of MSE: +# sqrt[(sum(A - B) ** 2) / |A|], +# where |A| means the number of elements in A. By doing this, the maximum value +# given by RMSE is maxval. 
+ +def nrmse(im1, im2): + a, b = im1.shape + rmse = np.sqrt(np.sum((im2 - im1) ** 2) / float(a * b)) + max_val = max(np.max(im1), np.max(im2)) + min_val = min(np.min(im1), np.min(im2)) + return 1 - (rmse / (max_val - min_val)) +############################################################################### +def printParametersToString(pars): + txt = r'' + for key, value in pars.items(): + if key== 'algorithm' : + txt += "{0} = {1}".format(key, value.__name__) + elif key == 'input': + txt += "{0} = {1}".format(key, np.shape(value)) + else: + txt += "{0} = {1}".format(key, value) + txt += '\n' + return txt +############################################################################### +# +# 2D Regularizers +# +############################################################################### +#Example: +# figure; +# Im = double(imread('lena_gray_256.tif'))/255; % loading image +# u0 = Im + .05*randn(size(Im)); u0(u0 < 0) = 0; +# u = SplitBregman_TV(single(u0), 10, 30, 1e-04); + +# assumes the script is launched from the test directory +filename = os.path.join(".." , ".." , ".." , "data" ,"lena_gray_512.tif") +#filename = r"C:\Users\ofn77899\Documents\GitHub\CCPi-FISTA_reconstruction\data\lena_gray_512.tif" +#filename = r"/home/ofn77899/Reconstruction/CCPi-FISTA_Reconstruction/data/lena_gray_512.tif" +#filename = r'/home/algol/Documents/Python/STD_test_images/lena_gray_512.tif' + +Im = plt.imread(filename) +Im = np.asarray(Im, dtype='float32') + +perc = 0.05 +u0 = Im + (perc* np.random.normal(size=np.shape(Im))) +# map the u0 u0->u0>0 +f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1) +u0 = f(u0).astype('float32') + +## plot +fig = plt.figure() + +a=fig.add_subplot(2,3,1) +a.set_title('noise') +imgplot = plt.imshow(u0,cmap="gray") + +reg_output = [] +############################################################################## +# Call regularizer + +####################### SplitBregman_TV ##################################### +# u = SplitBregman_TV(single(u0), 10, 30, 1e-04); + +start_time = timeit.default_timer() +pars = {'algorithm' : SplitBregman_TV , \ + 'input' : u0, + 'regularization_parameter':10. 
, \ +'number_of_iterations' :35 ,\ +'tolerance_constant':0.0001 , \ +'TV_penalty': 0 +} + +out = SplitBregman_TV (pars['input'], pars['regularization_parameter'], + pars['number_of_iterations'], + pars['tolerance_constant'], + pars['TV_penalty']) +plotme = out[0] +txtstr = printParametersToString(pars) +txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) +print (txtstr) + + +a=fig.add_subplot(2,3,2) + + +# these are matplotlib.patch.Patch properties +props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +# place a text box in upper left in axes coords +a.text(0.05, 0.95, txtstr, transform=a.transAxes, fontsize=14, + verticalalignment='top', bbox=props) +imgplot = plt.imshow(plotme,\ + #cmap="gray" + ) + +###################### FGP_TV ######################################### +# u = FGP_TV(single(u0), 0.05, 100, 1e-04); +start_time = timeit.default_timer() +pars = {'algorithm' : FGP_TV , \ + 'input' : u0, + 'regularization_parameter':5e-4, \ +'number_of_iterations' :10 ,\ +'tolerance_constant':0.001,\ +'TV_penalty': 0 +} + +out = FGP_TV (pars['input'], pars['regularization_parameter'], + pars['number_of_iterations'], + pars['tolerance_constant'], + pars['TV_penalty']) +plotme = out[0] +txtstr = printParametersToString(pars) +txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) +print (txtstr) + + +a=fig.add_subplot(2,3,3) + +# these are matplotlib.patch.Patch properties +props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +# place a text box in upper left in axes coords +imgplot = plt.imshow(plotme, \ + #cmap="gray" + ) +# place a text box in upper left in axes coords +a.text(0.05, 0.95, txtstr, transform=a.transAxes, fontsize=14, + verticalalignment='top', bbox=props) + +###################### LLT_model ######################################### +# * u0 = Im + .03*randn(size(Im)); % adding noise +# [Den] = LLT_model(single(u0), 10, 0.1, 1); +#Den = LLT_model(single(u0), 25, 0.0003, 300, 0.0001, 0); +#input, regularization_parameter , time_step, number_of_iterations, +# tolerance_constant, restrictive_Z_smoothing=0 + +start_time = timeit.default_timer() + +pars = {'algorithm': LLT_model , \ + 'input' : u0, + 'regularization_parameter': 25,\ + 'time_step':0.0003, \ +'number_of_iterations' :300,\ +'tolerance_constant':0.001,\ +'restrictive_Z_smoothing': 0 +} +out = LLT_model(pars['input'], + pars['regularization_parameter'], + pars['time_step'] , + pars['number_of_iterations'], + pars['tolerance_constant'], + pars['restrictive_Z_smoothing'] ) + +plotme = out[0] +txtstr = printParametersToString(pars) +txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) +print (txtstr) +a=fig.add_subplot(2,3,4) + +# these are matplotlib.patch.Patch properties +props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +# place a text box in upper left in axes coords +a.text(0.05, 0.95, txtstr, transform=a.transAxes, fontsize=14, + verticalalignment='top', bbox=props) +imgplot = plt.imshow(plotme,\ + #cmap="gray" + ) + + +# ###################### PatchBased_Regul ######################################### +# # Quick 2D denoising example in Matlab: +# # Im = double(imread('lena_gray_256.tif'))/255; % loading image +# # u0 = Im + .03*randn(size(Im)); u0(u0<0) = 0; % adding noise +# # ImDen = PB_Regul_CPU(single(u0), 3, 1, 0.08, 0.05); +start_time = timeit.default_timer() + +pars = {'algorithm': PatchBased_Regul , \ + 'input' : u0, + 'regularization_parameter': 0.05,\ + 'searching_window_ratio':3, \ 
+'similarity_window_ratio':1,\ +'PB_filtering_parameter': 0.08 +} +out = PatchBased_Regul( + pars['input'], pars['regularization_parameter'], + pars['searching_window_ratio'] , + pars['similarity_window_ratio'] , + pars['PB_filtering_parameter']) +plotme = out[0] +txtstr = printParametersToString(pars) +txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) +print (txtstr) + +a=fig.add_subplot(2,3,5) + + +# these are matplotlib.patch.Patch properties +props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +# place a text box in upper left in axes coords +a.text(0.05, 0.95, txtstr, transform=a.transAxes, fontsize=14, + verticalalignment='top', bbox=props) +imgplot = plt.imshow(plotme #,cmap="gray" + ) + + +# ###################### TGV_PD ######################################### +# # Quick 2D denoising example in Matlab: +# # Im = double(imread('lena_gray_256.tif'))/255; % loading image +# # u0 = Im + .03*randn(size(Im)); u0(u0<0) = 0; % adding noise +# # u = PrimalDual_TGV(single(u0), 0.02, 1.3, 1, 550); + +start_time = timeit.default_timer() + +pars = {'algorithm': TGV_PD , \ + 'input' : u0,\ + 'regularization_parameter':0.05,\ + 'first_order_term': 1.3,\ + 'second_order_term': 1, \ +'number_of_iterations': 550 +} +out = TGV_PD(pars['input'], pars['regularization_parameter'], + pars['first_order_term'] , + pars['second_order_term'] , + pars['number_of_iterations']) +plotme = out[0] +txtstr = printParametersToString(pars) +txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) +print (txtstr) +a=fig.add_subplot(2,3,6) + +# these are matplotlib.patch.Patch properties +props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +# place a text box in upper left in axes coords +a.text(0.05, 0.95, txtstr, transform=a.transAxes, fontsize=14, + verticalalignment='top', bbox=props) +imgplot = plt.imshow(plotme #, cmap="gray") + ) + + +plt.show() + +################################################################################ +## +## 3D Regularizers +## +################################################################################ +##Example: +## figure; +## Im = double(imread('lena_gray_256.tif'))/255; % loading image +## u0 = Im + .05*randn(size(Im)); u0(u0 < 0) = 0; +## u = SplitBregman_TV(single(u0), 10, 30, 1e-04); +# +##filename = r"C:\Users\ofn77899\Documents\GitHub\CCPi-Reconstruction\python\test\reconstruction_example.mha" +#filename = r"C:\Users\ofn77899\Documents\GitHub\CCPi-Simpleflex\data\head.mha" +# +#reader = vtk.vtkMetaImageReader() +#reader.SetFileName(os.path.normpath(filename)) +#reader.Update() +##vtk returns 3D images, let's take just the one slice there is as 2D +#Im = Converter.vtk2numpy(reader.GetOutput()) +#Im = Im.astype('float32') +##imgplot = plt.imshow(Im) +#perc = 0.05 +#u0 = Im + (perc* np.random.normal(size=np.shape(Im))) +## map the u0 u0->u0>0 +#f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1) +#u0 = f(u0).astype('float32') +#converter = Converter.numpy2vtkImporter(u0, reader.GetOutput().GetSpacing(), +# reader.GetOutput().GetOrigin()) +#converter.Update() +#writer = vtk.vtkMetaImageWriter() +#writer.SetInputData(converter.GetOutput()) +#writer.SetFileName(r"C:\Users\ofn77899\Documents\GitHub\CCPi-FISTA_reconstruction\data\noisy_head.mha") +##writer.Write() +# +# +### plot +#fig3D = plt.figure() +##a=fig.add_subplot(3,3,1) +##a.set_title('Original') +##imgplot = plt.imshow(Im) +#sliceNo = 32 +# +#a=fig3D.add_subplot(2,3,1) +#a.set_title('noise') +#imgplot = plt.imshow(u0.T[sliceNo]) +# +#reg_output3d = 
[] +# +############################################################################### +## Call regularizer +# +######################## SplitBregman_TV ##################################### +## u = SplitBregman_TV(single(u0), 10, 30, 1e-04); +# +##reg = Regularizer(Regularizer.Algorithm.SplitBregman_TV) +# +##out = reg(input=u0, regularization_parameter=10., #number_of_iterations=30, +## #tolerance_constant=1e-4, +## TV_Penalty=Regularizer.TotalVariationPenalty.l1) +# +#out2 = Regularizer.SplitBregman_TV(input=u0, regularization_parameter=10., number_of_iterations=30, +# tolerance_constant=1e-4, +# TV_Penalty=Regularizer.TotalVariationPenalty.l1) +# +# +#pars = out2[-2] +#reg_output3d.append(out2) +# +#a=fig3D.add_subplot(2,3,2) +# +# +#textstr = out2[-1] +# +# +## these are matplotlib.patch.Patch properties +#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +## place a text box in upper left in axes coords +#a.text(0.05, 0.95, textstr, transform=a.transAxes, fontsize=14, +# verticalalignment='top', bbox=props) +#imgplot = plt.imshow(reg_output3d[-1][0].T[sliceNo]) +# +####################### FGP_TV ######################################### +## u = FGP_TV(single(u0), 0.05, 100, 1e-04); +#out2 = Regularizer.FGP_TV(input=u0, regularization_parameter=0.005, +# number_of_iterations=200) +#pars = out2[-2] +#reg_output3d.append(out2) +# +#a=fig3D.add_subplot(2,3,2) +# +# +#textstr = out2[-1] +# +# +## these are matplotlib.patch.Patch properties +#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +## place a text box in upper left in axes coords +#a.text(0.05, 0.95, textstr, transform=a.transAxes, fontsize=14, +# verticalalignment='top', bbox=props) +#imgplot = plt.imshow(reg_output3d[-1][0].T[sliceNo]) +# +####################### LLT_model ######################################### +## * u0 = Im + .03*randn(size(Im)); % adding noise +## [Den] = LLT_model(single(u0), 10, 0.1, 1); +##Den = LLT_model(single(u0), 25, 0.0003, 300, 0.0001, 0); +##input, regularization_parameter , time_step, number_of_iterations, +## tolerance_constant, restrictive_Z_smoothing=0 +#out2 = Regularizer.LLT_model(input=u0, regularization_parameter=25, +# time_step=0.0003, +# tolerance_constant=0.0001, +# number_of_iterations=300) +#pars = out2[-2] +#reg_output3d.append(out2) +# +#a=fig3D.add_subplot(2,3,2) +# +# +#textstr = out2[-1] +# +# +## these are matplotlib.patch.Patch properties +#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +## place a text box in upper left in axes coords +#a.text(0.05, 0.95, textstr, transform=a.transAxes, fontsize=14, +# verticalalignment='top', bbox=props) +#imgplot = plt.imshow(reg_output3d[-1][0].T[sliceNo]) +# +####################### PatchBased_Regul ######################################### +## Quick 2D denoising example in Matlab: +## Im = double(imread('lena_gray_256.tif'))/255; % loading image +## u0 = Im + .03*randn(size(Im)); u0(u0<0) = 0; % adding noise +## ImDen = PB_Regul_CPU(single(u0), 3, 1, 0.08, 0.05); +# +#out2 = Regularizer.PatchBased_Regul(input=u0, regularization_parameter=0.05, +# searching_window_ratio=3, +# similarity_window_ratio=1, +# PB_filtering_parameter=0.08) +#pars = out2[-2] +#reg_output3d.append(out2) +# +#a=fig3D.add_subplot(2,3,2) +# +# +#textstr = out2[-1] +# +# +## these are matplotlib.patch.Patch properties +#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +## place a text box in upper left in axes coords +#a.text(0.05, 0.95, textstr, transform=a.transAxes, fontsize=14, +# verticalalignment='top', 
bbox=props) +#imgplot = plt.imshow(reg_output3d[-1][0].T[sliceNo]) +# + +###################### TGV_PD ######################################### +# Quick 2D denoising example in Matlab: +# Im = double(imread('lena_gray_256.tif'))/255; % loading image +# u0 = Im + .03*randn(size(Im)); u0(u0<0) = 0; % adding noise +# u = PrimalDual_TGV(single(u0), 0.02, 1.3, 1, 550); + + +#out2 = Regularizer.TGV_PD(input=u0, regularization_parameter=0.05, +# first_order_term=1.3, +# second_order_term=1, +# number_of_iterations=550) +#pars = out2[-2] +#reg_output3d.append(out2) +# +#a=fig3D.add_subplot(2,3,2) +# +# +#textstr = out2[-1] +# +# +## these are matplotlib.patch.Patch properties +#props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) +## place a text box in upper left in axes coords +#a.text(0.05, 0.95, textstr, transform=a.transAxes, fontsize=14, +# verticalalignment='top', bbox=props) +#imgplot = plt.imshow(reg_output3d[-1][0].T[sliceNo]) -- cgit v1.2.3