author      Daniil Kazantsev <dkazanc@hotmail.com>   2018-04-17 12:58:28 +0100
committer   Daniil Kazantsev <dkazanc@hotmail.com>   2018-04-17 12:58:28 +0100
commit      d0a33e4f941539ba44a071cfab75d7bf9543990f (patch)
tree        ed825ba90ca17448ab07309435095f3612ffe703
parent      7e556922a60e052d24c1e467df13423904729357 (diff)
download    regularization-d0a33e4f941539ba44a071cfab75d7bf9543990f.tar.gz
            regularization-d0a33e4f941539ba44a071cfab75d7bf9543990f.tar.bz2
            regularization-d0a33e4f941539ba44a071cfab75d7bf9543990f.tar.xz
            regularization-d0a33e4f941539ba44a071cfab75d7bf9543990f.zip
TNV module added
-rw-r--r--   Core/CMakeLists.txt                                    |   1
-rwxr-xr-x   Core/regularisers_CPU/SB_TV_core.c                     |   1
-rwxr-xr-x   Core/regularisers_CPU/TNV_core.c                       | 451
-rw-r--r--   Core/regularisers_CPU/TNV_core.h                       |  46
-rwxr-xr-x   Core/regularisers_GPU/TV_SB_GPU_core.cu                |   1
-rw-r--r--   Readme.md                                              |   7
-rw-r--r--   Wrappers/Matlab/demos/demoMatlab_denoise.m             |  11
-rw-r--r--   Wrappers/Matlab/mex_compile/compileCPU_mex.m           |   5
-rw-r--r--   Wrappers/Matlab/mex_compile/regularisers_CPU/TNV.c     |  73
-rw-r--r--   Wrappers/Python/ccpi/filters/regularisers.py           |   7
-rw-r--r--   Wrappers/Python/demos/demo_cpu_regularisers.py         |  53
-rw-r--r--   Wrappers/Python/src/cpu_regularisers.pyx               |  26
12 files changed, 675 insertions(+), 7 deletions(-)
diff --git a/Core/CMakeLists.txt b/Core/CMakeLists.txt
index fecdeb1..4142ed9 100644
--- a/Core/CMakeLists.txt
+++ b/Core/CMakeLists.txt
@@ -90,6 +90,7 @@ add_library(cilreg SHARED
#${CMAKE_CURRENT_SOURCE_DIR}/regularisers_CPU/TGV_PD_core.c
${CMAKE_CURRENT_SOURCE_DIR}/regularisers_CPU/ROF_TV_core.c
${CMAKE_CURRENT_SOURCE_DIR}/regularisers_CPU/FGP_dTV_core.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/regularisers_CPU/TNV_core.c
${CMAKE_CURRENT_SOURCE_DIR}/regularisers_CPU/utils.c
)
target_link_libraries(cilreg ${EXTRA_LIBRARIES} )
diff --git a/Core/regularisers_CPU/SB_TV_core.c b/Core/regularisers_CPU/SB_TV_core.c
index 93b4c2c..06d3de3 100755
--- a/Core/regularisers_CPU/SB_TV_core.c
+++ b/Core/regularisers_CPU/SB_TV_core.c
@@ -32,7 +32,6 @@ limitations under the License.
* Output:
* 1. Filtered/regularized image
*
-* This function is based on the Matlab's code and paper by
* [1]. Goldstein, T. and Osher, S., 2009. The split Bregman method for L1-regularized problems. SIAM journal on imaging sciences, 2(2), pp.323-343.
*/
diff --git a/Core/regularisers_CPU/TNV_core.c b/Core/regularisers_CPU/TNV_core.c
new file mode 100755
index 0000000..8e61869
--- /dev/null
+++ b/Core/regularisers_CPU/TNV_core.c
@@ -0,0 +1,451 @@
+/*
+ * This work is part of the Core Imaging Library developed by
+ * Visual Analytics and Imaging System Group of the Science Technology
+ * Facilities Council, STFC
+ *
+ * Copyright 2017 Daniil Kazantsev
+ * Copyright 2017 Srikanth Nagella, Edoardo Pasca
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TNV_core.h"
+
+/*
+ * C-OMP implementation of Total Nuclear Variation regularisation model (2D + channels) [1]
+ * The code is modified from the implementation by Joan Duran <joan.duran@uib.es>; see
+ * "denoisingPDHG_ipol.cpp" in Joan's Collaborative Total Variation package
+ *
+ * Input Parameters:
+ * 1. Noisy volume of 2D + channel dimension, i.e. 3D volume
+ * 2. lambda - regularisation parameter
+ * 3. Number of iterations [OPTIONAL parameter]
+ * 4. epsilon - tolerance constant [OPTIONAL parameter]
+ * 5. print information: 0 (off) or 1 (on) [OPTIONAL parameter]
+ *
+ * Output:
+ * 1. Filtered/regularized image
+ *
+ * [1]. Duran, J., Moeller, M., Sbert, C. and Cremers, D., 2016. Collaborative total variation: a general framework for vectorial TV models. SIAM Journal on Imaging Sciences, 9(1), pp.116-151.
+ */
+
+float TNV_CPU_main(float *Input, float *u, float lambda, int maxIter, float tol, int dimX, int dimY, int dimZ)
+{
+ int k, p, q, r, DimTotal;
+ float taulambda;
+ float *u_upd, *gx, *gy, *gx_upd, *gy_upd, *qx, *qy, *qx_upd, *qy_upd, *v, *vx, *vy, *gradx, *grady, *gradx_upd, *grady_upd, *gradx_ubar, *grady_ubar, *div, *div_upd;
+
+ p = 1;
+ q = 1;
+ r = 0;
+
+ lambda = 1.0f/(2.0f*lambda);
+ DimTotal = dimX*dimY*dimZ;
+ /* PDHG algorithm parameters*/
+ float tau = 0.5f;
+ float sigma = 0.5f;
+ float theta = 1.0f;
+
+ // Auxiliary vectors
+ u_upd = calloc(DimTotal, sizeof(float));
+ gx = calloc(DimTotal, sizeof(float));
+ gy = calloc(DimTotal, sizeof(float));
+ gx_upd = calloc(DimTotal, sizeof(float));
+ gy_upd = calloc(DimTotal, sizeof(float));
+ qx = calloc(DimTotal, sizeof(float));
+ qy = calloc(DimTotal, sizeof(float));
+ qx_upd = calloc(DimTotal, sizeof(float));
+ qy_upd = calloc(DimTotal, sizeof(float));
+ v = calloc(DimTotal, sizeof(float));
+ vx = calloc(DimTotal, sizeof(float));
+ vy = calloc(DimTotal, sizeof(float));
+ gradx = calloc(DimTotal, sizeof(float));
+ grady = calloc(DimTotal, sizeof(float));
+ gradx_upd = calloc(DimTotal, sizeof(float));
+ grady_upd = calloc(DimTotal, sizeof(float));
+ gradx_ubar = calloc(DimTotal, sizeof(float));
+ grady_ubar = calloc(DimTotal, sizeof(float));
+ div = calloc(DimTotal, sizeof(float));
+ div_upd = calloc(DimTotal, sizeof(float));
+
+ // Backtracking parameters
+ float s = 1.0f;
+ float gamma = 0.75f;
+ float beta = 0.95f;
+ float alpha0 = 0.2f;
+ float alpha = alpha0;
+ float delta = 1.5f;
+ float eta = 0.95f;
+
+ // PDHG algorithm parameters
+ taulambda = tau * lambda;
+ float divtau = 1.0f / tau;
+ float divsigma = 1.0f / sigma;
+ float theta1 = 1.0f + theta;
+
+ /*allocate memory for taulambda */
+ //taulambda = (float*) calloc(dimZ, sizeof(float));
+ //for(k=0; k < dimZ; k++) {taulambda[k] = tau*lambda[k];}
+
+ // Apply Primal-Dual Hybrid Gradient scheme
+ int iter = 0;
+ float residual = fLarge;
+ float ubarx, ubary;
+
+ for(iter = 0; iter < maxIter; iter++) {
+ // Argument of proximal mapping of fidelity term
+#pragma omp parallel for shared(v, u) private(k)
+ for(k=0; k<dimX*dimY*dimZ; k++) {v[k] = u[k] + tau*div[k];}
+
+// Proximal solution of fidelity term
+proxG(u_upd, v, Input, taulambda, dimX, dimY, dimZ);
+
+// Gradient of updated primal variable
+gradient(u_upd, gradx_upd, grady_upd, dimX, dimY, dimZ);
+
+// Argument of proximal mapping of regularization term
+#pragma omp parallel for shared(gradx_upd, grady_upd, gradx, grady) private(k, ubarx, ubary)
+for(k=0; k<dimX*dimY*dimZ; k++) {
+ ubarx = theta1 * gradx_upd[k] - theta * gradx[k];
+ ubary = theta1 * grady_upd[k] - theta * grady[k];
+ vx[k] = ubarx + divsigma * qx[k];
+ vy[k] = ubary + divsigma * qy[k];
+ gradx_ubar[k] = ubarx;
+ grady_ubar[k] = ubary;
+}
+
+proxF(gx_upd, gy_upd, vx, vy, sigma, p, q, r, dimX, dimY, dimZ);
+
+// Update dual variable
+#pragma omp parallel for shared(qx_upd, qy_upd) private(k)
+for(k=0; k<dimX*dimY*dimZ; k++) {
+ qx_upd[k] = qx[k] + sigma * (gradx_ubar[k] - gx_upd[k]);
+ qy_upd[k] = qy[k] + sigma * (grady_ubar[k] - gy_upd[k]);
+}
+
+// Divergence of updated dual variable
+#pragma omp parallel for shared(div_upd) private(k)
+for(k=0; k<dimX*dimY*dimZ; k++) {div_upd[k] = 0.0f;}
+divergence(qx_upd, qy_upd, div_upd, dimX, dimY, dimZ);
+
+// Compute primal residual, dual residual, and backtracking condition
+float resprimal = 0.0f;
+float resdual = 0.0f;
+float product = 0.0f;
+float unorm = 0.0f;
+float qnorm = 0.0f;
+
+for(k=0; k<dimX*dimY*dimZ; k++) {
+ float udiff = u[k] - u_upd[k];
+ float qxdiff = qx[k] - qx_upd[k];
+ float qydiff = qy[k] - qy_upd[k];
+ float divdiff = div[k] - div_upd[k];
+ float gradxdiff = gradx[k] - gradx_upd[k];
+ float gradydiff = grady[k] - grady_upd[k];
+
+ resprimal += fabs(divtau*udiff + divdiff);
+ resdual += fabs(divsigma*qxdiff - gradxdiff);
+ resdual += fabs(divsigma*qydiff - gradydiff);
+
+ unorm += (udiff * udiff);
+ qnorm += (qxdiff * qxdiff + qydiff * qydiff);
+ product += (gradxdiff * qxdiff + gradydiff * qydiff);
+}
+
+float b = (2.0f * tau * sigma * product) / (gamma * sigma * unorm +
+ gamma * tau * qnorm);
+
+// Adapt step-size parameters
+float dual_dot_delta = resdual * s * delta;
+float dual_div_delta = (resdual * s) / delta;
+
+if(b > 1)
+{
+ // Decrease step-sizes to fit balancing principle
+ tau = (beta * tau) / b;
+ sigma = (beta * sigma) / b;
+ alpha = alpha0;
+
+ copyIm(u, u_upd, dimX, dimY, dimZ);
+ copyIm(gx, gx_upd, dimX, dimY, dimZ);
+ copyIm(gy, gy_upd, dimX, dimY, dimZ);
+ copyIm(qx, qx_upd, dimX, dimY, dimZ);
+ copyIm(qy, qy_upd, dimX, dimY, dimZ);
+ copyIm(gradx, gradx_upd, dimX, dimY, dimZ);
+ copyIm(grady, grady_upd, dimX, dimY, dimZ);
+ copyIm(div, div_upd, dimX, dimY, dimZ);
+
+} else if(resprimal > dual_dot_delta)
+{
+ // Increase primal step-size and decrease dual step-size
+ tau = tau / (1.0f - alpha);
+ sigma = sigma * (1.0f - alpha);
+ alpha = alpha * eta;
+
+} else if(resprimal < dual_div_delta)
+{
+ // Decrease primal step-size and increase dual step-size
+ tau = tau * (1.0f - alpha);
+ sigma = sigma / (1.0f - alpha);
+ alpha = alpha * eta;
+}
+
+// Update variables
+taulambda = tau * lambda;
+//for(k=0; k < dimZ; k++) taulambda[k] = tau*lambda[k];
+
+divsigma = 1.0f / sigma;
+divtau = 1.0f / tau;
+
+copyIm(u_upd, u, dimX, dimY, dimZ);
+copyIm(gx_upd, gx, dimX, dimY, dimZ);
+copyIm(gy_upd, gy, dimX, dimY, dimZ);
+copyIm(qx_upd, qx, dimX, dimY, dimZ);
+copyIm(qy_upd, qy, dimX, dimY, dimZ);
+copyIm(gradx_upd, gradx, dimX, dimY, dimZ);
+copyIm(grady_upd, grady, dimX, dimY, dimZ);
+copyIm(div_upd, div, dimX, dimY, dimZ);
+
+// Compute residual at current iteration
+residual = (resprimal + resdual) / ((float) (dimX*dimY*dimZ));
+
+// printf("%f \n", residual);
+if (residual < tol) {
+ printf("Iterations stopped at %i with the residual %f \n", iter, residual);
+ break; }
+
+ }
+ printf("Iterations stopped at %i with the residual %f \n", iter, residual);
+ free (u_upd); free(gx); free(gy); free(gx_upd); free(gy_upd);
+ free(qx); free(qy); free(qx_upd); free(qy_upd); free(v); free(vx); free(vy);
+ free(gradx); free(grady); free(gradx_upd); free(grady_upd); free(gradx_ubar);
+ free(grady_ubar); free(div); free(div_upd);
+
+ return *u;
+}
+
+float proxG(float *u_upd, float *v, float *f, float taulambda, int dimX, int dimY, int dimZ)
+{
+ float constant;
+ int k;
+ constant = 1.0f + taulambda;
+#pragma omp parallel for shared(v, f, u_upd) private(k)
+ for(k=0; k<dimZ*dimX*dimY; k++) {
+ u_upd[k] = (v[k] + taulambda * f[k])/constant;
+ //u_upd[(dimX*dimY)*k + l] = (v[(dimX*dimY)*k + l] + taulambda * f[(dimX*dimY)*k + l])/constant;
+ }
+ return *u_upd;
+}
+
+float gradient(float *u_upd, float *gradx_upd, float *grady_upd, int dimX, int dimY, int dimZ)
+{
+ int i, j, k, l;
+ // Compute discrete gradient using forward differences
+#pragma omp parallel for shared(gradx_upd,grady_upd,u_upd) private(i, j, k, l)
+ for(k = 0; k < dimZ; k++) {
+ for(j = 0; j < dimX; j++) {
+ l = j * dimY;
+ for(i = 0; i < dimY; i++) {
+ // Derivatives in the x-direction
+ if(i != dimY-1)
+ gradx_upd[(dimX*dimY)*k + i+l] = u_upd[(dimX*dimY)*k + i+1+l] - u_upd[(dimX*dimY)*k + i+l];
+ else
+ gradx_upd[(dimX*dimY)*k + i+l] = 0.0f;
+
+ // Derivatives in the y-direction
+ if(j != dimX-1)
+ grady_upd[(dimX*dimY)*k + i+l] = u_upd[(dimX*dimY)*k + i+dimY+l] -u_upd[(dimX*dimY)*k + i+l];
+ else
+ grady_upd[(dimX*dimY)*k + i+l] = 0.0f;
+ }}}
+ return 1;
+}
+
+float proxF(float *gx, float *gy, float *vx, float *vy, float sigma, int p, int q, int r, int dimX, int dimY, int dimZ)
+{
+ // (S^p, \ell^1) norm decouples at each pixel
+// Spl1(gx, gy, vx, vy, sigma, p, num_channels, dim);
+ float divsigma = 1.0f / sigma;
+
+ // $\ell^{1,1,1}$-TV regularization
+// int i,j,k;
+// #pragma omp parallel for shared (gx,gy,vx,vy) private(i,j,k)
+// for(k = 0; k < dimZ; k++) {
+// for(i=0; i<dimX; i++) {
+// for(j=0; j<dimY; j++) {
+// gx[(dimX*dimY)*k + (i)*dimY + (j)] = SIGN(vx[(dimX*dimY)*k + (i)*dimY + (j)]) * MAX(fabs(vx[(dimX*dimY)*k + (i)*dimY + (j)]) - divsigma, 0.0f);
+// gy[(dimX*dimY)*k + (i)*dimY + (j)] = SIGN(vy[(dimX*dimY)*k + (i)*dimY + (j)]) * MAX(fabs(vy[(dimX*dimY)*k + (i)*dimY + (j)]) - divsigma, 0.0f);
+// }}}
+
+ // Auxiliary vector
+ float *proj, sum, shrinkfactor ;
+ float M1,M2,M3,valuex,valuey,T,D,det,eig1,eig2,sig1,sig2,V1, V2, V3, V4, v0,v1,v2, mu1,mu2,sig1_upd,sig2_upd,t1,t2,t3;
+ int i,j,k, ii, num;
+#pragma omp parallel for shared (gx,gy,vx,vy,p) private(i,ii,j,k,proj,num, sum, shrinkfactor, M1,M2,M3,valuex,valuey,T,D,det,eig1,eig2,sig1,sig2,V1, V2, V3, V4,v0,v1,v2,mu1,mu2,sig1_upd,sig2_upd,t1,t2,t3)
+ for(i=0; i<dimX; i++) {
+ for(j=0; j<dimY; j++) {
+
+ proj = (float*) calloc (2,sizeof(float));
+ // Compute matrix $M\in\R^{2\times 2}$
+ M1 = 0.0f;
+ M2 = 0.0f;
+ M3 = 0.0f;
+
+ for(k = 0; k < dimZ; k++)
+ {
+ valuex = vx[(dimX*dimY)*k + (i)*dimY + (j)];
+ valuey = vy[(dimX*dimY)*k + (i)*dimY + (j)];
+
+ M1 += (valuex * valuex);
+ M2 += (valuex * valuey);
+ M3 += (valuey * valuey);
+ }
+
+ // Compute eigenvalues of M
+ T = M1 + M3;
+ D = M1 * M3 - M2 * M2;
+ det = sqrt(MAX((T * T / 4.0f) - D, 0.0f));
+ eig1 = MAX((T / 2.0f) + det, 0.0f);
+ eig2 = MAX((T / 2.0f) - det, 0.0f);
+ sig1 = sqrt(eig1);
+ sig2 = sqrt(eig2);
+
+ // Compute normalized eigenvectors
+ V1 = V2 = V3 = V4 = 0.0f;
+
+ if(M2 != 0.0f)
+ {
+ v0 = M2;
+ v1 = eig1 - M3;
+ v2 = eig2 - M3;
+
+ mu1 = sqrtf(v0 * v0 + v1 * v1);
+ mu2 = sqrtf(v0 * v0 + v2 * v2);
+
+ if(mu1 > fTiny)
+ {
+ V1 = v1 / mu1;
+ V3 = v0 / mu1;
+ }
+
+ if(mu2 > fTiny)
+ {
+ V2 = v2 / mu2;
+ V4 = v0 / mu2;
+ }
+
+ } else
+ {
+ if(M1 > M3)
+ {
+ V1 = V4 = 1.0f;
+ V2 = V3 = 0.0f;
+
+ } else
+ {
+ V1 = V4 = 0.0f;
+ V2 = V3 = 1.0f;
+ }
+ }
+
+ // Compute prox_p of the diagonal entries
+ sig1_upd = sig2_upd = 0.0f;
+
+ if(p == 1)
+ {
+ sig1_upd = MAX(sig1 - divsigma, 0.0f);
+ sig2_upd = MAX(sig2 - divsigma, 0.0f);
+
+ } else if(p == INFNORM)
+ {
+ proj[0] = sigma * fabs(sig1);
+ proj[1] = sigma * fabs(sig2);
+
+ /*l1 projection part */
+ sum = fLarge;
+ num = 0;
+ shrinkfactor = 0.0f;
+ while(sum > 1.0f)
+ {
+ sum = 0.0f;
+ num = 0;
+
+ for(ii = 0; ii < 2; ii++)
+ {
+ proj[ii] = MAX(proj[ii] - shrinkfactor, 0.0f);
+
+ sum += fabs(proj[ii]);
+ if(proj[ii]!= 0.0f)
+ num++;
+ }
+
+ if(num > 0)
+ shrinkfactor = (sum - 1.0f) / num;
+ else
+ break;
+ }
+ /*l1 proj ends*/
+
+ sig1_upd = sig1 - divsigma * proj[0];
+ sig2_upd = sig2 - divsigma * proj[1];
+ }
+
+ // Compute the diagonal entries of $\widehat{\Sigma}\Sigma^{\dagger}_0$
+ if(sig1 > fTiny)
+ sig1_upd /= sig1;
+
+ if(sig2 > fTiny)
+ sig2_upd /= sig2;
+
+ // Compute solution
+ t1 = sig1_upd * V1 * V1 + sig2_upd * V2 * V2;
+ t2 = sig1_upd * V1 * V3 + sig2_upd * V2 * V4;
+ t3 = sig1_upd * V3 * V3 + sig2_upd * V4 * V4;
+
+ for(k = 0; k < dimZ; k++)
+ {
+ gx[(dimX*dimY)*k + (i)*dimY + (j)] = vx[(dimX*dimY)*k + (i)*dimY + (j)] * t1 + vy[(dimX*dimY)*k + (i)*dimY + (j)] * t2;
+ gy[(dimX*dimY)*k + (i)*dimY + (j)] = vx[(dimX*dimY)*k + (i)*dimY + (j)] * t2 + vy[(dimX*dimY)*k + (i)*dimY + (j)] * t3;
+ }
+
+ // Delete allocated memory
+ free(proj);
+ }}
+
+ return 1;
+}
+
+float divergence(float *qx_upd, float *qy_upd, float *div_upd, int dimX, int dimY, int dimZ)
+{
+ int i, j, k, l;
+#pragma omp parallel for shared(qx_upd,qy_upd,div_upd) private(i, j, k, l)
+ for(k = 0; k < dimZ; k++) {
+ for(j = 0; j < dimX; j++) {
+ l = j * dimY;
+ for(i = 0; i < dimY; i++) {
+ if(i != dimY-1)
+ {
+ // ux[k][i+l] = u[k][i+1+l] - u[k][i+l]
+ div_upd[(dimX*dimY)*k + i+1+l] -= qx_upd[(dimX*dimY)*k + i+l];
+ div_upd[(dimX*dimY)*k + i+l] += qx_upd[(dimX*dimY)*k + i+l];
+ }
+
+ if(j != dimX-1)
+ {
+ // uy[k][i+l] = u[k][i+width+l] - u[k][i+l]
+ div_upd[(dimX*dimY)*k + i+dimY+l] -= qy_upd[(dimX*dimY)*k + i+l];
+ div_upd[(dimX*dimY)*k + i+l] += qy_upd[(dimX*dimY)*k + i+l];
+ }
+ }
+ }
+ }
+ return *div_upd;
+}
diff --git a/Core/regularisers_CPU/TNV_core.h b/Core/regularisers_CPU/TNV_core.h
new file mode 100644
index 0000000..8178181
--- /dev/null
+++ b/Core/regularisers_CPU/TNV_core.h
@@ -0,0 +1,46 @@
+#include <math.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <stdio.h>
+#include "omp.h"
+#include "utils.h"
+
+#define fTiny 0.00000001f
+#define fLarge 100000000.0f
+#define INFNORM -1
+
+#define MAX(i,j) ((i)<(j) ? (j):(i))
+#define MIN(i,j) ((i)<(j) ? (i):(j))
+
+static inline int SIGN(int val) {
+ if (val < 0) return -1;
+ if (val==0) return 0;
+ return 1;
+}
+
+/*
+This work is part of the Core Imaging Library developed by
+Visual Analytics and Imaging System Group of the Science Technology
+Facilities Council, STFC
+
+Copyright 2017 Daniil Kazantsev
+Copyright 2017 Srikanth Nagella, Edoardo Pasca
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+float TNV_CPU_main(float *Input, float *u, float lambda, int maxIter, float tol, int dimX, int dimY, int dimZ);
+
+/*float PDHG(float *A, float *B, float tau, float sigma, float theta, float lambda, int p, int q, int r, float tol, int maxIter, int d_c, int d_w, int d_h);*/
+float proxG(float *u_upd, float *v, float *f, float taulambda, int dimX, int dimY, int dimZ);
+float gradient(float *u_upd, float *gradx_upd, float *grady_upd, int dimX, int dimY, int dimZ);
+float proxF(float *gx, float *gy, float *vx, float *vy, float sigma, int p, int q, int r, int dimX, int dimY, int dimZ);
+float divergence(float *qx_upd, float *qy_upd, float *div_upd, int dimX, int dimY, int dimZ);
diff --git a/Core/regularisers_GPU/TV_SB_GPU_core.cu b/Core/regularisers_GPU/TV_SB_GPU_core.cu
index 0bc466f..b2100da 100755
--- a/Core/regularisers_GPU/TV_SB_GPU_core.cu
+++ b/Core/regularisers_GPU/TV_SB_GPU_core.cu
@@ -35,7 +35,6 @@ limitations under the License.
* Output:
* 1. Filtered/regularized image
*
-* This function is based on the Matlab's code and paper by
* [1]. Goldstein, T. and Osher, S., 2009. The split Bregman method for L1-regularized problems. SIAM journal on imaging sciences, 2(2), pp.323-343.
*/
diff --git a/Readme.md b/Readme.md
index 59ac46a..60c38ab 100644
--- a/Readme.md
+++ b/Readme.md
@@ -5,7 +5,7 @@ CCPi-RGL software consist of 2D/3D regularisation modules for single-channel and
can also be used as image denoising iterative filters. The core modules are written in C-OMP and CUDA languages and wrappers for Matlab and Python are provided.**
<div align="center">
- <img src="docs/images/probl.png" height="250"><br>
+ <img src="docs/images/probl.png" height="225"><br>
</div>
## Prerequisites:
@@ -24,6 +24,7 @@ can also be used as image denoising iterative filters. The core modules are writ
### Multi-channel
1. Fast-Gradient-Projection (FGP) Directional Total Variation [2D/3D CPU/GPU]; (Ref. 3,2)
+2. Total Nuclear Variation (TNV) penalty [2D+channels CPU]; (Ref. 5)
## Installation:
@@ -36,7 +37,8 @@ can also be used as image denoising iterative filters. The core modules are writ
conda build conda-recipe --numpy 1.12 --python 3.5
conda install ccpi-regulariser=0.9.2 --use-local --force
cd demos/
- python demo_cpu_regularisers.py.py # to run CPU demo
+ python demo_cpu_regularisers.py # to run CPU demo
+ python demo_gpu_regularisers.py # to run GPU demo
```
### Matlab
```
@@ -50,6 +52,7 @@ can also be used as image denoising iterative filters. The core modules are writ
2. Beck, A. and Teboulle, M., 2009. Fast gradient-based algorithms for constrained total variation image denoising and deblurring problems. IEEE Transactions on Image Processing, 18(11), pp.2419-2434.
3. Ehrhardt, M.J. and Betcke, M.M., 2016. Multicontrast MRI reconstruction with structure-guided total variation. SIAM Journal on Imaging Sciences, 9(3), pp.1084-1106.
4. Goldstein, T. and Osher, S., 2009. The split Bregman method for L1-regularized problems. SIAM journal on imaging sciences, 2(2), pp.323-343.
+5. Duran, J., Moeller, M., Sbert, C. and Cremers, D., 2016. Collaborative total variation: a general framework for vectorial TV models. SIAM Journal on Imaging Sciences, 9(1), pp.116-151.
### License:
[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
diff --git a/Wrappers/Matlab/demos/demoMatlab_denoise.m b/Wrappers/Matlab/demos/demoMatlab_denoise.m
index 129bedc..dab98dc 100644
--- a/Wrappers/Matlab/demos/demoMatlab_denoise.m
+++ b/Wrappers/Matlab/demos/demoMatlab_denoise.m
@@ -70,3 +70,14 @@ figure; imshow(u_fgp_dtv, [0 1]); title('FGP-dTV denoised image (CPU)');
% tic; u_fgp_dtvG = FGP_dTV_GPU(single(u0), single(u_ref), lambda_reg, iter_fgp, epsil_tol, eta); toc;
% figure; imshow(u_fgp_dtvG, [0 1]); title('FGP-dTV denoised image (GPU)');
%%
+fprintf('Denoise using the TNV prior (CPU) \n');
+slices = 5; N = 512;
+vol3D = zeros(N,N,slices, 'single');
+for i = 1:slices
+vol3D(:,:,i) = Im + .05*randn(size(Im));
+end
+vol3D(vol3D < 0) = 0;
+
+iter_tnv = 200; % number of TNV iterations
+tic; u_tnv = TNV(single(vol3D), lambda_reg, iter_tnv); toc;
+figure; imshow(u_tnv(:,:,3), [0 1]); title('TNV denoised stack of channels (CPU)');
diff --git a/Wrappers/Matlab/mex_compile/compileCPU_mex.m b/Wrappers/Matlab/mex_compile/compileCPU_mex.m
index c3c82ff..9892d73 100644
--- a/Wrappers/Matlab/mex_compile/compileCPU_mex.m
+++ b/Wrappers/Matlab/mex_compile/compileCPU_mex.m
@@ -17,7 +17,10 @@ movefile SB_TV.mex* ../installed/
mex FGP_dTV.c FGP_dTV_core.c utils.c CFLAGS="\$CFLAGS -fopenmp -Wall -std=c99" LDFLAGS="\$LDFLAGS -fopenmp"
movefile FGP_dTV.mex* ../installed/
-delete SB_TV_core* ROF_TV_core* FGP_TV_core* FGP_dTV_core* utils* CCPiDefines.h
+mex TNV.c TNV_core.c utils.c CFLAGS="\$CFLAGS -fopenmp -Wall -std=c99" LDFLAGS="\$LDFLAGS -fopenmp"
+movefile TNV.mex* ../installed/
+
+delete SB_TV_core* ROF_TV_core* FGP_TV_core* FGP_dTV_core* TNV_core* utils* CCPiDefines.h
fprintf('%s \n', 'All successfully compiled!');
diff --git a/Wrappers/Matlab/mex_compile/regularisers_CPU/TNV.c b/Wrappers/Matlab/mex_compile/regularisers_CPU/TNV.c
new file mode 100644
index 0000000..e0584c4
--- /dev/null
+++ b/Wrappers/Matlab/mex_compile/regularisers_CPU/TNV.c
@@ -0,0 +1,73 @@
+/*
+ * This work is part of the Core Imaging Library developed by
+ * Visual Analytics and Imaging System Group of the Science Technology
+ * Facilities Council, STFC
+ *
+ * Copyright 2017 Daniil Kazantsev
+ * Copyright 2017 Srikanth Nagella, Edoardo Pasca
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "matrix.h"
+#include "mex.h"
+#include "TNV_core.h"
+/*
+ * C-OMP implementation of Total Nuclear Variation regularisation model (2D + channels) [1]
+ * The code is modified from the implementation by Joan Duran <joan.duran@uib.es>; see
+ * "denoisingPDHG_ipol.cpp" in Joan's Collaborative Total Variation package
+ *
+ * Input Parameters:
+ * 1. Noisy volume of 2D + channel dimension, i.e. 3D volume
+ * 2. lambda - regularisation parameter
+ * 3. Number of iterations [OPTIONAL parameter]
+ * 4. epsilon - tolerance constant [OPTIONAL parameter]
+ * 5. print information: 0 (off) or 1 (on) [OPTIONAL parameter]
+ *
+ * Output:
+ * 1. Filtered/regularized image
+ *
+ * [1]. Duran, J., Moeller, M., Sbert, C. and Cremers, D., 2016. Collaborative total variation: a general framework for vectorial TV models. SIAM Journal on Imaging Sciences, 9(1), pp.116-151.
+ */
+void mexFunction(
+ int nlhs, mxArray *plhs[],
+ int nrhs, const mxArray *prhs[])
+
+{
+ int number_of_dims, iter, dimX, dimY, dimZ;
+ const int *dim_array;
+ float *Input, *Output=NULL, lambda, epsil;
+
+ number_of_dims = mxGetNumberOfDimensions(prhs[0]);
+ dim_array = mxGetDimensions(prhs[0]);
+
+ /*Handling Matlab input data*/
+ if ((nrhs < 2) || (nrhs > 4)) mexErrMsgTxt("At least 2 parameters are required: Image (2D + channels), Regularisation parameter, iterations number, tolerance");
+
+ Input = (float *) mxGetData(prhs[0]); /* noisy sequence of channels (2D + channels) */
+ lambda = (float) mxGetScalar(prhs[1]); /* regularization parameter */
+ iter = 1000; /* default iterations number */
+ epsil = 1.00e-05; /* default tolerance constant */
+
+ if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) {mexErrMsgTxt("The input image must be in a single precision"); }
+
+ if ((nrhs == 3) || (nrhs == 4)) iter = (int) mxGetScalar(prhs[2]); /* iterations number */
+ if (nrhs == 4) epsil = (float) mxGetScalar(prhs[3]); /* tolerance constant */
+
+ /*Handling Matlab output data*/
+ dimX = dim_array[0]; dimY = dim_array[1]; dimZ = dim_array[2];
+
+ if (number_of_dims == 2) mexErrMsgTxt("The input must be 3D: [X,Y,Channels]");
+ if (number_of_dims == 3) {
+ Output = (float*)mxGetPr(plhs[0] = mxCreateNumericArray(3, dim_array, mxSINGLE_CLASS, mxREAL));
+ /* running the function */
+ TNV_CPU_main(Input, Output, lambda, iter, epsil, dimX, dimY, dimZ);
+ }
+}
\ No newline at end of file
diff --git a/Wrappers/Python/ccpi/filters/regularisers.py b/Wrappers/Python/ccpi/filters/regularisers.py
index 50c4374..81deea9 100644
--- a/Wrappers/Python/ccpi/filters/regularisers.py
+++ b/Wrappers/Python/ccpi/filters/regularisers.py
@@ -2,7 +2,7 @@
script which assigns a proper device core function based on a flag ('cpu' or 'gpu')
"""
-from ccpi.filters.cpu_regularisers_cython import TV_ROF_CPU, TV_FGP_CPU, TV_SB_CPU, dTV_FGP_CPU
+from ccpi.filters.cpu_regularisers_cython import TV_ROF_CPU, TV_FGP_CPU, TV_SB_CPU, dTV_FGP_CPU, TNV_CPU_pyx
from ccpi.filters.gpu_regularisers import TV_ROF_GPU, TV_FGP_GPU, TV_SB_GPU, dTV_FGP_GPU
def ROF_TV(inputData, regularisation_parameter, iterations,
@@ -86,3 +86,8 @@ def FGP_dTV(inputData, refdata, regularisation_parameter, iterations,
else:
raise ValueError('Unknown device {0}. Expecting gpu or cpu'\
.format(device))
+def TNV(inputData, regularisation_parameter, iterations, tolerance_param):
+ return TNV_CPU_pyx(inputData,
+ regularisation_parameter,
+ iterations,
+ tolerance_param)
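With the wrapper above, the new regulariser is called with a channels-first, single-precision stack. The demo added below exercises it in full; a minimal sketch, assuming the package is built and importable, would be:

```
import numpy as np
from ccpi.filters.regularisers import TNV

# five noisy copies of a 256x256 image, stacked channels-first, single precision
noisy_stack = np.float32(np.random.rand(5, 256, 256))

# arguments: input stack, regularisation parameter, iterations, tolerance
denoised = TNV(noisy_stack, 0.04, 200, 1e-5)
print(denoised.shape)   # (5, 256, 256)
```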
diff --git a/Wrappers/Python/demos/demo_cpu_regularisers.py b/Wrappers/Python/demos/demo_cpu_regularisers.py
index 0e4355b..e74fa58 100644
--- a/Wrappers/Python/demos/demo_cpu_regularisers.py
+++ b/Wrappers/Python/demos/demo_cpu_regularisers.py
@@ -12,7 +12,7 @@ import matplotlib.pyplot as plt
import numpy as np
import os
import timeit
-from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, FGP_dTV
+from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, FGP_dTV, TNV
from qualitymetrics import rmse
###############################################################################
def printParametersToString(pars):
@@ -242,6 +242,57 @@ imgplot = plt.imshow(fgp_dtv_cpu, cmap="gray")
plt.title('{}'.format('CPU results'))
+print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
+print ("__________Total nuclear Variation__________")
+print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
+
+## plot
+fig = plt.figure(4)
+plt.suptitle('Performance of TNV regulariser using the CPU')
+a=fig.add_subplot(1,2,1)
+a.set_title('Noisy Image')
+imgplot = plt.imshow(u0,cmap="gray")
+
+channelsNo = 5
+N = 512
+noisyVol = np.zeros((channelsNo,N,N),dtype='float32')
+idealVol = np.zeros((channelsNo,N,N),dtype='float32')
+
+for i in range(channelsNo):
+ noisyVol[i,:,:] = Im + np.random.normal(loc = 0 , scale = perc * Im , size = np.shape(Im))
+ idealVol[i,:,:] = Im
+
+# set parameters
+pars = {'algorithm' : TNV, \
+ 'input' : noisyVol,\
+ 'regularisation_parameter': 0.04, \
+ 'number_of_iterations' : 200 ,\
+ 'tolerance_constant':1e-05
+ }
+
+print ("#############TNV CPU#################")
+start_time = timeit.default_timer()
+tnv_cpu = TNV(pars['input'],
+ pars['regularisation_parameter'],
+ pars['number_of_iterations'],
+ pars['tolerance_constant'])
+
+rms = rmse(idealVol, tnv_cpu)
+pars['rmse'] = rms
+
+txtstr = printParametersToString(pars)
+txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
+print (txtstr)
+a=fig.add_subplot(1,2,2)
+
+# these are matplotlib.patch.Patch properties
+props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
+# place a text box in upper left in axes coords
+a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
+ verticalalignment='top', bbox=props)
+imgplot = plt.imshow(tnv_cpu[3,:,:], cmap="gray")
+plt.title('{}'.format('CPU results'))
+
# Uncomment to test 3D regularisation performance
#%%
diff --git a/Wrappers/Python/src/cpu_regularisers.pyx b/Wrappers/Python/src/cpu_regularisers.pyx
index 417670d..898bb40 100644
--- a/Wrappers/Python/src/cpu_regularisers.pyx
+++ b/Wrappers/Python/src/cpu_regularisers.pyx
@@ -21,6 +21,7 @@ cimport numpy as np
cdef extern float TV_ROF_CPU_main(float *Input, float *Output, float lambdaPar, int iterationsNumb, float tau, int dimX, int dimY, int dimZ);
cdef extern float TV_FGP_CPU_main(float *Input, float *Output, float lambdaPar, int iterationsNumb, float epsil, int methodTV, int nonneg, int printM, int dimX, int dimY, int dimZ);
cdef extern float SB_TV_CPU_main(float *Input, float *Output, float lambdaPar, int iterationsNumb, float epsil, int methodTV, int printM, int dimX, int dimY, int dimZ);
+cdef extern float TNV_CPU_main(float *Input, float *u, float lambdaPar, int maxIter, float tol, int dimX, int dimY, int dimZ);
cdef extern float dTV_FGP_CPU_main(float *Input, float *InputRef, float *Output, float lambdaPar, int iterationsNumb, float epsil, float eta, int methodTV, int nonneg, int printM, int dimX, int dimY, int dimZ);
@@ -249,3 +250,28 @@ def dTV_FGP_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
printM,
dims[2], dims[1], dims[0])
return outputData
+
+#****************************************************************#
+#*********************Total Nuclear Variation********************#
+#****************************************************************#
+def TNV_CPU_pyx(inputData, regularisation_parameter, iterationsNumb, tolerance_param):
+ if inputData.ndim == 2:
+ return
+ elif inputData.ndim == 3:
+ return TNV_3D(inputData, regularisation_parameter, iterationsNumb, tolerance_param)
+
+def TNV_3D(np.ndarray[np.float32_t, ndim=3, mode="c"] inputData,
+ float regularisation_parameter,
+ int iterationsNumb,
+ float tolerance_param):
+ cdef long dims[3]
+ dims[0] = inputData.shape[0]
+ dims[1] = inputData.shape[1]
+ dims[2] = inputData.shape[2]
+
+ cdef np.ndarray[np.float32_t, ndim=3, mode="c"] outputData = \
+ np.zeros([dims[0],dims[1],dims[2]], dtype='float32')
+
+ # Run TNV iterations for 3D (X,Y,Channels) data
+ TNV_CPU_main(&inputData[0,0,0], &outputData[0,0,0], regularisation_parameter, iterationsNumb, tolerance_param, dims[2], dims[1], dims[0])
+ return outputData
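Note that TNV_3D forwards the NumPy shape to the C core in reverse order (dims[2], dims[1], dims[0]), so shape[0] is treated as the channel dimension dimZ. A small sketch of the layout contract this implies for callers; the helper name is hypothetical and not part of the commit:

```
import numpy as np

def as_tnv_input(volume):
    """Coerce an array into the layout the TNV_3D wrapper expects."""
    vol = np.ascontiguousarray(volume, dtype=np.float32)   # C-contiguous float32
    if vol.ndim != 3:
        raise ValueError("TNV expects a 3D array: [channels, rows, cols]")
    # shape[0] -> dimZ (channels), shape[1] -> dimY, shape[2] -> dimX in TNV_CPU_main
    return vol
```

A 2D input falls through the inputData.ndim == 2 branch of TNV_CPU_pyx and returns nothing, so channel stacks are the only supported input here.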