author    Daniil Kazantsev <dkazanc3@googlemail.com>    2018-10-17 15:23:23 +0100
committer GitHub <noreply@github.com>                   2018-10-17 15:23:23 +0100
commit    abc61cfd1258969d5cc5eedec5d52183e5407556 (patch)
tree      a44be3e78745fbca0ecca1f67585c755f11836c3
parent    56d876e94e67f0d774627378c0527ae7b390e13a (diff)
parent    6887ae5026e20fa8acad644f040fdab1a085e772 (diff)
Merge pull request #62 from vais-ral/demo_tests
fixes tests/demos and closes #43
-rw-r--r--  Core/regularisers_CPU/FGP_TV_core.c                     | 14
-rwxr-xr-x  Wrappers/Python/conda-recipe/run_test.py                | 20
-rw-r--r--  Wrappers/Python/demos/demo_cpu_regularisers.py          |  2
-rw-r--r--  Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py   |  8
4 files changed, 22 insertions, 22 deletions
diff --git a/Core/regularisers_CPU/FGP_TV_core.c b/Core/regularisers_CPU/FGP_TV_core.c
index d828d48..68d58b7 100644
--- a/Core/regularisers_CPU/FGP_TV_core.c
+++ b/Core/regularisers_CPU/FGP_TV_core.c
@@ -55,7 +55,7 @@ float TV_FGP_CPU_main(float *Input, float *Output, float lambdaPar, int iteratio
P1 = calloc(DimTotal, sizeof(float));
P2 = calloc(DimTotal, sizeof(float));
P1_prev = calloc(DimTotal, sizeof(float));
- P2_prev = calloc(DimTotal, sizeof(float));
+ P2_prev = calloc(DimTotal, sizeof(float));
R1 = calloc(DimTotal, sizeof(float));
R2 = calloc(DimTotal, sizeof(float));
@@ -63,13 +63,13 @@ float TV_FGP_CPU_main(float *Input, float *Output, float lambdaPar, int iteratio
for(ll=0; ll<iterationsNumb; ll++) {
/* computing the gradient of the objective function */
- Obj_func2D(Input, Output, R1, R2, lambdaPar, (long)(dimY), (long)(dimZ));
+ Obj_func2D(Input, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));
/* apply nonnegativity */
- if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}
+ if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}
/*Taking a step towards minus of the gradient*/
- Grad_func2D(P1, P2, Output, R1, R2, lambdaPar, (long)(dimY), (long)(dimZ));
+ Grad_func2D(P1, P2, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));
/* projection step */
Proj_func2D(P1, P2, methodTV, DimTotal);
@@ -90,9 +90,9 @@ float TV_FGP_CPU_main(float *Input, float *Output, float lambdaPar, int iteratio
if (count > 4) break;
/*storing old values*/
- copyIm(Output, Output_prev, (long)(dimY), (long)(dimZ), 1l);
- copyIm(P1, P1_prev, (long)(dimY), (long)(dimZ), 1l);
- copyIm(P2, P2_prev, (long)(dimY), (long)(dimZ), 1l);
+ copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);
+ copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), 1l);
+ copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), 1l);
tk = tkp1;
}
if (printM == 1) printf("FGP-TV iterations stopped at iteration %i \n", ll);
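
The hunk above passes (dimX, dimY) instead of (dimY, dimZ) to the 2D helpers. For a single 2D slice dimZ is 1, so a helper that walks dimX*dimY*dimZ elements would previously have touched only dimY elements of the image. The sketch below illustrates the indexing argument in Python with a hypothetical copy_im stand-in for the C routine copyIm; it is not the toolkit's implementation.

import numpy as np

def copy_im(src, dst, dim_x, dim_y, dim_z):
    # Hypothetical stand-in for the C helper copyIm: copy dim_x*dim_y*dim_z
    # elements of the flattened image buffer.
    n = dim_x * dim_y * dim_z
    dst[:n] = src[:n]

dim_x, dim_y, dim_z = 512, 512, 1                   # one 2D slice
src = np.random.rand(dim_x * dim_y).astype('float32')
dst = np.zeros_like(src)

copy_im(src, dst, dim_y, dim_z, 1)                  # pre-fix argument order
print(np.count_nonzero(dst))                        # only 512 elements copied

copy_im(src, dst, dim_x, dim_y, 1)                  # post-fix argument order
print(np.count_nonzero(dst))                        # all 262144 elements copied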
diff --git a/Wrappers/Python/conda-recipe/run_test.py b/Wrappers/Python/conda-recipe/run_test.py
index 398ef60..86013a3 100755
--- a/Wrappers/Python/conda-recipe/run_test.py
+++ b/Wrappers/Python/conda-recipe/run_test.py
@@ -65,8 +65,8 @@ class TestRegularisers(unittest.TestCase):
pars = {'algorithm': ROF_TV, \
'input' : u0,\
'regularisation_parameter':0.04,\
- 'number_of_iterations': 1200,\
- 'time_marching_parameter': 0.0025
+ 'number_of_iterations': 1000,\
+ 'time_marching_parameter': 0.0001
}
print ("#############ROF TV CPU####################")
start_time = timeit.default_timer()
@@ -94,7 +94,7 @@ class TestRegularisers(unittest.TestCase):
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
print ("--------Compare the results--------")
- tolerance = 1e-05
+ tolerance = 1e-04
diff_im = np.zeros(np.shape(rof_cpu))
diff_im = abs(rof_cpu - rof_gpu)
diff_im[diff_im > tolerance] = 1
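
The relaxed tolerance (1e-05 to 1e-04) feeds the element-wise CPU/GPU comparison pattern used throughout run_test.py, shown schematically below. The closing assertion is an assumption about how the check concludes, not a verbatim line from the test.

import numpy as np

def pixels_outside_tolerance(a, b, tolerance=1e-04):
    # Schematic version of the comparison pattern above: count pixels whose
    # CPU/GPU discrepancy exceeds the tolerance.
    diff_im = np.abs(np.asarray(a, dtype='float32') -
                     np.asarray(b, dtype='float32'))
    return np.count_nonzero(diff_im > tolerance)

# Hypothetical use with the two ROF-TV results from the test:
#   assert pixels_outside_tolerance(rof_cpu, rof_gpu) <= 1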
@@ -361,8 +361,8 @@ class TestRegularisers(unittest.TestCase):
'input' : u0,\
'regularisation_parameterROF':0.04, \
'regularisation_parameterLLT':0.01, \
- 'number_of_iterations' :500 ,\
- 'time_marching_parameter' :0.0025 ,\
+ 'number_of_iterations' :1000 ,\
+ 'time_marching_parameter' :0.0001 ,\
}
print ("#############LLT- ROF CPU####################")
@@ -394,7 +394,7 @@ class TestRegularisers(unittest.TestCase):
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
print ("--------Compare the results--------")
- tolerance = 1e-05
+ tolerance = 1e-04
diff_im = np.zeros(np.shape(lltrof_gpu))
diff_im = abs(lltrof_cpu - lltrof_gpu)
diff_im[diff_im > tolerance] = 1
@@ -643,14 +643,14 @@ class TestRegularisers(unittest.TestCase):
Im = np.asarray(Im, dtype='float32')
"""
tolerance = 1e-05
- rms_rof_exp = 0.006812507 #expected value for ROF model
+ rms_rof_exp = 8.313131464999238e-05 #expected value for ROF model
# set parameters for ROF-TV
pars_rof_tv = {'algorithm': ROF_TV, \
'input' : Im,\
'regularisation_parameter':0.04,\
'number_of_iterations': 50,\
- 'time_marching_parameter': 0.0025
+ 'time_marching_parameter': 0.00001
}
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_________testing ROF-TV (2D, CPU)__________")
@@ -715,14 +715,14 @@ class TestRegularisers(unittest.TestCase):
Im = Im/255
tolerance = 1e-05
- rms_rof_exp = 0.006812507 #expected value for ROF model
+ rms_rof_exp = 8.313131464999238e-05 #expected value for ROF model
# set parameters for ROF-TV
pars_rof_tv = {'algorithm': ROF_TV, \
'input' : Im,\
'regularisation_parameter':0.04,\
'number_of_iterations': 50,\
- 'time_marching_parameter': 0.0025
+ 'time_marching_parameter': 0.00001
}
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("_________testing ROF-TV (2D, GPU)__________")
diff --git a/Wrappers/Python/demos/demo_cpu_regularisers.py b/Wrappers/Python/demos/demo_cpu_regularisers.py
index b94f11c..e99b271 100644
--- a/Wrappers/Python/demos/demo_cpu_regularisers.py
+++ b/Wrappers/Python/demos/demo_cpu_regularisers.py
@@ -36,7 +36,7 @@ filename = os.path.join(".." , ".." , ".." , "data" ,"lena_gray_512.tif")
Im = plt.imread(filename)
Im = np.asarray(Im, dtype='float32')
-Im = Im/255
+Im = Im/255.0
perc = 0.05
u0 = Im + np.random.normal(loc = 0 ,
scale = perc * Im ,
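
Changing Im/255 to Im/255.0 makes the normalisation unambiguously floating-point. Here Im has already been cast to float32, so the value is unchanged; the explicit literal mainly guards against the classic integer-division pitfall (Python 2 semantics, or any accidental floor division on an integer image), illustrated below with a hypothetical uint8 array.

import numpy as np

im_uint8 = np.array([[0, 64, 128, 255]], dtype='uint8')

# Floor division -- what an integer image divided by an integer literal
# does under Python 2: everything below 255 collapses to 0.
print(im_uint8 // 255)      # [[0 0 0 1]]

# True division with a float literal -- what Im/255.0 guarantees.
print(im_uint8 / 255.0)     # approx. [[0. 0.251 0.502 1.]]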
diff --git a/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py b/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
index e45dc40..3d6e92f 100644
--- a/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
+++ b/Wrappers/Python/demos/demo_cpu_vs_gpu_regularisers.py
@@ -66,8 +66,8 @@ imgplot = plt.imshow(u0,cmap="gray")
pars = {'algorithm': ROF_TV, \
'input' : u0,\
'regularisation_parameter':0.04,\
- 'number_of_iterations': 1200,\
- 'time_marching_parameter': 0.0025
+ 'number_of_iterations': 4500,\
+ 'time_marching_parameter': 0.00002
}
print ("#############ROF TV CPU####################")
start_time = timeit.default_timer()
@@ -406,8 +406,8 @@ pars = {'algorithm' : LLT_ROF, \
'input' : u0,\
'regularisation_parameterROF':0.04, \
'regularisation_parameterLLT':0.01, \
- 'number_of_iterations' :500 ,\
- 'time_marching_parameter' :0.0025 ,\
+ 'number_of_iterations' :4500 ,\
+ 'time_marching_parameter' :0.00002 ,\
}
print ("#############LLT- ROF CPU####################")