Commit fe2f4d03 authored by Alessandro Mirone's avatar Alessandro Mirone
Browse files

added positivity constraint in TV regularisation

parent b5a06ee1
......@@ -19,36 +19,30 @@ def mdiv(grad):
res[ :-1 , :, : ] += this_grad[0, :-1 , :,:]
res[ 1:-1 , :, : ] -= this_grad[0, :-2 , :,:]
res[ -1 , :, : ] -= this_grad[0, -2 , :,:]
res[ :, :-1 , : ] += this_grad[0, :, :-1 ,:]
res[ :, 1:-1 , : ] -= this_grad[0, :, :-2 ,:]
res[ :, -1 , : ] -= this_grad[0, :, -2 ,:]
res[ :, :-1 , : ] += this_grad[1, :, :-1 ,:]
res[ :, 1:-1 , : ] -= this_grad[1, :, :-2 ,:]
res[ :, -1 , : ] -= this_grad[1, :, -2 ,:]
res[ :, : , :-1 ] += this_grad[1, : ,:, :-1 ]
res[ :, : , 1:-1 ] -= this_grad[1, : ,:, :-2 ]
res[ :, : , -1 ] -= this_grad[1, : ,:, -2 ]
return res
def mygradient(img):
    """Return the forward-difference gradient field of a 3-D array.

    Parameters
    ----------
    img : ndarray
        3-D input array.

    Returns
    -------
    gradient : ndarray
        Array of shape ``(3,) + img.shape``; ``gradient[k]`` holds the
        forward difference of ``img`` along axis ``k``, zero-padded on the
        last slice of that axis (``np.diff`` shortens the axis by one, so
        the result cannot fill the full slot — the original full-slot
        assignment raised a shape-mismatch error).
    """
    shape = [3] + list(img.shape)
    gradient = np.zeros(shape, dtype=img.dtype)
    # np.diff(img, axis=k) has shape reduced by 1 along axis k; store it in
    # the leading slice and leave the trailing slice at zero (Neumann edge).
    gradient[0, :-1, :, :] = np.diff(img, axis=0)
    gradient[1, :, :-1, :] = np.diff(img, axis=1)
    gradient[2, :, :, :-1] = np.diff(img, axis=2)
    return gradient
def v_project(v, weight):
    """Rescale the dual field ``v`` by the pointwise minimum of ``weight``
    and the Euclidean norm of its first two components.

    NOTE(review): only ``v[0]`` and ``v[1]`` enter the norm even though the
    gradient field now carries three components — confirm against the
    caller. The divisor is zero wherever ``v[0] == v[1] == 0``, which will
    produce inf/nan; verify whether the caller guarantees non-zero input.
    """
    pointwise_norm = np.sqrt(v[0] ** 2 + v[1] ** 2)
    clipped = np.minimum(weight, pointwise_norm)
    return v / clipped
def my_denoise_tv_chambolle_positive(image, weight=0.1, n_iter_max):
ndim = image.ndim
g = np.zeros_like(p)
x = np.zeros_like(image)
tmpxa = np.zeros_like(image)
v = np.zeros((image.ndim, ) + image.shape, dtype=image.dtype)
i = 0
sigma = 1.0/math.sqrt(8.0)
tau = 1.0/math.sqrt(8.0)
while i < n_iter_max:
......@@ -62,11 +56,98 @@ def my_denoise_tv_chambolle_positive(image, weight=0.1, n_iter_max):
return x
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200,
positivity=False):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
image : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
positivity : bool, optional
Adds positivity constraint
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = image.ndim
p = np.zeros((image.ndim, ) + image.shape, dtype=image.dtype)
g = np.zeros_like(p)
d = np.zeros_like(image)
i = 0
while i < n_iter_max:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
slices_d = [slice(None), ] * ndim
slices_p = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax+1] = slice(0, -1)
slices_p[0] = ax
d[tuple(slices_d)] += p[tuple(slices_p)]
slices_d[ax] = slice(None)
slices_p[ax+1] = slice(None)
out_nopos = image + d
else:
out_nopos = image
if not positivity:
out = out_nopos
else:
out = np.maximum(0, out_nopos)
removed = np.minimum(out_nopos, 0)
d = d-removed
E = (d ** 2).sum()
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
slices_g = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_g[ax+1] = slice(0, -1)
slices_g[0] = ax
g[tuple(slices_g)] = np.diff(out, axis=ax)
slices_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
E += weight * norm.sum()
tau = 1. / (2.*ndim)
norm *= tau / weight
norm += 1.
p -= tau * g
p /= norm
E /= float(image.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def superr( scalDD, scalDS, scalSS, niter=15, beta=1.0e-8):
"""
......@@ -125,7 +206,9 @@ def Fista( scalDD, scalDS , scalSS, solution , niter=500, beta=0.1 ):
err = calculate_grad(scalDD, scalDS , scalSS, y, grad)
solution[:] = y - grad/Lip
solution[:]=skimage.restoration.denoise_tv_chambolle(solution, weight=beta, eps=0.000002)
# solution[:]=skimage.restoration.denoise_tv_chambolle(solution, weight=beta, eps=0.000002)
solution[:]=_denoise_tv_chambolle_nd(solution, weight=beta, eps=0.000002, positivity=True)
## solution[:] = np.maximum(solution, 0)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment