Commit ab43cbce authored by Alessandro Mirone's avatar Alessandro Mirone

aggiunto progetto Jerome

parent b2cf2700
import time
import numpy
from numpy import *
from numpy.fft import ifft
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def gen_base(N):
    """Build an overcomplete dictionary of 3N basis vectors of length N.

    The dictionary stacks:
      - N real Fourier-type vectors (real/imag parts of ifft of the canonical basis),
      - N circularly shifted Gaussians of width ~sqrt(15),
      - N circularly shifted Gaussians of width ~sqrt(20).

    Input:
        N : image dimensionality (assumed even)
    Output:
        CC : float32 array of shape (3N, N), every row normalized to unit L2 norm.
    """
    # Fourier-type part of the basis
    composantes = eye(N)
    composantes = [ifft(t) for t in composantes]
    c = composantes
    # Integer division: the original N/2 yields a float index under Python 3.
    half = N // 2
    CC = [c[0].real] + [t.real for t in c[1:half]] + [t.imag for t in c[1:half]] + [c[half].real]
    # Add shifted Gaussians
    x = arange(N) - half
    extrafunc = exp(-x * x / 15.0)
    CC = CC + [roll(extrafunc, shift) for shift in range(0, N, 1)]
    extrafunc = exp(-x * x / 20.0)
    CC = CC + [roll(extrafunc, shift) for shift in range(0, N, 1)]
    # Turn everything into a float array; CC is a 3N x N array.
    CC = array(CC, "f")
    # Normalize ALL rows of the dictionary.
    # Bug fix: the original looped over range(N) only, leaving the 2N
    # Gaussian rows unnormalized although the comment said otherwise.
    for i in range(len(CC)):
        t = CC[i]
        norma = sqrt((t * t).sum())
        CC[i] = t / norma
    return (CC)
def gen_image(N, sigma):
    """Generate a clean test image and a noisy version of it.

    Inputs:
        N     : image dimensionality
        sigma : standard deviation of the additive Gaussian white noise
    Returns:
        (image, noisy_image) : the clean Gaussian bump and the same
        image corrupted by zero-mean white noise of SD sigma.
    """
    # Clean image: a Gaussian bump centered at N/2
    grid = arange(N) - N / 2.
    image = exp(-grid * grid / 20)
    # Corrupt it with additive white Gaussian noise
    bruit = numpy.random.normal(0, sigma, N)
    noisy_image = image + bruit
    return (image, noisy_image)
class Problem:
    """Denoising problem of the form  argmin_x ||Ax-b||^2 + rho*phi_s(x)
    where phi_s(x) = sum_i phi_s(x(i)) and phi_s(t) = abs(t) - s*log(1+abs(t)/s).

    Three solvers are provided — FISTA (fista_n), Polak-Ribiere conjugate
    gradient (PR_CG) and Chambolle-Pock (CP) — each storing its solution
    and per-iteration error history on the instance.
    """
    def __init__(self, A, b, rho, s, source):
        """Constructor.

        A      : dictionary matrix, shape (M, N); rows are basis vectors
        b      : noisy image, length N
        rho    : penalty coefficient
        s      : smoothness coefficient of phi_s
        source : clean source image (used only to track reconstruction error)
        """
        self.A = A                    # Dictionary
        self.b = b                    # Noisy image
        self.rho = rho                # Penalty coeff
        self.s = s                    # Smoothness coeff
        self.source = source          # Source image
        self.vp = 0                   # Greatest eigenvalue (set by puissance)
        self.L = 0                    # Lipschitz coeff of AtA (set by lip)
        self.sol_F = []               # Solution found by FISTA
        self.err_F = []               # ||A*sol_F - b|| history
        self.sol_CG = []              # Solution found by PR_CG
        self.err_CG = []              # ||A*sol_CG - b|| history
        self.sol_CP = []              # Solution found by CP
        self.err_CP = []              # ||A*sol_CP - b|| history
        self.N = self.A.shape[1]      # Image dimension
        self.M = self.A.shape[0]      # Representation dimension
        self.niter_F = 30             # Nb of iterations for FISTA
        self.niter_CG = 30            # Nb of iterations for PR_CG
        self.niter_CP = 30            # Nb of iterations for CP
    def niter(self, NIT):
        """Set the same iteration budget NIT for all three solvers."""
        self.niter_CG = NIT
        self.niter_F = NIT
        self.niter_CP = NIT
        # self.fista_n(False,True) # 03/19 : ???!!!???
    def gradient(self, x):
        """Gradient of the quadratic fidelity term ||Ax-b||^2.

        Input:
            x : coefficient vector, length M
        Output:
            g : 2*At(Ax-b), length M
        NOTE(review): only the fidelity part is differentiated here; the
        rho*phi_s contribution is handled separately by the callers.
        """
        residu = tensordot(self.A, x, [[0], [0]]) - self.b
        g = 2*tensordot(self.A, residu, [[1], [0]])
        return(g)
    def puissance(self, B, x):
        """Largest eigenvalue of a square matrix by the power method.

        Inputs:
            B : the square matrix
            x : the initial vector
        Side effect:
            stores the largest eigenvalue in self.vp (iterates until the
            eigenvalue estimate changes by less than 0.001).
        """
        res = 1.e5
        vp = 0
        nit = 0
        while res > 0.001:
            x = tensordot(B, x, [[1], [0]])
            # Rescale by the largest component to avoid overflow
            i = argmax(x)
            x = x/float(x[i])
            vp_tmp = vp
            vp = tensordot(B, x, [[1], [0]])[i]
            res = abs(vp-vp_tmp)
            nit += 1
        self.vp = vp
    def lip(self):
        """Lipschitz constant of the gradient of ||Ax-b||^2.

        Runs the power method on tA*A with a random start vector and stores
        2 * (largest eigenvalue of tA*A) in self.L.
        """
        x = numpy.random.normal(0, 1, self.N)
        B = tensordot(self.A, self.A, [[0], [0]])
        #print("taille de tAA ",B.shape)
        self.puissance(B, x)
        self.L = 2*float(self.vp)
    # Soft-thresholding operator: max(|x_i|-rho,0)*sign(x_i)
    def seuillage(self, x):
        """Shrinkage (soft-thresholding) operator for the L1 penalty."""
        x = numpy.sign(x)*(numpy.maximum(abs(x)-self.rho, 0))
        return(x)
    @staticmethod
    def seuillage_s(x, r, s):
        """Shrinkage operator for the smoothed penalty
        phi_s(t) = abs(t) - s*ln(1+abs(t)/s).
        Ref: Elad-Zibulevsky 2010 and Elad-Matalon-Zibulevsky 2007.

        Inputs:
            x : vector to shrink
            r : threshold (typically rho/L or rho*tau)
            s : smoothness parameter of phi_s
        """
        x = numpy.sign(x)*(abs(x)-r-s+sqrt((abs(x)-r-s)*(abs(x)-r-s)+4*s*abs(x)))/2.0
        return(x)
    def fista_n(self, visu, flag_s=False):
        """FISTA with an L1 (or smoothed-L1) penalty for denoising.

        Inputs:
            visu   : True to plot intermediate reconstructions every 10
                     iterations; False to only record errors
            flag_s : True to use the smoothed shrinkage seuillage_s with
                     parameter self.s; False for plain soft-thresholding
                     (s close to 0 -> near-L1 penalty; s >> 1 -> near-L2)
        Side effects:
            appends ||A w - source|| to self.err_F at each iteration and
            stores the final synthesized image in self.sol_F.
        """
        self.lip()
        t = 1.0
        w = zeros([len(self.A)], "f")
        synth = tensordot(self.A, w, [[0], [0]])  # synthesized image
        residu = synth - self.source
        self.err_F.append(sqrt((residu*residu).sum()))
        y = w
        residu = self.source  # NOTE(review): dead assignment, overwritten in the loop
        for i in range(self.niter_F):
            w_tmp = w
            # Proximal gradient step (shrinkage of a gradient step of size 1/L)
            if flag_s:
                w = self.seuillage_s(y-self.gradient(y)/self.L, self.rho/self.L, self.s)
            else:
                w = self.seuillage(y-self.gradient(y)/self.L, self.rho/self.L)
            # FISTA momentum update
            t_tmp = t
            t = 0.5*(1+sqrt(1+4*t*t))
            y = w+(t_tmp-1)/t*(w-w_tmp)
            synth = tensordot(self.A, w, [[0], [0]])  # synthesized image
            residu = synth - self.source
            self.err_F.append(sqrt((residu*residu).sum()))
            if visu == True and i % 10 == 0:
                plot(arange(self.N), self.b, 'b')
                plot(arange(self.N), synth, 'r')
                plot(arange(self.N), self.source, 'g')
                show(True)
        self.sol_F = synth
    def eval_phi(self, x):
        """Evaluate phi_s at x together with its first two derivatives.

        Returns (phi_s(x), Grad_x(phi_s), Hess_x(phi_s)) where the Hessian,
        being diagonal, is returned as a vector.
        """
        n = x.shape[0]
        px = 0
        dpx = zeros(n)
        ddpx = zeros(n)
        for i in arange(n):
            absx = abs(x[i])
            px += absx-self.s*log(1+absx/self.s)
            dpx[i] = sign(x[i])*absx/(self.s+absx)
            ddpx[i] = self.s/((self.s+absx)*(self.s+absx))
        return(px, dpx, ddpx)
    def SideFunc(self, p, x, a):
        """Called by Newton_Bisection: first and second derivative of
        f(a) = J(x+ap) along the search direction, where J is the full
        objective ||A.-b||^2 + rho*phi_s(.):
            f'(a)  = p.GradJ(x+ap)
            f''(a) = p.HessJ(x+ap).p
        Inputs:
            p : descent direction
            x : current point
            a : step length at which to evaluate
        Outputs:
            h  : f'(a)
            dh : f''(a)
        """
        Ap = tensordot(self.A, p, [[0], [0]])
        normAp = (Ap*Ap).sum()
        Axmb = tensordot(self.A, x, [[0], [0]])-self.b
        phi = self.eval_phi(x+a*p)
        h = 2*inner(Ap, Axmb)+2*a*normAp+self.rho*inner(p, phi[1])
        dh = 2*normAp+self.rho*inner(p*p, phi[2])
        return(h, dh)
    def Newton_Bisection(self, niter, accuracy, p, x, alpha):
        """Safeguarded Newton + bisection line search for the conjugate
        gradient algorithm: finds argmin_alpha J(x+alpha*p) by locating a
        zero of h(alpha) = dJ/dalpha, avoiding the oscillation a plain
        Newton iteration can exhibit.
        Notation:
            h  : dJ/dalpha of J(x+alpha*p)
            dh : derivative of h
        Inputs:
            niter    : max number of iterations
            accuracy : requested precision on the step
            p        : descent direction
            x        : starting vector
            alpha    : initial value of alpha
        Output:
            alpha : the computed step length
        """
        # Bracket the root by dichotomy (a1 and a2 on exit)
        alpham = alpha-1
        alphap = alpha+1
        hm = self.SideFunc(p, x, alpham)[0]
        hp = self.SideFunc(p, x, alphap)[0]
        h = self.SideFunc(p, x, alpha)[0]
        if h == 0:
            return(alpha)
        elif hm == 0:
            return(alpha - 1)
        elif hp == 0:
            return(alpha+1)
        # Widen the bracket until a sign change of h is found
        while (hm*h > 0 and hp*h > 0):
            alpham -= 1
            alphap += 1
            hm = self.SideFunc(p, x, alpham)[0]
            hp = self.SideFunc(p, x, alphap)[0]
            if hm == 0:
                return(alpham)
            elif hp == 0:
                return(alphap)
        if h*hp < 0:
            a1 = alpha
            a2 = alphap
        elif h*hm < 0:
            a1 = alpham
            a2 = alpha
        # Safeguarded Newton iteration within the bracket [a1, a2]
        hl = self.SideFunc(p, x, a1)[0]
        hh = self.SideFunc(p, x, a2)[0]
        if hl*hh > 0:
            print("Mauvaise localisation initiale de la solution")
        # NOTE(review): if hl == 0 or hh == 0, al/ah stay undefined and the
        # loop below would raise — presumably those cases never occur once
        # the bracketing above has succeeded; verify.
        if hl == 0:
            alpha = a1
        elif hh == 0:
            alpha = a2
        elif hl < 0:
            al = a1
            ah = a2
        else:
            al = a2
            ah = a1
        alpha = .5*(a1+a2)
        daold = abs(a2-a1)
        da = daold
        func = self.SideFunc(p, x, alpha)
        h = func[0]
        dh = func[1]
        for i in range(niter):
            # Fall back to bisection when the Newton step would leave the
            # bracket or would not shrink fast enough
            if ((alpha - ah)*dh-h)*((alpha-al)*dh-h) > 0 or abs(2*h) > abs(daold*dh):
                daold = da
                da = 0.5*(ah-al)
                alpha = al+da
                if al == alpha:
                    break
            else:
                # Plain Newton step
                daold = da
                da = h/dh
                tmp = alpha
                alpha = alpha - da
                if tmp == alpha:
                    break
            if abs(da) < accuracy:
                break
            func = self.SideFunc(p, x, alpha)
            h = func[0]
            dh = func[1]
            # Keep the bracket around the sign change
            if h < 0:
                al = alpha
            else:
                ah = alpha
            i += 1
        return(alpha)
    def PR_CG(self):
        """Nonlinear conjugate gradient (Polak-Ribiere flavour, with
        beta = max(0, beta) so descent directions are reset automatically
        when needed) for:
            argmin_x ||Ax-y||^2 + rho*phi_s(x)
        where phi_s(x) := sum(phi_s(x_i)) and phi_s(x_i) = |x|-s*log(1+|x|/s).
        Side effects:
            appends the reconstruction error to self.err_CG at each
            iteration and stores the final synthesized image in self.sol_CG.
        """
        x = zeros([len(self.A)], "f")
        f = tensordot(self.A, x, [[0], [0]])-self.b
        self.err_CG.append(sqrt((f*f).sum()))
        phi = self.eval_phi(x)
        f = (f*f).sum()+self.rho*phi[0]
        gradQ = self.gradient(x)
        gradL = phi[1]
        gradf = gradQ+self.rho*gradL
        p = -gradf
        for k in range(self.niter_CG):
            # Initial step from the exact minimizer of the quadratic part,
            # then refined by the Newton/bisection line search
            Ap = tensordot(self.A, p, [[0], [0]])
            normAp = (Ap*Ap).sum()
            Axmb = tensordot(self.A, x, [[0], [0]])-self.b
            term1 = -inner(Ap, Axmb)/normAp
            alpha = term1  # Linesearch initial value
            alpha = self.Newton_Bisection(100, 0.0001, p, x, alpha)  # Line search: Newton + dichotomy mix
            x = x+alpha*p
            # Change of 27 March: "exact" computation of beta instead of the PR formula
            gradQ_tmp = gradQ
            gradf_tmp = gradf
            gradQ = self.gradient(x)
            L = self.eval_phi(x)
            gradL = L[1]
            hessL = L[2]
            gradf = gradQ + self.rho*gradL
            var_gradQ = gradQ-gradQ_tmp
            term = var_gradQ+self.rho*alpha*p*hessL
            beta = inner(term, gradf)/inner(term, p)
            # End of the change
            # Restart test (specific to Polak-Ribiere)
            if beta < 0:
                print("Restart")
            beta = max(0, beta)
            p = -gradf + beta*p
            self.sol_CG = tensordot(self.A, x, [[0], [0]])
            res = self.sol_CG - self.source
            self.err_CG.append(sqrt((res*res).sum()))
            k += 1
    # Below functions used in Chambolle_Pock algorithm
    # Ref : A first order primal-dual algo for convex problems with applications to imaging, Chambolle, Pock, 2010
    # Considered problem : argmin_x F(Kx)+G(x) with :
    # F:RN->R, G:RM->R proper convex lower semi-continuous
    # K:RM->RN linear continuous
    # Conjugate function are denoted with a *.
    # Resolvent operator : (I+tau*deltaF)(y) = argmin_x {0.5||x-y||^2+F(x)}
    # We suppose resolvent operator (denoted ro in the following) have closed form solution (hence functions rofs (ro of F*) et rog (ro of G)
    def CP(self):
        """Chambolle-Pock primal-dual algorithm applied to the same
        denoising problem (K = A, F = ||.-b||^2, G = rho*phi_s).

        Side effects:
            appends the reconstruction error to self.err_CP at each
            iteration and stores the final synthesized image in self.sol_CP.
        """
        tau = 0.01    # primal step
        sigma = 1     # dual step
        theta = 0.5   # over-relaxation parameter
        x = zeros(self.M)
        y = zeros(self.N)
        xb = x
        self.sol_CP = tensordot(self.A, x, [[0], [0]])
        self.err_CP.append(sqrt(((self.sol_CP-self.source)*(self.sol_CP-self.source)).sum()))
        for i in arange(self.niter_CP):
            # Dual update: presumably the closed-form resolvent of F* for
            # F(y) = ||y-b||^2 — verify against the Chambolle-Pock paper
            tmp1 = y + sigma*tensordot(self.A, xb, [[0], [0]])
            y = tmp1 - (tmp1+2*self.b)/(1.0+2*sigma)
            # Primal update: resolvent of G via the smoothed shrinkage
            xold = x
            tmp2 = x-tau*tensordot(self.A, y, [[1], [0]])
            x = self.seuillage_s(tmp2, self.rho*tau, self.s)
            # Over-relaxation
            xb = x+theta*(x-xold)
            self.sol_CP = tensordot(self.A, x, [[0], [0]])
            self.err_CP.append(sqrt(((self.sol_CP-self.source)*(self.sol_CP-self.source)).sum()))
    def graph(self):
        """Plot the error histories (left) and the reconstructions (right)
        for the FISTA and PR-CG solvers; both must have been run first.

        NOTE(review): x has length niter_CG while err_F/err_CG hold
        niter+1 entries after a single solver run — the plot calls look
        like they would raise on a length mismatch; verify.
        """
        x = arange(self.niter_CG)
        subplot(121)
        plot(x, self.err_F, 'r', label='FISTA-S')
        plot(x, self.err_CG, 'g', label='PR-CG')
        legend()
        title('Error shrinking')
        xx = arange(self.N)
        subplot(122)
        plot(xx, self.sol_F, 'r', label='FISTA-S')
        plot(xx, self.sol_CG, 'g', label='PR-CG')
        plot(xx, self.source, 'k', label='Source')
        plot(xx, self.b, 'b', label='Noisy')
        legend()
        title('Denoising')
        show()
import time
import numpy
#import random
from numpy import *
from numpy.fft import ifft
from pylab import *
import matplotlib.pyplot as plt
import moduleJL2 as JL2
# Scan the penalty coefficient rho at fixed smoothness s = 0.01 and plot
# the final (30-iteration) error of the three solvers against rho.
N = 40
A = JL2.gen_base(N)
[source, b] = JL2.gen_image(N, 0.1)
rho = linspace(0.1, 1, 10)
s = linspace(0.01, 0.1, 10)
e_FISTA = zeros(10)
e_PRCG = zeros(10)
e_CP = zeros(10)
for i in range(10):
    P1 = JL2.Problem(A, b, rho[i], 0.01, source)
    P1.fista_n(False, True)
    e_FISTA[i] = P1.err_F[30]
    P1.PR_CG()
    e_PRCG[i] = P1.err_CG[30]
    P1.CP()
    e_CP[i] = P1.err_CP[30]
show()
plot(rho, e_FISTA, 'r', label='FISTA')
plot(rho, e_PRCG, 'g', label='PR-CG')
plot(rho, e_CP, 'c', label='CP')
ylim(0, 1)
# Bug fixes: the original 'Analyse de l''erreur...' was adjacent-string
# concatenation that silently dropped the apostrophe; the x-axis is rho,
# not s; the curve labels were never shown because legend() was missing.
title("Analyse de l'erreur apres 30 iterations - sigma = 0.1 - s = 0.01")
xlabel("rho")
ylabel("erreur")
legend()
show()
import time
import numpy
#import random
from numpy import *
from numpy.fft import ifft
from pylab import *
import matplotlib.pyplot as plt
import moduleJL2 as JL2
# Scan the smoothness parameter s at fixed penalty rho = 0.5 and record
# the error of each solver after 30 iterations.
N = 40
A = JL2.gen_base(N)
[source, b] = JL2.gen_image(N, 0.1)
rho = linspace(0.1, 1, 10)
s = linspace(0.005, 0.015, 10)
e_FISTA = zeros(10)
e_PRCG = zeros(10)
e_CP = zeros(10)
for i, s_val in enumerate(s):
    # One fresh Problem per smoothness value; run all three solvers
    P1 = JL2.Problem(A, b, 0.5, s_val, source)
    P1.fista_n(False, True)
    e_FISTA[i] = P1.err_F[30]
    P1.PR_CG()
    e_PRCG[i] = P1.err_CG[30]
    P1.CP()
    e_CP[i] = P1.err_CP[30]
show()
# Final error of each solver as a function of s
plot(s, e_FISTA, 'r', label='FISTA')
plot(s, e_PRCG, 'g', label='PR-CG')
plot(s, e_CP, 'c', label='CP')
ylim(0, 1)
title("Analyse de l'erreur apres 30 iterations - sigma = 0.1 - rho = 0.5")
xlabel("s")
ylabel("erreur")
show()
import time
import numpy
#import random
from numpy import *
from numpy.fft import ifft
from pylab import *
import matplotlib.pyplot as plt
import moduleJL2 as JL2
# Time the three solvers on one problem instance and plot their error
# histories over the iterations.
N = 40
A = JL2.gen_base(N)
[source, b] = JL2.gen_image(N, 0.1)
P1 = JL2.Problem(A, b, 0.5, 0.01, source)
#P1.niter(40)
# Bug fix: time.clock() was removed in Python 3.8 — time.perf_counter()
# is the portable high-resolution replacement. Timing variables renamed
# so the noisy image `b` is no longer clobbered by a timestamp.
t0 = time.perf_counter()
P1.fista_n(False, True)
t1 = time.perf_counter()
P1.PR_CG()
t2 = time.perf_counter()
P1.CP()
t3 = time.perf_counter()
print("FISTA : ", str(t1 - t0))
print("PR_CG : ", str(t2 - t1))
print("CP : ", str(t3 - t2))
x = arange(P1.niter_CG + 1)
plot(x, P1.err_F, 'r', label='FISTA')
plot(x, P1.err_CG, 'g', label='PR-CG')
plot(x, P1.err_CP, 'c', label='CP')
legend()
show()
#P1.graph()
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment