Commit 67a91dd5 authored by myron's avatar myron Committed by Pierre Paleo
Browse files

restructured OK

parent f2faf172
......@@ -494,7 +494,7 @@ class AlignmentBase(object):
class CenterOfRotation(AlignmentBase):
def find_shift_original(
def find_shift(
self,
img_1: np.ndarray,
img_2: np.ndarray,
......@@ -504,9 +504,6 @@ class CenterOfRotation(AlignmentBase):
peak_fit_radius=1,
high_pass=None,
low_pass=None,
half_tomo_cor_guess=None,
half_tomo_return_stats=False,
global_search=False,
):
"""Find the Center of Rotation (CoR), given two images.
......@@ -552,12 +549,6 @@ class CenterOfRotation(AlignmentBase):
Low-pass filter properties, as described in `nabu.misc.fourier_filters`
high_pass: float or sequence of two floats
High-pass filter properties, as described in `nabu.misc.fourier_filters`
half_tomo_cor_guess: float or None
The approximate position of the rotation axis from the image center. Optional.
When given a special algorithm is used which can work also in half-tomo conditions
global_search: False, True or a couple of floats
If True a global search is done on the detector span without assuming centered scan conditions.
If (lim1,lim2) the search is done between lim1 and lim2
Raises
------
......@@ -585,15 +576,6 @@ class CenterOfRotation(AlignmentBase):
>>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3))
"""
if global_search:
if global_search is True:
limits = None
else:
limits = global_search
return self.global_search_master_(
img_1, img_2, roi_yxhw, median_filt_shape, padding_mode, peak_fit_radius, high_pass, low_pass, limits=limits
)
self._check_img_pair_sizes(img_1, img_2)
if peak_fit_radius < 1:
......@@ -608,18 +590,6 @@ class CenterOfRotation(AlignmentBase):
img_1 = self._prepare_image(img_1, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
img_2 = self._prepare_image(img_2, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
if half_tomo_cor_guess is not None:
cor_in_img = img_1.shape[1] // 2 + half_tomo_cor_guess
tmpsigma = min(
(img_1.shape[1] - cor_in_img) / 4.0,
(cor_in_img) / 4.0,
)
tmpx = (np.arange(img_1.shape[1]) - cor_in_img) / tmpsigma
apodis = np.exp(-tmpx * tmpx / 2.0)
if half_tomo_return_stats:
img_1_orig = np.array(img_1)
img_1[:] = img_1 * apodis
cc = self._compute_correlation_fft(img_1, img_2, padding_mode, high_pass=high_pass, low_pass=low_pass)
img_shape = img_2.shape
......@@ -629,147 +599,20 @@ class CenterOfRotation(AlignmentBase):
(f_vals, fv, fh) = self.extract_peak_region_2d(cc, peak_radius=peak_fit_radius, cc_vs=cc_vs, cc_hs=cc_hs)
fitted_shifts_vh = self.refine_max_position_2d(f_vals, fv, fh)
return_value = fitted_shifts_vh[-1] / 2.0
if half_tomo_cor_guess is not None:
# fftfreqs and fftshifts are introducing jumps in the results
# we correct for that here
tmp = fitted_shifts_vh[-1]
p1 = tmp
if tmp < 0:
p2 = cc.shape[1] + tmp
else:
p2 = -cc.shape[1] + tmp
if abs(half_tomo_cor_guess - p1 / 2) < abs(half_tomo_cor_guess - p2 / 2):
return_value = p1 / 2
else:
return_value = p2 / 2
if half_tomo_return_stats:
cor_in_img = img_1.shape[1] // 2 + return_value
tmpsigma = min(
(img_1.shape[1] - cor_in_img) / 4.0,
(cor_in_img) / 4.0,
)
M1 = int(round(return_value + img_1.shape[1] // 2)) - int(round(tmpsigma))
M2 = int(round(return_value + img_1.shape[1] // 2)) + int(round(tmpsigma))
piece1 = img_1_orig[:, M1:M2]
piece2 = img_2[:, img_1.shape[1] - M2 : img_1.shape[1] - M1]
energy = np.array(piece1 * piece1 + piece2 * piece2, "d").sum()
diff_energy = np.array((piece1 - piece2) * (piece1 - piece2), "d").sum()
return return_value, diff_energy / energy, energy
return return_value
return fitted_shifts_vh[-1] / 2.0
class CenterOfRotationAdaptiveSearch(CenterOfRotation):
""" This adaptive method works by applying a gaussian which highlights, by apodisation, a region
which can possibly contain the good center of rotation.
The whole image is spanned during several applications of the apodisation, at each application
the apodisation function, which is a gaussian, is moved to a new guess position.
The length of the step by which the gaussian is moved, and its sigma, are
obtained by multiplying the shortest distance to the left or right border with
step_fraction and sigma_fraction factors
"""
sigma_fraction = 1.0/4.0
step_fraction = 1.0/6.0
def find_shift(
    self,
    img_1: np.ndarray,
    img_2: np.ndarray,
    roi_yxhw=None,
    median_filt_shape=None,
    padding_mode=None,
    peak_fit_radius=1,
    high_pass=None,
    low_pass=None,
):
    """Find the Center of Rotation (CoR), given two images.

    The half-shift between two opposite images is estimated via a
    correlation computed in Fourier space, refined to sub-pixel accuracy
    around the correlation peak.

    The output of this function allows to compute motor movements for
    aligning the sample rotation axis. Given the following values:

    - L1: distance from source to motor
    - L2: distance from source to detector
    - ps: physical pixel size
    - v: output of this function

    displacement of motor = (L1 / L2 * ps) * v

    Parameters
    ----------
    img_1: numpy.ndarray
        First image
    img_2: numpy.ndarray
        Second image, it needs to have been flipped already (e.g. using numpy.fliplr).
    roi_yxhw: (2, ) or (4, ) numpy.ndarray, tuple, or array, optional
        4 elements vector containing: vertical and horizontal coordinates
        of first pixel, plus height and width of the Region of Interest (RoI).
        Or a 2 elements vector containing: height and width of the
        centered Region of Interest (RoI).
        Default is None -> deactivated.
    median_filt_shape: (2, ) numpy.ndarray, tuple, or array, optional
        Shape of the median filter window. Default is None -> deactivated.
    padding_mode: str in numpy.pad's mode list, optional
        Padding mode, which determines the type of convolution. If None or
        'wrap' are passed, this resorts to the traditional circular convolution.
        If 'edge' or 'constant' are passed, it results in a linear convolution.
        Default is the circular convolution.
        All options are:
            None | 'constant' | 'edge' | 'linear_ramp' | 'maximum' | 'mean'
            | 'median' | 'minimum' | 'reflect' | 'symmetric' |'wrap'
    peak_fit_radius: int, optional
        Radius size around the max correlation pixel, for sub-pixel fitting.
        Minimum and default value is 1.
    low_pass: float or sequence of two floats
        Low-pass filter properties, as described in `nabu.misc.fourier_filters`
    high_pass: float or sequence of two floats
        High-pass filter properties, as described in `nabu.misc.fourier_filters`

    Raises
    ------
    ValueError
        In case images are not 2-dimensional or have different sizes.

    Returns
    -------
    float
        Estimated center of rotation position from the center of the RoI in pixels.

    Examples
    --------
    The following code computes the center of rotation position for two
    given images in a tomography scan, where the second image is taken at
    180 degrees from the first.

    >>> radio1 = data[0, :, :]
    ... radio2 = np.fliplr(data[1, :, :])
    ... CoR_calc = CenterOfRotation()
    ... cor_position = CoR_calc.find_shift(radio1, radio2)

    Or for noisy images:

    >>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3))
    """
    self._check_img_pair_sizes(img_1, img_2)

    # Sub-pixel fitting needs at least one pixel of context around the peak.
    if peak_fit_radius < 1:
        logging.getLogger(__name__).warning(
            "Parameter peak_fit_radius should be at least 1, given: %d instead.", peak_fit_radius
        )
        peak_fit_radius = 1

    # Crop to the RoI and optionally median-filter both projections.
    roi_yxhw = self._determine_roi(img_2.shape, roi_yxhw)
    prepared_1 = self._prepare_image(img_1, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
    prepared_2 = self._prepare_image(img_2, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)

    # Cross-correlate in Fourier space, with optional band-pass filtering.
    correlation = self._compute_correlation_fft(
        prepared_1, prepared_2, padding_mode, high_pass=high_pass, low_pass=low_pass
    )

    # Axes expressed as signed pixel offsets (fft frequency ordering).
    n_v, n_h = prepared_2.shape[-2], prepared_2.shape[-1]
    corr_vs = np.fft.fftfreq(n_v, 1 / n_v)
    corr_hs = np.fft.fftfreq(n_h, 1 / n_h)

    # Refine the correlation maximum to sub-pixel precision.
    peak_vals, peak_v, peak_h = self.extract_peak_region_2d(
        correlation, peak_radius=peak_fit_radius, cc_vs=corr_vs, cc_hs=corr_hs
    )
    shifts_vh = self.refine_max_position_2d(peak_vals, peak_v, peak_h)

    # The CoR offset is half the horizontal shift between opposite projections.
    return shifts_vh[-1] / 2.0
def global_adaptive_find_shift(
self,
img_1: np.ndarray,
img_2: np.ndarray,
......@@ -826,11 +669,8 @@ class CenterOfRotation(AlignmentBase):
Low-pass filter properties, as described in `nabu.misc.fourier_filters`
high_pass: float or sequence of two floats
High-pass filter properties, as described in `nabu.misc.fourier_filters`
half_tomo_cor_guess: float or None
The approximate position of the rotation axis from the image center. Optional.
When given a special algorithm is used which can work also in half-tomo conditions
margins: None or a couple of floats or ints
margins is None or in the form of (margin1,margin2) the search is done between margin1 and dim_x-1-margin2.
if margins is None or in the form of (margin1,margin2) the search is done between margin1 and dim_x-1-margin2.
If left to None then by default (margin1,margin2) = ( 10, 10 )
......@@ -852,12 +692,12 @@ class CenterOfRotation(AlignmentBase):
>>> radio1 = data[0, :, :]
... radio2 = np.fliplr(data[1, :, :])
... CoR_calc = CenterOfRotation()
... CoR_calc = CenterOfRotationAdaptiveSearch()
... cor_position = CoR_calc.find_shift(radio1, radio2)
Or for noisy images:
>>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3))
>>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3), high_pass=20, low_pass=1 )
"""
if peak_fit_radius < 1:
......@@ -870,58 +710,10 @@ class CenterOfRotation(AlignmentBase):
used_type = img_1.dtype
img_1_original = img_1
img_2_original = img_2
if low_pass is not None or hig_hpass is not None:
pad_size = np.ceil(np.array(img_2.shape) / 2).astype(np.int)
pad_array = [(0,)] * len(img_2.shape)
for a in range(2):
pad_array[a] = (pad_size[a],)
print( " PADA ", pad_array)
print(" IMAS ", img_1.shape)
tmp_img_1 = np.pad(img_1, pad_array, mode="edge")
print(" tmp_img_1 S ", tmp_img_1.shape)
tmp_img_2 = np.pad(img_2, pad_array, mode="edge")
tmp_img_fft_1 = local_fftn(tmp_img_1)
tmp_img_fft_2 = local_fftn(tmp_img_2)
print(" SHAPES ", tmp_img_1.shape )
print(" SHAPES ", tmp_img_2.shape )
print(" LOCAL ", local_fftn)
print(" LOCAL use", __have_scipy__)
filt = fourier_filters.get_bandpass_filter(
tmp_img_1.shape,
cutoff_lowpass=low_pass,
cutoff_highpass=high_pass,
use_rfft=__have_scipy__,
data_type=self.data_type,
)
print(" SHAPES F", filt.shape )
print(" SHAPES F", tmp_img_fft_1.shape )
print(" SHAPES F", tmp_img_1.shape )
tmp_img_1 = np.real(local_ifftn( filt*tmp_img_fft_1 ))
tmp_img_2 = np.real(local_ifftn( filt*tmp_img_fft_2 ))
img_1 = tmp_img_1[pad_array[0][0]:-pad_array[0][0] , pad_array[1][0]:-pad_array[1][0] ]
img_2 = tmp_img_2[pad_array[0][0]:-pad_array[0][0] , pad_array[1][0]:-pad_array[1][0] ]
roi_yxhw = self._determine_roi(img_1.shape, roi_yxhw)
img_1 = self._prepare_image(img_1, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape).astype(used_type)
img_2 = self._prepare_image(img_2, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape).astype(used_type)
img_1 = self._prepare_image(img_1, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
img_2 = self._prepare_image(img_2, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
dim_radio = img_1.shape[1]
......@@ -947,9 +739,9 @@ class CenterOfRotation(AlignmentBase):
while Xcor < lim2:
tmpsigma = min(
(img_1.shape[1] - Xcor) / 4.0,
(Xcor) / 4.0,
)
(img_1.shape[1] - Xcor) ,
(Xcor) ,
)* self.sigma_fraction
tmpx = (np.arange(img_1.shape[1]) - Xcor) / tmpsigma
apodis = np.exp(-tmpx * tmpx / 2.0)
......@@ -958,27 +750,37 @@ class CenterOfRotation(AlignmentBase):
img_1_apodised = img_1 * apodis
cor_position = self.find_shift_original(
img_1_original,
img_2_original,
cor_position = super( self.__class__, self).find_shift(
img_1_apodised.astype(used_type),
img_2 .astype(used_type),
low_pass=low_pass,
high_pass=high_pass,
half_tomo_cor_guess = Xcor_rel,
roi_yxhw= roi_yxhw,
)
p1 = cor_position*2
if cor_position < 0:
p2 = img_2.shape[1] + cor_position*2
else:
p2 = -img_2.shape[1] + cor_position*2
if abs(Xcor_rel - p1 / 2) < abs(Xcor_rel - p2 / 2):
cor_position = p1 / 2
else:
cor_position = p2 / 2
cor_in_img = img_1.shape[1] // 2 + cor_position
tmpsigma = min(
(img_1.shape[1] - cor_in_img) / 4.0,
(cor_in_img) / 4.0,
)
(img_1.shape[1] - cor_in_img) ,
(cor_in_img) ,
)* self.sigma_fraction
M1 = int(round(cor_position + img_1.shape[1] // 2)) - int(round(tmpsigma))
M2 = int(round(cor_position + img_1.shape[1] // 2)) + int(round(tmpsigma))
piece1 = img_1_original[:, M1:M2]
piece2 = img_2_original[:, img_1.shape[1] - M2 : img_1.shape[1] - M1]
piece1 = img_1[:, M1:M2]
piece2 = img_2[:, img_1.shape[1] - M2 : img_1.shape[1] - M1]
energy = np.array(piece1 * piece1 + piece2 * piece2, "d").sum()
diff_energy = np.array((piece1 - piece2) * (piece1 - piece2), "d").sum()
cost = diff_energy / energy
......@@ -987,13 +789,12 @@ class CenterOfRotation(AlignmentBase):
if not np.isnan(cost):
found_centers.append([cost, abs(Xcor_rel - cor_position), cor_position, energy])
Xcor = min(Xcor + Xcor / 6.0, Xcor + (dim_radio - Xcor) / 6.0)
Xcor = min(Xcor + Xcor , Xcor + (dim_radio - Xcor) )* self.step_fraction
found_centers.sort()
cor_position = found_centers[0][2]
return cor_position
__call__ = find_shift
......
......@@ -350,8 +350,8 @@ class TestCor(object):
radio2 = np.fliplr(self.ht_im2)
cor_pos = self.ht_cor
CoR_calc = alignment.CenterOfRotation()
cor_position = CoR_calc.global_adaptive_find_shift(radio1, radio2, low_pass=1, high_pass=20)
CoR_calc = alignment.CenterOfRotationAdaptiveSearch()
cor_position = CoR_calc.find_shift(radio1, radio2, low_pass=1, high_pass=20)
message = (
"Computed CoR %f " % cor_position
......@@ -370,9 +370,9 @@ class TestCor(object):
cor_pos = 983.107
CoR_calc = alignment.CenterOfRotation()
CoR_calc = alignment.CenterOfRotationAdaptiveSearch()
cor_position = CoR_calc.global_adaptive_find_shift(radio1, radio2, low_pass=1, high_pass=20)
cor_position = CoR_calc.find_shift(radio1, radio2, low_pass=1, high_pass=20)
print("Found cor_position", cor_position)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment