Commit 00a34bc6 authored by myron's avatar myron
Browse files

Using scipy.fft.rfft if available

parent 98536e60
Pipeline #28120 passed with stages
in 5 minutes
import numpy as np

try:
    # Prefer scipy's pocketfft: the real-input transforms (rfft*) roughly
    # halve the work and memory for real-valued images, which is what the
    # alignment code processes.
    import scipy.fft

    my_fftn = scipy.fft.rfftn
    my_ifftn = scipy.fft.irfftn
    my_fft2 = scipy.fft.rfft2
    my_ifft2 = scipy.fft.irfft2

    def my_fft_layout_adapt(x):
        """Crop an array laid out for the full complex FFT (e.g. a frequency
        filter built over the whole spectrum) to the half-spectrum layout
        produced by rfft2/rfftn.

        rfftn stores only the non-negative frequencies of the LAST transformed
        axis — ``n // 2 + 1`` samples — while every other axis keeps its full
        length.  (The previous version sliced every axis to ``(n + 1) // 2``,
        which does not match the rfft layout on any non-last axis and is off
        by one on the last axis for even sizes.)
        """
        return x[..., : x.shape[-1] // 2 + 1]

except ImportError:
    # Fall back to numpy's complex FFTs: full-spectrum layout, no cropping.
    my_fftn = np.fft.fftn
    my_ifftn = np.fft.ifftn
    my_fft2 = np.fft.fft2
    my_ifft2 = np.fft.ifft2

    def my_fft_layout_adapt(x):
        """Identity: numpy's full complex FFT uses the full-spectrum layout."""
        return x

import logging
from numpy.polynomial.polynomial import Polynomial, polyval
......@@ -337,7 +360,7 @@ class AlignmentBase(object):
img.shape[-2:], cutoff_lowpass=low_pass, cutoff_highpass=high_pass
)
# fft2 and iff2 use axes=(-2, -1) by default
img = np.fft.ifft2(np.fft.fft2(img) * img_filter).real
img = my_ifft2(my_fft2(img) * my_fft_layout_adapt(img_filter)).real
if median_filt_shape is not None:
img_shape = img.shape
......@@ -374,8 +397,8 @@ class AlignmentBase(object):
img_2 = np.pad(img_2, pad_array, mode=padding_mode)
# compute fft's of the 2 images
img_fft_1 = np.fft.fftn(img_1, axes=axes)
img_fft_2 = np.conjugate(np.fft.fftn(img_2, axes=axes))
img_fft_1 = my_fftn(img_1, axes=axes)
img_fft_2 = np.conjugate(my_fftn(img_2, axes=axes))
img_prod = img_fft_1 * img_fft_2
......@@ -384,7 +407,7 @@ class AlignmentBase(object):
img_prod *= filt
# inverse fft of the product to get cross_correlation of the 2 images
cc = np.real(np.fft.ifftn(img_prod, axes=axes))
cc = np.real(my_ifftn(img_prod, axes=axes))
if not do_circular_conv:
cc = np.fft.fftshift(cc, axes=axes)
......@@ -562,11 +585,11 @@ class DetectorTranslationAlongBeam(AlignmentBase):
return_shifts=False,
use_adjacent_imgs=False,
):
"""Find vertical and horizontal position increments per a unit-distance detector translation along the
"""Find vertical and horizontal position increments per a unit-distance detector translation along the
translation axis. The units are pixel_unit/input_unit where input_unit is the unit that the user has used
to pass the argument img_pos. The output expresses shifts of the detector so that if the image is moving
in the positive direction (expressed in pixels coordinates) the output will be negative because it means
that the detector as a whole is shifting in the opposite direction (taking the shaped beam as a reference)
that the detector as a whole is shifting in the opposite direction (taking the shaped beam as a reference)
Parameters
----------
......
......@@ -110,7 +110,8 @@ class TestAlignmentBase(object):
(found_peaks_val, found_peaks_pos) = AlignmentBase.extract_peak_regions_1d(img, axis=-1, cc_coords=cc_coords)
message = "The found peak positions do not correspond to the expected peak positions:\n Expected: %s\n Found: %s" % (
peaks_pos, found_peaks_pos[1, :]
peaks_pos,
found_peaks_pos[1, :],
)
assert np.all(peaks_val == found_peaks_val[1, :]), message
......@@ -146,7 +147,7 @@ class TestCor(object):
radio1 = self.data[0, :, :]
radio2 = np.fliplr(self.data[1, :, :])
noise_level = radio1.max() / 2.0
noise_level = radio1.max() / 8.0
noise_ima1 = np.random.normal(0.0, size=radio1.shape) * noise_level
noise_ima2 = np.random.normal(0.0, size=radio2.shape) * noise_level
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment