import numpy as np
import math
from numpy.polynomial.polynomial import Polynomial

from nabu.utils import previouspow2


try:
    from scipy.ndimage import median_filter
    import scipy.special as spspe

    __have_scipy__ = True
except ImportError:
    from silx.math.medianfilter import medfilt2d as median_filter

    __have_scipy__ = False


def _get_lowpass_filter(img_shape, cutoff_pix, cutoff_trans_pix):
    """Computes a low-pass filter based on the complementary error function (erfc).

    :param img_shape: Shape of the image
    :type img_shape: tuple
    :param cutoff_pix: Position of the cut-off in pixels
    :type cutoff_pix: float
    :param cutoff_trans_pix: Size of the cut-off transition in pixels
    :type cutoff_trans_pix: float

    :return: The computed filter
    :rtype: `numpy.array_like`
    """
    coords = [np.fft.fftfreq(s, 1) for s in img_shape]
    coords = np.meshgrid(*coords, indexing="ij")

    k_cut = 0.5 / cutoff_pix
    k_cut_width = 0.5 / cutoff_trans_pix

    r = np.sqrt(np.sum(np.array(coords) ** 2, axis=0))
    k_pos_rescaled = (r - k_cut) / k_cut_width
    if __have_scipy__:
        res = spspe.erfc(k_pos_rescaled) / 2
    else:
        # math.erfc only handles scalars, so apply it element-wise
        res = np.vectorize(math.erfc)(k_pos_rescaled) / 2

    return res
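
# Illustrative usage of _get_lowpass_filter (a sketch; `img` is a hypothetical
# 2D numpy array and the cut-off values are arbitrary). This mirrors how
# _prepare_image applies the filter further below:
#
#   lowpass = _get_lowpass_filter(img.shape, cutoff_pix=10, cutoff_trans_pix=5)
#   img_filtered = np.fft.ifft2(np.fft.fft2(img) * lowpass).real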


class AlignmentBase(object):
    @staticmethod
    def refine_max_position_2d(f_vals: np.ndarray, fy=None, fx=None):
        """Computes the sub-pixel max position of the given function sampling.

        Parameters
        ----------
        f_vals: numpy.ndarray
            Function values of the sampled points
        fy: numpy.ndarray, optional
            Vertical coordinates of the sampled points
        fx: numpy.ndarray, optional
            Horizontal coordinates of the sampled points

        Raises
        ------
        ValueError
            In case position and values do not have the same size, or in case
            the fitted maximum is outside the fitting region.

        Returns
        -------
        tuple(float, float)
            Estimated (vertical, horizontal) function max, according to the
            coordinates in fy and fx.
        """
        if len(f_vals.shape) > 2:
            raise ValueError(
                "The fitted values should form a 2-dimensional array. Array of shape: [%s] was given."
                % (" ".join(("%d" % s for s in f_vals.shape)))
            )
        if fy is None:
            fy_half_size = (f_vals.shape[0] - 1) / 2
            fy = np.linspace(-fy_half_size, fy_half_size, f_vals.shape[0])
        elif not (len(fy.shape) == 1 and np.all(fy.size == f_vals.shape[0])):
            raise ValueError(
                "Vertical coordinates should have the same length as values matrix. Sizes of fy: %d, f_vals: [%s]"
                % (fy.size, " ".join(("%d" % s for s in f_vals.shape)))
            )
        if fx is None:
            fx_half_size = (f_vals.shape[1] - 1) / 2
            fx = np.linspace(-fx_half_size, fx_half_size, f_vals.shape[1])
        elif not (len(fx.shape) == 1 and np.all(fx.size == f_vals.shape[1])):
            raise ValueError(
                "Horizontal coordinates should have the same length as values matrix. Sizes of fx: %d, f_vals: [%s]"
                % (fx.size, " ".join(("%d" % s for s in f_vals.shape)))
            )

        fy, fx = np.meshgrid(fy, fx, indexing="ij")
        fy = fy.flatten()
        fx = fx.flatten()
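        # least-squares fit of a 2D quadric:
        # f(y, x) = c0 + c1*y + c2*x + c3*y*x + c4*y^2 + c5*x^2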
        coords = np.array([np.ones(f_vals.size), fy, fx, fy * fx, fy ** 2, fx ** 2])

        coeffs = np.linalg.lstsq(coords.T, f_vals.flatten(), rcond=None)[0]

        # For a 1D parabola `f(x) = ax^2 + bx + c`, the vertex position is:
        # x_v = -b / 2a. For the 2D paraboloid fitted above, the vertex is the
        # solution of the linear system A (y, x)_v = -b, where:
        A = [[2 * coeffs[4], coeffs[3]], [coeffs[3], 2 * coeffs[5]]]
        b = coeffs[1:3]
        vertex_yx = np.linalg.lstsq(A, -b, rcond=None)[0]

        vertex_min_yx = [np.min(fy), np.min(fx)]
        vertex_max_yx = [np.max(fy), np.max(fx)]
        if np.any(vertex_yx < vertex_min_yx) or np.any(vertex_yx > vertex_max_yx):
            raise ValueError(
                "Fitted (y: {}, x: {}) positions are outside the margins of input: y: [{}, {}], x: [{}, {}]".format(
                    vertex_yx[0], vertex_yx[1], vertex_min_yx[0], vertex_max_yx[0], vertex_min_yx[1], vertex_max_yx[1]
                )
            )
        return vertex_yx
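
    # Illustrative sketch of refine_max_position_2d on a synthetic 3x3 sampling
    # (hypothetical values):
    #
    #   yy, xx = np.meshgrid(np.arange(-1, 2), np.arange(-1, 2), indexing="ij")
    #   f_vals = 10.0 - (yy - 0.3) ** 2 - (xx + 0.2) ** 2
    #   y_max, x_max = AlignmentBase.refine_max_position_2d(f_vals)
    #   # y_max ~= 0.3, x_max ~= -0.2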

    @staticmethod
    def refine_max_position_1d(f_vals, fx=None):
        """Computes the sub-pixel max position of the given function sampling.

        Parameters
        ----------
        f_vals: numpy.ndarray
            Function values of the sampled points
        fx: numpy.ndarray, optional
            Coordinates of the sampled points

        Raises
        ------
        ValueError
            In case position and values do not have the same size, or in case
            the fitted maximum is outside the fitting region.

        Returns
        -------
        float
            Estimated function max, according to the coordinates in fx.
        """
        if not len(f_vals.shape) == 1:
            raise ValueError(
                "The fitted values should form a 1-dimensional array. Array of shape: [%s] was given."
                % (" ".join(("%d" % s for s in f_vals.shape)))
            )
        if fx is None:
            fx_half_size = (f_vals.size - 1) / 2
            fx = np.linspace(-fx_half_size, fx_half_size, f_vals.size)
        elif not (len(fx.shape) == 1 and np.all(fx.size == f_vals.size)):
            raise ValueError(
                "Base coordinates should have the same length as values array. Sizes of fx: %d, f_vals: %d"
                % (fx.size, f_vals.size)
            )

        # Using Polynomial.fit, because it is supposed to be more numerically
        # stable than the older polyfit-based solutions (according to numpy).
        poly = Polynomial.fit(fx, f_vals, deg=2)
        coeffs = poly.convert().coef

        # For a 1D parabola `f(x) = c + bx + ax^2`, the vertex position is:
        # x_v = -b / 2a.
        vertex_x = - coeffs[1] / (2 * coeffs[2])

        vertex_min_x = np.min(fx)
        vertex_max_x = np.max(fx)
        if not (vertex_min_x < vertex_x < vertex_max_x):
            raise ValueError(
                "Fitted x: {} position is outside the margins of input: x: [{}, {}]".format(
                    vertex_x, vertex_min_x, vertex_max_x
                )
            )
        return vertex_x
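
    # Illustrative sketch of refine_max_position_1d on a synthetic 1D sampling
    # (hypothetical values):
    #
    #   f_vals = 5.0 - (np.linspace(-2, 2, 5) - 0.4) ** 2
    #   x_max = AlignmentBase.refine_max_position_1d(f_vals)
    #   # x_max ~= 0.4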

    @staticmethod
    def _determine_roi(img_shape, roi_yxhw, do_truncate_horz_pow2):
        if roi_yxhw is None:
            # The vertical window size is reduced to a power of 2 to accelerate the FFT.
            # The same is done for the horizontal window, if requested (off by default).
            roi_yxhw = previouspow2(img_shape)
            if not do_truncate_horz_pow2:
                roi_yxhw[1] = img_shape[1]

        if len(roi_yxhw) == 2:  # Convert centered 2-element roi into 4-element
            roi_yxhw = np.array(roi_yxhw, dtype=int)
            roi_yxhw = np.concatenate(((img_shape - roi_yxhw) // 2, roi_yxhw))
        return roi_yxhw

    @staticmethod
    def _prepare_image(
        img, invalid_val=1e-5, roi_yxhw=None, median_filt_shape=None, cutoff_pix=None, cutoff_trans_pix=None,
    ):
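        """Prepare an image (or a stack of images) for alignment: squeeze
        singleton dimensions, crop to the requested RoI, replace NaN/inf
        values with `invalid_val`, and optionally apply a low-pass filter
        (`cutoff_pix`, `cutoff_trans_pix`) and/or a median filter
        (`median_filt_shape`).
        """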
        img = np.squeeze(img)  # Removes singleton dimensions, but does a shallow copy
        img = np.ascontiguousarray(img)

        if roi_yxhw is not None:
            img = img[
                ..., roi_yxhw[0] : roi_yxhw[0] + roi_yxhw[2], roi_yxhw[1] : roi_yxhw[1] + roi_yxhw[3],
            ]

        img = img.copy()

        img[np.isnan(img)] = invalid_val
        img[np.isinf(img)] = invalid_val

        if cutoff_pix is not None and cutoff_trans_pix is not None:
            myfilter = _get_lowpass_filter(img.shape[-2:], cutoff_pix, cutoff_trans_pix)
            img_shape = img.shape
            if len(img_shape) == 2:
                img = np.fft.ifft2(np.fft.fft2(img) * myfilter).real
            elif len(img_shape) > 2:
                # if dealing with a stack of images, we have to do them one by one
                img = np.reshape(img, (-1,) + tuple(img_shape[-2:]))
                for ii in range(img.shape[0]):
                    img[ii, ...] = np.fft.ifft2(np.fft.fft2(img[ii, ...]) * myfilter).real
                img = np.reshape(img, img_shape)

        if median_filt_shape is not None:
            img_shape = img.shape
            if __have_scipy__:
                # expanding filter shape with ones, to cover the stack of images
                # but disabling inter-image filtering
                median_filt_shape = np.concatenate(
                    (np.ones((len(img_shape) - len(median_filt_shape),), dtype=int), median_filt_shape,)
                )
                img = median_filter(img, size=median_filt_shape)
            else:
                if len(img_shape) == 2:
                    img = median_filter(img, kernel_size=median_filt_shape)
                elif len(img_shape) > 2:
                    # if dealing with a stack of images, we have to do them one by one
                    img = np.reshape(img, (-1,) + tuple(img_shape[-2:]))
                    for ii in range(img.shape[0]):
                        img[ii, ...] = median_filter(img[ii, ...], kernel_size=median_filt_shape)
                    img = np.reshape(img, img_shape)

        return img

    @staticmethod
    def _compute_correlation_fft(img_1, img_2, padding_mode):
        do_circular_conv = padding_mode is None or padding_mode == "wrap"
        if not do_circular_conv:
            padding = np.ceil(np.array(img_2.shape) / 2).astype(int)
            img_1 = np.pad(img_1, ((padding[0],), (padding[1],)), mode=padding_mode)
            img_2 = np.pad(img_2, ((padding[0],), (padding[1],)), mode=padding_mode)

        # compute fft's of the 2 images
        img_fft_1 = np.fft.fft2(img_1)
        img_fft_2 = np.conjugate(np.fft.fft2(img_2))
        # inverse fft of the product to get cross_correlation of the 2 images
        cc = np.real(np.fft.ifft2(img_fft_1 * img_fft_2))

        if not do_circular_conv:
            cc = cc[padding[0] : -padding[0], padding[1] : -padding[1]]

        return cc
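
    # Illustrative sketch of how the correlation map is used by the subclasses
    # below (hypothetical arrays `img_a` and `img_b` of identical 2D shape):
    #
    #   cc = AlignmentBase._compute_correlation_fft(img_a, img_b, None)
    #   pv, ph = np.unravel_index(np.argmax(cc), cc.shape)
    #   # converting the peak index into a signed shift, as done in find_shift:
    #   shift_v = np.fft.fftfreq(cc.shape[-2], 1 / cc.shape[-2])[pv]
    #   shift_h = np.fft.fftfreq(cc.shape[-1], 1 / cc.shape[-1])[ph]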


class CenterOfRotation(AlignmentBase):
    def __init__(self, horz_fft_width=False):
        """
        Center of Rotation (CoR) computation object.
        This class is used on radios.

        Parameters
        ----------
        horz_fft_width: boolean, optional
            If True, restrict the horizontal size to a power of 2:

            >>> new_x_dim = 2 ** math.floor(math.log2(x_dim))
        """
        self._init_parameters(horz_fft_width)

    def _init_parameters(self, horz_fft_width):
        self.truncate_horz_pow2 = horz_fft_width

    @staticmethod
    def _check_img_sizes(img_1: np.ndarray, img_2: np.ndarray):
        shape_1 = np.squeeze(img_1).shape
        shape_2 = np.squeeze(img_2).shape
        if not len(shape_1) == 2:
            raise ValueError(
                "Images need to be 2-dimensional. Shape of image #1: %s" % (" ".join(("%d" % x for x in shape_1)))
            )
        if not len(shape_2) == 2:
            raise ValueError(
                "Images need to be 2-dimensional. Shape of image #2: %s" % (" ".join(("%d" % x for x in shape_2)))
            )
        if not np.all(shape_1 == shape_2):
            raise ValueError(
                "Images need to be of the same shape. Shape of image #1: %s, image #2: %s"
                % (" ".join(("%d" % x for x in shape_1)), " ".join(("%d" % x for x in shape_2)),)
            )

    def find_shift(
        self,
        img_1: np.ndarray,
        img_2: np.ndarray,
        roi_yxhw=None,
        median_filt_shape=None,
        padding_mode=None,
        peak_fit_radius=1,
        cutoff_pix=None,
        cutoff_trans_pix=None,
    ):
        """Find the Center of Rotation (CoR), given two images.

        This method finds the half-shift between two opposite images, by
        means of correlation computed in Fourier space.

        The output of this function allows one to compute the motor movement
        required to align the sample rotation axis. Given the following values:

        - L1: distance from source to motor
        - L2: distance from source to detector
        - ps: physical pixel size
        - v: output of this function

        displacement of motor = (L1 / L2 * ps) * v
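
        For instance (illustrative numbers, not from a real setup): with
        L1 = 1 m, L2 = 10 m, ps = 1e-6 m and v = 0.5 pixels, the motor
        displacement would be (1 / 10 * 1e-6) * 0.5 = 5e-8 m.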

        Parameters
        ----------
        img_1: numpy.ndarray
            First image
        img_2: numpy.ndarray
            Second image; it needs to have been flipped already (e.g. using numpy.fliplr).
        roi_yxhw: (2, ) or (4, ) numpy.ndarray, tuple, or array, optional
            4 elements vector containing: vertical and horizontal coordinates
            of first pixel, plus height and width of the Region of Interest (RoI).
            Or a 2 elements vector containing the height and width of the
            centered Region of Interest (RoI).
            Default is None -> deactivated.
        median_filt_shape: (2, ) numpy.ndarray, tuple, or array, optional
            Shape of the median filter window. Default is None -> deactivated.
        padding_mode: str in numpy.pad's mode list, optional
            Padding mode, which determines the type of convolution. If None or
            'wrap' are passed, this resorts to the traditional circular convolution.
            If 'edge' or 'constant' are passed, it results in a linear convolution.
            Default is the circular convolution.
            All options are:
                None | 'constant' | 'edge' | 'linear_ramp' | 'maximum' | 'mean'
                | 'median' | 'minimum' | 'reflect' | 'symmetric' | 'wrap'
        peak_fit_radius: int, optional
            Radius size around the max correlation pixel, for sub-pixel fitting.
            Minimum and default value is 1.

        Raises
        ------
        ValueError
            In case images are not 2-dimensional or have different sizes.

        Returns
        -------
        float
            Estimated center of rotation position from the center of the RoI in pixels.

        Examples
        --------
        The following code computes the center of rotation position for two
        given images in a tomography scan, where the second image is taken at
        180 degrees from the first.

        >>> radio1 = data[0, :, :]
        ... radio2 = np.fliplr(data[1, :, :])
        ... CoR_calc = CenterOfRotation()
        ... cor_position = CoR_calc.find_shift(radio1, radio2)

        Or for noisy images:

        >>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3))
        """
        self._check_img_sizes(img_1, img_2)

        if peak_fit_radius < 1:
            print("WARNING: peak_fit_radius should be at least 1, given: %d instead." % peak_fit_radius)
            peak_fit_radius = 1

        img_shape = img_2.shape
        roi_yxhw = self._determine_roi(img_shape, roi_yxhw, self.truncate_horz_pow2)

        img_1 = self._prepare_image(
            img_1,
            roi_yxhw=roi_yxhw,
            median_filt_shape=median_filt_shape,
            cutoff_pix=cutoff_pix,
            cutoff_trans_pix=cutoff_trans_pix,
        )
        img_2 = self._prepare_image(
            img_2,
            roi_yxhw=roi_yxhw,
            median_filt_shape=median_filt_shape,
            cutoff_pix=cutoff_pix,
            cutoff_trans_pix=cutoff_trans_pix,
        )

        cc = self._compute_correlation_fft(img_1, img_2, padding_mode)
        img_shape = img_2.shape
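        # fftfreq(n, 1 / n) yields signed pixel coordinates ([0, 1, ..., -2, -1])
        # for the axes of the (circular) correlation array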
        cc_vs = np.fft.fftfreq(img_shape[-2], 1 / img_shape[-2])
        cc_hs = np.fft.fftfreq(img_shape[-1], 1 / img_shape[-1])

        # get pixel having the maximum value of the correlation array
        pix_max_corr = np.argmax(cc)
        pv, ph = np.unravel_index(pix_max_corr, img_shape)

        # select a n x n neighborhood for the sub-pixel fitting (with wrapping)
        pv = np.arange(pv - peak_fit_radius, pv + peak_fit_radius + 1) % img_shape[-2]
        ph = np.arange(ph - peak_fit_radius, ph + peak_fit_radius + 1) % img_shape[-1]

        # extract the (v, h) pixel coordinates
        fv = cc_vs[pv]
        fh = cc_hs[ph]

        # extract the correlation values
        pv, ph = np.meshgrid(pv, ph, indexing="ij")
        f_vals = cc[pv, ph]

        fitted_shifts_vh = self.refine_max_position_2d(f_vals, fv, fh)

        return fitted_shifts_vh[-1] / 2.0

    __call__ = find_shift


class DetectorTranslationAlongBeam(AlignmentBase):
    @staticmethod
    def _check_img_sizes(img_stack: np.ndarray, img_pos: np.ndarray):
        shape_stack = np.squeeze(img_stack).shape
        shape_pos = np.squeeze(img_pos).shape
        if not len(shape_stack) == 3:
            raise ValueError(
                "A stack of 2-dimensional images is required. Shape of stack: %s" % (" ".join(("%d" % x for x in shape_stack)))
            )
        if not len(shape_pos) == 1:
            raise ValueError(
                "Positions need to be a 1-dimensional array. Shape of the positions variable: %s"
                % (" ".join(("%d" % x for x in shape_pos)))
            )
        if not shape_stack[0] == shape_pos[0]:
            raise ValueError(
                "The same number of images and positions is required."
                + " Shape of stack: %s, shape of positions variable: %s"
                % (" ".join(("%d" % x for x in shape_stack)), " ".join(("%d" % x for x in shape_pos)),)
            )

    def find_shift(
        self,
        img_stack: np.ndarray,
        img_pos: np.array,
        roi_yxhw=None,
        median_filt_shape=None,
        padding_mode=None,
        peak_fit_radius=2,
        equispaced_increments=False
    ):
        """Find the deviation of the translation axis of the area detector
        along the beam propagation direction.

        TODO: Add more information here, including the interpretation of the
        result. This means also giving an example of how to convert the
        returned values into meaningful quantities. See "Returns" for more
        details.

        Parameters
        ----------
        img_stack: numpy.ndarray
            A stack of images (usually 4) at different distances
        img_pos: numpy.ndarray
            Position of the images along the translation axis
        roi_yxhw: (2, ) or (4, ) numpy.ndarray, tuple, or array, optional
            4 elements vector containing: vertical and horizontal coordinates
            of first pixel, plus height and width of the Region of Interest (RoI).
            Or a 2 elements vector containing the height and width of the
            centered Region of Interest (RoI).
            Default is None -> deactivated.
        median_filt_shape: (2, ) numpy.ndarray, tuple, or array, optional
            Shape of the median filter window. Default is None -> deactivated.
        padding_mode: str in numpy.pad's mode list, optional
            Padding mode, which determines the type of convolution. If None or
            'wrap' are passed, this resorts to the traditional circular convolution.
            If 'edge' or 'constant' are passed, it results in a linear convolution.
            Default is the circular convolution.
            All options are:
                None | 'constant' | 'edge' | 'linear_ramp' | 'maximum' | 'mean'
                | 'median' | 'minimum' | 'reflect' | 'symmetric' | 'wrap'
        peak_fit_radius: int, optional
            Radius size around the max correlation pixel, for sub-pixel fitting.
            Minimum value is 1; default is 2.
        equispaced_increments: boolean, optional
            Tells whether the position increments are equispaced or not. If
            equispaced increments are used, we have to compute the correlation
            images with respect to the first image, otherwise we can do it
            against adjacent images.
            The advantage of doing it between adjacent images is that we do not
            build up large displacements in the correlation.
            However, if this is done for equispaced images, the linear fitting
            becomes unstable.

        Returns
        -------
        tuple(float, float)
            Estimated (vertical, horizontal) shift per unit of detector
            translation, in pixels (the slope of the correlation shifts with
            respect to the positions in `img_pos`).

        Examples
        --------
        TODO: Add examples here!
        """
        self._check_img_sizes(img_stack, img_pos)

        if peak_fit_radius < 1:
            print("WARNING: peak_fit_radius should be at least 1, given: %d instead." % peak_fit_radius)
            peak_fit_radius = 1

        num_imgs = img_stack.shape[0]
        img_shape = img_stack.shape[-2:]
        roi_yxhw = self._determine_roi(img_shape, roi_yxhw, False)

        img_stack = self._prepare_image(img_stack, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
        # the RoI cropping done in _prepare_image can change the image size
        img_shape = img_stack.shape[-2:]

        cc_vs = np.fft.fftfreq(img_shape[-2], 1 / img_shape[-2])
        cc_hs = np.fft.fftfreq(img_shape[-1], 1 / img_shape[-1])

        # do correlations
        ccs = [
            self._compute_correlation_fft(
                img_stack[0 if equispaced_increments else ii - 1, ...], img_stack[ii, ...], padding_mode
            )
            for ii in range(1, num_imgs)
        ]

        shifts_vh = np.empty((num_imgs - 1, 2))
        for ii, cc in enumerate(ccs):
            # get pixel having the maximum value of the correlation array
            pix_max_corr = np.argmax(cc)
            pv, ph = np.unravel_index(pix_max_corr, img_shape)

            # select a n x n neighborhood for the sub-pixel fitting (with wrapping)
            pv = np.arange(pv - peak_fit_radius, pv + peak_fit_radius + 1) % img_shape[-2]
            ph = np.arange(ph - peak_fit_radius, ph + peak_fit_radius + 1) % img_shape[-1]

            # extract the (v, h) pixel coordinates
            fv = cc_vs[pv]
            fh = cc_hs[ph]

            # extract the correlation values
            pv, ph = np.meshgrid(pv, ph, indexing="ij")
            f_vals = cc[pv, ph]

            shifts_vh[ii, :] = self.refine_max_position_2d(f_vals, fv, fh)

        if equispaced_increments:
            img_pos_increments = img_pos[1:] - img_pos[0]
        else:
            img_pos_increments = - np.diff(img_pos)

        # Polynomial.fit is supposed to be more numerically stable than polyfit
        # (according to numpy)
        coeffs_v = Polynomial.fit(img_pos_increments, shifts_vh[:, 0], deg=1).convert().coef
        coeffs_h = Polynomial.fit(img_pos_increments, shifts_vh[:, 1], deg=1).convert().coef

        return coeffs_v[1], coeffs_h[1]
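
# Illustrative usage of DetectorTranslationAlongBeam (a sketch with hypothetical
# variable names; `img_stack` is a stack of radiographs and `img_pos` contains
# their translation positions, as described in find_shift's docstring):
#
#   tr_calc = DetectorTranslationAlongBeam()
#   shift_v_per_unit, shift_h_per_unit = tr_calc.find_shift(img_stack, img_pos)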