Commit a2f69fad authored by Thomas Vincent

Add normalization to qspace converter

parent c1f85119
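
At a high level, this change threads an optional normalizer (the name of a measurement-group dataset, e.g. a beam monitor) from the converter front-end down to the projection workers, which divide each raw detector frame by the matching per-frame value before binning and histogramming. A minimal sketch of the idea, with hypothetical array names standing in for the converter's actual data:

    import numpy as np

    # Hypothetical stand-ins: a stack of detector frames plus one
    # monitor reading per frame, as read from the measurement group.
    frames = np.random.poisson(100., size=(5, 16, 16)).astype(np.int32)
    monitor = np.array([0.9, 1.0, 1.1, 0.95, 1.05])

    # Integer frames must be promoted to float before true division;
    # the converter forces float64 for the same reason (see below).
    normalized = frames.astype(np.float64) / monitor[:, None, None]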
@@ -30,6 +30,7 @@ __license__ = "MIT"
__date__ = "01/03/2016"
import logging
import os
import time
import ctypes
@@ -47,6 +48,10 @@ from ...util.histogramnd_lut import histogramnd_get_lut, histogramnd_from_lut
# from silx.math import histogramnd
from ...io import XsocsH5, QSpaceH5, ShiftH5
logger = logging.getLogger(__name__)
disp_times = False
@@ -882,7 +887,8 @@ class QSpaceConverter(object):
shiftH5=shiftH5,
beam_energy=beam_energy,
direct_beam=center_chan,
channels_per_degree=chan_per_deg)
channels_per_degree=chan_per_deg,
normalizer=normalizer)
manager = mp.Manager()
self.__term_evt = term_evt = manager.Event()
@@ -967,6 +973,7 @@ class QSpaceConverter(object):
entries,
img_size,
output_f,
normalizer,
image_binning,
medfilt_dims,
img_dtype)
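
Because the worker receives its arguments positionally, the new normalizer slot has to sit at the same position in the dispatch arguments above and in the _to_qspace signature below. A pool-based sketch of that constraint (the worker and argument values here are hypothetical):

    from multiprocessing import Pool

    def worker(entries, img_size, output_f, normalizer, image_binning):
        # Inserting a parameter anywhere but the matching position
        # would silently shift every following argument.
        return output_f, normalizer

    if __name__ == '__main__':
        with Pool(1) as pool:
            args = (['entry_0'], (516, 516), 'out.h5', 'monitor', (1, 1))
            print(pool.apply(worker, args))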
@@ -1157,7 +1164,8 @@ def _create_result_file(h5_fn,
shiftH5=None,
beam_energy=None,
direct_beam=None,
channels_per_degree=None):
channels_per_degree=None,
normalizer=''):
"""
Initializes the output file.
:param h5_fn: name of the file to initialize
@@ -1181,6 +1189,8 @@
:param beam_energy: Beam energy in eV
:param direct_beam: Direct beam calibration position
:param channels_per_degree: Channels per degree calibration
:param str normalizer:
Name of measurement group dataset used for normalization
"""
if not overwrite:
@@ -1211,6 +1221,10 @@
qspace_h5.set_direct_beam(direct_beam)
qspace_h5.set_channels_per_degree(channels_per_degree)

if normalizer is None:
    normalizer = ''
qspace_h5.set_image_normalizer(normalizer)

if shiftH5:
    sample_shifts = []
    grid_shifts = []
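
Coercing None to an empty string before the write keeps the output file self-describing without storing a null object: HDF5 string datasets have no natural encoding for None, so "no normalizer" is recorded as ''. A short sketch of that convention, assuming an h5py-style dataset similar to what QSpaceH5 presumably wraps:

    import h5py

    # '' is the sentinel for "no normalization requested".
    normalizer = None
    with h5py.File('qspace_sketch.h5', 'w') as h5f:
        h5f['image_normalizer'] = normalizer if normalizer is not None else ''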
@@ -1240,6 +1254,7 @@ def _to_qspace(th_idx,
entries,
img_size,
output_fn,
normalizer,
image_binning,
medfilt_dims,
img_dtype):
@@ -1250,6 +1265,8 @@
:param entries:
:param img_size:
:param output_fn:
:param str normalizer:
Name of measurement group dataset to use for normalization
:param image_binning:
:param medfilt_dims:
:param img_dtype:
@@ -1292,6 +1309,11 @@
# h_lut.shape = (n_xy, -1)
h_lut = h_lut_shared
if normalizer and img_dtype.kind != 'f':
    # Force the type to float64
    logger.info('Using float64 to perform normalization')
    img_dtype = np.float64

img = np.ascontiguousarray(np.zeros(img_size), dtype=img_dtype)
# TODO : handle case when nav is not a multiple of img_size!!
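
The cast to float64 matters because NumPy refuses to true-divide an integer array in place, which is exactly what the normalization step below does with img. A quick sketch of the failure mode the promotion avoids:

    import numpy as np

    counts = np.arange(6, dtype=np.int32)
    try:
        counts /= 2.0  # float result cannot be cast back into int32
    except TypeError:
        # Hence img is allocated as float64 whenever a normalizer is set.
        counts = counts.astype(np.float64) / 2.0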
@@ -1333,6 +1355,7 @@
image_indices = None
for entry_idx, entry in enumerate(entries):
    xsocsH5 = XsocsH5.XsocsH5(entry_files[entry_idx], mode='r')
    t0 = time.time()
@@ -1345,9 +1368,7 @@
# slows down things a bit, not much tho)
# TODO : add a lock on the files if there is no SWMR
# test if it slows down things much
with XsocsH5.XsocsH5(entry_files[entry_idx],
                     mode='r').image_dset_ctx() \
        as img_data:  # noqa
with xsocsH5.image_dset_ctx() as img_data:
    t1 = time.time()
    img_data.read_direct(img,
                         source_sel=np.s_[image_idx],
@@ -1363,11 +1384,29 @@
t_read += time.time() - t0
t0 = time.time()

# Apply normalization
if normalizer:
    normalization = xsocsH5.measurement(entry, normalizer)
    # Make sure to use float to perform division
    assert img.dtype.kind == 'f'
    img /= normalization[image_idx]

# Perform binning
if image_binning[0] != 1 or image_binning[1] != 1:
    # Increase type size for (u)int8|16
    if img.dtype.kind == 'u' and img.dtype.itemsize < 4:
        binning_dtype = np.uint32
    elif img.dtype.kind == 'i' and img.dtype.itemsize < 4:
        binning_dtype = np.int32
    else:  # Keep same dtype for (u)int32|64 and float
        binning_dtype = img.dtype

    intensity = (img.reshape(img_shape_1).
                 sum(axis=sum_axis_1,
                     dtype=np.uint32).reshape(img_shape_2).
                 sum(axis=sum_axis_2, dtype=np.uint32) *
                     dtype=binning_dtype).reshape(img_shape_2).
                 sum(axis=sum_axis_2, dtype=binning_dtype) *
                 avg_weight)
    # intensity = xu.blockAverage2D(img, nav[0],
    #                               nav[1], roi=roi)
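
Widening the accumulator for 8- and 16-bit integer images guards against wrap-around when each binning block is summed; the previous hard-coded np.uint32 would also have truncated the float images produced by normalization. A small sketch of the overflow that binning_dtype prevents:

    import numpy as np

    block = np.full((2, 2), 200, dtype=np.uint8)

    # Without widening, the sum wraps modulo 256: 4 * 200 = 800 -> 32.
    assert block.sum(dtype=np.uint8) == 32

    # Widening to uint32, as done above for (u)int8|16, keeps the true sum.
    assert block.sum(dtype=np.uint32) == 800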