Commit c38ea44a authored by Pierre Paleo's avatar Pierre Paleo
Browse files

resources: add FlatFieldLoader class

parent 769bdc91
import os
import numpy as np
from silx.io.url import DataUrl
from tomoscan.io import HDF5File
from tomoscan.esrf.edfscan import EDFTomoScan
from tomoscan.esrf.hdf5scan import HDF5TomoScan
from tomoscan.esrf.hdf5scan import HDF5TomoScan, ImageKey
from ..utils import check_supported
from ..thirdparty.tomwer_load_flats_darks import get_flats_frm_process_file, get_darks_frm_process_file
from .logger import LoggerOrPrint
# Map the user-facing image-type names ("flats"/"darks"/"projections")
# to the corresponding NXtomo "image_key" integer values.
val_to_nxkey = {
    "flats": ImageKey.FLAT_FIELD.value,
    "darks": ImageKey.DARK_FIELD.value,
    "projections": ImageKey.PROJECTION.value,
}
dataset_infos = {
"num_radios": None,
"num_darks": None,
......@@ -146,7 +155,6 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
self._rot_angles = None
def _load_flats_from_tomwer(self, tomwer_processes_fname=None):
tomwer_processes_fname = tomwer_processes_fname or "tomwer_processes.h5"
tomwer_processes_file = os.path.join(self.dataset_scanner.path, "tomwer_processes.h5")
......@@ -181,7 +189,6 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
def analyze_dataset(dataset_path):
if not(os.path.isdir(dataset_path)):
if not(os.path.isfile(dataset_path)):
......@@ -194,3 +201,196 @@ def analyze_dataset(dataset_path):
dataset_structure = dataset_analyzer_class(dataset_path)
return dataset_structure
class FlatFieldLoader:
    """
    A helper class to load flats and darks, or to compute the final ones.

    At ESRF, several darks/flats series are acquired during a scan.
    Each series undergoes a reduction (mean, median, ...) to get a "final" image.
    For example, there is one series of flats in the beginning, and one in the end ;
    thus there are two corresponding "final" flats.
    """

    def __init__(
        self, h5_file, h5_path, image_keys, image_type,
        reduction_method="mean", lookup_files=None, logger=None
    ):
        """
        Initialize a FlatFieldLoader helper.

        Parameters
        -----------
        h5_file: str
            File name of the HDF5 file.
        h5_path: str
            HDF5 path to the data volume.
        image_keys: list of int
            List of keys corresponding to each image of the data volume.
            See Nexus Format specification, NXtomo, "image_key".
        image_type: str
            Which image type to load. Can be "flats" or "darks".
        reduction_method: str, optional
            Which reduction method to use. Can be "mean", "median" or "sum".
            Default is "mean".
        lookup_files: list of DataUrl, optional
            List of paths (DataUrl) to inspect to load existing "final" flats/darks.
        logger: Logger object, optional
            Logging object
        """
        self.h5_file = h5_file
        self.h5_path = h5_path
        self.image_keys = image_keys
        self._set_image_type(image_type)
        self._set_reduction_method(reduction_method)
        self.lookup_files = lookup_files or []
        self.logger = LoggerOrPrint(logger)

    def _set_image_type(self, image_type):
        # Normalize the user-provided image type (string alias or NXtomo
        # integer key) to the canonical "flats"/"darks" used internally.
        img_types = {
            "flats": "flats",
            "flat": "flats",
            ImageKey.FLAT_FIELD.value: "flats",
            "darks": "darks",
            "dark": "darks",
            ImageKey.DARK_FIELD.value: "darks",
        }
        check_supported(image_type, img_types, "Image type")
        self.image_type = img_types[image_type]

    def _set_reduction_method(self, reduction_method):
        # Resolve the reduction name to the corresponding numpy function.
        red_methods = {
            "mean": np.mean,
            "median": np.median,
            "sum": np.sum,
        }
        check_supported(reduction_method, red_methods, "reduction method")
        self.reduction_method = reduction_method
        self.reduction_function = red_methods[reduction_method]

    def browse_existing_flatfield_results(self):
        """
        Attempt at loading already computed "final" darks and flats from existing files.

        Returns
        -------
        existing_data: dict or None
            Data loaded from the first readable lookup file, or None if no
            lookup file could provide it.
        """
        existing_data = None
        for fpath in self.lookup_files:
            existing_data = self.load_existing_flatfield(fpath)
            if existing_data is not None:
                self.logger.info("Loaded %s from %s" % (self.image_type, fpath.file_path()))
                break
        if existing_data is None:
            self.logger.debug("%s not loaded from any file" % self.image_type)
        return existing_data

    def load_existing_flatfield(self, file_url):
        """
        Attempt at loading already computed "final" darks and flats from an existing file.

        Parameters
        -----------
        file_url: DataUrl
            URL of the flat/dark to load.

        Returns
        -------
        res: dict or None
            Dictionary of loaded images (one entry per series), or None if
            the file could not be read.
        """
        results_path = os.path.join(file_url.data_path(), "results")  # NXProcess
        res = {}
        try:
            with HDF5File(file_url.file_path(), "r", swmr=True) as f:
                results = f[results_path]
                for key, val in results[self.image_type].items():
                    res[key] = val[:]
        except Exception as exc:
            # Best-effort: an unreadable file only triggers a fallback to
            # computing the reduction from the raw data.
            self.logger.error(
                "Could not load %s from %s: %s"
                % (self.image_type, file_url.file_path(), str(exc))
            )
            res = None
        return res

    @staticmethod
    def get_data_slices(image_keys, key):
        """
        Return indices in the data volume where images correspond to a certain key.

        Parameters
        ----------
        image_keys: list of int
            List of keys corresponding to each image of the data volume.
            See Nexus Format specification, NXtomo, "image_key".
        key: int
            Key to search in the `image_keys` list.

        Returns
        --------
        slices: list of slice
            A list where each item is a slice (one per contiguous series).
        """
        # Build an elementwise boolean mask (np.asarray also makes this work
        # when image_keys is a plain list), then pad it with False on both
        # sides so that series located at the very beginning or end of the
        # scan are detected too -- np.diff alone only sees interior edges.
        mask = np.asarray(image_keys) == key
        padded = np.concatenate(([False], mask, [False])).astype(np.int8)
        edges = np.diff(padded)
        starts = np.nonzero(edges == 1)[0]   # rising edges: series starts
        stops = np.nonzero(edges == -1)[0]   # falling edges: one past series ends
        return [slice(int(start), int(stop)) for start, stop in zip(starts, stops)]

    def get_data_chunk(self, data_slice):
        """
        Get a data chunk from the data volume whose path was specified at class instantiation.
        """
        with HDF5File(self.h5_file, "r", swmr=True) as f:
            data_raw = f[self.h5_path][data_slice]
        return data_raw

    def apply_reduction(self, volume):
        """
        Apply a reduction function on a data volume.
        The data is assumed to be three dimensional, a "stack of images".

        Parameters
        -----------
        volume: numpy.ndarray
            Data volume
        """
        # Reduce along the stack axis: a (n_imgs, rows, cols) volume
        # becomes a single (rows, cols) image.
        return self.reduction_function(volume, axis=0)

    def get_final_images(self):
        """
        Main function of this class.
        This function first attempts at loading already-computed flat/dark.
        If nothing is found, then the final flats/darks are computed from raw ones.

        Returns
        -------
        imgs: dict
            Dictionary where the key is the index of the final flats/darks,
            and the values are the corresponding reduced data.
        """
        existing_imgs = self.browse_existing_flatfield_results()
        if existing_imgs is not None:
            return existing_imgs
        img_slices = self.get_data_slices(self.image_keys, val_to_nxkey[self.image_type])
        if not img_slices:
            self.logger.error("No %s found in %s" % (self.image_type, self.h5_file))
            return None
        res = {}
        for data_slice in img_slices:
            data_chunk = self.get_data_chunk(data_slice)
            img = self.apply_reduction(data_chunk)
            # Key each reduced image by the index of the first raw image
            # of its series.
            res[data_slice.start] = img
        return res
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment