Commit 971557c2 authored by payno's avatar payno
Browse files

[HDF5DatasetAnalyzer] update to handle the new tomwer_processes file name

parent dcdfcfe6
Pipeline #44791 failed with stage
in 32 seconds
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
# Using the Nabu library from Python # Using the Nabu library from Python
This notebook shows how to use the Nabu software for performing a basic reconstruction of a tomography dataset. This notebook shows how to use the Nabu software for performing a basic reconstruction of a tomography dataset.
The computations are done on a local machine with a GPU and Cuda available. The computations are done on a local machine with a GPU and Cuda available.
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 1 - Load the dataset information ## 1 - Load the dataset information
We must provide `nabu` with the configuration file (`nabu.conf`), describing the path to the dataset and the processing steps. This is the equivalent of the `.par` file in PyHST2. In this file, no information is given on the detector size, energy, distance, etc.: this information is extracted from the dataset metadata. We must provide `nabu` with the configuration file (`nabu.conf`), describing the path to the dataset and the processing steps. This is the equivalent of the `.par` file in PyHST2. In this file, no information is given on the detector size, energy, distance, etc.: this information is extracted from the dataset metadata.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.resources.processconfig import ProcessConfig from nabu.resources.processconfig import ProcessConfig
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
conf = ProcessConfig("/scratch/paleo/bamboo/nabu.conf") conf = ProcessConfig("/scratch/paleo/bamboo/nabu.conf")
``` ```
%% Output %% Output
Loading darks and refs from /scratch/paleo/bamboo/tomwer_processes.h5 Loading darks and refs from /scratch/paleo/bamboo/[XXX]_tomwer_processes.h5
Warning: n_angles was detected to 4000, but 4101 radios are available. You might want to run remove_unused_radios(). Warning: n_angles was detected to 4000, but 4101 radios are available. You might want to run remove_unused_radios().
Removing unused radios Removing unused radios
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# We can easily get information on the processing steps. # We can easily get information on the processing steps.
nabu_config = conf.nabu_config nabu_config = conf.nabu_config
print(nabu_config) print(nabu_config)
# The same can be done with the dataset structure # The same can be done with the dataset structure
dataset_infos = conf.dataset_infos dataset_infos = conf.dataset_infos
# print([getattr(dataset_infos, attr) for attr in ["energy", "distance", "n_angles", "radio_dims"]]) # print([getattr(dataset_infos, attr) for attr in ["energy", "distance", "n_angles", "radio_dims"]])
``` ```
%% Output %% Output
{'dataset': {'location': '/scratch/paleo/bamboo/bamboo_nxtomomill.h5', 'binning': 1, 'binning_z': 1, 'projections_subsampling': 1}, 'preproc': {'flatfield_enabled': True, 'ccd_filter_enabled': False, 'ccd_filter_threshold': 0.04, 'double_flatfield_enabled': True, 'dff_sigma': None, 'take_logarithm': True, 'log_min_clip': 1e-06, 'log_max_clip': 10.0}, 'phase': {'method': 'paganin', 'delta_beta': 1.0, 'marge': 50, 'unsharp_coeff': 0.0, 'unsharp_sigma': 0.0, 'padding_type': 'edge'}, 'reconstruction': {'method': 'FBP', 'angles_file': None, 'rotation_axis_position': 1358.0, 'axis_correction_file': None, 'angle_offset': 0.0, 'fbp_filter_type': 'ramlak', 'padding_type': 'edge', 'enable_halftomo': False, 'start_x': 0, 'end_x': 2559, 'start_y': 0, 'end_y': 2559, 'start_z': 1000, 'end_z': 1240, 'iterations': 200, 'optim_algorithm': 'chambolle-pock', 'weight_tv': 0.01, 'preconditioning_filter': True, 'positivity_constraint': True}, 'output': {'location': '/scratch/paleo/bamboo/', 'file_prefix': 'bamboo_nxtomomill', 'file_format': 'hdf5'}, 'resources': {'method': 'local', 'gpus': 1, 'gpu_id': [], 'cpu_workers': 0, 'memory_per_node': (90.0, True), 'threads_per_node': (100.0, True), 'queue': 'gpu', 'walltime': (1, 0, 0)}, 'about': {'nabu_version': '2020.1.0', 'nabu_config_version': '2020.1.0', 'verbosity': 'debug', 'overwrite_results': True}} {'dataset': {'location': '/scratch/paleo/bamboo/bamboo_nxtomomill.h5', 'binning': 1, 'binning_z': 1, 'projections_subsampling': 1}, 'preproc': {'flatfield_enabled': True, 'ccd_filter_enabled': False, 'ccd_filter_threshold': 0.04, 'double_flatfield_enabled': True, 'dff_sigma': None, 'take_logarithm': True, 'log_min_clip': 1e-06, 'log_max_clip': 10.0}, 'phase': {'method': 'paganin', 'delta_beta': 1.0, 'marge': 50, 'unsharp_coeff': 0.0, 'unsharp_sigma': 0.0, 'padding_type': 'edge'}, 'reconstruction': {'method': 'FBP', 'angles_file': None, 'rotation_axis_position': 1358.0, 'axis_correction_file': None, 'angle_offset': 0.0, 'fbp_filter_type': 
'ramlak', 'padding_type': 'edge', 'enable_halftomo': False, 'start_x': 0, 'end_x': 2559, 'start_y': 0, 'end_y': 2559, 'start_z': 1000, 'end_z': 1240, 'iterations': 200, 'optim_algorithm': 'chambolle-pock', 'weight_tv': 0.01, 'preconditioning_filter': True, 'positivity_constraint': True}, 'output': {'location': '/scratch/paleo/bamboo/', 'file_prefix': 'bamboo_nxtomomill', 'file_format': 'hdf5'}, 'resources': {'method': 'local', 'gpus': 1, 'gpu_id': [], 'cpu_workers': 0, 'memory_per_node': (90.0, True), 'threads_per_node': (100.0, True), 'queue': 'gpu', 'walltime': (1, 0, 0)}, 'about': {'nabu_version': '2020.1.0', 'nabu_config_version': '2020.1.0', 'verbosity': 'debug', 'overwrite_results': True}}
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 2 - Chunk processing ## 2 - Chunk processing
Nabu processes data by chunks of radios (see the [documentation](http://www.silx.org/pub/nabu/doc/definitions.html#radios-and-sinograms) for more explanations). Nabu processes data by chunks of radios (see the [documentation](http://www.silx.org/pub/nabu/doc/definitions.html#radios-and-sinograms) for more explanations).
In a first step, we define how to read chunks of radios. In a first step, we define how to read chunks of radios.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.io.reader import ChunkReader from nabu.io.reader import ChunkReader
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
What is the largest chunk size we can process ? What is the largest chunk size we can process ?
The answer is given by inspecting the current GPU memory, and the processing steps. The answer is given by inspecting the current GPU memory, and the processing steps.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.cuda.utils import get_gpu_memory from nabu.cuda.utils import get_gpu_memory
from nabu.resources.computations import estimate_chunk_size from nabu.resources.computations import estimate_chunk_size
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
chunk_size = estimate_chunk_size( chunk_size = estimate_chunk_size(
get_gpu_memory(0), get_gpu_memory(0),
conf conf
) )
print("Chunk_size = %d" % chunk_size) print("Chunk_size = %d" % chunk_size)
``` ```
%% Output %% Output
Chunk_size = 150 Chunk_size = 150
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Load the first 'chunk_size' lines of all the radios # Load the first 'chunk_size' lines of all the radios
sub_region = (None, None, 0, chunk_size) # start_x, end_x, start_z, end_z sub_region = (None, None, 0, chunk_size) # start_x, end_x, start_z, end_z
chunk_reader = ChunkReader(dataset_infos.projections, sub_region=sub_region, convert_float=True) chunk_reader = ChunkReader(dataset_infos.projections, sub_region=sub_region, convert_float=True)
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Load the current chunk # Load the current chunk
chunk_reader.load_files() # takes some time chunk_reader.load_files() # takes some time
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
print(chunk_reader.files_data.shape) print(chunk_reader.files_data.shape)
print(chunk_reader.files_data.dtype) print(chunk_reader.files_data.dtype)
``` ```
%% Output %% Output
(4000, 150, 2560) (4000, 150, 2560)
float32 float32
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 3 - Initialize the GPU ## 3 - Initialize the GPU
Most of the processing can be done on GPU (or many-core CPU if using OpenCL). Most of the processing can be done on GPU (or many-core CPU if using OpenCL).
With `pycuda.gpuarray` (or its OpenCL counterpart `pyopencl.array`), we manipulate array objects with memory residing on device. This avoids extraneous host <-> device copies. With `pycuda.gpuarray` (or its OpenCL counterpart `pyopencl.array`), we manipulate array objects with memory residing on device. This avoids extraneous host <-> device copies.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
import pycuda.gpuarray as garray import pycuda.gpuarray as garray
from nabu.cuda.utils import get_cuda_context from nabu.cuda.utils import get_cuda_context
import numpy as np import numpy as np
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Create a Cuda context on device ID 0 # Create a Cuda context on device ID 0
# By default, all following GPU processings will be bound on this context # By default, all following GPU processings will be bound on this context
ctx = get_cuda_context(device_id=0) ctx = get_cuda_context(device_id=0)
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
radios = chunk_reader.files_data radios = chunk_reader.files_data
n_angles, n_z, n_x = radios.shape n_angles, n_z, n_x = radios.shape
# transfer the chunk on GPU # transfer the chunk on GPU
d_radios = garray.to_gpu(radios) d_radios = garray.to_gpu(radios)
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 4 - Pre-processing ## 4 - Pre-processing
Pre-processing utilities are available in the `nabu.preproc` module. Pre-processing utilities are available in the `nabu.preproc` module.
Utilities available with the cuda backend are implemented in a module with a `_cuda` suffix. Utilities available with the cuda backend are implemented in a module with a `_cuda` suffix.
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### 4.1 - Flat-field ### 4.1 - Flat-field
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.preproc.ccd_cuda import CudaFlatField from nabu.preproc.ccd_cuda import CudaFlatField
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
radios_indices = sorted(conf.dataset_infos.projections.keys()) radios_indices = sorted(conf.dataset_infos.projections.keys())
# Configure the `FlatField` processor # Configure the `FlatField` processor
cuda_flatfield = CudaFlatField( cuda_flatfield = CudaFlatField(
d_radios.shape, d_radios.shape,
dataset_infos.flats, dataset_infos.flats,
dataset_infos.darks, dataset_infos.darks,
radios_indices=radios_indices, radios_indices=radios_indices,
sub_region=sub_region, sub_region=sub_region,
convert_float=True, convert_float=True,
) )
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Perform the normalization on GPU # Perform the normalization on GPU
if nabu_config["preproc"]["flatfield_enabled"]: if nabu_config["preproc"]["flatfield_enabled"]:
print("Doing flat-field") print("Doing flat-field")
cuda_flatfield.normalize_radios(d_radios) cuda_flatfield.normalize_radios(d_radios)
``` ```
%% Output %% Output
Doing flat-field Doing flat-field
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### 4.2 - Phase retrieval ### 4.2 - Phase retrieval
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.preproc.phase_cuda import CudaPaganinPhaseRetrieval from nabu.preproc.phase_cuda import CudaPaganinPhaseRetrieval
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
energy = dataset_infos.energy energy = dataset_infos.energy
if energy == 0: if energy == 0:
energy = 19 # keV energy = 19 # keV
# Phase retrieval is done on each radio individually, with the sub-region specified above # Phase retrieval is done on each radio individually, with the sub-region specified above
if nabu_config["phase"]["method"] != None: if nabu_config["phase"]["method"] != None:
print("Doing phase retrieval") print("Doing phase retrieval")
cudapaganin = CudaPaganinPhaseRetrieval( cudapaganin = CudaPaganinPhaseRetrieval(
(n_z, n_x), (n_z, n_x),
distance=dataset_infos.distance * 1e2, distance=dataset_infos.distance * 1e2,
energy=energy, energy=energy,
delta_beta=nabu_config["phase"]["delta_beta"], delta_beta=nabu_config["phase"]["delta_beta"],
pixel_size=dataset_infos.pixel_size pixel_size=dataset_infos.pixel_size
) )
for i in range(n_angles): for i in range(n_angles):
cudapaganin.apply_filter(d_radios[i], output=d_radios[i]) cudapaganin.apply_filter(d_radios[i], output=d_radios[i])
``` ```
%% Output %% Output
Doing phase retrieval Doing phase retrieval
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
### 4.3 - Logarithm ### 4.3 - Logarithm
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.preproc.ccd_cuda import CudaLog from nabu.preproc.ccd_cuda import CudaLog
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
if nabu_config["preproc"]["take_logarithm"]: if nabu_config["preproc"]["take_logarithm"]:
print("Taking logarithm") print("Taking logarithm")
cuda_log = CudaLog(d_radios.shape, clip_min=0.01) cuda_log = CudaLog(d_radios.shape, clip_min=0.01)
cuda_log.take_logarithm(d_radios) cuda_log.take_logarithm(d_radios)
``` ```
%% Output %% Output
Taking logarithm Taking logarithm
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 5 - Reconstruction ## 5 - Reconstruction
We use the filtered backprojection with `nabu.reconstruction.fbp` We use the filtered backprojection with `nabu.reconstruction.fbp`
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
from nabu.reconstruction.fbp import Backprojector from nabu.reconstruction.fbp import Backprojector
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
rec_options = conf.processing_options["reconstruction"] rec_options = conf.processing_options["reconstruction"]
B = Backprojector( B = Backprojector(
(n_angles, n_x), (n_angles, n_x),
angles=rec_options["angles"], rot_center=rec_options["rotation_axis_position"], angles=rec_options["angles"], rot_center=rec_options["rotation_axis_position"],
extra_options={"padding_mode": "edges"} extra_options={"padding_mode": "edges"}
) )
d_recs = garray.zeros((n_z, n_x, n_x), "f") d_recs = garray.zeros((n_z, n_x, n_x), "f")
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
print("Reconstructing...", end="") print("Reconstructing...", end="")
for i in range(n_z): for i in range(n_z):
B.fbp(radios[:, i, :], output=d_recs[i]) B.fbp(radios[:, i, :], output=d_recs[i])
recs = d_recs.get() recs = d_recs.get()
print(" ... OK") print(" ... OK")
``` ```
%% Output %% Output
Reconstructing... ... OK Reconstructing... ... OK
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## 6 - Visualize ## 6 - Visualize
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
%pylab nbagg %pylab nbagg
``` ```
%% Output %% Output
Populating the interactive namespace from numpy and matplotlib Populating the interactive namespace from numpy and matplotlib
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
figure() figure()
imshow(recs[0], cmap="gray") imshow(recs[0], cmap="gray")
``` ```
%% Output %% Output
<matplotlib.image.AxesImage at 0x7fe71263fe10> <matplotlib.image.AxesImage at 0x7fe71263fe10>
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
``` ```
......
...@@ -261,17 +261,20 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer): ...@@ -261,17 +261,20 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
# No flats at all in the dataset. Do nothing. # No flats at all in the dataset. Do nothing.
return return
if self.processes_file is None and self._load_flats_from_tomwer(): if self.processes_file is None and self._load_flats_from_tomwer():
# Loaded from tomwer_processes.h5 # Loaded from [XXX]_tomwer_processes.h5
return return
# Otherwise load or compute flats/darks with nabu # Otherwise load or compute flats/darks with nabu
self._compute_or_load_flats() self._compute_or_load_flats()
def _load_flats_from_tomwer(self, tomwer_processes_fname=None): def _load_flats_from_tomwer(self):
tomwer_processes_fname = tomwer_processes_fname or "tomwer_processes.h5" tomwer_processes_file = self._get_tomwer_process_file_name()
tomwer_processes_file = os.path.join(self.dataset_scanner.path, "tomwer_processes.h5") if tomwer_processes_file is None or not(os.path.isfile(tomwer_processes_file)):
if not(os.path.isfile(tomwer_processes_file)): # if not found try with the old one
return False tomwer_processes_file = os.path.join(self.dataset_scanner.path,
"tomwer_processes.h5")
if tomwer_processes_file is None or not (os.path.isfile(tomwer_processes_file)):
return False
self.logger.info("Loading darks and refs from %s" % tomwer_processes_file) self.logger.info("Loading darks and refs from %s" % tomwer_processes_file)
new_flats = get_flats_frm_process_file( new_flats = get_flats_frm_process_file(
tomwer_processes_file, self.dataset_scanner.entry tomwer_processes_file, self.dataset_scanner.entry
...@@ -284,6 +287,16 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer): ...@@ -284,6 +287,16 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
return True return True
def _get_tomwer_process_file_name(self, scan):
if scan.path is not None:
basename, _ = os.path.splitext(scan.master_file)
basename = os.path.basename(basename)
basename = "_".join((basename, "tomwer_processes.h5"))
return os.path.join(scan.path, basename)
else:
return None
def _get_results_file(self): def _get_results_file(self):
results_relfname = os.path.splitext( results_relfname = os.path.splitext(
os.path.basename(self.dataset_scanner.master_file) os.path.basename(self.dataset_scanner.master_file)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment