Commit 971557c2 authored by payno's avatar payno
Browse files

[HDF5DatasetAnalyzer] update to handle the new tomwer_processes file name

parent dcdfcfe6
Pipeline #44791 failed with stage
in 32 seconds
%% Cell type:markdown id: tags:
# Using the Nabu library from Python
This notebook shows how to use the Nabu software for performing a basic reconstruction of a tomography dataset.
The computations are done on a local machine with a GPU and Cuda available.
%% Cell type:markdown id: tags:
## 1 - Load the dataset information
We must provide `nabu` with the configuration file (`nabu.conf`), describing the path to the dataset and the processing steps. This is the equivalent of the `.par` file in PyHST2. In this file, no information is given on the detector size, energy, distance, etc: this information is extracted from the dataset metadata.
%% Cell type:code id: tags:
``` python
from nabu.resources.processconfig import ProcessConfig
```
%% Cell type:code id: tags:
``` python
conf = ProcessConfig("/scratch/paleo/bamboo/nabu.conf")
```
%% Output
Loading darks and refs from /scratch/paleo/bamboo/tomwer_processes.h5
Loading darks and refs from /scratch/paleo/bamboo/[XXX]_tomwer_processes.h5
Warning: n_angles was detected to 4000, but 4101 radios are available. You might want to run remove_unused_radios().
Removing unused radios
%% Cell type:code id: tags:
``` python
# Inspect the processing steps resolved from nabu.conf.
nabu_config = conf.nabu_config
print(nabu_config)
# The dataset structure is exposed the same way.
dataset_infos = conf.dataset_infos
# Uncomment to dump a few dataset attributes:
# print([getattr(dataset_infos, attr) for attr in ["energy", "distance", "n_angles", "radio_dims"]])
```
%% Output
{'dataset': {'location': '/scratch/paleo/bamboo/bamboo_nxtomomill.h5', 'binning': 1, 'binning_z': 1, 'projections_subsampling': 1}, 'preproc': {'flatfield_enabled': True, 'ccd_filter_enabled': False, 'ccd_filter_threshold': 0.04, 'double_flatfield_enabled': True, 'dff_sigma': None, 'take_logarithm': True, 'log_min_clip': 1e-06, 'log_max_clip': 10.0}, 'phase': {'method': 'paganin', 'delta_beta': 1.0, 'marge': 50, 'unsharp_coeff': 0.0, 'unsharp_sigma': 0.0, 'padding_type': 'edge'}, 'reconstruction': {'method': 'FBP', 'angles_file': None, 'rotation_axis_position': 1358.0, 'axis_correction_file': None, 'angle_offset': 0.0, 'fbp_filter_type': 'ramlak', 'padding_type': 'edge', 'enable_halftomo': False, 'start_x': 0, 'end_x': 2559, 'start_y': 0, 'end_y': 2559, 'start_z': 1000, 'end_z': 1240, 'iterations': 200, 'optim_algorithm': 'chambolle-pock', 'weight_tv': 0.01, 'preconditioning_filter': True, 'positivity_constraint': True}, 'output': {'location': '/scratch/paleo/bamboo/', 'file_prefix': 'bamboo_nxtomomill', 'file_format': 'hdf5'}, 'resources': {'method': 'local', 'gpus': 1, 'gpu_id': [], 'cpu_workers': 0, 'memory_per_node': (90.0, True), 'threads_per_node': (100.0, True), 'queue': 'gpu', 'walltime': (1, 0, 0)}, 'about': {'nabu_version': '2020.1.0', 'nabu_config_version': '2020.1.0', 'verbosity': 'debug', 'overwrite_results': True}}
%% Cell type:markdown id: tags:
## 2 - Chunk processing
Nabu processes data by chunks of radios (see the [documentation](http://www.silx.org/pub/nabu/doc/definitions.html#radios-and-sinograms) for more explanations).
In a first step, we define how to read chunks of radios.
%% Cell type:code id: tags:
``` python
from nabu.io.reader import ChunkReader
```
%% Cell type:markdown id: tags:
What is the largest chunk size we can process?
The answer is given by inspecting the current GPU memory, and the processing steps.
%% Cell type:code id: tags:
``` python
from nabu.cuda.utils import get_gpu_memory
from nabu.resources.computations import estimate_chunk_size
```
%% Cell type:code id: tags:
``` python
# Largest number of radio lines that fits in GPU memory,
# given the configured processing steps.
available_memory = get_gpu_memory(0)
chunk_size = estimate_chunk_size(available_memory, conf)
print("Chunk_size = %d" % chunk_size)
```
%% Output
Chunk_size = 150
%% Cell type:code id: tags:
``` python
# Load the first 'chunk_size' lines of all the radios
# sub_region layout is (start_x, end_x, start_z, end_z); None means "whole axis".
sub_region = (None, None, 0, chunk_size) # start_x, end_x, start_z, end_z
# convert_float=True: radios are converted to float32 on load (see dtype printed below).
chunk_reader = ChunkReader(dataset_infos.projections, sub_region=sub_region, convert_float=True)
```
%% Cell type:code id: tags:
``` python
# Load the current chunk
# Reads the selected sub-region of every projection from disk into memory.
chunk_reader.load_files() # takes some time
```
%% Cell type:code id: tags:
``` python
# Loaded data is laid out as (n_angles, n_z, n_x), converted to float32.
loaded = chunk_reader.files_data
print(loaded.shape)
print(loaded.dtype)
```
%% Output
(4000, 150, 2560)
float32
%% Cell type:markdown id: tags:
## 3 - Initialize the GPU
Most of the processing can be done on GPU (or many-core CPU if using OpenCL).
With `pycuda.gpuarray` (or its OpenCL counterpart `pyopencl.array`), we manipulate array objects with memory residing on device. This allows to avoid extraneous host <-> device copies.
%% Cell type:code id: tags:
``` python
import pycuda.gpuarray as garray
from nabu.cuda.utils import get_cuda_context
import numpy as np
```
%% Cell type:code id: tags:
``` python
# Create a Cuda context on device ID 0
# By default, all following GPU processings will be bound on this context
# (the context object is kept alive for the whole notebook session).
ctx = get_cuda_context(device_id=0)
```
%% Cell type:code id: tags:
``` python
# Unpack the chunk dimensions and upload the radios to device memory.
radios = chunk_reader.files_data
n_angles, n_z, n_x = radios.shape
d_radios = garray.to_gpu(radios)
```
%% Cell type:markdown id: tags:
## 4 - Pre-processing
Pre-processing utilities are available in the `nabu.preproc` module.
Utilities available with the cuda backend are implemented in a module with a `_cuda` suffix.
%% Cell type:markdown id: tags:
### 4.1 - Flat-field
%% Cell type:code id: tags:
``` python
from nabu.preproc.ccd_cuda import CudaFlatField
```
%% Cell type:code id: tags:
``` python
# Indices at which each projection was acquired, needed to interpolate
# flats over the scan.
# Consistency fix: use the `dataset_infos` alias (bound earlier from
# conf.dataset_infos) instead of mixing it with `conf.dataset_infos`.
radios_indices = sorted(dataset_infos.projections.keys())
# Configure the `FlatField` processor
cuda_flatfield = CudaFlatField(
    d_radios.shape,
    dataset_infos.flats,
    dataset_infos.darks,
    radios_indices=radios_indices,
    sub_region=sub_region,
    convert_float=True,
)
```
%% Cell type:code id: tags:
``` python
# Perform the normalization on GPU
# NOTE(review): presumably the standard (radio - dark) / (flat - dark)
# correction, applied in-place on the device array — confirm in nabu docs.
if nabu_config["preproc"]["flatfield_enabled"]:
    print("Doing flat-field")
    cuda_flatfield.normalize_radios(d_radios)
```
%% Output
Doing flat-field
%% Cell type:markdown id: tags:
### 4.2 - Phase retrieval
%% Cell type:code id: tags:
``` python
from nabu.preproc.phase_cuda import CudaPaganinPhaseRetrieval
```
%% Cell type:code id: tags:
``` python
# Fall back to a default energy when the dataset metadata does not provide one.
energy = dataset_infos.energy
if energy == 0:
    energy = 19  # keV
# Phase retrieval is done on each radio individually, with the sub-region specified above
# Idiom fix: `is not None` instead of `!= None` (PEP 8 identity comparison).
if nabu_config["phase"]["method"] is not None:
    print("Doing phase retrieval")
    cudapaganin = CudaPaganinPhaseRetrieval(
        (n_z, n_x),
        # NOTE(review): the 1e2 factor looks like a metres -> centimetres
        # conversion — confirm the unit CudaPaganinPhaseRetrieval expects.
        distance=dataset_infos.distance * 1e2,
        energy=energy,
        delta_beta=nabu_config["phase"]["delta_beta"],
        pixel_size=dataset_infos.pixel_size
    )
    # Filtering is applied in-place, one radio at a time.
    for i in range(n_angles):
        cudapaganin.apply_filter(d_radios[i], output=d_radios[i])
```
%% Output
Doing phase retrieval
%% Cell type:markdown id: tags:
### 4.3 - Logarithm
%% Cell type:code id: tags:
``` python
from nabu.preproc.ccd_cuda import CudaLog
```
%% Cell type:code id: tags:
``` python
# Take the logarithm of the normalized radios on GPU.
if nabu_config["preproc"]["take_logarithm"]:
    print("Taking logarithm")
    # clip_min guards against log of non-positive values.
    # NOTE(review): 0.01 is hard-coded while the config declares
    # log_min_clip = 1e-06 — confirm which value is intended.
    cuda_log = CudaLog(d_radios.shape, clip_min=0.01)
    cuda_log.take_logarithm(d_radios)
```
%% Output
Taking logarithm
%% Cell type:markdown id: tags:
## 5 - Reconstruction
We use the filtered backprojection with `nabu.reconstruction.fbp`
%% Cell type:code id: tags:
``` python
from nabu.reconstruction.fbp import Backprojector
```
%% Cell type:code id: tags:
``` python
# Reconstruction options resolved by ProcessConfig (angles, rotation axis, ...).
rec_options = conf.processing_options["reconstruction"]
B = Backprojector(
    (n_angles, n_x),  # sinogram shape: (number of projections, detector width)
    angles=rec_options["angles"], rot_center=rec_options["rotation_axis_position"],
    extra_options={"padding_mode": "edges"}
)
# One reconstructed n_x * n_x slice per radio line of the chunk, on device.
d_recs = garray.zeros((n_z, n_x, n_x), "f")
```
%% Cell type:code id: tags:
``` python
print("Reconstructing...", end="")
for i in range(n_z):
    # Reconstruct slice i from its sinogram.
    # Bug fix: feed the preprocessed device array `d_radios`; the original
    # read the raw host array `radios`, silently discarding the flat-field,
    # phase-retrieval and logarithm steps performed on GPU above.
    B.fbp(d_radios[:, i, :], output=d_recs[i])
# Bring the reconstructed volume back to host memory.
recs = d_recs.get()
print(" ... OK")
```
%% Output
Reconstructing... ... OK
%% Cell type:markdown id: tags:
## 6 - Visualize
%% Cell type:code id: tags:
``` python
%pylab nbagg
```
%% Output
Populating the interactive namespace from numpy and matplotlib
%% Cell type:code id: tags:
``` python
# Display the first reconstructed slice of the chunk.
figure()
imshow(recs[0], cmap="gray")
```
%% Output
<matplotlib.image.AxesImage at 0x7fe71263fe10>
%% Cell type:code id: tags:
``` python
```
......
......@@ -261,16 +261,19 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
# No flats at all in the dataset. Do nothing.
return
if self.processes_file is None and self._load_flats_from_tomwer():
# Loaded from tomwer_processes.h5
# Loaded from [XXX]_tomwer_processes.h5
return
# Otherwise load or compute flats/darks with nabu
self._compute_or_load_flats()
def _load_flats_from_tomwer(self, tomwer_processes_fname=None):
tomwer_processes_fname = tomwer_processes_fname or "tomwer_processes.h5"
tomwer_processes_file = os.path.join(self.dataset_scanner.path, "tomwer_processes.h5")
if not(os.path.isfile(tomwer_processes_file)):
def _load_flats_from_tomwer(self):
tomwer_processes_file = self._get_tomwer_process_file_name()
if tomwer_processes_file is None or not(os.path.isfile(tomwer_processes_file)):
# if not found try with the old one
tomwer_processes_file = os.path.join(self.dataset_scanner.path,
"tomwer_processes.h5")
if tomwer_processes_file is None or not (os.path.isfile(tomwer_processes_file)):
return False
self.logger.info("Loading darks and refs from %s" % tomwer_processes_file)
new_flats = get_flats_frm_process_file(
......@@ -284,6 +287,16 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
return True
def _get_tomwer_process_file_name(self, scan):
if scan.path is not None:
basename, _ = os.path.splitext(scan.master_file)
basename = os.path.basename(basename)
basename = "_".join((basename, "tomwer_processes.h5"))
return os.path.join(scan.path, basename)
else:
return None
def _get_results_file(self):
results_relfname = os.path.splitext(
os.path.basename(self.dataset_scanner.master_file)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment