Commit e182a53f authored by Pierre Paleo

Delete unused config template

parent 8271658b
import unittest
# ~ from nabu.io.paramsparser import Template, hst_nabu_mapping


class TestConfig(unittest.TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @staticmethod
    def compare_dicts_keys(dic1, dic2):
        s1 = set(dic1.keys())
        s2 = set(dic2.keys())
        return (s1 == s2, s1 - s2, s2 - s1)
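    # Example (hypothetical values, for illustration only):
    #   compare_dicts_keys({"a": 1, "b": 2}, {"b": 3, "c": 4})
    #   returns (False, {"a"}, {"c"}): the key sets differ, "a" is only
    #   in the first dict, and "c" is only in the second.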
    def test_nabu_config_consistency(self):
        """
        Check that the different representations of the Nabu configuration
        are consistent. The configuration is currently laid out in three
        different locations:
          - resources/templates/templates_all.ini
          - resources/config_validator.py
          - io/paramsparser.py (hst_nabu_mapping)
        """
        # Load the different representations as dicts, then compare the dicts
        # Structure from templates_all.ini
        config_ini = Template.parser._sections
        # Structure from config_validator
        config_validator = ConfigValidator
        # Structure from "hst_nabu_mapping"
        config_mapping = hst_nabu_mapping
        comp1 = self.compare_dicts_keys(config_ini, config_validator)
        # ~ comp2 = self.compare_dicts_keys(config_validator, config_mapping)
        self.assertTrue(
            comp1[0],
            """INI template and validator do not have the same sections.
            Sections in INI not in validator: %s
            Sections in validator not in INI: %s
            """
            % (str(comp1[1]), str(comp1[2]))
        )
        # ~ self.assertTrue(
        # ~     comp2[0],
        # ~     """
        # ~     Validator and HST mapping do not have the same sections.
        # ~     Sections in validator not in HST mapping: %s
        # ~     Sections in HST mapping not in validator: %s
        # ~     """
        # ~     % (str(comp2[1]), str(comp2[2]))
        # ~ )
        for section in config_ini.keys():
            D1 = config_ini[section]
            D2 = config_validator[section]
            # ~ D3 = config_mapping[section]
            comp1 = self.compare_dicts_keys(D1, D2)
            # ~ comp2 = self.compare_dicts_keys(D2, D3)
            self.assertTrue(
                comp1[0],
                """
                Section %s of INI template and validator have different keys.
                Keys in INI not in validator: %s
                Keys in validator not in INI: %s
                """
                % (section, str(comp1[1]), str(comp1[2]))
            )
            # ~ self.assertTrue(
            # ~     comp2[0],
            # ~     """
            # ~     Section %s of validator and HST mapping have different keys.
            # ~     Keys in validator not in HST mapping: %s
            # ~     Keys in HST mapping not in validator: %s
            # ~     """
            # ~     % (section, str(comp2[1]), str(comp2[2]))
            # ~ )

def suite():
    suite = unittest.TestSuite()
    # Obsolete
    # ~ suite.addTest(
    # ~     unittest.defaultTestLoader.loadTestsFromTestCase(TestConfig)
    # ~ )
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest="suite")
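The pattern this test implements (comparing section and key sets across two representations of the configuration) can be sketched as a standalone script. The file name and the "validator" dict below are illustrative placeholders, not nabu's actual API:

import configparser

def compare_keys(d1, d2):
    s1, s2 = set(d1), set(d2)
    return (s1 == s2, s1 - s2, s2 - s1)

# interpolation=None so that values like "100%" do not trigger
# configparser's string interpolation
parser = configparser.ConfigParser(interpolation=None)
parser.read("templates_all.ini")  # hypothetical local copy of the template
ini_dict = {s: dict(parser[s]) for s in parser.sections()}

# Hypothetical validator skeleton, laid out as {section: {key: ...}}
validator = {
    "dataset": {"name": None, "location": None, "file_prefix": None},
    "preproc": {"flatfield_enabled": None, "take_logarithm": None},
}

same, only_ini, only_validator = compare_keys(ini_dict, validator)
print("Same sections:", same, "| only in INI:", only_ini,
      "| only in validator:", only_validator)
for section in ini_dict.keys() & validator.keys():
    print(section, compare_keys(ini_dict[section], validator[section]))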
[dataset]
# Dataset name. Optional, by default it is inferred from the location/file prefix.
name = my_dataset
# Dataset folder location
location = /path/to/my/dataset
# Prefix of each radio file.
# File type and numerical part are automatically determined
file_prefix = my_dataset_
# Number of the first radio.
num_first_image = 0
# Number of the last radio.
num_last_image = -1
# File prefix of the ref (flat) file, if relevant
flat_file_prefix = refHST
# File prefix of the dark file, if relevant
dark_file_prefix = dark
[preproc]
# Enable flat-field correction
flatfield_enabled = 1
# Whether to enable the CCD hotspots correction.
ccd_filter_enabled = 0
# If ccd_filter_enabled = 1, a median filter is applied on the 3x3 neighborhood
# of every pixel. If a pixel value exceeds the median value by more than this
# parameter, then the pixel value is replaced with the median value.
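# Example (illustrative values): with ccd_filter_threshold = 0.04, a pixel of
# value 1.10 whose 3x3 neighborhood median is 1.00 deviates by 0.10 > 0.04,
# so it is replaced with 1.00.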
ccd_filter_threshold = 0.04
# After division by the flat-field, and before the logarithm, the image is clipped to this maximum
clip_value = 10.0
# Whether to take logarithm after flat-field.
take_logarithm = 1
[phase]
# Phase retrieval method. Available are: Paganin, None
method = Paganin
# If specified, save the phase retrieval in the specified directory
save_output = /tmp/phase_retrieval
# File prefix for saving the phase retrieval
file_prefix = pag_
# Paganin related parameters
# ----------------------------
# delta/beta ratio for the Paganin method.
paganin_delta_beta = 100.0
# As an alternative to delta/beta, the "Lmicron" parameter of PyHST can be specified.
# L^2 = pi * wavelength * distance * delta/beta
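# Example (illustrative values): with wavelength = 1e-10 m, distance = 0.1 m
# and delta/beta = 100, L^2 = pi * 1e-10 * 0.1 * 100 ~= 3.14e-9 m^2,
# i.e. L ~= 56 microns.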
paganin_lmicron = 1
# Margin (in pixels) in the Paganin filtering to avoid local tomography artefacts.
paganin_marge = 50
# Standard deviation of the Gaussian filter when applying an unsharp mask
# after the Paganin filtering. Disabled if set to 0.
paganin_unsharp_sigma = 0
# Unsharp mask strength. The unsharped image is equal to
# UnsharpedImage = (1 + coeff)*originalPaganinImage - coeff * ConvolutedImage
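# Example: with paganin_unsharp_coeff = 0.5,
# UnsharpedImage = 1.5 * originalPaganinImage - 0.5 * ConvolutedImage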
paganin_unsharp_coeff = 0
# Padding type for the filtering step. Available are: mirror, edge, zeros
paganin_padding_type = mirror
[reconstruction]
# In case you want to override the angles found in the files metadata
angles_file =
# In the case where the axis position is specified for each slice
axis_correction_file =
# Use this if you want to obtain a rotated reconstructed slice.
angle_offset = 0
# Reconstruction method
method = "FBP"
# Filter type for FBP method
fbp_filter_type = "ramlak"
# Padding type for FBP. Available are: zeros, edge, mirror
padding_type = zeros
# Parameters related to half-acquisition tomography
# -------------------------------------------------
# Whether to enable half-acquisition
enable_halftomo = 1
# Parameters for sub-volume reconstruction. Indices start at 0!
# ----------------------------------------------------------------
# (x, y) are the dimensions of a slice, and (z) is the "vertical" axis.
# By default, the whole volume is reconstructed slice by slice, with varying "z".
start_x = 0
end_x = -1
start_y = 0
end_y = -1
start_z = 0
end_z = -1
# Binning factor in the case where the reconstructed slices are to be binned.
# The slice dimensions will be divided by "slices_binning"
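# Example: with slices_binning = 2, a 2048x2048 slice becomes 1024x1024.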
slices_binning = 1
# Whether to also bin in the "z" direction, resulting in fewer reconstructed slices
slices_binning_z = 1
# Projections subsampling factor: take one projection out of "projections_subsampling"
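# Example: with projections_subsampling = 2, every other projection is used,
# halving the number of projections.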
projections_subsampling = 1
# Parameters for iterative algorithms
# ------------------------------------
# Number of iterations
iterations = 200
# Optimization algorithm for iterative methods
optim_algorithm = CP
# Regularization parameter for iterative methods
weight_tv = 1.0e-2
# Whether to enable "filter preconditioning" for iterative methods
preconditioning_filter = 1
# Whether to enforce a positivity constraint in the reconstruction.
positivity_constraint = 1
[output]
# Directory where the output reconstruction is stored.
location =
# File prefix. Optional, by default it is inferred from the scanned dataset.
file_prefix = my_dataset
# Output file format. Available are: edf, hdf5
file_format = hdf5
# Whether to write all frames in a single file. Only available with HDF5.
multiframe = 0
# Whether to create a single HDF5 file regrouping the other files with links. Ignored if multiframe = 1
regroup_files = 0
[resources]
# Computations distribution method. It can be:
# - "local": run the computations on the local machine
# - "slurm": run the computations through SLURM
method = slurm
# Number of nodes (machines) to run the computations
nodes = 1
# Number of GPUs per node
gpus_per_node = 2
# RAM per node, either in GB or in percent of the total node memory
memory_per_node = 100%
# Number of logical cores to allocate on each node, either a number or a percentage of the available cores
cores_per_node = 100%
# Time limit for the SLURM resource allocation, in the format Hours:Minutes:Seconds.
# Ignored if method = "local"
walltime = 01:00:00
[about]
# Version of the nabu software
nabu_version = 2019.1
# Version of the nabu configuration file
nabu_config_version = 2019.1
# Level of verbosity of the processing. 0 = terse, 3 = very verbose.
verbosity = 1
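For reference, the Lmicron relation given in the [phase] section can be inverted numerically. A minimal sketch, assuming L^2 = pi * wavelength * distance * delta/beta with L in microns; the function name and sample values are illustrative, not nabu's API:

from math import pi

def lmicron_to_delta_beta(lmicron, wavelength_m, distance_m):
    """Invert L^2 = pi * wavelength * distance * (delta/beta)."""
    l_m = lmicron * 1e-6  # microns -> meters
    return l_m ** 2 / (pi * wavelength_m * distance_m)

# With L = 56 microns, wavelength = 1e-10 m (~12.4 keV), distance = 0.1 m:
print(lmicron_to_delta_beta(56.0, 1e-10, 0.1))  # ~99.8, i.e. delta/beta ~ 100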