Job Failed #445189
Job #445189 failed for commit 9d98833f.
The failure comes from test_regridding
in test_pipeline_elements_full.py:190:
___________________________________________________________________________ TestGriddedAccumulator.test_regridding ___________________________________________________________________________
self = <nabu.pipeline.helical.tests.test_pipeline_elements_full.TestGriddedAccumulator object at 0x7feff9173520>
def test_regridding(self):
span_info = span_strategy.SpanStrategy(
z_pix_per_proj=self.z_pix_per_proj,
x_pix_per_proj=self.x_pix_per_proj,
detector_shape_vh=self.detector_shape_vh,
phase_margin_pix=self.phase_margin_pix,
projection_angles_deg=self.projection_angles_deg,
require_redundancy=True,
pixel_size_mm=self.pixel_size_mm,
logger=None,
)
# I would like to reconstruct from feasible height 15 to feasible height 18
# relative to the first doable slice in the vertical translation direction
# I get the heights in the detector frame of the first and of the last
self.reconstruction_space = gridded_accumulator.get_reconstruction_space(
span_info=span_info, min_scanwise_z=15, end_scanwise_z=18, phase_margin_pix=self.phase_margin_pix
)
chunk_info = span_info.get_chunk_info((self.reconstruction_space.my_z_min, self.reconstruction_space.my_z_end))
sub_region = (
self.reconstruction_space.my_z_min - self.phase_margin_pix,
self.reconstruction_space.my_z_end + self.phase_margin_pix,
)
# useful projections
proj_num_start, proj_num_end = chunk_info.angle_index_span
# the first of the chunk angular range
my_first_pnum = proj_num_start
self.accumulator = gridded_accumulator.GriddedAccumulator(
gridded_radios=self.reconstruction_space.gridded_radios,
gridded_weights=self.reconstruction_space.gridded_cumulated_weights,
diagnostic_radios=self.reconstruction_space.diagnostic_radios,
diagnostic_weights=self.reconstruction_space.diagnostic_weights,
diagnostic_angles=self.reconstruction_space.diagnostic_proj_angle,
dark=self.dark,
flat_indexes=[0, 7501],
flats=self.flats,
weights=self.weights_field,
double_flat=self.double_flat,
)
# splitting in sub ranges of 100 projections
n_granularity = 100
pnum_start_list = list(np.arange(proj_num_start, proj_num_end, n_granularity))
pnum_end_list = pnum_start_list[1:] + [proj_num_end]
for pnum_start, pnum_end in zip(pnum_start_list, pnum_end_list):
start_in_chunk = pnum_start - my_first_pnum
end_in_chunk = pnum_end - my_first_pnum
self._read_data_and_apply_flats(
slice(pnum_start, pnum_end), slice(start_in_chunk, end_in_chunk), chunk_info, sub_region, span_info
)
res_flatfielded = self.reconstruction_space.gridded_radios / self.reconstruction_space.gridded_cumulated_weights
# but in real pipeline the radio_shape is obtained from the pipeline get_shape utility method
self._init_ccd_corrections( res_flatfielded.shape[1:])
# but in the actual pipeline the argument is not given, and the processed stack is the one internally
# kept by the pipeline object ( self.gridded_radios in the pipeline )
self._ccd_corrections(res_flatfielded)
self._init_phase(res_flatfielded.shape[1:])
processed_radios = self._retrieve_phase(res_flatfielded)
self._init_mlog( processed_radios.shape)
self._take_log( processed_radios )
top_margin = -self.phase_margin_pix if self.phase_margin_pix else None
processed_weights = self.reconstruction_space.gridded_cumulated_weights[:, self.phase_margin_pix: top_margin, :]
self._init_weight_balancer()
self._balance_weights(processed_weights)
> self._init_reconstructor(processed_radios.shape)
debug_helical/lib/python3.8/site-packages/nabu/pipeline/helical/tests/test_pipeline_elements_full.py:190:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
debug_helical/lib/python3.8/site-packages/nabu/pipeline/helical/tests/test_pipeline_elements_full.py:259: in _init_reconstructor
self.d_radios_slim = garray.zeros(one_slice_data_shape, np.float32)
debug_helical/lib/python3.8/site-packages/pycuda/gpuarray.py:1311: in zeros
result = GPUArray(shape, dtype, allocator, order=order)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <[AttributeError("'GPUArray' object has no attribute 'gpudata'") raised in repr()] GPUArray object at 0x7ff054834c10>, shape = (3000, 1100), dtype = dtype('float32')
allocator = <Boost.Python.function object at 0x24a0fa0>, base = None, gpudata = None, strides = (4400, 4), order = 'C'
def __init__(
self,
shape,
dtype,
allocator=drv.mem_alloc,
base=None,
gpudata=None,
strides=None,
order="C",
):
dtype = np.dtype(dtype)
try:
s = 1
for dim in shape:
s *= dim
except TypeError:
# handle dim-0 ndarrays:
if isinstance(shape, np.ndarray):
shape = shape.item()
assert isinstance(shape, numbers.Integral)
s = shape
shape = (shape,)
else:
# handle shapes that are ndarrays
shape = tuple(shape)
if isinstance(s, np.integer):
# bombs if s is a Python integer
s = s.item()
if strides is None:
if order == "F":
strides = _f_contiguous_strides(dtype.itemsize, shape)
elif order == "C":
strides = _c_contiguous_strides(dtype.itemsize, shape)
else:
raise ValueError("invalid order: %s" % order)
else:
# FIXME: We should possibly perform some plausibility
# checking on 'strides' here.
strides = tuple(strides)
self.shape = tuple(shape)
self.dtype = dtype
self.strides = strides
self.mem_size = self.size = s
self.nbytes = self.dtype.itemsize * self.size
self.itemsize = self.dtype.itemsize
self.allocator = allocator
if gpudata is None:
if self.size:
> self.gpudata = self.allocator(self.size * self.dtype.itemsize)
E pycuda._driver.LogicError: cuMemAlloc failed: invalid device context
debug_helical/lib/python3.8/site-packages/pycuda/gpuarray.py:268: LogicError
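For context: cuMemAlloc reporting "invalid device context" usually means that no CUDA context is current on the thread calling the allocator (for example because the context was created on another thread, or was popped/destroyed before the call). The sketch below is not from nabu; it is a minimal, hypothetical illustration of the allocation that fails in _init_reconstructor, done with a context explicitly made current through the standard pycuda API.

import numpy as np
import pycuda.driver as cuda
import pycuda.gpuarray as garray

cuda.init()
ctx = cuda.Device(0).make_context()  # make a context current on this thread
try:
    # same shape/dtype as d_radios_slim in the failing test
    d_radios_slim = garray.zeros((3000, 1100), np.float32)
    print("allocation OK:", d_radios_slim.shape, d_radios_slim.dtype)
finally:
    ctx.pop()  # release the context when done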
@mirone, can you have a look?
Steps to reproduce (on an x86 machine with CUDA 10 or 11):
python3 -m venv debug_helical
source debug_helical/bin/activate
pip install --upgrade pip setuptools wheel
pip install cycler certifi
pip install numpy scipy h5py matplotlib lxml fabio
pip install silx
pip install ipython distributed dask_jobqueue
pip install http://www.silx.org/pub/nabu/wheels/amd64_cuda11/pycuda-2022.1-cp38-cp38-linux_x86_64.whl
pip install git+https://github.com/lebedov/scikit-cuda/
pip install git+https://gitlab.esrf.fr/tomotools/nabu@release_2022.2
nabu -V
nabu-test
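If the failure is environment-related, the following hypothetical sanity check (not part of the nabu test suite) can be run in the same venv to confirm that the pycuda wheel actually sees the GPU before launching nabu-test:

import numpy as np
import pycuda.autoinit           # creates and activates a context on device 0
import pycuda.driver as cuda
import pycuda.gpuarray as garray

print("CUDA devices:", cuda.Device.count())
print("using device:", pycuda.autoinit.device.name())
d_probe = garray.zeros((16, 16), np.float32)  # small test allocation
print("test allocation OK:", d_probe.shape)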