Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
tomotools
tomoscan
Commits
e5f9ba60
Commit
e5f9ba60
authored
Mar 09, 2020
by
Pierre Paleo
Browse files
[hdf5tomoscan] move get_compacted_projections to tomoscan.esrf.utils
parent
52f09cb1
Pipeline
#22682
passed with stages
in 2 minutes and 7 seconds
Changes
2
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
tomoscan/esrf/hdf5scan.py
View file @
e5f9ba60
...
...
@@ -41,6 +41,7 @@ from silx.utils.enum import Enum as _Enum
from
tomoscan.utils
import
docstring
from
silx.io.utils
import
get_data
from
..unitsystem
import
metricsystem
from
.utils
import
get_compacted_dataslices
import
logging
import
typing
...
...
@@ -615,80 +616,6 @@ class HDF5TomoScan(TomoScanBase):
else
:
return
None
@staticmethod
def _get_compacted_projections(projections):
    """Regroup projection urls to load the data more efficiently.

    Build a structure mapping projection indices to information on how
    to load the data: ``{indices_set: data_location}`` where
    ``data_location`` covers contiguous indices, so that contiguous
    frames stored in the same file/dataset can be read in one shot.

    Parameters
    ----------
    projections: dict
        Dictionary where the key is an integer and the value is a silx
        `DataUrl`.

    Returns
    -------
    merged_projections: dict
        Dictionary where the key is a tuple of indices, and the value
        is the corresponding `silx.io.url.DataUrl` with merged
        data_slice. Empty if `projections` is empty.
    """

    def _convert_to_slice(idx):
        # Normalize a scalar index to a length-1 slice so slices can be
        # compared and merged uniformly below.
        if numpy.isscalar(idx):
            return slice(idx, idx + 1)
        # otherwise, assume already slice object
        return idx

    def is_contiguous_slice(slice1, slice2):
        # True when slice2 starts exactly where slice1 stops.
        if numpy.isscalar(slice1):
            slice1 = slice(slice1, slice1 + 1)
        if numpy.isscalar(slice2):
            slice2 = slice(slice2, slice2 + 1)
        return slice2.start == slice1.stop

    def merge_slices(slice1, slice2):
        # Assumes slice1 and slice2 are contiguous (checked above).
        return slice(slice1.start, slice2.stop)

    # Robustness fix: an empty mapping previously raised IndexError on
    # sorted_files_indices[0]; return an empty result instead.
    if not projections:
        return {}

    sorted_files_indices = sorted(projections.keys())
    idx0 = sorted_files_indices[0]
    first_url = projections[idx0]

    merged_indices = [[idx0]]
    data_location = [
        [
            first_url.file_path(),
            first_url.data_path(),
            _convert_to_slice(first_url.data_slice()),
        ]
    ]
    pos = 0
    curr_fp, curr_dp, curr_slice = data_location[pos]
    for idx in sorted_files_indices[1:]:
        url = projections[idx]
        next_slice = _convert_to_slice(url.data_slice())
        if (
            (url.file_path() == curr_fp)
            and (url.data_path() == curr_dp)
            and is_contiguous_slice(curr_slice, next_slice)
        ):
            # Same file and dataset, contiguous slice: extend the
            # current group in place.
            merged_indices[pos].append(idx)
            merged_slices = merge_slices(curr_slice, next_slice)
            data_location[pos][-1] = merged_slices
            curr_slice = merged_slices
        else:
            # "jump": start a new group at this url
            pos += 1
            merged_indices.append([idx])
            data_location.append(
                [
                    url.file_path(),
                    url.data_path(),
                    _convert_to_slice(url.data_slice()),
                ]
            )
            curr_fp, curr_dp, curr_slice = data_location[pos]

    # Format result
    res = {}
    for ind, dl in zip(merged_indices, data_location):
        res[tuple(ind)] = DataUrl(file_path=dl[0], data_path=dl[1], data_slice=dl[2])
    return res
@
property
def
projections_compacted
(
self
):
"""
...
...
@@ -701,7 +628,7 @@ class HDF5TomoScan(TomoScanBase):
is the corresponding `silx.io.url.DataUrl` with merged data_slice
"""
if
self
.
_projections_compacted
is
None
:
self
.
_projections_compacted
=
self
.
_
get_compacted_
projection
s
(
self
.
projections
)
self
.
_projections_compacted
=
get_compacted_
dataslice
s
(
self
.
projections
)
return
self
.
_projections_compacted
def
__str__
(
self
):
...
...
tomoscan/esrf/utils.py
View file @
e5f9ba60
...
...
@@ -32,6 +32,7 @@ import os
import
fabio
from
silx.io.url
import
DataUrl
from
typing
import
Union
import
numpy
def
get_parameters_frm_par_or_info
(
file_
:
str
)
->
dict
:
...
...
@@ -86,3 +87,77 @@ def extract_urls_from_edf(file_: str, start_index: Union[None, int], n_frames: U
data_slice
=
[
i_frame
,
])
index
+=
1
return
res
def get_compacted_dataslices(urls):
    """Regroup urls to load the data more efficiently.

    Build a structure mapping files indices to information on how to
    load the data: ``{indices_set: data_location}`` where
    ``data_location`` covers contiguous indices, so that contiguous
    frames stored in the same file/dataset can be read in one shot.

    Parameters
    ----------
    urls: dict
        Dictionary where the key is an integer and the value is a silx
        `DataUrl`.

    Returns
    -------
    merged_urls: dict
        Dictionary where the key is a tuple of indices, and the value
        is the corresponding `silx.io.url.DataUrl` with merged
        data_slice. Empty if `urls` is empty.
    """

    def _convert_to_slice(idx):
        # Normalize a scalar index to a length-1 slice so slices can be
        # compared and merged uniformly below.
        if numpy.isscalar(idx):
            return slice(idx, idx + 1)
        # otherwise, assume already slice object
        return idx

    def is_contiguous_slice(slice1, slice2):
        # True when slice2 starts exactly where slice1 stops.
        if numpy.isscalar(slice1):
            slice1 = slice(slice1, slice1 + 1)
        if numpy.isscalar(slice2):
            slice2 = slice(slice2, slice2 + 1)
        return slice2.start == slice1.stop

    def merge_slices(slice1, slice2):
        # Assumes slice1 and slice2 are contiguous (checked above).
        return slice(slice1.start, slice2.stop)

    # Robustness fix: an empty mapping previously raised IndexError on
    # sorted_files_indices[0]; return an empty result instead.
    if not urls:
        return {}

    sorted_files_indices = sorted(urls.keys())
    idx0 = sorted_files_indices[0]
    first_url = urls[idx0]

    merged_indices = [[idx0]]
    data_location = [
        [
            first_url.file_path(),
            first_url.data_path(),
            _convert_to_slice(first_url.data_slice()),
        ]
    ]
    pos = 0
    curr_fp, curr_dp, curr_slice = data_location[pos]
    for idx in sorted_files_indices[1:]:
        url = urls[idx]
        next_slice = _convert_to_slice(url.data_slice())
        if (
            (url.file_path() == curr_fp)
            and (url.data_path() == curr_dp)
            and is_contiguous_slice(curr_slice, next_slice)
        ):
            # Same file and dataset, contiguous slice: extend the
            # current group in place.
            merged_indices[pos].append(idx)
            merged_slices = merge_slices(curr_slice, next_slice)
            data_location[pos][-1] = merged_slices
            curr_slice = merged_slices
        else:
            # "jump": start a new group at this url
            pos += 1
            merged_indices.append([idx])
            data_location.append(
                [
                    url.file_path(),
                    url.data_path(),
                    _convert_to_slice(url.data_slice()),
                ]
            )
            curr_fp, curr_dp, curr_slice = data_location[pos]

    # Format result
    res = {}
    for ind, dl in zip(merged_indices, data_location):
        res[tuple(ind)] = DataUrl(file_path=dl[0], data_path=dl[1], data_slice=dl[2])
    return res
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment