Skip to content
Snippets Groups Projects
Commit c57ab07a authored by Wout De Nolf's avatar Wout De Nolf
Browse files

split_bliss_scan: fix softlinks

parent 6747b209
No related branches found
No related tags found
1 merge request: !178 Resolve "Workflow to split multi-XAS scan"
Pipeline #223974 passed
......@@ -87,7 +87,8 @@ def _save_subgroup(
link = group_in.get(name, getlink=True)
if isinstance(link, h5py.SoftLink):
group_out[name] = link
target = _relative_link(group_in, link.path, group_out)
group_out[name] = h5py.SoftLink(path=target)
continue
h5item = group_in[name]
......@@ -116,6 +117,17 @@ def _save_subgroup(
_logger.warning("%s of type %s is not supported", h5item.name, type(h5item))
def _relative_link(
org_parent: h5py.Group, link_target: str, new_parent: h5py.Group
) -> str:
parent_path = org_parent.name.replace("/", os.path.sep)
link_target_path = link_target.replace("/", os.path.sep)
rel_link_target_path = os.path.relpath(link_target_path, parent_path)
new_link_target_path = os.path.join(new_parent.name, rel_link_target_path)
return os.path.normpath(new_link_target_path)
def _slice_dataset(h5dataset: h5py.Dataset, dim0_slice: slice) -> numpy.ndarray:
expected_size = (dim0_slice.stop - dim0_slice.start) // dim0_slice.step
......
......@@ -28,7 +28,11 @@ def test_split_bliss_scan(tmp_path, split_function):
"dataset5": 10,
"dataset6": [10, 20],
},
"group2": {"@attr3": "value3", "dataset3": ">../group1/dataset3"},
"group2": {
"@attr3": "value3",
">dataset3": "../group1/dataset3",
">dataset4": "/1.1/group1/dataset4",
},
}
}
......@@ -44,7 +48,11 @@ def test_split_bliss_scan(tmp_path, split_function):
"dataset4": [0, 1, 2],
"dataset5": 10,
},
"group2": {"@attr3": "value3", "dataset3": ">../group1/dataset3"},
"group2": {
"@attr3": "value3",
"dataset3": [0, 1, 2],
"dataset4": [0, 1, 2],
},
},
"1.2": {
"@attr1": "value1",
......@@ -57,12 +65,16 @@ def test_split_bliss_scan(tmp_path, split_function):
"dataset4": [3, 4, 5],
"dataset5": 10,
},
"group2": {"@attr3": "value3", "dataset3": ">../group1/dataset3"},
"group2": {
"@attr3": "value3",
"dataset3": [0, 1, 2],
"dataset4": [3, 4, 5],
},
},
}
in_file = str(tmp_path / "in.h5")
dictdump.dicttoh5(bliss_scan_data, in_file)
dictdump.dicttonx(bliss_scan_data, in_file, add_nx_class=False)
out_file = str(tmp_path / "out.h5")
out_urls = split_function(
......@@ -74,7 +86,7 @@ def test_split_bliss_scan(tmp_path, split_function):
assert out_urls == [f"silx://{out_file}::/1.1", f"silx://{out_file}::/1.2"]
split_data = _normalize_h5data(dictdump.h5todict(out_file, asarray=False))
split_data = _normalize_h5data(dictdump.nxtodict(out_file, asarray=False))
assert split_data == expected_split_data
......@@ -92,12 +104,16 @@ def test_split_bliss_scan_timeout(tmp_path, split_function):
"dataset5": 10,
"dataset6": [10, 20],
},
"group2": {"@attr3": "value3", "dataset3": ">../group1/dataset3"},
"group2": {
"@attr3": "value3",
">dataset3": "../group1/dataset3",
">dataset4": "/1.1/group1/dataset4",
},
}
}
in_file = str(tmp_path / "in.h5")
dictdump.dicttoh5(bliss_scan_data, in_file)
dictdump.dicttonx(bliss_scan_data, in_file, add_nx_class=False)
out_file = tmp_path / "out.h5"
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment