Commit b27c5016 authored by operator ID01

some scripts for 34IDC datasets

sorry forgot to push these a while back
parent d3b60b7b
......@@ -87,6 +87,7 @@ import collections
import numpy as np
import dateutil.parser
import pdb
from PIL import Image
import silx.io
......@@ -329,7 +330,104 @@ class Scan(h5py.Group):
        return detector.name  # may be useful

    def addTifFile(self, tifpath, detector_num=None,
                   detector_name=None,
                   subdir="instrument",
                   compr_lvl=6,
                   chunksize=500,
                   dtype=None):
        """
        Adds a 2D or 3D TIFF image to the stack of
        images or creates a new stack.
        If the detector number is not specified,
        the images will be appended to the first
        detector matching the shape.
        Analogous to `addEdfFile`, but the frame is
        read from a TIFF file via PIL.
        """
        tif = np.array(Image.open(tifpath))
        #print(tif.max())
        tif = tif[np.newaxis, :, :]
        newshape = tif.shape
        if dtype is None:
            dtype = np.int32

        instrument = self[subdir]

        ### find which detector these frames belong to if not given
        if detector_num is None:
            i = 0
            while True:
                key = "detector_%i" % i
                if key not in instrument:
                    break
                elif "data" not in instrument[key]:
                    break
                if instrument[key]["data"].shape[1:] == newshape[1:]:
                    break
                i += 1  # try the next detector slot
            detector_num = i
        else:
            key = "detector_%i" % detector_num

        ### prepare the target h5py datasets
        if key not in instrument or "data" not in instrument[key]:
            # create new datasets
            """
            if detector_name is not None:
                detector = instrument.require_group("detectors/%s"%detector_name)
                detector.create_dataset("name", data=detector_name)
                #detector = instrument["detectors/%s"%detector_name]
                instrument[key] = detector
                detector_instance = detectors.order[detector_name]()
                disk_chunks = (1,) + detector_instance.chunks
            else:
                detector = instrument.require_group(key)
                disk_chunks = True # auto chunking? or better?: (1,) + newshape[1:]
            """
            detector = instrument.require_group(key)
            disk_chunks = True  # auto chunking? or better?: (1,) + newshape[1:]
            #pdb.set_trace()
            detector.create_dataset("data",
                                    newshape,
                                    maxshape=(None, newshape[1], newshape[2]),
                                    compression="gzip",
                                    compression_opts=compr_lvl,
                                    chunks=disk_chunks,
                                    dtype=dtype)  # Todo: think about chunks - only implement for kmaps

            # write metadata
            others = detector.require_group("others")

            # make a soft link (changed from a hard link)
            if subdir == 'instrument':  # spech5 default
                #self["measurement/image_%i/data"%detector_num] = self["%s/%s"%(subdir,key)] # hard link
                self["measurement/image_%i/data"%detector_num] = h5py.SoftLink(self.name+"/instrument/detector_%s/data"%detector_num)  # soft link
        else:
            # do the resizing
            detector = self[subdir][key]
            newlen = detector["data"].shape[0] + newshape[0]
            detector["data"].resize((newlen, newshape[1], newshape[2]))
            if self.debug:
                sys.stdout.write("New dataset size: "
                                 "%ix%ix%i" % detector["data"].shape
                                 + os.linesep)
            # process metadata
            others = detector["others"]

        detector["data"][-1, :, :] = tif
        #print(detector["data"][-1,:,:].max(), tif.max())
        self.file.flush()
        return detector.name  # may be useful
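A minimal usage sketch for the new addTifFile method, not from this commit: it assumes `scan` is a Scan group already created with this library, and the TIFF directory is a hypothetical placeholder.

import glob
import numpy as np

# `scan`: a Scan group obtained from this library (placeholder here)
for tifpath in sorted(glob.glob("/data/34IDC/ADamsGaAs/amsGaAs_S0012/*.tif")):
    # each call appends one frame along the first (stack) axis of detector_0
    scan.addTifFile(tifpath, detector_num=0, compr_lvl=6, dtype=np.int32)

# the frames end up under instrument/ and are also reachable via the soft link
stack = scan["instrument/detector_0/data"]    # shape (n_frames, ny, nx)
linked = scan["measurement/image_0/data"]     # SoftLink to the same dataset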
    def make_roi(self, xmin, xmax, ymin, ymax, image='image_0', store=False,
                 roinum=None):
......@@ -364,10 +462,10 @@ class Scan(h5py.Group):
sys.stdout.write("Created dataset %s"%new.name + os.linesep)
return roi
    def fetch_edf_spec(self, pathonly=False, verbose=True, imgroot=None,
    def fetch_edf_spec(self, pathonly=False, verbose=True, imgroot=None, explicit_beamline=None,
                       **edf_kw):
        img_paths = id01SpecH5.fetch_image_files(self, imgroot=imgroot)
        img_paths = id01SpecH5.fetch_image_files(self, imgroot=imgroot, explicit_beamline=explicit_beamline)
        if not img_paths:
            return []
......@@ -391,12 +489,29 @@ class Scan(h5py.Group):
            #pdb.set_trace()
            self['data_%s'%detnum] = h5py.SoftLink(self.name+"/instrument/detector_%s/data"%detnum)
            _i = 0
            img_paths_tmp.sort()  # very lazy but works
            for impath in img_paths_tmp:
                #impath = impath
                if not isfile(impath) and verbose:
                if impath.endswith("tif"):
                    #print("###TIFS####")
                    try:  # load tif file
                        if verbose:
                            sys.stdout.write("Loading image %i of %i: %s "%(_i+1, len(img_paths_tmp), impath))
                            sys.stdout.write("\r")  # carriage return only. does it work on Win?
                            sys.stdout.flush()
                        #print(impath)
                        self.addTifFile(impath, **edf_kw)
                    except Exception as emsg:
                        sys.stdout.write("Could not load file: %s"%emsg)
                        sys.stdout.write(os.linesep)
                        #if self.debug:
                        raise
                elif not isfile(impath) and verbose:
                    sys.stdout.write("Warning: File not found: %s. Skipping..."%impath + os.linesep)
                    continue
                if pathonly:
                elif pathonly:
                    if not verbose:
                        break
                    sys.stdout.write(" Found path %s"%impath + os.linesep)
......@@ -488,10 +603,9 @@ class Sample(h5py.Group):
    def addSpecScan(self, specscan, number=None, overwrite=False,
                    fetch_edf=True, imgroot=None, verbose=True, **edf_kw):
                    fetch_edf=True, imgroot=None, verbose=True, explicit_beamline=None, **edf_kw):
        if not isinstance(specscan, silx.io.spech5.SpecH5Group):
            raise ValueError("Need `silx.io.spech5.SpecH5Group` as spec scan input.")
        sf = specscan.file

        ### check if still writing scan:
......@@ -574,7 +688,7 @@ class Sample(h5py.Group):
            specscan.visititems(_spech5_deepcopy(scan, specscan.name))

            if fetch_edf and scan["measurement"]:  # not aborted after 0 points
                scan.fetch_edf_spec(verbose=verbose, imgroot=imgroot, **edf_kw)
                scan.fetch_edf_spec(verbose=verbose, imgroot=imgroot, explicit_beamline=explicit_beamline, **edf_kw)
            return scan
        except:
            print("An error occurred... cleaning up...")
......@@ -583,7 +697,7 @@ class Sample(h5py.Group):
            raise

    def importSpecFile(self, specfile, numbers=(), exclude=[], **addSpec_kw):
    def importSpecFile(self, specfile, numbers=(), exclude=[], explicit_beamline=None, **addSpec_kw):
        """
        Convenience method that imports whole or parts of specfiles into the
        current `Sample` group.
......@@ -641,6 +755,7 @@ class Sample(h5py.Group):
                continue

            self.addSpecScan(scan,
                             numbers[number],
                             explicit_beamline=explicit_beamline,
                             **addSpec_kw)
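A sketch of how the new explicit_beamline switch might be used end to end, not from this commit: it assumes `sample` is an existing Sample group from this library and that a silx SpecH5 object is accepted as the specfile argument; the paths are hypothetical as well.

import silx.io

spec = silx.io.open("/data/34IDC/spec/sample_0001.spec")    # hypothetical spec file
sample.importSpecFile(spec,
                      explicit_beamline="APS34IDC",         # route image lookup to the 34IDC TIFF branch
                      imgroot="/data/34IDC/ADamsGaAs/")      # hypothetical image root, trailing slash expected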
......
......@@ -215,7 +215,7 @@ def _fast_edf_collect(generic_path, idx):
def fetch_image_files(scan, imgroot=None):
def fetch_image_files(scan, imgroot=None, explicit_beamline=None):
    # header is now a ndarray
    header = scan["instrument/specfile/scan_header"].value
    fheader = scan["instrument/specfile/file_header"].value
......@@ -281,9 +281,18 @@ def fetch_image_files(scan, imgroot=None):
    #pdb.set_trace()
    impaths = [s for s in header if s.startswith("#ULIMA_")]
    if not impaths:
        print("no '#ULIMA_' tag found in scan header - data saving was switched off")
        return dict()
    returndict = dict()
    if explicit_beamline == 'APS34IDC':
        print("data from beamline: ", explicit_beamline)
        detname = "amsGaAs"
        datafolder = os.path.join(imgroot, imgroot.split("/")[-2].split("AD")[-1] + "_S%04i" % int(header[0].split()[1]) + "/")
        tiffs = [datafolder + x for x in os.listdir(datafolder) if x.endswith("tif")]
        returndict = {detname: tiffs}
        #pdb.set_trace()
    else:
        print("no '#ULIMA_' tag found in scan header - data saving was switched off ...")
        #return dict()
        returndict = dict()
    for impath in impaths:
        detname, impath = impath.split()  # what if we have several detectors?
        impath = os.path.realpath(impath)
......@@ -333,9 +342,9 @@ def fetch_image_files(scan, imgroot=None):
# "Error: %sinr not in image path"%detshort
#impath = impath.replace("_%05d"%startnr, "_%05d")
all_paths = [impath%i for i in inr]
#print all_paths,inr
returndict[detname] = all_paths
print(returndict)
#print all_paths,inr,detname
returndict = {detname:all_paths}
#print(returndict)
return returndict
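For reference, a worked sketch of what the APS34IDC branch above computes, not from this commit: the directory names are hypothetical and imgroot is assumed to end with a trailing slash so that split("/")[-2] picks out the detector folder.

import os

imgroot = "/data/34IDC/ADamsGaAs/"              # hypothetical, note the trailing slash
scan_no = int("#S 12  ascan ...".split()[1])    # header[0] has the form "#S 12 ..."

# imgroot.split("/")[-2] -> "ADamsGaAs";  .split("AD")[-1] -> "amsGaAs"
datafolder = os.path.join(imgroot,
                          imgroot.split("/")[-2].split("AD")[-1]
                          + "_S%04i" % scan_no + "/")
# datafolder == "/data/34IDC/ADamsGaAs/amsGaAs_S0012/"
tiffs = [datafolder + f for f in os.listdir(datafolder) if f.endswith("tif")]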
......
......@@ -108,9 +108,32 @@ class Frelon3(AreaDetector):
)
class AStimepix(AreaDetector):
    alias = "AStimepix"
    shortalias = "AStp"

    def __init__(self, mask=None):
        super(AStimepix, self).__init__(directions=("z-", "y+"),  # determined by xrayutilities _determine_detdir
                                        pixsize=55e-6,
                                        pixnum=(512, 512),
                                        mask=mask,
                                        chunks=(256, 256)
                                        )

    @staticmethod
    def ff_correct_image(image):
        """
        perhaps a flatfield here
        """
        pass

    @staticmethod
    def mask_image(image):
        """
        Mind the BIG gaps and the bad columns
        """
        pass
order = (MaxiPix, Eiger2M, Andor, MaxiPixGaAs) # for hdf writing
order = (MaxiPix, Eiger2M, Andor, MaxiPixGaAs, AStimepix) # for hdf writing
aliases = collections.OrderedDict((d.alias, d.shortalias) for d in order)
order = collections.OrderedDict((d.alias, d) for d in order)
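A short sketch of how the registry above resolves the new detector by its alias; a usage illustration only, assumed to run inside this detectors module.

DetClass = order["AStimepix"]     # look the class up by its alias
det = DetClass()                  # 512 x 512 Timepix, 55 um pixels
print(aliases[DetClass.alias])    # -> "AStp"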
......@@ -78,3 +78,27 @@ class ID01psic(EmptyGeometry):
        super(ID01psic, self).__init__(**kwargs)


class APS34IDCsixc(EmptyGeometry):
    def __init__(self, **kwargs):
        ### geometry of the 34ID-C diffractometer
        ### x downstream; z upwards; y to the "outside" (right-handed)
        ### the order matters!
        self.sample_rot['Mu'] = 'z+'     # check that mu is not 0
        self.sample_rot['Theta'] = 'z+'
        self.sample_rot['Chi'] = 'x+'
        self.sample_rot['Phi'] = 'y+'
        #self.sample_rot['rhx'] = 'y+'   # can be useful to correct sample tilt?!
        #self.sample_rot['rhy'] = 'x-'   # can be useful to correct sample tilt?!
        #self.sample_rot['rhz'] = 'z+'   # can be useful to correct sample tilt?!

        self.detector_rot['Delta'] = 'z+'
        self.detector_rot['Gamma'] = 'y-'

        self.inc_beam = [1, 0, 0]

        # defines which motors are used; the others are set to zero
        # typical defaults, can be overridden during __init__:
        self.usemotors = set(('Theta', 'Chi', 'Phi', 'Delta', 'Gamma'))

        super(APS34IDCsixc, self).__init__(**kwargs)
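A minimal sketch of inspecting the new geometry, not from this commit; it only assumes that APS34IDCsixc can be constructed with default arguments, as ID01psic apparently can.

geo = APS34IDCsixc()
print(geo.sample_rot)     # axis strings as set above, e.g. 'Theta': 'z+'
print(geo.detector_rot)   # e.g. 'Delta': 'z+', 'Gamma': 'y-'
print(geo.usemotors)      # default motor selection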
......@@ -22,9 +22,9 @@ import scipy.signal
import h5py
import pdb
from .detectors import MaxiPix, Eiger2M # default detector MaxiPix
from .detectors import MaxiPix, Eiger2M, AStimepix # default detector MaxiPix
#from .detectors import Andor
from .geometries import ID01psic # default geometry
from .geometries import ID01psic, APS34IDCsixc # default geometry
#from .geometries import ID01ff # sample mounted sideways # not necessary, it's included in ID01psic
......@@ -397,6 +397,7 @@ def scan_to_qspace_h5(scan, cen_pix=None,
        else:
            intensity[idx,:,:] = frame
    #pdb.set_trace()

    ### convert data to regular grid in reciprocal space
    if idim == [0,1,2]:  # No projection