#!/usr/bin/python
# Filename: xrs_read.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
from six.moves import input

#/*##########################################################################
#
# The XRStools software package for XRS spectroscopy
#
# Copyright (c) 2013-2014 European Synchrotron Radiation Facility
#
# This file is part of the XRStools XRS spectroscopy package developed at
# the ESRF by the DEC and Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__    = "Christoph J. Sahle - ESRF"
__contact__   = "christoph.sahle@esrf.fr"
__license__   = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"

from . import xrs_rois, xrs_scans, xrs_utilities, math_functions, xrs_fileIO, roifinder_and_gui

import h5py
import scipy.io
import traceback
import sys
import os
import numpy as np
import array as arr
import pickle
import matplotlib.pyplot as plt

from numpy import array
from itertools import groupby
from scipy.integrate import trapz
from scipy.interpolate import interp1d, Rbf
from scipy import signal, optimize
from scipy.ndimage import measurements

# try to import the fast PyMCA parsers
try:
    import PyMca5.PyMcaIO.EdfFile as EdfIO
    import PyMca5.PyMcaIO.specfilewrapper as SpecIO
    use_PyMca = True
except ImportError:
    use_PyMca = False

print( " >>>>>>>>  use_PyMca " , use_PyMca)
__metaclass__ = type # new style classes

def print_citation_message():
    """Prints a plea for citing the XRStools article when using this software.

    """
    print ('                                                                                ')
    print (' ############################# Welcome to XRStools #############################')
    print (' # If you are using this software, please cite the following work:             #')
    print (' # Ch.J. Sahle, A. Mirone, J. Niskanen, J. Inkinen, M. Krisch, and S. Huotari: #')
    print (' # "Planning, performing, and analyzing X-ray Raman scattering experiments."   #')
    print (' # Journal of Synchrotron Radiation 22, No. 2 (2015): 400-409.                 #')
    print (' ###############################################################################')
    print ('                                                                                ')

print_citation_message()

class Hydra:
    """Main class for handling XRS data from ID20's multi-analyzer spectrometer 'Hydra'.

    This class is intended to read SPEC files and the corresponding EDF files and to
    generate spectra from multiple individual energy loss scans.

    Note:
        Hydra is the name of the multi-analyzer x-ray Raman scattering spectrometer at ESRF's
        ID20 beamline. This class has been adapted specifically for this spectrometer.

        If you are using this program, please cite the following work:

        Sahle, Ch J., A. Mirone, J. Niskanen, J. Inkinen, M. Krisch, and S. Huotari. 
        "Planning, performing and analyzing X-ray Raman scattering experiments." 
        Journal of Synchrotron Radiation 22, No. 2 (2015): 400-409.

    Args:
          * path        (str): Absolute path to directory holding the data.
          * SPECfname   (str): Name of the SPEC-file ('hydra' is the default).
          * EDFprefix   (str): Prefix for the EDF-files ('/edf/' is the default).
          * EDFname     (str): Filename of the EDF-files ('hydra_' is the default).
          * EDFpostfix  (str): Postfix for the EDF-files ('.edf' is the default).
          * en_column   (str): Counter mnemonic for the energy motor ('energy' is the default).
          * moni_column (str): Mnemonic for the monitor counter ('izero' is the default).

    Attributes:
          * path        (str): Absolute path to directory holding the data.
          * SPECfname   (str): Name of the SPEC-file ('hydra' is the default).
          * EDFprefix   (str): Prefix for the EDF-files ('/edf/' is the default).
          * EDFname     (str): Filename of the EDF-files ('hydra_' is the default).
          * EDFpostfix  (str): Postfix for the EDF-files ('.edf' is the default).
          * en_column   (str): Counter mnemonic for the energy motor ('energy' is the default).
          * moni_column (str): Mnemonic for the monitor counter ('izero' is the default).
          * scans        (dict): Dictionary holding all loaded scans.
          * scan_numbers (list): List with scan number of all loaded scans.
          * eloss    (np.array): Array with the common energy loss scale for all analyzers.
          * energy   (np.array): Array with the common energy scale for all analyzers.
          * signals  (np.array): Array with the signals for all analyzers (one column per analyzer).
          * errors   (np.array): Array with the Poisson errors for all analyzers (one column per analyzer).
          * qvalues      (list): List of momentum transfer values for all analyzers.
          * groups       (dict): Dictionary of groups of scans (instances of the 'scangroup' class,
            such as 2 'elastic', or 5 'edge1', etc.).
          * cenom        (list): List of center of masses of the elastic lines.
          * E0            (int): Elastic line energy value, mean value of all center of masses.
          * tth          (list): List of all scattering angles (one value for each ROI).
          * resolution   (list): List of FWHM of the elastic lines (one for each analyzer).
          * comp_factor (float): Compensation factor for line-by-line energy dispersion compensation.
          * cenom_dict   (dict): Dictionary holding the centers of mass of the elastic line.
          * raw_signals  (dict): Dictionary holding pixel- or line-wise signals.
          * raw_errors   (dict): Dictionary holding pixel- or line-wise Poisson errors.
          * TTH_OFFSETS1 (np.array): Two-Theta offsets between individual analyzers inside each analyzer 
            module in one direction (horizontal for V-boxes, vertical for H-boxes).
          * TTH_OFFSETS2 (np.array): Two-Theta offsets between individual analyzers inside each analyzer 
            module in one direction (horizontal for H-boxes, vertical for V-boxes).
          * roi_obj (instance): Instance of the roi_object class from the xrs_rois module defining all
            ROIs for the current dataset (default is 'None').
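
    Example:
        A minimal usage sketch (the data path, ROI object, and scan numbers below
        are hypothetical)::

            hydra = Hydra('/data/experiment/run1/')
            hydra.set_roiObj(roi_obj)  # roi_obj: an xrs_rois ROI object defined beforehand
            hydra.load_scan([71, 72], scan_type='elastic')
            hydra.load_scan([73, 74], scan_type='edge1')
            hydra.get_spectrum_new(method='sum')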

    """

    def __init__( self, path, SPECfname='hydra', EDFprefix='/edf/', EDFname='hydra_', \
                        EDFpostfix='.edf', en_column='energy', moni_column='izero' ):

        self.path        = path
        self.SPECfname   = SPECfname

        if not os.path.isfile(os.path.join(path, SPECfname)):
            raise IOError('No such file or directory: %s' % os.path.join(path, SPECfname))

        self.EDFprefix   = EDFprefix
        self.EDFname     = EDFname
        self.EDFpostfix  = EDFpostfix
        self.en_column   = en_column.lower()
        self.moni_column = moni_column.lower()

        self.scans        = {}
        self.scan_numbers = []

        self.eloss    = np.array([])
        self.energy   = np.array([])
        self.signals  = np.array([])
        self.errors   = np.array([])
        self.q_values = []
        self.groups   = {}
        self.cenom    = []
        self.E0       = []
        self.tth      = []
        self.resolution  = []
        self.comp_factor = None
        self.cenom_dict  = {}
        self.raw_signals = {}
        self.raw_errors  = {} 

        self.TTH_OFFSETS1 = np.array([5.0, 0.0, -5.0, 5.0, 0.0, -5.0, 5.0, 0.0, -5.0, 5.0, 0.0, -5.0])
        self.TTH_OFFSETS2 = np.array([-9.71, -9.75, -9.71, -3.24, -3.25, -3.24, 3.24, 3.25, 3.24, 9.71, 9.75, 9.71]) 
        self.PIXEL_SIZE   = 0.055 # pixel size in mm

        self.roi_obj = None

        print_citation_message()

    def save_state_hdf5( self, file_name, group_name, comment="" ):
        """ **save_state_hdf5**

        Save the status of the current instance in an HDF5 file.

        Args:
            file_name  (str): Path and file name for the HDF5-file to be created.
            group_name (str): Group name under which to store status in the HDF5-file.
            comment    (str): Optional comment (no comment is default).
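
        Example:
            A sketch with hypothetical file and group names::

                hydra.save_state_hdf5('xrs_results.h5', 'sample1_run1', comment='first pass')
                hydra.load_state_hdf5('xrs_results.h5', 'sample1_run1')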

        """
        h5 = h5py.File(file_name,"a")

        h5.require_group(group_name)
        h5group =  h5[group_name]

        for key in self.__dict__.keys():
            if key in h5group.keys():
                raise Exception( 'Data \'' + key + '\' already present in  ' + file_name + ':' + group_name )
            else:
                h5group[key] = getattr( self, key )

        h5group["comment"]  = comment
        h5.flush()
        h5.close()

    def load_state_hdf5( self, file_name, group_name ):
        """ **load_state_hdf5**

        Load the status of an instance from an HDF5 file.

        Args:
            file_name  (str): Path and file name of the HDF5-file to be read.
            group_name (str): Group name under which the status was stored in the HDF5-file.

        """
        h5 = h5py.File( file_name,"r" )

        h5group =  h5[group_name]
        keys    = {"eloss":array, "energy":array, "signals":array, "errors":array, "q_values":array, 
                    "cenom":array, "E0":float, "tth":array, "resolution":array }

        for key in keys:
            setattr(self, key, keys[key](array(h5group[key])))

        h5.flush()
        h5.close()

    def set_roiObj( self,roiobj ):
        """ **set_roiObj**

        Assign an instance of the 'roi_object' class to the current data set.

        Args:
            roiobj (instance): Instance of the 'roi_object' class holding all
                information about the definition of the ROIs.

        """
        self.roi_obj = roiobj

    def load_scan( self, scan_numbers, scan_type='generic', direct=True, scaling=None, method='sum' ):
        """**load_scan**

        Load a single or multiple scans.
        Note:
            When composing a spectrum later, scans of the same scan_type will
            be averaged over. Scans with scan type 'elastic' or 'long' in their
            names are recognized and will be treated specially.

        Args:
              * scan_numbers (int or list): Integer or iterable of scan numbers to be loaded.
              * scan_type            (str): String describing the scan to be loaded (e.g. 'edge1' or 'K-edge').
              * direct           (boolean): Flag, 'True' if the EDF-files should be deleted after loading/integrating the scan.
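
        Example:
            A sketch with hypothetical scan numbers::

                hydra.load_scan([71, 72], scan_type='elastic')
                hydra.load_scan(75, scan_type='edge1')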


        """

        # make sure scannumbers are iterable (list)
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers

        # make sure there is a cenom_dict available if direct=True AND method='pixel'
        if direct and method=='pixel' and not self.cenom_dict:
            print('Please run the get_compensation_factor method first for pixel-wise compensation.')
            return

        # go through the list of scan_numbers and load scans
        for number in numbers:

            # create a name for each scan
            scan_name = 'Scan%03d' % number

            # create an instance of the Scan class
            scan = xrs_scans.Scan()

            # load the scan
            scan.load( self.path, self.SPECfname, self.EDFprefix, self.EDFname, self.EDFpostfix, number, \
                direct=direct, roi_obj=self.roi_obj, scaling=scaling, scan_type=scan_type, \
                en_column=self.en_column, moni_column=self.moni_column, method=method, \
                cenom_dict=self.cenom_dict, comp_factor=self.comp_factor )

            # add it to the scans dict
            self.scans[scan_name] = scan

            # add the number to the list of scan numbers
            if number not in self.scan_numbers:
                self.scan_numbers.append(number)

    def load_loop( self, beg_nums, num_of_regions, direct=True, method='sum' ):
        """ **load_loop**

303
        Loads a whole loop of scans based on their starting numbers and 
        the number of single scans in the loop.

        Args:
            beg_nums      (list): List of scan numbers of the first scans in each loop.
            num_of_regions (int): Number of scans in each loop.
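
        Example:
            A sketch with hypothetical scan numbers; the call below loads scans
            85-88 and 93-96, assigning the scan types 'edge1' through 'edge4' in order::

                hydra.load_loop([85, 93], 4)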

        """

        type_names = []
        for n in range(num_of_regions):
            type_names.append('edge'+str(n+1))

        numbers = []
        for n in range(len(beg_nums)):
            for m in range(num_of_regions):
                numbers.append((beg_nums[n]+m))

        type_names = type_names*len(beg_nums)

        for n in range(len(type_names)):
            number = []
            number.append(numbers[n])
            self.load_scan( number, type_names[n], direct=direct, method=method )

    def delete_scan( self, scan_numbers ):
        """ **delete_scan**

        Deletes scans from the dictionary of scans.

        Args:
            scan_numbers (int or list): Integer or list of integers (SPEC scan numbers) to be deleted.

        """

        # make sure scannumbers are iterable (list)
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers

        # delete the scan
        for number in numbers:
            scan_name = 'Scan%03d' % number
            del(self.scans[scan_name])
            self.scan_numbers.remove(number)

    def get_raw_data( self, method='sum', scaling=None ):
        """ **get_raw_data**

        Applies the ROIs to the EDF-files. 

        This extracts the raw-data from the EDF-files subject to three 
        different methods: 'sum' will simply sum up all pixels inside a
        ROI, 'row' will sum up over the dispersive direction, and 'pixel'
        will not sum at all.

        Args:
            method (str): Keyword specifying the selected choice of data treatment:
                can be 'sum', 'row', or 'pixel'. Default is 'sum'.
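
        Example:
            A minimal sketch (assumes a ROI object has been set via set_roiObj)::

                hydra.get_raw_data(method='row')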

        """

        if not self.roi_obj:
            print ( 'Did not find a ROI object, please set one first.' )
            return

        for scan in self.scans:
            if len(self.scans[scan].edfmats):
                print ( "Integrating " + scan + " using method \'" + method + '\'.')
                self.scans[scan].get_raw_signals( self.roi_obj , method=method, scaling=scaling )


    def get_data_new(self, method='sum', scaling=None):
        """ **get_data_new**

        Applies the ROIs to the EDF-files. 

        This extracts the raw-data from the EDF-files subject to three 
        different methods: 'sum' will simply sum up all pixels inside a
        ROI, 'row' will sum up over the dispersive direction, and 'pixel'
        will not sum at all.

        Args:
            method (str): Keyword specifying the selected choice of data treatment:
                can be 'sum', 'row', or 'pixel'. Default is 'sum'.

        """

        if not self.cenom_dict:
            print ( 'No compensation factor/center-of-mass info found, please provide first.' )
            return

        for scan in self.scans:
            self.scans[scan].get_signals( method=method, cenom_dict=self.cenom_dict, comp_factor=self.comp_factor, scaling=scaling )

    def get_data(self):
        """ **get_data**

        Applies the ROIs and sums up intensities.

        Returns:
            'None', if no ROI object is available.

        """

        if not self.roi_obj:
            print ( 'Did not find a ROI object, please set one first.' )
            return

        for scan in self.scans:
            if len(self.scans[scan].edfmats):
                print ( "Integrating " + scan )
                self.scans[scan].apply_rois( self.roi_obj )

    def get_data_pw(self):
        """ **get_data_pw**

        Extracts intensities for each pixel in each ROI.

        Returns:
            'None', if no ROI object is available.

        """

        if not np.any(self.roi_obj.indices):
            print ( 'Did not find a ROI object, please set one first.' )
            return

        for scan in self.scans:
            if len(self.scans[scan].edfmats):
                print ( "Integrating pixelwise " + scan )
                self.scans[scan].apply_rois_pw(self.roi_obj)

    def SumDirect( self, scan_numbers, index=None ):
        """ **SumDirect**

        Creates a summed 2D image of a given scan or list of scans.

        Args:
            scan_numbers (int or list): Scan number or list of scan numbers to be added up.

        Returns:
            A 2D np.array of the same size as the detector with the summed image.
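
        Example:
            A sketch with hypothetical scan numbers::

                image = hydra.SumDirect([71, 72, 73])
                plt.imshow(np.log(np.absolute(image) + 1.0))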

        """

        # make sure scannumbers are iterable (list)
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers

        im_sum1   = None
        im_sum2   = None
        en_column = None # uses first column in SPEC file as scanned motor
        for number in numbers:
            scan = xrs_scans.Scan()
            scan.load(self.path, self.SPECfname, self.EDFprefix, self.EDFname, self.EDFpostfix, number, \
                    direct=False, roi_obj=None, scaling=None, scan_type='generic', \
                    en_column=en_column, moni_column=self.moni_column)

            # accumulate images over all scans (initialize the buffers only once)
            if im_sum1 is None:
                im_sum1 = np.zeros(scan.edfmats[0].shape ,"f")
                im_sum2 = np.zeros(scan.edfmats[0].shape ,"f")
            if not index:
                im_sum1[:] += scan.edfmats.sum(axis=0)
            else:
                im_sum1[:] += scan.edfmats[0:index,:,:].sum(axis=0)
                im_sum2[:] += scan.edfmats[index:,:,:].sum(axis=0)

        if not index:
            return im_sum1
        else:
            return im_sum2-im_sum1

    def get_eloss_new(self, method='sum'):
        """ **get_eloss_new**

        Defines the energy loss scale for all ROIs and applies dispersion 
        compensation if applicable.

        Args:
            method (str): Keyword describing which dispersion compensation
                method to use. Possible choices are 'sum' (no compensation),
                'pixel' (pixel-by-pixel compensation), or 'row' (line-by-line
                compensation).

        """

        # make sure an elastic line was loaded
        if not 'elastic' in self.groups:
            print( 'Please load/integrate at least one elastic scan first!' )
            return

        # make sure there is data
        elif not np.any(self.groups['elastic'].raw_signals):
            self.get_raw_data( method=method )

        # make sure centers of mass are available
        elif not self.cenom_dict and  method != 'row':
            for scan in self.scans:
                if self.scans[scan].get_type() == 'elastic':
                    print('Getting compensation factor.')
                    self.get_compensation_factor( self.scans[scan].scan_number, method=method )

        first_key = list(self.raw_signals.keys())[0]
        # 'sum'
        if method == 'sum':
            # master eloss scale in eV is the one of the first ROI
            self.signals = np.zeros((len(self.energy),len(self.cenom_dict)))
            self.errors  = np.zeros((len(self.energy),len(self.cenom_dict)))
            master_eloss = (self.energy - np.median([self.cenom_dict[key] for key in  self.cenom_dict]))*1.0e3
            self.E0      = np.median([self.cenom_dict[key] for key in  self.cenom_dict])
            for key,ii in zip(sorted(self.cenom_dict), range(len(self.cenom_dict))):
                # signals
                x = ( self.energy - self.cenom_dict[key] )*1.0e3
                y = self.raw_signals[key][:]
                self.signals[:,ii] = np.interp( master_eloss, x, y )
                # errors
                y = self.raw_errors[key][:]
                self.errors[:,ii] = np.interp( master_eloss, x, y )
            self.eloss = master_eloss

        # 'pixel'
        elif method ==  'pixel':
            # master eloss scale in eV is the one of central pixel in first ROI
            self.signals = np.zeros((len(self.energy),len(self.cenom_dict)))
            self.errors  = np.zeros((len(self.energy),len(self.cenom_dict)))
            master_eloss = ( self.energy - np.median(self.cenom_dict[first_key][self.cenom_dict[first_key] > 0.0]) )*1.0e3
            self.E0      = np.median(self.cenom_dict[first_key][self.cenom_dict[first_key] > 0.0])
            for key,ii in zip(sorted(self.cenom_dict), range(len(self.cenom_dict))):
                print ('Pixel-by-pixel compensation for ' + key +'.')
                signal = np.zeros(len(master_eloss))
                error  = np.zeros(len(master_eloss))
                for dim1 in range(self.cenom_dict[key].shape[0]):
                    for dim2 in range(self.cenom_dict[key].shape[1]):
                        x = ( self.energy - self.cenom_dict[key][dim1, dim2] )*1.0e3
                        # signals
                        y = self.raw_signals[key][:, dim1, dim2]
                        if np.any(y):
                            # signals: interpolate onto the master energy loss scale
                            rbfi = interp1d(x, y, bounds_error=False, fill_value=0.0)
                            signal += rbfi( master_eloss )
                            # errors
                            y = self.raw_errors[key][:, dim1, dim2]
                            rbfi    = Rbf( x, y, function='linear' )
                            error  += rbfi( master_eloss )**2
                self.signals[:,ii] = signal
                self.errors[:,ii]  = np.sqrt(error)
            self.eloss = master_eloss

        # 'row'
        elif method == 'row':
            self.signals = np.zeros((len(self.energy),len(self.roi_obj.red_rois)))
            self.errors  = np.zeros((len(self.energy),len(self.roi_obj.red_rois)))
            energy = self.energy * 1e3 # energy in eV

            for key,ii in zip(sorted(self.raw_signals), range(len(self.raw_signals))):
                y  = self.raw_signals[key]
                # index of the central row; the other rows are shifted relative to it
                meanii = y.shape[1]/2
                yc = np.zeros_like(y)
                for jj in range(y.shape[1]):
                    sort  = np.argsort(energy)
                    # shift the energy axis of each row according to its distance from
                    # the central row (line-by-line dispersion compensation)
                    yc[:,jj] = np.interp( energy[sort] + (jj-meanii)*self.comp_factor*self.PIXEL_SIZE, energy[sort], y[sort,jj],
                                        left=float('nan'), right=float('nan') )
                self.signals[:,ii] = xrs_utilities.nonzeroavg(yc)
                self.errors[:,ii]  = np.sqrt(self.signals[:,ii])

        else:
            print('Method \''+method+'\' not supported, use either \'sum\', \'pixel\', or \'row\'.')
            return

    def get_eloss( self ):
        """ **get_eloss**

        Finds the energy loss scale for all ROIs by calculating the center 
        of mass (COM) for each ROI's elastic line. Calculates the resolution 
        function (FWHM) of the elastic lines.

        """

        # make sure an elastic line was loaded
        if not 'elastic' in self.groups:
            print( 'Please load/integrate at least one elastic scan first!' )
            return

        # make sure there is data
        elif not self.groups['elastic'].signals.shape[1] == len(self.roi_obj.indices):
            self.get_data()

        else:
            # reset values, in case this is run several times
            self.cenom      = []
            self.resolution = []
            Origin          = None
            valid_cenoms    = []

            for n in range(self.groups['elastic'].signals.shape[1]):
                # find the center of mass for each ROI
                cofm = xrs_utilities.find_center_of_mass(self.groups['elastic'].energy,self.groups['elastic'].signals[:,n])
                self.cenom.append(cofm)
                if self.there_is_a_valid_roi_at( n ):
                    valid_cenoms.append(cofm)
                    if Origin is None:
                        Origin = cofm

                # find the FWHM/resolution for each ROI
                en_scale  = (self.groups['elastic'].energy - self.cenom[n])*1e3
                intensity = self.groups['elastic'].signals_orig[:,n]
                FWHM, x0 = xrs_utilities.fwhm(en_scale,intensity)
                self.resolution.append(FWHM)

            # find master E0
            self.E0 = np.mean(valid_cenoms)

            # define last eloss scale as the 'master' scale for all ROIs
            self.eloss = (self.energy - cofm)*1e3 # energy loss in eV

            # define eloss-scale for each ROI and interpolate onto the 'master' eloss-scale 
            for n in range(self.signals.shape[1]):
                # inserting zeros at beginning and end of the vectors to avoid interpolation errors
                x = (self.energy-self.cenom[n])*1e3
                x = np.insert(x,0,-1e10)
                x = np.insert(x,-1,1e10)
                y = self.signals[:,n]
                y = np.insert(y,0,0)
                y = np.insert(y,-1,0)
                f = interp1d(x,y, bounds_error=False,fill_value=0.0)
                self.signals[:,n] = f(self.eloss)

            # do the same for the errors 
            for n in range(self.signals.shape[1]):
                # inserting zeros at beginning and end of the vectors to avoid interpolation errors
                x = (self.energy-self.cenom[n])*1e3
                x = np.insert(x,0,-1e10)
                x = np.insert(x,-1,1e10)
                y = self.errors[:,n]
                y = np.insert(y,0,0)
                y = np.insert(y,-1,0)
                f = interp1d(x,y, bounds_error=False,fill_value=0.0)
                self.errors[:,n] = f(self.eloss)

    def get_spectrum( self, include_elastic=False, abs_counts=False ):
        """ **get_spectrum**

        Constructs a spectrum based on the scans loaded so far. Defines the energy 
        loss scale based on the elastic lines.

        Args:
            include_elastic (boolean): Boolean flag, does not include the elastic line if
                set to 'False' (this is the default).
            abs_counts      (boolean): Boolean flag, constructs the spectrum in absolute
                counts if set to 'True' (default is 'False')

        """

        # find the groups 
        all_groups = xrs_scans.findgroups(self.scans)

        # append scans
        for group in all_groups:
            one_group = xrs_scans.makegroup_nointerp(group, absCounts=abs_counts)
            self.groups[one_group.get_type()] = one_group

        self.energy, self.signals, self.errors = xrs_scans.appendScans(self.groups,include_elastic)

        # define the energy loss scale
        self.get_eloss()

    def get_spectrum_new( self, method='sum', include_elastic=False, abs_counts=False, interpolation=False ):
        """ **get_spectrum_new**

        Constructs a spectrum from the scans loaded so far based on the chosen method: 
        detector pixels can either be summed up (method='sum'), pixel-by-pixel 
        compensation can be applied (method='pixel'), or a line-by-line compensation
        scheme can be applied (method='row').

        The energy loss scale will be defined in the process and the data will be
        interpolated onto this scale using norm-conserving wavelet interpolation.

        Args:
            method              (str): Keyword describing the kind of integration scheme
                to be used (possible values are 'sum', 'pixel', or 'row'), default is 'sum'.
            include_elastic (boolean): Boolean flag, does not include the elastic line if
                set to 'False' (this is the default).
            abs_counts      (boolean): Boolean flag, constructs the spectrum in absolute
                counts if set to 'True' (default is 'False')
            interpolation   (boolean): Boolean flag, if True, signals are interpolated 
                onto energy grid of the first scan in each group of scans.
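
        Example:
            A sketch of a typical workflow (ROI object and scan numbers are hypothetical)::

                hydra.set_roiObj(roi_obj)
                hydra.load_scan(611, scan_type='elastic')
                hydra.load_scan([612, 613], scan_type='edge1')
                hydra.get_spectrum_new(method='sum', include_elastic=False)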

        """

        if not method in ['pixel', 'sum', 'row']:
            print('Unknown integration method. Use either \'pixel\', \'sum\', or \'row\'')
            return

        # make sure there is an elastic line available
        elastic_number = None
        for key in self.scans:
            if self.scans[key].scan_type == 'elastic':
                elastic_number = self.scans[key].scan_number
                print ('Using scan No. %d for CENOMs.'%elastic_number)
                break
            
        if not elastic_number:
            print( 'Please load/integrate at least one elastic scan first!' )
            return

        # get compensation factor/CENOM for an elastic line
        if method in [ 'sum' , 'pixel']  and not self.cenom_dict:
            self.get_compensation_factor( elastic_number, method=method )

        if method == 'row' and not self.comp_factor:

            self.get_compensation_factor( elastic_number, method=method )

        # get raw data
        for key in self.scans:
            if not self.scans[key].raw_signals:
                self.scans[key].get_raw_signals( self.roi_obj, method=method )

        # sum up similar scans
        # find all groups of scans
        all_groups = xrs_scans.findgroups(self.scans)

743
        # initiate groups
        self.groups = {}

        # sum up similar scans
        for group in all_groups:
            self.groups[group[0].get_type()] = xrs_scans.sum_scans_to_group( group, method=method, interp=interpolation )

        # stitch groups together into a spectrum
        spectrum = xrs_scans.stitch_groups_to_spectrum( self.groups, method=method, include_elastic=include_elastic )
        self.energy      = spectrum.energy
        self.signals     = spectrum.signals
        self.errors      = spectrum.errors
        self.raw_signals = spectrum.raw_signals
        self.raw_errors  = spectrum.raw_errors

        # define energy loss scale and apply compensation if applicable
        self.get_eloss_new( method=method )

    def get_q_values( self, inv_angstr=False, energy_loss=None ):
        """ **get_q_values**

        Calculates the momentum transfer for each analyzer.

        Args:
            inv_angstr (boolean): Boolean flag, if 'True' momentum transfers are calculated in 
                inverse Angstroms.
            energy_loss  (float): Energy loss value at which the momentum transfer is to be 
                calculated. If 'None' is given, the momentum transfer is calculated for every 
                energy loss point of the spectrum.

        Returns:
            If an energy loss value is passed, the function returns the momentum transfers
            at this energy loss value for each analyzer crystal.
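
        Example:
            A sketch, assuming the scattering angles have been set via get_tths::

                hydra.get_q_values(inv_angstr=True)
                q_at_100eV = hydra.get_q_values(inv_angstr=True, energy_loss=100.0)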

        """

        # one q-value per analyzer and energy loss point
        q_vals = np.zeros_like(self.signals)
        if inv_angstr:
            for n in range( self.signals.shape[1] ):
                q_vals[:,n] = xrs_utilities.momtrans_inva(self.E0+self.eloss/1e3,self.E0,self.tth[n]) 
        else:
            for n in range( self.signals.shape[1] ):
                q_vals[:,n] = xrs_utilities.momtrans_au(self.E0+self.eloss/1e3,self.E0,self.tth[n])
        self.q_values = q_vals

        # return all q-values if a specific energy loss is given
        if energy_loss:
            ind = np.abs(self.eloss - energy_loss).argmin()
            return self.q_values[ind,:]

    def copy_edf_files( self, scan_numbers, dest_dir ):
        """ **copy_edf_files**

        Copies all EDF-files from given scan_numbers into given directory.

        Args:
          * scan_numbers (int or list): Integer or list of integers defining the scan
            numbers of scans to be copied.
          * dest_dir             (str): String with absolute path for the destination.

        """

        import shutil

        # make scan_numbers iterable
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers
        fname = self.path + self.SPECfname

        # go through the scans, find the EDF-files and copy them
        for nscan in numbers:
            if use_PyMca:
                data, motors, counters = xrs_fileIO.PyMcaSpecRead(fname,nscan)
            else:
                data, motors, counters = xrs_fileIO.SpecRead(fname,nscan)

            for m in range(len(counters['ccdno'])):
                ccdnumber = counters['ccdno'][m]
                edfname   = self.path + self.EDFprefix + self.EDFname + "%04d" % ccdnumber + self.EDFpostfix
                shutil.copy2(edfname, dest_dir)

    def dump_spectrum_ascii( self, file_name, header='' ):
        """ **dump_spectrum_ascii**

        Stores the energy loss and signals in a txt-file.

        Args:
            file_name (str): Path and filename of the file to be written.
            header    (str): Optional header string passed on to np.savetxt (default is '').

        """

        # columns: energy loss, then one (signal, error) pair per analyzer
        data = np.zeros((len(self.eloss),self.signals.shape[1]*2+1))
        data[:,0] = self.eloss
        col = 1
        for ii in range(self.signals.shape[1]):
            data[:,col] = self.signals[:,ii]
            col += 1
            data[:,col] = self.errors[:,ii]
            col += 1

        np.savetxt( file_name, data, header=header )

    def dump_spectrum_hdf5( self, file_name, group_name, comment='' ):
        """ **dump_spectrum_hdf5**

        Writes the summed spectrum into an HDF5 file.

        Args:
            file_name  (str): Path and file name for the HDF5-file to be created.
            group_name (str): Group name under which to store status in the HDF5-file.
            comment    (str): Optional comment (no comment is default).

        """

        h5 = h5py.File(file_name,"a")
        h5.require_group(group_name)
        h5group =  h5[group_name]

        keys = ['energy', 'eloss', 'signals', 'errors']
        for key in keys:
            h5group[key] = getattr( self, key )

        h5group["comment"]  = comment
        h5.flush()
        h5.close()

    def dump_scans_ascii( self, scan_numbers, pre_fix, f_name, post_fix='.dat', header='' ):
        """ **dump_scans_ascii**

        Produce ASCII-type files with columns of energy, signal, and Poisson error.

        Args:
            scan_numbers (int or list): SPEC scan numbers of scans to be saved in ASCII format.
            pre_fix  (str): Path to directory where files should be written into.
            f_name   (str): Base name for the files to be written.
            post_fix (str): Extension for the files to be written (default is '.dat').
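
        Example:
            A sketch with hypothetical paths and scan numbers; the call below writes
            e.g. '/tmp/sample1_Scan071.dat'::

                hydra.dump_scans_ascii([71, 72], '/tmp/', 'sample1')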

        """

        # make sure scan_numbers are iterable
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers

        for number in numbers:
            scan_name = 'Scan%03d' % number
            if not scan_name in self.scans.keys():
                print ('Scan No. %03d is currently not loaded.'% number)
                return
            x = self.scans[scan_name].energy
            y = self.scans[scan_name].signals
            z = self.scans[scan_name].errors
            data = np.zeros((x.shape[0], y.shape[1]+z.shape[1]+1 ))
            data[:,0] = x
            data[:,1:1+y.shape[1]] = y
            data[:,1+y.shape[1]:1+y.shape[1]+z.shape[1]] = z
            file_name = pre_fix + f_name + '_' + scan_name + post_fix
            np.savetxt(file_name, data, header=header)

    def print_scan_length( self,scan_numbers ):
        """ **print_scan_length**

        Print out the number of points in the given scans.

        Args:
            scan_numbers (int or list): Scan number or list of scan numbers.

        """

        # make scan_numbers iterable
        numbers = []
        if not isinstance(scan_numbers,list):
            numbers.append(scan_numbers)
        else:
            numbers = scan_numbers

        # print out the number of points for all scan_numbers
        for ii in numbers:
            name = 'Scan%03d' % ii
            print( 'Length of scan %03d ' %ii + ' is ' + str(len(self.scans[name].energy)) + '.')

    def get_tths( self, rvd=None, rvu=None, rvb=None, rhl=None, rhr=None, rhb=None, order=[0,1,2,3,4,5] ):
        """ **get_tths**

        Calculates the scattering angles for all analyzer crystals based on 
        the mean angle of the analyzer modules.

        Args:
              * rhl  (float): Mean scattering angle of the HL module (default is 0.0).
              * rhr  (float): Mean scattering angle of the HR module (default is 0.0).
              * rhb  (float): Mean scattering angle of the HB module (default is 0.0).
              * rvd  (float): Mean scattering angle of the VD module (default is 0.0).
              * rvu  (float): Mean scattering angle of the VU module (default is 0.0).
              * rvb  (float): Mean scattering angle of the VB module (default is 0.0).
              * order (list): List of integers (0-5) that describe the order of modules in which the 
                ROIs were defined (default is VD, VU, VB, HR, HL, HB; i.e. [0,1,2,3,4,5]).
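
        Note:
            The scattering angle of each analyzer crystal is obtained by combining the
            module angle with the in-module offsets (TTH_OFFSETS1/TTH_OFFSETS2) on the
            unit sphere, i.e. 2theta = arccos( cos(h_angle) * cos(v_angle) ).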

        """
        # reset all tth values
        self.tth   = []

        # try to grab from positions from SPEC-file (first scan in list)
        scan_name = list(self.scans.keys())[0]
        if not rvd:
            rvd = self.scans[scan_name].motors['RVD']
        if not rvu:
            rvu = self.scans[scan_name].motors['RVU']
        if not rvb:
            rvb = self.scans[scan_name].motors['RVB']
        if not rhr:
            rhr = self.scans[scan_name].motors['RHR']
        if not rhl:
            rhl = self.scans[scan_name].motors['RHL']
        if not rhb:
            rhb = self.scans[scan_name].motors['RHB']

        # horizontal modules
        # HL (motor name rhl)
        v_angles = self.TTH_OFFSETS1 
        h_angles = self.TTH_OFFSETS2 + rhl
        HLtth    = []
        for n in range(len(h_angles)):
            HLtth.append( np.arccos(np.cos(np.radians(h_angles[n]))*np.cos(np.radians(v_angles[n])))*180.0/np.pi)

        # HR (motor name rhr)
        v_angles = self.TTH_OFFSETS1 
        h_angles = self.TTH_OFFSETS2 + rhr
        HRtth    = []
        for n in range(len(h_angles)):
            HRtth.append( np.arccos(np.cos(np.radians(h_angles[n]))*np.cos(np.radians(v_angles[n])))*180.0/np.pi)

        # HB (motor name rhb)
        v_angles = self.TTH_OFFSETS1 
        h_angles = self.TTH_OFFSETS2 + rhb
        HBtth = []
        for n in range(len(h_angles)):
            HBtth.append( np.arccos(np.cos(np.radians(h_angles[n]))*np.cos(np.radians(v_angles[n])))*180.0/np.pi)

        # vertical modules
        # VD
        v_angles = self.TTH_OFFSETS2 + rvd
        h_angles = self.TTH_OFFSETS1 
        VDtth    = []
        for n in range(len(h_angles)):
            VDtth.append( np.arccos(np.cos(np.radians(v_angles[n]))*np.cos(np.radians(h_angles[n])))*180.0/np.pi)

        # VU
        v_angles = self.TTH_OFFSETS2 + rvu
        h_angles = self.TTH_OFFSETS1 
        VUtth    = []
        for n in range(len(h_angles)):
            VUtth.append( np.arccos(np.cos(np.radians(v_angles[n]))*np.cos(np.radians(h_angles[n])))*180.0/np.pi)