Skip to content
Snippets Groups Projects

Release 0.10

Merged Michael Daas requested to merge development into master
70 files
+ 3602
872
Compare changes
  • Side-by-side
  • Inline
Files
70
+ 137
44
@@ -9,13 +9,13 @@
Script to convert raw data
'''
from __future__ import division
import os.path
import numpy as np
import logging
import yaml
import numba
import tables as tb
from tqdm import tqdm
@@ -37,7 +37,7 @@ class Analysis(object):
"""
def __init__(self, raw_data_file=None, analyzed_data_file=None,
store_hits=False, cluster_hits=False, align_method=0, chunk_size=1000000):
store_hits=False, cluster_hits=False, analyze_tdc=False, use_tdc_trigger_dist=False, align_method=0, chunk_size=1000000):
'''
Parameters
----------
@@ -55,10 +55,18 @@ class Analysis(object):
Create cluster table, histograms and plots
align_method : integer
Methods to do event alignment
0: New event when number if event headers exceeds number of
0: New event when number of event headers exceeds number of
sub-triggers. Many fallbacks for corrupt data implemented.
1: New event when data word is TLU trigger word, with error checks
2: Force new event always at TLU trigger word, no error checks
analyze_tdc : boolean
If analyze_tdc is True, interpret and analyze also TDC words. Default is False,
meaning that TDC analysis is skipped. This is useful for scans which do not
require a TDC word interpretation (e.g. threshold scan) in order to save time.
use_tdc_trigger_dist : boolean
If True use trigger distance (delay between Hitor and Trigger) in TDC word
interpretation. If False use instead TDC timestamp from TDC word. Default
is False.
'''
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(loglevel)
@@ -73,6 +81,8 @@ class Analysis(object):
self.cluster_hits = cluster_hits
self.chunk_size = chunk_size
self.align_method = align_method
self.analyze_tdc = analyze_tdc
self.use_tdc_trigger_dist = use_tdc_trigger_dist
self.compatibility = False
if not os.path.isfile(raw_data_file):
@@ -108,6 +118,21 @@ class Analysis(object):
with tb.open_file(self.raw_data_file, 'r') as in_file:
self.run_config = au.ConfigDict(in_file.root.configuration.run_config[:])
def get_scan_param_values(self, scan_param_index=None, scan_parameter=None):
    ''' Return the scan parameter value(s) stored in the raw data file.

    Reads the ``configuration.scan_params`` table from ``self.raw_data_file``
    into memory and optionally narrows it down by row index and/or column.

    Parameters
    ----------
    scan_param_index : int or slicing notation, optional
        Row selection (slicing notation) of the scan parameter indices.
        If None, all rows are returned. Compared against None explicitly,
        because an index of 0 is a valid selection and must not be
        treated as "no selection".
    scan_parameter : string, optional
        Name of the scan parameter (column). If not defined all are returned.

    Returns
    -------
    numpy structured array (or field thereof) with the selected values.
    '''
    with tb.open_file(self.raw_data_file, 'r') as in_file:
        # Materialize the table with [:] so the data survives closing the file
        scan_param_table = in_file.root.configuration.scan_params[:]
        if scan_param_index is not None:  # was truthiness check: dropped valid index 0
            scan_param_table = scan_param_table[scan_param_index]
        if scan_parameter is not None:
            # Copy first, then select the named field of the structured array
            scan_param_table = scan_param_table[:][scan_parameter]
        return scan_param_table
def _setup_clusterizer(self):
''' Define data structure and settings for hit clusterizer package '''
# Define all field names and data types
@@ -123,17 +148,17 @@ class Analysis(object):
'trigger_tag': 'trigger_tag',
'event_status': 'event_status'
}
hit_dtype = np.dtype([('event_number', '<i8'),
('ext_trg_number', 'u4'),
('trigger_id', 'u1'),
('bcid', '<u2'),
('rel_bcid', 'u1'),
('col', '<u2'),
('row', '<u2'),
('tot', 'u1'),
('scan_param_id', 'u4'),
('trigger_tag', 'u1'),
('event_status', 'u4')])
hit_description = [('event_number', '<i8'),
('ext_trg_number', 'u4'),
('trigger_id', 'u1'),
('bcid', '<u2'),
('rel_bcid', 'u1'),
('col', '<u2'),
('row', '<u2'),
('tot', 'u1'),
('scan_param_id', 'u4'),
('trigger_tag', 'u1'),
('event_status', 'u4')]
cluster_fields = {'event_number': 'event_number',
'column': 'column',
'row': 'row',
@@ -145,27 +170,38 @@ class Analysis(object):
'seed_row': 'seed_row',
'mean_col': 'mean_column',
'mean_row': 'mean_row'}
self.cluster_dtype = np.dtype([('event_number', '<i8'),
('id', '<u2'),
('size', '<u2'),
('tot', '<u2'),
('seed_col', '<u1'),
('seed_row', '<u2'),
('mean_col', '<f4'),
('mean_row', '<f4'),
('dist_col', '<u4'),
('dist_row', '<u4'),
('cluster_shape', '<i8'),
('scan_param_id', 'u4')])
cluster_description = [('event_number', '<i8'),
('id', '<u2'),
('size', '<u2'),
('tot', '<u2'),
('seed_col', '<u1'),
('seed_row', '<u2'),
('mean_col', '<f4'),
('mean_row', '<f4'),
('dist_col', '<u4'),
('dist_row', '<u4'),
('cluster_shape', '<i8'),
('scan_param_id', 'u4')]
# Add TDC data entries
if self.analyze_tdc:
hit_fields.update({'tdc_value': 'tdc_value', 'tdc_timestamp': 'tdc_timestamp', 'tdc_status': 'tdc_status'})
hit_description.extend([('tdc_value', 'u2'), ('tdc_timestamp', 'u2'), ('tdc_status', 'u1')])
cluster_fields.update({'tdc_value': 'tdc_value', 'tdc_status': 'tdc_status'})
cluster_description.extend([('tdc_value', '<u2'), ('tdc_status', '<u1')])
hit_dtype = np.dtype(hit_description)
self.cluster_dtype = np.dtype(cluster_description)
if self.cluster_hits: # Allow analysis without clusterizer installed
# Define end of cluster function to calculate cluster shape
# and cluster distance in column and row direction
def end_of_cluster_function(hits, clusters, cluster_size,
cluster_hit_indices, cluster_index,
cluster_id, charge_correction,
noisy_pixels, disabled_pixels,
seed_hit_index):
@numba.njit
def _end_of_cluster_function(hits, clusters, cluster_size,
cluster_hit_indices, cluster_index,
cluster_id, charge_correction,
noisy_pixels, disabled_pixels,
seed_hit_index):
hit_arr = np.zeros((15, 15), dtype=np.bool_)
center_col = hits[cluster_hit_indices[0]].column
center_row = hits[cluster_hit_indices[0]].row
@@ -212,6 +248,47 @@ class Analysis(object):
clusters[cluster_index].dist_col = max_col - min_col + 1
clusters[cluster_index].dist_row = max_row - min_row + 1
def end_of_cluster_function(hits, clusters, cluster_size,
                            cluster_hit_indices, cluster_index,
                            cluster_id, charge_correction,
                            noisy_pixels, disabled_pixels,
                            seed_hit_index):
    ''' End-of-cluster callback for the clusterizer (non-TDC case).

    Pure forwarding shim: delegates to the njit-compiled
    _end_of_cluster_function, which fills cluster shape and
    column/row distance for the cluster just completed.
    The callback signature is dictated by the HitClusterizer API.
    '''
    _end_of_cluster_function(hits, clusters, cluster_size,
                             cluster_hit_indices, cluster_index,
                             cluster_id, charge_correction,
                             noisy_pixels, disabled_pixels,
                             seed_hit_index)
# Define end of cluster function for calculating TDC related cluster properties
# Define end of cluster function for calculating TDC related cluster properties
def end_of_cluster_function_tdc(hits, clusters, cluster_size,
                                cluster_hit_indices, cluster_index,
                                cluster_id, charge_correction,
                                noisy_pixels, disabled_pixels,
                                seed_hit_index):
    ''' End-of-cluster callback used when TDC analysis is enabled.

    First runs the common shape/distance calculation, then derives
    cluster-level TDC value and TDC status from the cluster's hits.
    Callback signature is dictated by the HitClusterizer API.
    '''
    # FIXME: fold the TDC loop into the shape loop to save time
    _end_of_cluster_function(hits, clusters, cluster_size,
                             cluster_hit_indices, cluster_index,
                             cluster_id, charge_correction,
                             noisy_pixels, disabled_pixels,
                             seed_hit_index)
    # Calculate cluster TDC and cluster TDC status
    cluster_tdc = 0  # TODO: apply optional Delta VCAL conversion, since sum of TDC values from different pixels makes no sense.
    cluster_tdc_status = 0  # Logical OR of TDC stati of hits belonging to the cluster
    one_hit_has_no_tdc = False  # Indicator that one hit belonging to the cluster has no TDC value, in this case set TDC status of cluster to zero
    for j in range(clusters[cluster_index].n_hits):
        hit_index = cluster_hit_indices[j]
        # NOTE: the TDC value is summed even for hits whose tdc_status is 0;
        # only the status latches to 0 below — confirm this is intended
        cluster_tdc += hits[hit_index].tdc_value
        if hits[hit_index].tdc_status == 0:
            # Hit has no TDC value, thus set cluster TDC status to zero.
            cluster_tdc_status = 0
            one_hit_has_no_tdc = True
            continue
        if not one_hit_has_no_tdc:
            cluster_tdc_status |= hits[hit_index].tdc_status  # OR all TDC stati of hits belonging to the cluster
    clusters[cluster_index].tdc_value = cluster_tdc
    clusters[cluster_index].tdc_status = cluster_tdc_status
# Initialize clusterizer with custom hit/cluster fields
self.clz = HitClusterizer(
hit_fields=hit_fields,
@@ -226,7 +303,11 @@ class Analysis(object):
ignore_same_hits=True)
# Set end_of_cluster function for shape and distance calculation
self.clz.set_end_of_cluster_function(end_of_cluster_function)
if self.analyze_tdc:
# If analyze TDC data, set also end of cluster function for calculating TDC properties
self.clz.set_end_of_cluster_function(end_of_cluster_function_tdc)
else:
self.clz.set_end_of_cluster_function(end_of_cluster_function)
def _range_of_parameter(self, meta_data):
''' Calculate the raw data word indices of each scan parameter
@@ -288,7 +369,7 @@ class Analysis(object):
row['value'] = in_file.root.meta_data.attrs.chip_id
row.append()
for kw, value in yaml.load(in_file.root.meta_data.attrs.kwargs).iteritems():
for kw, value in yaml.load(in_file.root.meta_data.attrs.kwargs).items():
if kw not in ['start_column', 'stop_column', 'start_row', 'stop_row', 'mask_step', 'maskfile', 'disable', 'n_injections', 'n_triggers', 'limit', 'VCAL_MED', 'VCAL_HIGH', 'VCAL_HIGH_start', 'VCAL_HIGH_stop', 'VCAL_HIGH_step', 'VTH_start', 'VTH_stop', 'VTH_step', 'VTH_name', 'vth_offset', 'DAC', 'type', 'value_start', 'value_stop', 'value_step', 'addresses']:
continue
row = run_config_table.row
@@ -299,7 +380,7 @@ class Analysis(object):
run_config = dict(run_config_table[:])
dac_table = out_file.create_table(out_file.root.configuration, name='dacs', title='DACs', description=DacTable)
for dac, value in yaml.load(in_file.root.meta_data.attrs.dacs).iteritems():
for dac, value in yaml.load(in_file.root.meta_data.attrs.dacs).items():
row = dac_table.row
row['DAC'] = dac
row['value'] = value
@@ -445,7 +526,8 @@ class Analysis(object):
(hits, hist_occ, hist_tot, hist_rel_bcid,
hist_event_status,
hist_bcid_error) = au.init_outs(n_hits=self.chunk_size * 4,
n_scan_params=n_scan_params)
n_scan_params=n_scan_params,
analyze_tdc=self.analyze_tdc)
with tb.open_file(self.analyzed_data_file, 'w', title=in_file.title) as out_file:
try:
@@ -490,6 +572,8 @@ class Analysis(object):
scan_param_id=scan_param_id,
prev_trig_id=self.trg_id,
prev_trg_number=self.prev_trg_number,
analyze_tdc=self.analyze_tdc,
use_tdc_trigger_dist=self.use_tdc_trigger_dist,
last_chunk=self.last_chunk)
if prev_event_number == self.event_number:
@@ -501,7 +585,11 @@ class Analysis(object):
if self.cluster_hits:
_, cluster = self.clz.cluster_hits(hits[:n_hits])
cluster_table.append(cluster)
if self.analyze_tdc:
# Select only clusters where all hits have a valid TDC status
cluster_table.append(cluster[cluster['tdc_status'] == 1])
else:
cluster_table.append(cluster)
# Create actual cluster hists
cs_size = np.bincount(cluster['size'],
minlength=100)[:100]
@@ -567,28 +655,33 @@ class Analysis(object):
complevel=5,
fletcher32=False))
if scan_id in ['threshold_scan', 'global_threshold_tuning', 'local_threshold_tuning']:
if scan_id in ['threshold_scan', 'fast_threshold_scan', 'global_threshold_tuning', 'local_threshold_tuning']:
n_injections = self.run_config['n_injections']
hist_scurve = hist_occ.reshape((192 * 400, -1))
if scan_id == 'threshold_scan':
scan_param_range = [v - self.run_config['VCAL_MED'] for v in range(self.run_config['VCAL_HIGH_start'],
self.run_config['VCAL_HIGH_stop'], self.run_config['VCAL_HIGH_step'])]
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_param_range, n_injections, optimize_fit_range=False)
scan_params = [v - self.run_config['VCAL_MED'] for v in range(self.run_config['VCAL_HIGH_start'],
self.run_config['VCAL_HIGH_stop'], self.run_config['VCAL_HIGH_step'])]
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_params, n_injections, optimize_fit_range=False)
elif scan_id == 'global_threshold_tuning':
scan_param_range = range(self.run_config['VTH_start'], self.run_config['VTH_stop'], -1 * self.run_config['VTH_step'])
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_param_range, n_injections=n_injections,
scan_params = range(self.run_config['VTH_start'], self.run_config['VTH_stop'], -1 * self.run_config['VTH_step'])
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_params, n_injections=n_injections,
invert_x=True, optimize_fit_range=False)
elif scan_id == 'local_threshold_tuning':
min_tdac, max_tdac, _ = au.get_tdac_range(self.run_config['start_column'], self.run_config['stop_column'])
scan_param_range = np.arange(min_tdac, max_tdac)
scan_params = np.arange(min_tdac, max_tdac)
# FIXME: error prone differentiation for diff / lin flavour
if min_tdac < 0:
invert_x = True
else:
invert_x = False
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_param_range, n_injections=n_injections,
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_params, n_injections=n_injections,
invert_x=invert_x, optimize_fit_range=True)
scan_params = [v - self.run_config['VCAL_MED'] for v in range(self.run_config['VCAL_HIGH_start'], self.run_config['VCAL_HIGH_stop'], self.run_config['VCAL_HIGH_step'])]
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_params, n_injections, optimize_fit_range=False)
elif scan_id == 'fast_threshold_scan':
scan_params = self.get_scan_param_values(scan_parameter='vcal_high') - self.get_scan_param_values(scan_parameter='vcal_med')
self.threshold_map, self.noise_map, self.chi2_map = au.fit_scurves_multithread(hist_scurve, scan_params, n_injections, optimize_fit_range=False)
out_file.create_carray(out_file.root, name='ThresholdMap', title='Threshold Map', obj=self.threshold_map,
filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
Loading