Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (96)
Showing with 370 additions and 261 deletions
# .coveragerc to control coverage.py
[run]
branch = True
# branch = True # statement coverage instead of branch coverage is sufficient right now
[report]
# Regexes for lines to exclude from consideration
@@ -8,6 +8,10 @@ exclude_lines =
# Have to re-enable the standard pragma
pragma: no cover
if __name__ == .__main__.:
@numba.njit
@njit
def __repr__
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
......
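For illustration, a minimal sketch (hypothetical module, not part of this repository) of what the exclude_lines regexes above match; numba-compiled functions are not traced by coverage.py, and excluding a decorator line excludes the whole decorated function from the report:

from numba import njit

@njit  # matched by the '@njit' exclude regex; the decorated function is excluded
def add(a, b):
    return a + b

if __name__ == '__main__':  # matched by the 'if __name__ == .__main__.:' regex
    print(add(1, 2))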
stages:
- codestyle
- test
- report
# Tests using RD53 and FPGA co-simulation
test:rd53:
stage: test
needs: [] # do not wait for code style
variables:
# Otherwise the lock file of cancelled runs might stall future checkouts
# https://gitlab.cern.ch/silab/bdaq53/issues/293
@@ -14,7 +21,7 @@ test:rd53:
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH=$HOME/miniconda/bin:$PATH
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml pip
# Setup co-simulation
- pip install cocotb
- git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..;
@@ -24,8 +31,11 @@ test:rd53:
- source setup_questa.sh > /dev/null
- pytest -s -v rd53a/test_rd53
# Tests for software without hardware co-simulation
# Tests bdaq software without hardware co-simulation; scan tests run on a different runner to save time
test:software:
stage: test
needs:
- codestyle:code_style
variables:
# Otherwise the lock file of cancelled runs might stall future checkouts
# https://gitlab.cern.ch/silab/bdaq53/issues/293
@@ -45,10 +55,10 @@ test:software:
# Update miniconda python and install required binary packages
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs iminuit lxml
# - conda install pytest-xdist # Allow testing in parallel
# - conda install pytest-xdist # Allow testing in parallel, does not work due to mocks that act globally
# Install virtual x server for bdaq53 monitor Qt gui tests
- apt-get install -y xvfb
- pip install xvfbwrapper pytest-cov
- pip install xvfbwrapper pytest-cov # pytest plugins: pytest-sugar, pytest-cov for progress visualization and coverage reporting
# Install basil tag from github
- git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..;
# - if [ -z "$CI_COMMIT_TAG"]; then git clone -b v3.0.0 --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; else git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; fi
@@ -58,12 +68,147 @@ test:software:
- conda init bash # https://github.com/ContinuumIO/docker-images/issues/89
- source ~/.bashrc # since ==> For changes to take effect, close and re-open your current shell. <==
- conda activate # to link properly to pytest
# Do not run virtual x server tests (monitor) in this runner due to segfault
- pytest --cov=bdaq53 bdaq53/tests/test_software/ --ignore=bdaq53/tests/test_software/test_monitor.py --ignore=bdaq53/tests/test_software/test_eudaq.py
coverage: '/^TOTAL.+?(\d+\%)$/'
# Run unit tests with coverage and create a cobertura XML report for GitLab; also print the report to the terminal
- export COVERAGE_FILE=.coverage_software
- pytest --cov-report term --cov=bdaq53 bdaq53/tests/test_software/base
# coverage: '/^TOTAL.+?(\d+\%)$/'
artifacts:
paths:
- ".coverage_software"
# Tests for scan script regressions in software without hardware co-simulation
test:scans:
stage: test
needs:
- codestyle:code_style
variables:
# Otherwise the lock file of cancelled runs might stall future checkouts
# https://gitlab.cern.ch/silab/bdaq53/issues/293
GIT_STRATEGY: clone
tags: # tags to differentiate runners able to run the job
- docker-privileged-xl # Use (faster?) CERN shared runner
image: continuumio/anaconda3:latest # Ubuntu based miniconda image
before_script:
# Install git-lfs
- apt-get update --fix-missing
- apt-get install -y curl
- curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
- apt-get install -y git-lfs
- git lfs install
- git submodule sync --recursive
- git submodule update --init --recursive
# Update miniconda python and install required binary packages
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs iminuit lxml
# - conda install pytest-xdist # Allow testing in parallel, does not work due to mocks that act globally
# Install virtual x server for bdaq53 monitor Qt gui tests
- apt-get install -y xvfb
- pip install xvfbwrapper pytest-cov # pytest plugins: pytest-sugar, pytest-cov for progress visualization and coverage reporting
# Install basil tag from github
- git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..;
# - if [ -z "$CI_COMMIT_TAG"]; then git clone -b v3.0.0 --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; else git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; fi
script:
- python setup.py develop
# Try to activate conda environment in the newest broken miniconda docker
- conda init bash # https://github.com/ContinuumIO/docker-images/issues/89
- source ~/.bashrc # since ==> For changes to take effect, close and re-open your current shell. <==
- conda activate # to link properly to pytest
- export COVERAGE_FILE=.coverage_scans
- pytest --cov-report term --cov=bdaq53 bdaq53/tests/test_software/scans --ignore=bdaq53/tests/test_software/scans/test_eudaq.py
# coverage: '/^TOTAL.+?(\d+\%)$/'
artifacts:
paths:
- ".coverage_scans"
# Tests for analysis code regressions in software without hardware co-simulation
test:analysis:
stage: test
needs:
- codestyle:code_style
variables:
# Otherwise the lock file of cancelled runs might stall future checkouts
# https://gitlab.cern.ch/silab/bdaq53/issues/293
GIT_STRATEGY: clone
tags: # tags to differentiate runners able to run the job
- docker # Use CERN shared runners
image: continuumio/anaconda3:latest # Ubuntu based miniconda image
before_script:
# Install git-lfs
- apt-get update --fix-missing
- apt-get install -y curl
- curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
- apt-get install -y git-lfs
- git lfs install
- git submodule sync --recursive
- git submodule update --init --recursive
# Update miniconda python and install required binary packages
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs iminuit lxml
# - conda install pytest-xdist # Allow testing in parallel, does not work due to mocks that act globally
# Install virtual x server for bdaq53 monitor Qt gui tests
- apt-get install -y xvfb
- pip install xvfbwrapper pytest-cov # pytest plugins: pytest-sugar, pytest-cov for progress visualization and coverage reporting
# Install basil tag from github
- git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..;
# - if [ -z "$CI_COMMIT_TAG"]; then git clone -b v3.0.0 --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; else git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..; fi
script:
- python setup.py develop
# Try to activate conda environment in the newest broken miniconda docker
- conda init bash # https://github.com/ContinuumIO/docker-images/issues/89
- source ~/.bashrc # since ==> For changes to take effect, close and re-open your current shell. <==
- conda activate # to link properly to pytest
- export COVERAGE_FILE=.coverage_analysis
- pytest --cov-report term --cov=bdaq53 bdaq53/tests/test_software/analysis --ignore=bdaq53/tests/test_software/analysis/test_monitor.py
# coverage: '/^TOTAL.+?(\d+\%)$/'
artifacts:
paths:
- ".coverage_analysis"
# Combine coverage reports and report to gitlab, https://docs.gitlab.com/ee/user/project/merge_requests/test_coverage_visualization.html
report:coverage:
stage: report
needs: # wait for coverage reporting test jobs
- test:software
- test:scans
- test:analysis
tags: # tags to differentiate runners able to run the job
- docker # Use CERN shared runners
image: continuumio/anaconda3:latest # Ubuntu based miniconda image
before_script:
# Install git-lfs
- apt-get update --fix-missing
- apt-get install -y curl
- curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash
- apt-get install -y git-lfs
- git lfs install
- git submodule sync --recursive
- git submodule update --init --recursive
# Update miniconda python and install required binary packages
- conda update --yes conda
- conda install --yes coverage pytest
- pip install pytest-cov # pytest plugins: pytest-sugar, pytest-cov for progress visualization and coverage reporting
script:
# Try to activate conda environment in the newest broken miniconda docker
- conda init bash # https://github.com/ContinuumIO/docker-images/issues/89
- source ~/.bashrc # since ==> For changes to take effect, close and re-open your current shell. <==
- conda activate # to link properly to pytest
- coverage combine .coverage_software .coverage_analysis .coverage_scans # combine .coverage* files
- coverage xml # create cobertura formatted coverage.xml, that can be used by gitlab
- coverage report # print to terminal to allow regex to catch output
coverage: '/^TOTAL.+?(\d+\%)$/' # regex parse to show on gitlab
artifacts:
name: all-coverage-reports
paths:
- "*.coverage*"
expire_in: 4 months
reports:
cobertura: coverage.xml
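For reference, a hedged local equivalent of the report stage above using the coverage.py API instead of the CLI (a sketch, assuming the three .coverage_* artifacts sit in the working directory):

from coverage import Coverage

cov = Coverage()
cov.combine(['.coverage_software', '.coverage_analysis', '.coverage_scans'])
cov.xml_report(outfile='coverage.xml')  # cobertura-style XML that GitLab can read
cov.report()  # prints the table whose TOTAL line the coverage regex parses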
# Tests for eudaq newest v1.x-dev branch compatibility
test:eudaq:
stage: test
needs:
- codestyle:code_style
variables:
# Otherwise the lock file of cancelled runs might stall future checkouts
# https://gitlab.cern.ch/silab/bdaq53/issues/293
@@ -96,12 +241,14 @@ test:eudaq:
- source ~/.bashrc # since ==> For changes to take effect, close and re-open your current shell. <==
- conda activate # to link properly to pytest
# Do not run virtual x server tests (monitor) in this runner due to segfault
- pytest -v bdaq53/tests/test_software/test_eudaq.py
- pytest -v bdaq53/tests/test_software/scans/test_eudaq.py
# Tests that need bdaq53 readout hardware and RD53 FE
# Needs Ubuntu system with xvfb, Miniconda3 and full Xilinx installed and
# Gitlab-runner with shell executor: https://docs.gitlab.com/runner/executors/
test:hardware:
stage: test
needs: [] # do not wait for code style
allow_failure: true
variables:
GIT_STRATEGY: clone
@@ -116,7 +263,7 @@ test:hardware:
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH=$HOME/miniconda/bin:$PATH
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml pip
# Setup co-simulation
- pip install xvfbwrapper
- yum install libX11 -y
@@ -146,7 +293,8 @@ test:hardware:
expire_in: 1 month
# Tests for code style violations in new code lines
test:code_style:
codestyle:code_style:
stage: codestyle
variables:
# Otherwise lock file of cancelled runs might stall future check outs
# https://gitlab.cern.ch/silab/bdaq53/issues/293
@@ -168,6 +316,8 @@ test:code_style:
# Tests using ITkPixV1 and FPGA co-simulation
test:itkpixv1:
stage: test
needs: [] # do not wait for code style
variables:
GIT_STRATEGY: clone
tags: # tags to differentiate runners able to run the job
@@ -180,7 +330,7 @@ test:itkpixv1:
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH=$HOME/miniconda/bin:$PATH
- conda update --yes conda
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml
- conda install --yes numpy bitarray pytest pyyaml scipy numba pytables pyqt matplotlib tqdm pyzmq blosc psutil pexpect coloredlogs ptyprocess iminuit lxml pip
# Setup co-simulation
- pip install cocotb
- git clone -b development --depth 1 https://github.com/SiLab-Bonn/basil.git; cd basil; python setup.py develop; cd ..;
......
@@ -14,6 +14,7 @@ import logging
import multiprocessing
import time
import queue
from functools import reduce
import numpy as np
import numba
@@ -22,8 +23,11 @@ from bdaq53.analysis import analysis_utils as au
logger = logging.getLogger('OnlineAnalysis')
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
@numba.njit
@numba.njit(cache=True, fastmath=True)
def histogram(raw_data, occ_hist, data_word, is_fe_high_word, is_data_header):
''' Raw data to 2D occupancy histogram '''
@@ -134,190 +138,145 @@ def histogram_tot(raw_data, hist_tot, data_word, is_fe_high_word, is_data_header
return data_word, is_fe_high_word, is_data_header
class OccupancyHistogramming(object):
''' Fast histogramming of raw data to a 2D hit histogram
class OnlineHistogrammingBase():
''' Base class to do online analysis with raw data from chip.
No event building and a separate process for speedup.
The output data is a histogram of a given shape.
'''
_queue_timeout = 0.01 # max blocking time to delete object [s]
def __init__(self):
def __init__(self, shape):
self._raw_data_queue = multiprocessing.Queue()
self.stop = multiprocessing.Event()
self.lock = multiprocessing.Lock()
self.last_add = None # time of last add to queue
self.shape = shape
self.analysis_function_kwargs = {}
def init(self):
# Create shared memory 32 bit unsigned int numpy array
shared_array_base = multiprocessing.Array(ctypes.c_uint, 400 * 192)
n_values = reduce(lambda x, y: x * y, self.shape)
shared_array_base = multiprocessing.Array(ctypes.c_uint, n_values)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
self.occ_hist = shared_array.reshape(400, 192)
self.hist = shared_array.reshape(*self.shape)
self.idle_worker = multiprocessing.Event()
self.p = multiprocessing.Process(target=self.worker,
args=(self._raw_data_queue, shared_array_base,
self.lock, self.stop, ))
self.lock, self.stop, self.idle_worker))
self.p.start()
logger.info('Starting process %d', self.p.pid)
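# Note: multiprocessing.Array allocates the histogram in shared memory;
# np.ctypeslib.as_array here and in the worker wraps the same buffer and
# reshape returns a view, so worker writes are visible in self.hist
# without any copying.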
def analysis_function(self, raw_data, hist, *args):
raise NotImplementedError("You have to implement the analysis_function")
def add(self, raw_data):
''' Add raw data to be histogrammed '''
self.last_add = time.time() # time of last add to queue
self.idle_worker.clear() # after adding data the worker cannot be idle
self._raw_data_queue.put(raw_data)
def _reset_hist(self):
with self.lock:
self.hist = self.hist.reshape(-1)
for i in range(self.hist.shape[0]):
self.hist[i] = 0
self.hist = self.hist.reshape(self.shape)
def reset(self, wait=True, timeout=0.5):
''' Reset histogram '''
if not wait:
if self._raw_data_queue.qsize() != 0:
logger.warning('Resetting histogram while adding data')
if not self._raw_data_queue.empty() or not self.idle_worker.is_set():
logger.warning('Resetting histogram while filling data')
else:
n_time = 0
while self._raw_data_queue.qsize() != 0:
time.sleep(0.01)
n_time += 1
if n_time * 0.01 > timeout:
logger.warning('Resetting histogram while adding data')
break
with self.lock:
# No overwrite with a new zero array due to shared memory
for col in range(400):
for row in range(192):
self.occ_hist[col, row] = 0
if not self.idle_worker.wait(timeout):
logger.warning('Resetting histogram while filling data')
self._reset_hist()
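# Synchronization contract used above: add() clears idle_worker before
# queueing, the worker sets it only once the queue runs empty, so
# idle_worker.wait(timeout) returning True means all queued raw data
# has been histogrammed.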
def get(self, wait=True, timeout=0.5, reset=True):
def get(self, wait=True, timeout=None, reset=True):
''' Get the result histogram '''
if not wait:
if self._raw_data_queue.qsize() != 0:
logger.warning('Getting histogram while adding data')
if not self._raw_data_queue.empty() or not self.idle_worker.is_set():
logger.warning('Getting histogram while analyzing data')
else:
n_time = 0
while self._raw_data_queue.qsize() != 0:
time.sleep(0.01)
n_time += 1
if n_time * 0.01 > timeout:
logger.warning('Getting histogram while adding data')
break
with self.lock:
if reset:
occ_hist = self.occ_hist.copy()
# No overwrite with a new zero array due to shared memory
for col in range(400):
for row in range(192):
self.occ_hist[col, row] = 0
return occ_hist
else:
return self.occ_hist
def worker(self, raw_data_queue, shared_array_base, lock, stop):
if not self.idle_worker.wait(timeout):
logger.warning('Getting histogram while analyzing data. Consider increasing the timeout.')
if reset:
hist = self.hist.copy()
# No overwrite with a new zero array due to shared memory
self._reset_hist()
return hist
else:
return self.hist
def worker(self, raw_data_queue, shared_array_base, lock, stop, idle):
''' Histogramming in separate process '''
occ_hist = np.ctypeslib.as_array(shared_array_base.get_obj()).reshape(400, 192)
is_fe_high_word = -1
is_data_header = 0
data_word = 0
hist = np.ctypeslib.as_array(shared_array_base.get_obj()).reshape(self.shape)
while not stop.is_set():
try:
raw_data = raw_data_queue.get(timeout=self._queue_timeout)
idle.clear()
with lock:
data_word, is_fe_high_word, is_data_header = histogram(raw_data, occ_hist, data_word, is_fe_high_word, is_data_header=is_data_header)
return_values = self.analysis_function(raw_data, hist, **self.analysis_function_kwargs)
self.analysis_function_kwargs.update(zip(self.analysis_function_kwargs, return_values))
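# The njit'ed analysis function is kept stateless: it returns the updated
# decoder state, and analysis_function_kwargs (keys in insertion order,
# matching the return order) carries that state into the next call.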
except queue.Empty:
idle.set()
continue
except KeyboardInterrupt: # Need to catch KeyboardInterrupt from main process
stop.set()
idle.set()
def __del__(self):
def close(self):
''' Close the process and wait until done. Likely needed to free the pytables file handle. '''
logger.info('Stopping process %d', self.p.pid)
self._raw_data_queue.close()
self._raw_data_queue.join_thread() # Needed otherwise IOError: [Errno 232] The pipe is being closed
self.stop.set()
self.p.join()
def __del__(self):
if self.p.is_alive():
logger.warning('Process still running. Was close() called?')
self.close()
class TotHistogramming(object):
''' Fast histogramming of raw data to a TOT histogram for each pixel
No event building and a separate process for speedup
class OccupancyHistogramming(OnlineHistogrammingBase):
''' Fast histogramming of raw data to a 2D hit histogram
No event building.
'''
_queue_timeout = 0.01 # max blocking time to delete object [s]
def __init__(self):
self._raw_data_queue = multiprocessing.Queue()
self.stop = multiprocessing.Event()
self.lock = multiprocessing.Lock()
def __init__(self, chip_type='rd53a'):
if 'rd53a' in chip_type.lower():
super().__init__(shape=(400, 192))
self.analysis_function_kwargs = {'data_word': 0, 'is_fe_high_word': -1, 'is_data_header': 0}
# Create shared memory 32 bit unsigned int numpy array
shared_array_base = multiprocessing.Array(ctypes.c_uint, 400 * 192 * 16)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
self.tot_hist = shared_array.reshape(400, 192, 16)
def analysis_function(self, raw_data, hist, data_word, is_fe_high_word, is_data_header):
    return histogram(raw_data, hist, data_word, is_fe_high_word, is_data_header)
setattr(OccupancyHistogramming, 'analysis_function', analysis_function)
else:
raise NotImplementedError('Chip type %s not supported!' % chip_type)
self.init()
self.p = multiprocessing.Process(target=self.worker,
args=(self._raw_data_queue, shared_array_base,
self.lock, self.stop, ))
self.p.start()
def add(self, raw_data):
''' Add raw data to be histogrammed '''
self._raw_data_queue.put(raw_data)
class TotHistogramming(OnlineHistogrammingBase):
''' Fast histogramming of raw data to a TOT histogram for each pixel
def reset(self, wait=True, timeout=0.5):
''' Reset histogram '''
if not wait:
if self._raw_data_queue.qsize() != 0:
logger.warning('Resetting histogram while adding data')
else:
n_time = 0
while self._raw_data_queue.qsize() != 0:
time.sleep(0.01)
n_time += 1
if n_time * 0.01 > timeout:
logger.warning('Resetting histogram while adding data')
break
with self.lock:
# No overwrite with a new zero array due to shared memory
for col in range(400):
for row in range(192):
for tot in range(16):
self.tot_hist[col, row, tot] = 0
No event building.
'''
_queue_timeout = 0.01 # max blocking time to delete object [s]
def get(self, wait=True, timeout=0.5, reset=True):
''' Get the result histogram '''
if not wait:
if self._raw_data_queue.qsize() != 0:
logger.warning('Getting histogram while adding data')
else:
n_time = 0
while self._raw_data_queue.qsize() != 0:
time.sleep(0.01)
n_time += 1
if n_time * 0.01 > timeout:
logger.warning('Getting histogram while adding data')
break
with self.lock:
if reset:
tot_hist = self.tot_hist.copy()
# No overwrite with a new zero array due to shared memory
for col in range(400):
for row in range(192):
for tot in range(16):
self.tot_hist[col, row, tot] = 0
return tot_hist
else:
return self.tot_hist
def worker(self, raw_data_queue, shared_array_base, lock, stop):
''' Histogramming in seperate process '''
tot_hist = np.ctypeslib.as_array(shared_array_base.get_obj()).reshape(400, 192, 16)
is_fe_high_word = -1
is_data_header = 0
data_word = 0
while not stop.is_set():
try:
raw_data = raw_data_queue.get(timeout=self._queue_timeout)
with lock:
data_word, is_fe_high_word, is_data_header = histogram_tot(raw_data, tot_hist, data_word, is_fe_high_word, is_data_header=is_data_header)
except queue.Empty:
continue
except KeyboardInterrupt: # Need to catch KeyboardInterrupt from main process
stop.set()
def __init__(self, chip_type='rd53a'):
if 'rd53a' in chip_type.lower():
super().__init__(shape=(400, 192, 16))
self.analysis_function_kwargs = {'data_word': 0, 'is_fe_high_word': -1, 'is_data_header': 0}
def __del__(self):
self._raw_data_queue.close()
self._raw_data_queue.join_thread() # Needed otherwise IOError: [Errno 232] The pipe is being closed
self.stop.set()
self.p.join()
def analysis_function(self, raw_data, hist, data_word, is_fe_high_word, is_data_header):
    return histogram_tot(raw_data, hist, data_word, is_fe_high_word, is_data_header)
setattr(TotHistogramming, 'analysis_function', analysis_function)
else:
raise NotImplementedError('Chip type %s not supported!' % chip_type)
self.init()
if __name__ == "__main__":
......
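A hedged usage sketch of the refactored class above (import path and raw-data content are assumptions, not taken from this diff):

import numpy as np
from bdaq53.analysis.online import OccupancyHistogramming  # import path assumed

hist_occ = OccupancyHistogramming(chip_type='rd53a')
hist_occ.add(np.zeros(100, dtype=np.uint32))  # queue raw data words for the worker
occ = hist_occ.get(wait=True, timeout=1)  # wait until the worker is idle, then copy and reset
assert occ.shape == (400, 192)
hist_occ.close()  # stop the worker process; otherwise __del__ warns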
This diff is collapsed.
@@ -16,7 +16,6 @@ from basil.utils.BitLogic import BitLogic
from bdaq53.chips.chip_base import ChipBase, RegisterObject, MaskObject
from bdaq53.analysis import analysis_utils
from bdaq53.analysis import rd53a_analysis as ana
from bdaq53 import manage_databases
from bdaq53.system import logger as logger
FLAVOR_COLS = {'SYNC': range(0, 128),
@@ -505,8 +504,6 @@ class RD53A(ChipBase):
else:
raise TypeError('Supplied config has unknown format!')
manage_databases.check_chip_in_database(self.chip_type, self.chip_sn)
self.registers = RegisterObject(self, 'rd53a_registers.yaml')
masks = {'enable': {'default': False},
......
@@ -16,7 +16,8 @@ import os
import time
from datetime import datetime
import yaml
import urllib.error, urllib.request
import urllib.request
import urllib.error
import sqlite3
from lxml import html
@@ -83,10 +84,10 @@ def _cache_rd53a_db(outfile=None, **_):
try:
urllib.request.urlretrieve(uri, outfile)
except urllib.error.HTTPError:
except (urllib.error.HTTPError, urllib.error.URLError):
raise DBUnreachableError('Cannot connect to RD53A database. Are you offline?')
log.info('RD53 database has been cached to {0} in {1:1.3f}s'.format(outfile, time.time() - start_timer))
log.debug('RD53 database has been cached to {0} in {1:1.3f}s'.format(outfile, time.time() - start_timer))
def _get_chip_from_rd53a_db_cache(wafer_no, col, row, infile=None, **_):
@@ -126,13 +127,13 @@ def _get_chip_from_rd53a_db_cache(wafer_no, col, row, infile=None, **_):
log.error('Cannot find cached version of RD53A database!')
raise DBCacheNotFoundError("No such file: '{0}'".format(infile))
log.info('Reading data from cache file {}'.format(infile))
log.debug('Reading data from cache file {}'.format(infile))
creation_date = os.path.getmtime(infile)
if time.time() - creation_date > (24 * 60 * 60):
log.warning('Cached data is older than 24h, consider creating a new cache. Creation date {}'.format(datetime.fromtimestamp(creation_date)))
elif time.time() - creation_date > 1:
log.info('Cache file was created {}'.format(datetime.fromtimestamp(creation_date)))
log.debug('Cache file was created {}'.format(datetime.fromtimestamp(creation_date)))
cache = sqlite3.connect(infile)
cache.row_factory = dict_factory
@@ -225,7 +226,8 @@ def check_chip_in_database(chip_type, chip_sn):
'''
if chip_type != 'rd53a':
raise NotImplementedError('This functionality is not yet implemented for {}'.format(chip_type))
log.warning('Database lookup activated, but not available for {}...'.format(chip_type))
return
log.info('Checking database for chip {}...'.format(chip_sn))
......
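A minimal sketch (hypothetical URL) of why the broadened except clause above matters: urllib.error.HTTPError subclasses urllib.error.URLError, so the tuple additionally catches plain connection failures that raise URLError directly:

import urllib.error
import urllib.request

try:
    urllib.request.urlretrieve('https://example.invalid/rd53a.db', '/tmp/rd53a_cache.db')
except (urllib.error.HTTPError, urllib.error.URLError):
    print('Cannot connect to RD53A database. Are you offline?')  # DNS errors land here too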
@@ -144,6 +144,7 @@ class SensorIVScan(object):
add = 1 if VBIAS_stop > 0 else -1
pbar = tqdm(total=abs(VBIAS_stop), unit='Volt')
last_v = 0
for v_bias in range(VBIAS_start, VBIAS_stop + add, VBIAS_step):
self.periphery.power_on_HV(module_name, hv_voltage=v_bias, hv_current_limit=hv_current_limit, verbose=False)
@@ -170,7 +171,8 @@ class SensorIVScan(object):
row['current_error'] = np.std(c_arr)
row.append()
self.raw_data_table.flush()
pbar.update(abs(VBIAS_step))
pbar.update(abs(v_bias) - last_v)
last_v = abs(v_bias)
# Abort scan if in current limit
if abs(current) >= hv_current_limit * 0.98:
......
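A minimal sketch (synthetic values) of the progress-bar fix above: tqdm.update takes an increment, so updating by the measured voltage delta keeps the bar exact even when the final step is smaller than VBIAS_step:

from tqdm import tqdm

pbar = tqdm(total=100, unit='Volt')
last_v = 0
for v_bias in [0, 30, 60, 90, 100]:  # last step smaller than the others
    pbar.update(abs(v_bias) - last_v)
    last_v = abs(v_bias)
pbar.close()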
@@ -69,8 +69,7 @@ if __name__ == '__main__':
tuning_configuration['VCAL_HIGH'] = tuning_configuration['VCAL_MED'] + 129
with GDACTuning(scan_config=tuning_configuration) as global_tuning:
global_tuning.scan()
global_tuning.analyze()
global_tuning.start()
# Tune local thresholds
for flavor in flavors:
@@ -84,13 +83,8 @@ if __name__ == '__main__':
tuning_configuration['start_column'] = 264
tuning_configuration['stop_column'] = 400
tuning_configuration['VCAL_HIGH'] = tuning_configuration['VCAL_MED'] + 129
with TDACTuning(scan_config=tuning_configuration) as local_tuning:
local_tuning.scan()
local_tuning.analyze()
# Set maskfile to 'auto' *after* first TDAC tuning
tuning_configuration['maskfile'] = 'auto'
local_tuning.start()
with BumpConnCTalkScan(scan_config=scan_configuration) as disconn_bumps_scan:
disconn_bumps_scan.start()
@@ -23,8 +23,6 @@ scan_configuration = {
'start_row': 0,
'stop_row': 192,
'maskfile': None,
# Target threshold
'VCAL_MED': 500,
'VCAL_HIGH': 680 # 180 DVCAL corresponds to about 2000 e
@@ -50,14 +48,10 @@ if __name__ == '__main__':
noise_occ_scan_configuration['stop_row'] = scan_configuration['stop_row']
with GDACTuning(scan_config=scan_configuration) as global_tuning:
global_tuning.scan()
gdacs = global_tuning.analyze()
global_tuning.start()
with TDACTuning(scan_config=scan_configuration) as local_tuning:
local_tuning.scan()
tdacs = local_tuning.analyze()
scan_configuration['maskfile'] = 'auto' # Definitely use maskfile created by tuning from here on
local_tuning.start()
# First noise occupancy scan
with NoiseOccScan(scan_config=noise_occ_scan_configuration) as noise_occ_scan:
......
@@ -62,12 +62,10 @@ if __name__ == '__main__':
noise_occ_scan_configuration['stop_row'] = scan_configuration['stop_row']
with GDACTuning(scan_config=scan_configuration) as global_tuning:
global_tuning.scan()
gdacs = global_tuning.analyze() # FIXME: no return value in scan steps possible anymore due to multi module feature
global_tuning.start()
with TDACTuning(scan_config=scan_configuration) as local_tuning:
local_tuning.scan()
tdacs = local_tuning.analyze() # FIXME: no return value in scan steps possible anymore due to multi module feature
local_tuning.start()
scan_configuration['maskfile'] = 'auto' # Definitely use maskfile created by tuning from here on
tot_params = scan_configuration.copy()
@@ -76,8 +74,7 @@ if __name__ == '__main__':
tot_tuning.start()
with TDACTuning(scan_config=scan_configuration) as local_tuning:
local_tuning.scan()
tdacs = local_tuning.analyze() # FIXME: no return value in scan steps possible anymore due to multi module feature
local_tuning.start()
# First noise occupancy scan
with NoiseOccScan(scan_config=noise_occ_scan_configuration) as noise_occ_scan:
......
@@ -315,7 +315,7 @@ class BumpConSourceScan(ScanBase):
if use_hitor:
self.enable_hitor(False)
self.data.hist_occ.stop.set()
self.data.hist_occ.close()
self.n_trigger = self.data.n_trigger # Print number of triggers in ScanBase
self.chip.registers.reset_all()
......
@@ -81,7 +81,7 @@ class BumpConnThrShScan(ScanBase):
stop_row : int [0:192]
Row to stop the scan. This row is excluded from the scan.
'''
self.data.module_name = self.module_conf['_module_name']
self.data.module_name = self.module_settings['name']
if not self.periphery.enabled:
raise Exception('Periphery module needs to be enabled!')
@@ -278,7 +278,7 @@ class BumpConnThrShScan(ScanBase):
obj=np.array(scan_param_ids_list), filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
# Stop analysis process
self.data.hist_occ.stop.set()
self.data.hist_occ.close()
self.log.success('Scan finished')
......
@@ -144,11 +144,11 @@ class ExtTriggerScan(ScanBase):
Maximum amount of triggers to record. Set to False for no limit.
'''
if scan_timeout:
self.data.pbar = tqdm(total=scan_timeout, unit='') # [s]
pbar = tqdm(total=scan_timeout, unit='') # [s]
elif max_triggers:
self.data.pbar = tqdm(total=max_triggers, unit=' Triggers')
pbar = tqdm(total=max_triggers, unit=' Triggers')
elif min_spec_occupancy:
self.data.pbar = tqdm(total=100, unit=' % Hits')
pbar = tqdm(total=100, unit=' % Hits')
start_time = time.time()
@@ -156,8 +156,8 @@ class ExtTriggerScan(ScanBase):
if scan_timeout:
current_time = time.time()
if current_time - start_time > scan_timeout:
self.data.pbar.update(1)
self.data.pbar.close()
pbar.update(1)
pbar.close()
self.log.info('Scan timeout was reached')
return True
return False
@@ -204,9 +204,9 @@ class ExtTriggerScan(ScanBase):
# Update progress bar
try:
if scan_timeout:
self.data.pbar.update(1)
pbar.update(1)
elif max_triggers:
self.data.pbar.update(self.bdaq.get_trigger_counter() - triggers)
pbar.update(self.bdaq.get_trigger_counter() - triggers)
elif min_spec_occupancy:
clipped_occ = self.data.occupancy.copy()
clipped_occ[clipped_occ > min_spec_occupancy] = min_spec_occupancy
@@ -215,29 +215,29 @@ class ExtTriggerScan(ScanBase):
num_enabled_pixels = np.count_nonzero(self.data.enabled_pixels)
num_required_hits = min_spec_occupancy * num_enabled_pixels
self.data.pbar.n = int(100. * clipped_num_hits / num_required_hits)
self.data.pbar.refresh()
pbar.n = int(100. * clipped_num_hits / num_required_hits)
pbar.refresh()
except ValueError:
pass
# Stop scan if fraction of pixels reached minimum hits per pixel
if min_spec_occupancy and np.count_nonzero(self.data.occupancy >= min_spec_occupancy) >= fraction * num_enabled_pixels:
self.stop_scan.set()
self.data.pbar.close()
pbar.close()
self.log.info('Reached required minimal number of hits per pixel ({0})'.format(min_spec_occupancy))
# Stop scan if reached trigger limit
if max_triggers and triggers >= max_triggers:
self.stop_scan.set()
self.data.pbar.close()
pbar.close()
self.log.info('Trigger limit was reached: {0}'.format(max_triggers))
except KeyboardInterrupt: # React on keyboard interrupt
self.stop_scan.set()
self.data.pbar.close()
pbar.close()
self.log.info('Scan was stopped due to keyboard interrupt')
self.data.pbar.close()
pbar.close()
self.chip._az_stop()
@@ -249,8 +249,7 @@ class ExtTriggerScan(ScanBase):
if min_spec_occupancy: # close online analysis for each chip
for self.data in self._scan_data_containers:
self.data.hist_occ.stop.set()
time.sleep(0.1)
self.data.hist_occ.close()
self.n_trigger = self.data.n_trigger # Print number of triggers in ScanBase
# FIXME: This was a workaround from long ago. Is this still necessary?
......
@@ -93,7 +93,7 @@ class MergedBumpsScan(ScanBase):
def _analyze(self):
with analysis.Analysis(raw_data_file=self.output_filename + '.h5', **self.configuration['bench']['analysis']) as a:
a.analyze_data()
n_injections = a.run_config['n_injections']
n_injections = a.scan_config['n_injections']
with tb.open_file(self.output_filename + '_interpreted.h5', 'r+') as in_file:
hist_occ = in_file.root.HistOcc[:].reshape((400, 192))
......
@@ -203,7 +203,7 @@ class NoiseOccAdvScan(ScanBase):
pbar.close()
self.data.hist_occ.stop.set()
self.data.hist_occ.close()
disable_mask = np.ones((400, 192), dtype=bool)
disable_mask[self.data.noisy_map == 'n'] = False
@@ -249,12 +249,12 @@ class NoiseOccAdvScan(ScanBase):
initial_disable_mask = in_file.root.DisabledPixelsAtStart[:]
# Get scan parameters
run_config = au.ConfigDict(in_file.root.configuration.run_config[:])
scan_config = au.ConfigDict(in_file.root.configuration_in.scan.scan_config[:])
start_column = run_config['start_column']
stop_column = run_config['stop_column']
start_row = run_config['start_row']
stop_row = run_config['stop_row']
start_column = scan_config['start_column']
stop_column = scan_config['stop_column']
start_row = scan_config['start_row']
stop_row = scan_config['stop_row']
n_pixels = (stop_column - start_column) * (stop_row - start_row)
......
@@ -186,7 +186,7 @@ class ThresholdScan(ScanBase):
else:
self.log.warning('Maximum injection reached. Abort.')
self.data.hist_occ.stop.set() # stop analysis process
self.data.hist_occ.close() # stop analysis process
self.log.success('Scan finished')
# Used to overwrite data storing function: self.readout.handle_data
......
@@ -174,14 +174,22 @@ class TDACTuning(ScanBase):
self.data.tdac_map[:, :] = best_results_map[:, :, 0]
pbar.close()
self.data.hist_occ.stop.set() # stop analysis process
self.data.hist_occ.close() # stop analysis process
self.log.success('Scan finished')
if len(self.data.flavors) == 1:
self.log.success('Mean TDAC is {0:1.2f}.'.format(np.mean(self.chip.masks['tdac'][start_column:stop_column, start_row:stop_row])))
enable_mask = self.chip.masks['enable'][start_column:stop_column, start_row:stop_row]
tdac_mask = self.chip.masks['tdac'][start_column:stop_column, start_row:stop_row]
mean_tdac = np.mean(tdac_mask[enable_mask])
self.log.success('Mean TDAC is {0:1.2f}.'.format(mean_tdac))
else:
self.log.success('Mean TDAC is {0:1.2f} for LIN and {1:1.2f} for DIFF.'.format(np.mean(self.chip.masks['tdac'][max(128, start_column):min(264, stop_column), start_row:stop_row]),
np.mean(self.chip.masks['tdac'][max(264, start_column):min(400, stop_column), start_row:stop_row])))
enable_mask_lin = self.chip.masks['enable'][max(128, start_column):min(264, stop_column), start_row:stop_row]
tdac_mask_lin = self.chip.masks['tdac'][max(128, start_column):min(264, stop_column), start_row:stop_row]
mean_tdac_lin = np.mean(tdac_mask_lin[enable_mask_lin])
enable_mask_diff = self.chip.masks['enable'][max(264, start_column):min(400, stop_column), start_row:stop_row]
tdac_mask_diff = self.chip.masks['tdac'][max(264, start_column):min(400, stop_column), start_row:stop_row]
mean_tdac_diff = np.mean(tdac_mask_diff[enable_mask_diff])
self.log.success('Mean TDAC is {0:1.2f} for LIN and {1:1.2f} for DIFF.'.format(mean_tdac_lin, mean_tdac_diff))
self.chip.masks['tdac'][start_column:stop_column, start_row:stop_row] = self.data.tdac_map[start_column:stop_column, start_row:stop_row]
def analyze_data_online(self, data_tuple, receiver=None):
......
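A minimal numpy sketch (synthetic values) of the masked-mean fix above; boolean indexing with the enable mask keeps disabled pixels from biasing the reported mean TDAC:

import numpy as np

tdac = np.array([[7, 0], [9, 8]])
enable = np.array([[True, False], [True, True]])
print(tdac.mean())          # 6.0, biased by the disabled pixel
print(tdac[enable].mean())  # 8.0, mean over enabled pixels only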
@@ -242,7 +242,7 @@ class NoiseTuning(ScanBase):
self.log.success('Found optimal TDAC settings with a mean of {0:1.2f} and disabled {1} untunable pixels.'.format(mean_tdac, n_disabled_pixels))
self.log.success('Lowest possible {0} is {1}.'.format(VTH_name, self.data.vth))
self.data.hist_occ.stop.set() # stop analysis process
self.data.hist_occ.close() # stop analysis process
self.log.success('Scan finished')
# Used to overwrite data storing function: self.readout.handle_data
......
@@ -47,6 +47,7 @@ class TuneTlu(ScanBase):
def _configure(self, **_):
self.bdaq.configure_tlu_module()
self.configuration['bench']['analysis']['module_plotting'] = False # Chip data not available
def _scan(self, trigger_data_delay=range(0, 2**8), sleep=2, **_):
pbar = tqdm(total=len(trigger_data_delay), unit='Setting')
@@ -75,7 +76,7 @@ class TuneTlu(ScanBase):
raise RuntimeError('No trigger words recorded')
# Get scan parameters
scan_parameters = in_file_h5.root.configuration.scan_params[:]['trigger_data_delay']
scan_parameters = in_file_h5.root.configuration_out.scan.scan_params[:]['trigger_data_delay']
n_scan_pars = scan_parameters.shape[0]
# Output data
......
@@ -156,7 +156,7 @@ class TotTuning(ScanBase):
self.log.warning('Maximum number of iterations reached. Could not tune ToT for: ' + ', '.join(self.data.active_FEs))
self.data.start_data_taking = False
self.data.tot_hist.stop.set() # stop analysis process
self.data.tot_hist.close() # stop and wait for analysis process
self.log.success('Scan finished')
def analyze_data_online(self, data_tuple, receiver=None):
......