From 0b9cf3bf6a90c6ea08db108967f31a43b74ad085 Mon Sep 17 00:00:00 2001 From: Daniel Campora <dcampora@cern.ch> Date: Sun, 21 Feb 2021 02:39:29 +0100 Subject: [PATCH 1/7] First prototype that converts cmake include directories into target include directories. --- .../include/SciFiCalculateClusterCountV6.cuh | 34 +++++++++++++++ .../include/SciFiPreDecodeV6.cuh | 36 ++++++++++++++++ .../include/SciFiRawBankDecoderV4.cuh | 42 +++++++++++++++++++ 3 files changed, 112 insertions(+) create mode 100644 device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh create mode 100644 device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh create mode 100644 device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh diff --git a/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh b/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh new file mode 100644 index 00000000000..9d48c0abbcd --- /dev/null +++ b/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh @@ -0,0 +1,34 @@ +/*****************************************************************************\ +* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * +\*****************************************************************************/ +#pragma once + +#include "SciFiDefinitions.cuh" +#include "SciFiRaw.cuh" +#include "SciFiEventModel.cuh" +#include "AlgorithmTypes.cuh" + +namespace scifi_calculate_cluster_count_v6 { + struct Parameters { + HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; + MASK_INPUT(dev_event_list_t) dev_event_list; + DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; + DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; + DEVICE_OUTPUT(dev_scifi_hit_count_t, unsigned) dev_scifi_hit_count; + }; + + struct scifi_calculate_cluster_count_v6_t : public DeviceAlgorithm, Parameters { + void set_arguments_size( + ArgumentReferences<Parameters> arguments, + const RuntimeOptions&, + const Constants&, + const HostBuffers&) const; + + void operator()( + const ArgumentReferences<Parameters>& arguments, + const RuntimeOptions& runtime_options, + const Constants& constants, + HostBuffers&, + const Allen::Context& context) const; + }; +} // namespace scifi_calculate_cluster_count_v6 diff --git a/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh b/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh new file mode 100644 index 00000000000..e9c91f7943f --- /dev/null +++ b/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh @@ -0,0 +1,36 @@ +/*****************************************************************************\ +* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * +\*****************************************************************************/ +#pragma once + +#include "SciFiDefinitions.cuh" +#include "SciFiRaw.cuh" +#include "SciFiEventModel.cuh" +#include "AlgorithmTypes.cuh" + +namespace scifi_pre_decode_v6 { + struct Parameters { + HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; + HOST_INPUT(host_accumulated_number_of_scifi_hits_t, unsigned) host_accumulated_number_of_scifi_hits; + DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; + DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; + MASK_INPUT(dev_event_list_t) dev_event_list; + DEVICE_INPUT(dev_scifi_hit_offsets_t, unsigned) dev_scifi_hit_offsets; + DEVICE_OUTPUT(dev_cluster_references_t, unsigned) dev_cluster_references; + }; + + struct 
scifi_pre_decode_v6_t : public DeviceAlgorithm, Parameters { + void set_arguments_size( + ArgumentReferences<Parameters> arguments, + const RuntimeOptions&, + const Constants&, + const HostBuffers&) const; + + void operator()( + const ArgumentReferences<Parameters>& arguments, + const RuntimeOptions& runtime_options, + const Constants& constants, + HostBuffers&, + const Allen::Context& context) const; + }; +} // namespace scifi_pre_decode_v6 \ No newline at end of file diff --git a/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh b/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh new file mode 100644 index 00000000000..e44bfae4bf3 --- /dev/null +++ b/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh @@ -0,0 +1,42 @@ +/*****************************************************************************\ +* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * +\*****************************************************************************/ +#pragma once + +#include "SciFiDefinitions.cuh" +#include "SciFiRaw.cuh" +#include "SciFiEventModel.cuh" +#include "AlgorithmTypes.cuh" + +namespace scifi_raw_bank_decoder_v4 { + struct Parameters { + HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; + HOST_INPUT(host_accumulated_number_of_scifi_hits_t, unsigned) host_accumulated_number_of_scifi_hits; + DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; + DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; + DEVICE_INPUT(dev_scifi_hit_offsets_t, unsigned) dev_scifi_hit_offsets; + DEVICE_INPUT(dev_cluster_references_t, unsigned) dev_cluster_references; + MASK_INPUT(dev_event_list_t) dev_event_list; + DEVICE_INPUT(dev_number_of_events_t, unsigned) dev_number_of_events; + DEVICE_OUTPUT(dev_scifi_hits_t, char) dev_scifi_hits; + PROPERTY(block_dim_t, "block_dim", "block dimensions", DeviceDimensions) block_dim; + }; + + struct scifi_raw_bank_decoder_v4_t : public DeviceAlgorithm, Parameters { + void set_arguments_size( + ArgumentReferences<Parameters> arguments, + const RuntimeOptions&, + const Constants&, + const HostBuffers&) const; + + void operator()( + const ArgumentReferences<Parameters>& arguments, + const RuntimeOptions& runtime_options, + const Constants& constants, + HostBuffers&, + const Allen::Context& context) const; + + private: + Property<block_dim_t> m_block_dim {this, {{256, 1, 1}}}; + }; +} // namespace scifi_raw_bank_decoder_v4 \ No newline at end of file -- GitLab From ef039e384a409c88a564757b15a92b4179c3149c Mon Sep 17 00:00:00 2001 From: Roel Aaij <raaij@nikhef.nl> Date: Fri, 10 Sep 2021 11:07:56 +0200 Subject: [PATCH 2/7] Update CI scripts for rename of BUILD_TESTS to BUILD_TESTING --- scripts/ci/config/README.md | 91 +++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 scripts/ci/config/README.md diff --git a/scripts/ci/config/README.md b/scripts/ci/config/README.md new file mode 100644 index 00000000000..062109370af --- /dev/null +++ b/scripts/ci/config/README.md @@ -0,0 +1,91 @@ +# Allen CI configuration + +## Adding new devices + +1. Add an entry for the device to `devices.yaml`. Set `TARGET`, `DEVICE_ID`, and the `tag:` accordingly + +2. Add a job entry to run in the minimal pipeline: e.g. +```yaml +epyc7502: + extends: + - .epyc7502 + - .run_job + +``` + +3. 
Add a job entry to run in the full pipeline, taking care to `extends:` from the right key based on the `TARGET` of the device: +```yaml +epyc7502-full: + extends: + - .epyc7502 + - .[cuda/hip/cpu]_run_job + - .run_jobs_full +``` + +4. Add the jobs to the dependencies of `.device-jobs` and `.depend_full_run_jobs`: +```yaml +.device-jobs: + dependencies: + - ... + +.depend_full_run_jobs: + dependencies: + - ... +``` + +5. If you added a new CUDA device, check `OVERRIDE_CUDA_ARCH_FLAG` in `.gitlab-ci.yml` contains the right flags for this device + +## Adding new tests + +See [Gitlab CI documentation](https://docs.gitlab.com/ee/ci/yaml/#parallel-matrix-jobs) for more information on how the `parallel:matrix` keyword works. + +### to the minimal pipeline + +Add a key to `.run_matrix_jobs_minimal:parallel:matrix:` in `common-run.yaml` e.g. +```yaml + # efficiency tests + - TEST_NAME: "run_physics_efficiency" # name of the test - runs the bash script scripts/ci/jobs/$TEST_NAME.sh + SEQUENCE: ["hlt1_pp_validation"] # sequence(s) to run the test on + DATA_TAG: ["Upgrade_BsPhiPhi_MD_FTv4_DIGI_1k"] # input dataset +``` + +Other variables can be set (but are optional - see below). + +### to the full pipeline + +Add a key to `.run_matrix_jobs_full:parallel:matrix:` in `common-run.yaml` e.g. +```yaml + - TEST_NAME: "run_throughput" # name of the test - runs the bash script scripts/ci/jobs/$TEST_NAME.sh + BUILD_TYPE: ["RelWithDebInfo"] # use RelWithDebInfo build +# OPTIONS: [""] # leave out for default build, with no additional build options + SEQUENCE: ["hlt1_pp_default"] # sequence + DATA_TAG: ["SMOG2_pppHe_1k"] # dataset name +# GEOMETRY: [""] # don't add this, to use the default geom +``` + +If your test needs a build of Allen that is not yet included in the `build` stage, you will need to create one. + +In order to ensure the correct build from the `build` stage is used in your test, make sure that the + +- `${LCG_ARCHITECTURE}` (default value is set by `.run_jobs` key) +- `${BUILD_TYPE}` (default is `RelWithDebInfo` set by `.run_jobs` key) +- `${SEQUENCE}` (must be set in `.run_matrix_jobs_full:parallel:matrix:`) +- `${OPTIONS}` (optional, can be set in `.run_matrix_jobs_full:parallel:matrix:`) +- `${GEOMETRY}` (optional, can be left undefined or set if a specific geometry is needed) + +variables are correctly set and match. + +## Adding new efficiency reference files + +Create the reference file with the format `test/reference/${DATA_TAG}_${DEVICE_ID}.txt`. + + +## Adding new builds + +The `parallel:matrix:` keys will need to be modified in either `.build_job_minimal_matrix` or `.build_job_additional_matrix`. + +N.B. + +- `$TARGET` does not need to be set in `parallel:matrix:` for the full builds, but it will need to be for the minimal builds. +- `$OPTIONS` can be left blank or undefined. If options need to be passed to CMake e.g. `-DBUILD_TESTING=ON -DENABLE_CONTRACTS=ON`, then `$OPTIONS` can be set to `BUILD_TESTING+ENABLE_CONTRACTS` which will set both CMake options to `ON` by default. If you need this to be something other than `ON`, then you can do `BUILD_TESTING=OFF+ENABLE_CONTRACTS=OFF`, for example. +- In downstream `run`-stage jobs, the `$OPTIONS` variable content *must* match for the build to be found properly. 
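+
+For illustration, a hypothetical pair of matrix entries sharing the same `$OPTIONS` string might look like the following (the key names follow the examples above; the concrete values are placeholders rather than an actual configuration):
+```yaml
+# build-stage entry, e.g. in .build_job_additional_matrix
+  - BUILD_TYPE: ["RelWithDebInfo"]
+    OPTIONS: ["BUILD_TESTING+ENABLE_CONTRACTS"]
+
+# run-stage entry, e.g. in .run_matrix_jobs_full:parallel:matrix:
+  - TEST_NAME: "run_throughput"
+    BUILD_TYPE: ["RelWithDebInfo"]
+    OPTIONS: ["BUILD_TESTING+ENABLE_CONTRACTS"]
+    SEQUENCE: ["hlt1_pp_default"]
+    DATA_TAG: ["SMOG2_pppHe_1k"]
+```
+Here the matching `BUILD_TYPE` and `OPTIONS` values are what allow the run-stage job to pick up the corresponding build.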
-- GitLab From adb4f60f374ad0a6771a63bf4b333f91b53dd8bf Mon Sep 17 00:00:00 2001 From: Roel Aaij <raaij@nikhef.nl> Date: Wed, 2 Feb 2022 00:10:19 +0100 Subject: [PATCH 3/7] Update dependencies and cleanup cmake modules --- cmake/AllenDependencies.cmake | 4 +- cmake/modules/FindPythonLibsNew.cmake | 195 ---------------- cmake/modules/FindZMQ.cmake | 20 -- cmake/modules/Findpybind11.cmake | 97 -------- cmake/pybind11Tools.cmake | 209 ------------------ .../FindTBB.cmake | 0 .../Findcppgsl.cmake | 0 7 files changed, 3 insertions(+), 522 deletions(-) delete mode 100644 cmake/modules/FindPythonLibsNew.cmake delete mode 100644 cmake/modules/FindZMQ.cmake delete mode 100644 cmake/modules/Findpybind11.cmake delete mode 100644 cmake/pybind11Tools.cmake rename cmake/{modules => standalone_modules}/FindTBB.cmake (100%) rename cmake/{modules => standalone_modules}/Findcppgsl.cmake (100%) diff --git a/cmake/AllenDependencies.cmake b/cmake/AllenDependencies.cmake index b7e5070030f..9dcfdce90b3 100644 --- a/cmake/AllenDependencies.cmake +++ b/cmake/AllenDependencies.cmake @@ -30,7 +30,7 @@ if (NOT STANDALONE) endif() # -- Public dependencies - lhcb_find_package(Rec 34.0 REQUIRED) + lhcb_find_package(Rec REQUIRED) find_package(AIDA REQUIRED) find_package(fmt REQUIRED) @@ -63,6 +63,8 @@ if (NOT STANDALONE) endif() set(TARGET_DEVICE ${device} CACHE STRING "Target architecture of the device") +else() + list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/standalone_modules) endif() message(STATUS "Allen device target: " ${TARGET_DEVICE}) diff --git a/cmake/modules/FindPythonLibsNew.cmake b/cmake/modules/FindPythonLibsNew.cmake deleted file mode 100644 index b29b287de72..00000000000 --- a/cmake/modules/FindPythonLibsNew.cmake +++ /dev/null @@ -1,195 +0,0 @@ -# - Find python libraries -# This module finds the libraries corresponding to the Python interpreter -# FindPythonInterp provides. -# This code sets the following variables: -# -# PYTHONLIBS_FOUND - have the Python libs been found -# PYTHON_PREFIX - path to the Python installation -# PYTHON_LIBRARIES - path to the python library -# PYTHON_INCLUDE_DIRS - path to where Python.h is found -# PYTHON_MODULE_EXTENSION - lib extension, e.g. '.so' or '.pyd' -# PYTHON_MODULE_PREFIX - lib name prefix: usually an empty string -# PYTHON_SITE_PACKAGES - path to installation site-packages -# PYTHON_IS_DEBUG - whether the Python interpreter is a debug build -# -# Thanks to talljimbo for the patch adding the 'LDVERSION' config -# variable usage. - -#============================================================================= -# Copyright 2001-2009 Kitware, Inc. -# Copyright 2012 Continuum Analytics, Inc. -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# * Neither the names of Kitware, Inc., the Insight Software Consortium, -# nor the names of their contributors may be used to endorse or promote -# products derived from this software without specific prior written -# permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#============================================================================= - -# Checking for the extension makes sure that `LibsNew` was found and not just `Libs`. -if(PYTHONLIBS_FOUND AND PYTHON_MODULE_EXTENSION) - return() -endif() - -# Use the Python interpreter to find the libs. -if(PythonLibsNew_FIND_REQUIRED) - find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} REQUIRED) -else() - find_package(PythonInterp ${PythonLibsNew_FIND_VERSION}) -endif() - -if(NOT PYTHONINTERP_FOUND) - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# According to http://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter -# testing whether sys has the gettotalrefcount function is a reliable, cross-platform -# way to detect a CPython debug interpreter. -# -# The library suffix is from the config var LDVERSION sometimes, otherwise -# VERSION. VERSION will typically be like "2.7" on unix, and "27" on windows. -execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "from distutils import sysconfig as s;import sys;import struct; -print('.'.join(str(v) for v in sys.version_info)); -print(sys.prefix); -print(s.get_python_inc(plat_specific=True)); -print(s.get_python_lib(plat_specific=True)); -print(s.get_config_var('SO')); -print(hasattr(sys, 'gettotalrefcount')+0); -print(struct.calcsize('@P')); -print(s.get_config_var('LDVERSION') or s.get_config_var('VERSION')); -print(s.get_config_var('LIBDIR') or ''); -print(s.get_config_var('MULTIARCH') or ''); -" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE _PYTHON_VALUES - ERROR_VARIABLE _PYTHON_ERROR_VALUE) - -if(NOT _PYTHON_SUCCESS MATCHES 0) - if(PythonLibsNew_FIND_REQUIRED) - message(FATAL_ERROR - "Python config failure:\n${_PYTHON_ERROR_VALUE}") - endif() - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# Convert the process output into a list -string(REGEX REPLACE ";" "\\\\;" _PYTHON_VALUES ${_PYTHON_VALUES}) -string(REGEX REPLACE "\n" ";" _PYTHON_VALUES ${_PYTHON_VALUES}) -list(GET _PYTHON_VALUES 0 _PYTHON_VERSION_LIST) -list(GET _PYTHON_VALUES 1 PYTHON_PREFIX) -list(GET _PYTHON_VALUES 2 PYTHON_INCLUDE_DIR) -list(GET _PYTHON_VALUES 3 PYTHON_SITE_PACKAGES) -list(GET _PYTHON_VALUES 4 PYTHON_MODULE_EXTENSION) -list(GET _PYTHON_VALUES 5 PYTHON_IS_DEBUG) -list(GET _PYTHON_VALUES 6 PYTHON_SIZEOF_VOID_P) -list(GET _PYTHON_VALUES 7 PYTHON_LIBRARY_SUFFIX) -list(GET _PYTHON_VALUES 8 PYTHON_LIBDIR) -list(GET _PYTHON_VALUES 9 PYTHON_MULTIARCH) - -# Make sure the Python has the same pointer-size as the chosen compiler -# Skip if CMAKE_SIZEOF_VOID_P is not defined -if(CMAKE_SIZEOF_VOID_P AND (NOT "${PYTHON_SIZEOF_VOID_P}" STREQUAL "${CMAKE_SIZEOF_VOID_P}")) - if(PythonLibsNew_FIND_REQUIRED) - math(EXPR _PYTHON_BITS "${PYTHON_SIZEOF_VOID_P} * 8") - math(EXPR 
_CMAKE_BITS "${CMAKE_SIZEOF_VOID_P} * 8") - message(FATAL_ERROR - "Python config failure: Python is ${_PYTHON_BITS}-bit, " - "chosen compiler is ${_CMAKE_BITS}-bit") - endif() - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# The built-in FindPython didn't always give the version numbers -string(REGEX REPLACE "\\." ";" _PYTHON_VERSION_LIST ${_PYTHON_VERSION_LIST}) -list(GET _PYTHON_VERSION_LIST 0 PYTHON_VERSION_MAJOR) -list(GET _PYTHON_VERSION_LIST 1 PYTHON_VERSION_MINOR) -list(GET _PYTHON_VERSION_LIST 2 PYTHON_VERSION_PATCH) - -# Make sure all directory separators are '/' -string(REGEX REPLACE "\\\\" "/" PYTHON_PREFIX ${PYTHON_PREFIX}) -string(REGEX REPLACE "\\\\" "/" PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE_DIR}) -string(REGEX REPLACE "\\\\" "/" PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES}) - -if(CMAKE_HOST_WIN32) - set(PYTHON_LIBRARY - "${PYTHON_PREFIX}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib") - - # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the - # original python installation. They may be found relative to PYTHON_INCLUDE_DIR. - if(NOT EXISTS "${PYTHON_LIBRARY}") - get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY) - set(PYTHON_LIBRARY - "${_PYTHON_ROOT}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib") - endif() - - # raise an error if the python libs are still not found. - if(NOT EXISTS "${PYTHON_LIBRARY}") - message(FATAL_ERROR "Python libraries not found") - endif() - -else() - if(PYTHON_MULTIARCH) - set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}") - else() - set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}") - endif() - #message(STATUS "Searching for Python libs in ${_PYTHON_LIBS_SEARCH}") - # Probably this needs to be more involved. It would be nice if the config - # information the python interpreter itself gave us were more complete. - find_library(PYTHON_LIBRARY - NAMES "python${PYTHON_LIBRARY_SUFFIX}" - PATHS ${_PYTHON_LIBS_SEARCH} - NO_DEFAULT_PATH) - - # If all else fails, just set the name/version and let the linker figure out the path. - if(NOT PYTHON_LIBRARY) - set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX}) - endif() -endif() - -MARK_AS_ADVANCED( - PYTHON_LIBRARY - PYTHON_INCLUDE_DIR -) - -# We use PYTHON_INCLUDE_DIR, PYTHON_LIBRARY and PYTHON_DEBUG_LIBRARY for the -# cache entries because they are meant to specify the location of a single -# library. We now set the variables listed by the documentation for this -# module. 
-SET(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}") -SET(PYTHON_LIBRARIES "${PYTHON_LIBRARY}") -SET(PYTHON_DEBUG_LIBRARIES "${PYTHON_DEBUG_LIBRARY}") - -find_package_message(PYTHON - "Found PythonLibs: ${PYTHON_LIBRARY}" - "${PYTHON_EXECUTABLE}${PYTHON_VERSION}") - -set(PYTHONLIBS_FOUND TRUE) diff --git a/cmake/modules/FindZMQ.cmake b/cmake/modules/FindZMQ.cmake deleted file mode 100644 index 779ff0d8246..00000000000 --- a/cmake/modules/FindZMQ.cmake +++ /dev/null @@ -1,20 +0,0 @@ -############################################################################### -# (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration # -############################################################################### -# - Try to find ZMQ -# Once done this will define -# ZMQ_FOUND - System has ZMQ -# ZMQ_INCLUDE_DIRS - The ZMQ include directories -# ZMQ_LIBRARIES - The libraries needed to use ZMQ -# ZMQ_DEFINITIONS - Compiler switches required for using ZMQ - -find_path ( ZMQ_INCLUDE_DIR zmq.h ) -find_library ( ZMQ_LIBRARY NAMES zmq ) - -set ( ZMQ_LIBRARIES ${ZMQ_LIBRARY} ) -set ( ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR} ) - -include ( FindPackageHandleStandardArgs ) -# handle the QUIETLY and REQUIRED arguments and set ZMQ_FOUND to TRUE -# if all listed variables are TRUE -find_package_handle_standard_args ( ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR ) \ No newline at end of file diff --git a/cmake/modules/Findpybind11.cmake b/cmake/modules/Findpybind11.cmake deleted file mode 100644 index 31afb224a1a..00000000000 --- a/cmake/modules/Findpybind11.cmake +++ /dev/null @@ -1,97 +0,0 @@ -# - Find the NumPy libraries - -# This module finds if Pybind11 is installed, and sets the following variables -# indicating where it is. -# -# TODO: Update to provide the libraries and paths for linking npymath lib. -# -# PYBIND11_FOUND - was Pybind11 found -# PYBIND11_VERSION - the version of Pybind11 found as a string -# PYBIND11_VERSION_MAJOR - the major version number of Pybind11 -# PYBIND11_VERSION_MINOR - the minor version number of Pybind11 -# PYBIND11_VERSION_PATCH - the patch version number of Pybind11 -# PYBIND11_VERSION_DECIMAL - e.g. version 1.6.1 is 10601 -# PYBIND11_INCLUDE_DIRS - path to the Pybind11 include files - -#============================================================================ -# Copyright 2012 Continuum Analytics, Inc. -# -# MIT License -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to permit -# persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. 
-# -#============================================================================ - -# Finding Pybind11 involves calling the Python interpreter -find_package(pybind11 CONFIG QUIET) -if (pybind11_FOUND) - set(PYBIND11_FOUND TRUE) -else() - if(Pybind11_FIND_REQUIRED) - find_package(PythonInterp REQUIRED) - else() - find_package(PythonInterp) - endif() - - if(NOT PYTHONINTERP_FOUND) - set(PYBIND11_FOUND FALSE) - endif() - - execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import pybind11 as pb; print(pb.__version__); print(pb.get_include());" - RESULT_VARIABLE _PYBIND11_SEARCH_SUCCESS - OUTPUT_VARIABLE _PYBIND11_VALUES - ERROR_VARIABLE _PYBIND11_ERROR_VALUE - OUTPUT_STRIP_TRAILING_WHITESPACE) - - if(NOT _PYBIND11_SEARCH_SUCCESS MATCHES 0) - if(Pybind11_FIND_REQUIRED) - message(FATAL_ERROR - "pybind11 import failure:\n${_PYBIND11_ERROR_VALUE}") - endif() - set(PYBIND11_FOUND FALSE) - else() - set(PYBIND11_FOUND TRUE) - endif() - - if (PYBIND11_FOUND) - # Convert the process output into a list - string(REGEX REPLACE ";" "\\\\;" _PYBIND11_VALUES ${_PYBIND11_VALUES}) - string(REGEX REPLACE "\n" ";" _PYBIND11_VALUES ${_PYBIND11_VALUES}) - list(GET _PYBIND11_VALUES 0 PYBIND11_VERSION) - list(GET _PYBIND11_VALUES 1 PYBIND11_INCLUDE_DIRS) - - # Make sure all directory separators are '/' - string(REGEX REPLACE "\\\\" "/" PYBIND11_INCLUDE_DIRS ${PYBIND11_INCLUDE_DIRS}) - - # Get the major and minor version numbers - string(REGEX REPLACE "\\." ";" _PYBIND11_VERSION_LIST ${PYBIND11_VERSION}) - list(GET _PYBIND11_VERSION_LIST 0 PYBIND11_VERSION_MAJOR) - list(GET _PYBIND11_VERSION_LIST 1 PYBIND11_VERSION_MINOR) - list(GET _PYBIND11_VERSION_LIST 2 PYBIND11_VERSION_PATCH) - string(REGEX MATCH "[0-9]*" PYBIND11_VERSION_PATCH ${PYBIND11_VERSION_PATCH}) - math(EXPR PYBIND11_VERSION_DECIMAL - "(${PYBIND11_VERSION_MAJOR} * 10000) + (${PYBIND11_VERSION_MINOR} * 100) + ${PYBIND11_VERSION_PATCH}") - - find_package_message(PYBIND11 - "Found Pybind11: version \"${PYBIND11_VERSION}\" ${PYBIND11_INCLUDE_DIRS}" - "${PYBIND11_INCLUDE_DIRS}${PYBIND11_VERSION}") - endif() -endif() diff --git a/cmake/pybind11Tools.cmake b/cmake/pybind11Tools.cmake deleted file mode 100644 index 52a70c23c40..00000000000 --- a/cmake/pybind11Tools.cmake +++ /dev/null @@ -1,209 +0,0 @@ -# tools/pybind11Tools.cmake -- Build system for the pybind11 modules -# -# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch> -# -# All rights reserved. Use of this source code is governed by a -# BSD-style license that can be found in the LICENSE file. - -cmake_minimum_required(VERSION 2.8.12) - -# Add a CMake parameter for choosing a desired Python version -if(NOT PYBIND11_PYTHON_VERSION) - set(PYBIND11_PYTHON_VERSION "" CACHE STRING "Python version to use for compiling modules") -endif() - -set(Python_ADDITIONAL_VERSIONS 3.7 3.6 3.5 3.4) -find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} REQUIRED) - -include(CheckCXXCompilerFlag) -include(CMakeParseArguments) - -if(NOT PYBIND11_CPP_STANDARD AND NOT CMAKE_CXX_STANDARD) - if(NOT MSVC) - check_cxx_compiler_flag("-std=c++14" HAS_CPP14_FLAG) - - if (HAS_CPP14_FLAG) - set(PYBIND11_CPP_STANDARD -std=c++14) - else() - check_cxx_compiler_flag("-std=c++11" HAS_CPP11_FLAG) - if (HAS_CPP11_FLAG) - set(PYBIND11_CPP_STANDARD -std=c++11) - else() - message(FATAL_ERROR "Unsupported compiler -- pybind11 requires C++11 support!") - endif() - endif() - elseif(MSVC) - set(PYBIND11_CPP_STANDARD /std:c++14) - endif() - - set(PYBIND11_CPP_STANDARD ${PYBIND11_CPP_STANDARD} CACHE STRING - "C++ standard flag, e.g. 
-std=c++11, -std=c++14, /std:c++14. Defaults to C++14 mode." FORCE) -endif() - -# Checks whether the given CXX/linker flags can compile and link a cxx file. cxxflags and -# linkerflags are lists of flags to use. The result variable is a unique variable name for each set -# of flags: the compilation result will be cached base on the result variable. If the flags work, -# sets them in cxxflags_out/linkerflags_out internal cache variables (in addition to ${result}). -function(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out linkerflags_out) - set(CMAKE_REQUIRED_LIBRARIES ${linkerflags}) - check_cxx_compiler_flag("${cxxflags}" ${result}) - if (${result}) - set(${cxxflags_out} "${cxxflags}" CACHE INTERNAL "" FORCE) - set(${linkerflags_out} "${linkerflags}" CACHE INTERNAL "" FORCE) - endif() -endfunction() - -# Internal: find the appropriate link time optimization flags for this compiler -function(_pybind11_add_lto_flags target_name prefer_thin_lto) - if (NOT DEFINED PYBIND11_LTO_CXX_FLAGS) - set(PYBIND11_LTO_CXX_FLAGS "" CACHE INTERNAL "") - set(PYBIND11_LTO_LINKER_FLAGS "" CACHE INTERNAL "") - - if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") - set(cxx_append "") - set(linker_append "") - if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT APPLE) - # Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it - set(linker_append ";$<$<CONFIG:MinSizeRel>:-O3>") - elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU") - set(cxx_append ";-fno-fat-lto-objects") - endif() - - if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND prefer_thin_lto) - _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO_THIN - "-flto=thin${cxx_append}" "-flto=thin${linker_append}" - PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS) - endif() - - if (NOT HAS_FLTO_THIN) - _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO - "-flto${cxx_append}" "-flto${linker_append}" - PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS) - endif() - elseif (CMAKE_CXX_COMPILER_ID MATCHES "Intel") - # Intel equivalent to LTO is called IPO - _pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO - "-ipo" "-ipo" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS) - elseif(MSVC) - # cmake only interprets libraries as linker flags when they start with a - (otherwise it - # converts /LTCG to \LTCG as if it was a Windows path). 
Luckily MSVC supports passing flags - # with - instead of /, even if it is a bit non-standard: - _pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG - "/GL" "-LTCG" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS) - endif() - - if (PYBIND11_LTO_CXX_FLAGS) - message(STATUS "LTO enabled") - else() - message(STATUS "LTO disabled (not supported by the compiler and/or linker)") - endif() - endif() - - # Enable LTO flags if found, except for Debug builds - if (PYBIND11_LTO_CXX_FLAGS) - target_compile_options(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_CXX_FLAGS}>") - endif() - if (PYBIND11_LTO_LINKER_FLAGS) - target_link_libraries(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_LINKER_FLAGS}>") - endif() -endfunction() - -# Build a Python extension module: -# pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL] -# [NO_EXTRAS] [THIN_LTO] source1 [source2 ...]) -# -function(pybind11_add_module target_name) - set(options MODULE SHARED EXCLUDE_FROM_ALL NO_EXTRAS THIN_LTO) - cmake_parse_arguments(ARG "${options}" "" "" ${ARGN}) - - if(ARG_MODULE AND ARG_SHARED) - message(FATAL_ERROR "Can't be both MODULE and SHARED") - elseif(ARG_SHARED) - set(lib_type SHARED) - else() - set(lib_type MODULE) - endif() - - if(ARG_EXCLUDE_FROM_ALL) - set(exclude_from_all EXCLUDE_FROM_ALL) - endif() - - add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS}) - - target_include_directories(${target_name} - PRIVATE ${PYBIND11_INCLUDE_DIR} # from project CMakeLists.txt - PRIVATE ${pybind11_INCLUDE_DIR} # from pybind11Config - PRIVATE ${PYTHON_INCLUDE_DIRS}) - - # Python debug libraries expose slightly different objects - # https://docs.python.org/3.6/c-api/intro.html#debugging-builds - # https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib - if(PYTHON_IS_DEBUG) - target_compile_definitions(${target_name} PRIVATE Py_DEBUG) - endif() - - # The prefix and extension are provided by FindPythonLibsNew.cmake - set_target_properties(${target_name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}") - set_target_properties(${target_name} PROPERTIES SUFFIX "${PYTHON_MODULE_EXTENSION}") - - # -fvisibility=hidden is required to allow multiple modules compiled against - # different pybind versions to work properly, and for some features (e.g. - # py::module_local). We force it on everything inside the `pybind11` - # namespace; also turning it on for a pybind module compilation here avoids - # potential warnings or issues from having mixed hidden/non-hidden types. - set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden") - - if(WIN32 OR CYGWIN) - # Link against the Python shared library on Windows - target_link_libraries(${target_name} PRIVATE ${PYTHON_LIBRARIES}) - elseif(APPLE) - # It's quite common to have multiple copies of the same Python version - # installed on one's system. E.g.: one copy from the OS and another copy - # that's statically linked into an application like Blender or Maya. - # If we link our plugin library against the OS Python here and import it - # into Blender or Maya later on, this will cause segfaults when multiple - # conflicting Python instances are active at the same time (even when they - # are of the same version). - - # Windows is not affected by this issue since it handles DLL imports - # differently. The solution for Linux and Mac OS is simple: we just don't - # link against the Python library. 
The resulting shared library will have - # missing symbols, but that's perfectly fine -- they will be resolved at - # import time. - - target_link_libraries(${target_name} PRIVATE "-undefined dynamic_lookup") - - if(ARG_SHARED) - # Suppress CMake >= 3.0 warning for shared libraries - set_target_properties(${target_name} PROPERTIES MACOSX_RPATH ON) - endif() - endif() - - # Make sure C++11/14 are enabled - target_compile_options(${target_name} PUBLIC ${PYBIND11_CPP_STANDARD}) - - if(ARG_NO_EXTRAS) - return() - endif() - - _pybind11_add_lto_flags(${target_name} ${ARG_THIN_LTO}) - - if (NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug) - # Strip unnecessary sections of the binary on Linux/Mac OS - if(CMAKE_STRIP) - if(APPLE) - add_custom_command(TARGET ${target_name} POST_BUILD - COMMAND ${CMAKE_STRIP} -x $<TARGET_FILE:${target_name}>) - else() - add_custom_command(TARGET ${target_name} POST_BUILD - COMMAND ${CMAKE_STRIP} $<TARGET_FILE:${target_name}>) - endif() - endif() - endif() - - if(MSVC) - # /MP enables multithreaded builds (relevant when there are many files), /bigobj is - # needed for bigger binding projects due to the limit to 64k addressable sections - target_compile_options(${target_name} PRIVATE /MP /bigobj) - endif() -endfunction() diff --git a/cmake/modules/FindTBB.cmake b/cmake/standalone_modules/FindTBB.cmake similarity index 100% rename from cmake/modules/FindTBB.cmake rename to cmake/standalone_modules/FindTBB.cmake diff --git a/cmake/modules/Findcppgsl.cmake b/cmake/standalone_modules/Findcppgsl.cmake similarity index 100% rename from cmake/modules/Findcppgsl.cmake rename to cmake/standalone_modules/Findcppgsl.cmake -- GitLab From 95e3cf1db801c90c0b835287edf3c73dfe0cb3bd Mon Sep 17 00:00:00 2001 From: ascarabo <alessandro.scarabotto@cern.ch> Date: Thu, 3 Feb 2022 12:07:06 +0100 Subject: [PATCH 4/7] subdetector banks in BanksAndOffsets --- host/global_event_cut/test/mep_gec.cpp | 6 +++--- main/include/BankTypes.h | 2 +- main/include/MDFProvider.h | 3 ++- main/include/TESProvider.h | 3 ++- main/include/TransposeTypes.h | 4 +++- main/src/Provider.cpp | 2 +- main/src/SliceUtils.cpp | 6 ++++-- main/src/Transpose.cpp | 6 ++++-- mdf/test/test_mdf_transpose.cpp | 2 +- mdf/test/test_providers.cpp | 2 +- 10 files changed, 22 insertions(+), 14 deletions(-) diff --git a/host/global_event_cut/test/mep_gec.cpp b/host/global_event_cut/test/mep_gec.cpp index 97e450dd08c..e83c20613ef 100644 --- a/host/global_event_cut/test/mep_gec.cpp +++ b/host/global_event_cut/test/mep_gec.cpp @@ -106,7 +106,7 @@ int main(int argc, char* argv[]) for (auto& [ids, at] : {std::tuple {std::ref(scifi_block_ids), BankTypes::FT}, std::tuple {std::ref(ut_block_ids), BankTypes::UT}}) { if (allen_type == to_integral(at)) { - auto& [spans, offset, offsets, offsets_size] = slices[allen_type][0]; + auto& [spans, offset, offsets, offsets_size, subdetectors_banks] = slices[allen_type][0]; ids.get().emplace_back(i_block); spans.emplace_back(const_cast<char*>(block_span.data()), block_span.size()); // auto* data_start = spans[0].begin(); @@ -121,7 +121,7 @@ int main(int argc, char* argv[]) slices, 0, bank_ids, {BankTypes::UT, BankTypes::FT}, banks_count, events, mep_header, blocks, {0, interval}); auto scifi_allen_type = to_integral(BankTypes::FT); - auto const& [scifi_data, scifi_data_size, scifi_offsets, scifi_offsets_size] = slices[scifi_allen_type][0]; + auto const& [scifi_data, scifi_data_size, scifi_offsets, scifi_offsets_size, subdetectors_banks] = slices[scifi_allen_type][0]; auto n_scifi_fragments = 
scifi_block_ids.size(); @@ -139,7 +139,7 @@ int main(int argc, char* argv[]) auto slice_to_banks = [&slices](int slice_index, BankTypes bank_type) { auto bt = to_integral(bank_type); - auto const& [data, data_size, offsets, offsets_size] = slices[bt][slice_index]; + auto const& [data, data_size, offsets, offsets_size, subdetectors_banks] = slices[bt][slice_index]; BanksAndOffsets bno; auto& spans = std::get<0>(bno); spans.reserve(data.size()); diff --git a/main/include/BankTypes.h b/main/include/BankTypes.h index 20cbe08405f..b57d3c3eb0c 100644 --- a/main/include/BankTypes.h +++ b/main/include/BankTypes.h @@ -59,7 +59,7 @@ constexpr auto to_integral(ENUM e) -> typename std::underlying_type<ENUM>::type return static_cast<typename std::underlying_type<ENUM>::type>(e); } -using BanksAndOffsets = std::tuple<std::vector<gsl::span<const char>>, size_t, gsl::span<const unsigned int>, int>; +using BanksAndOffsets = std::tuple<std::vector<gsl::span<const char>>, size_t, gsl::span<const unsigned int>, int, std::vector<std::vector<unsigned char>>>; template<BankTypes... BANKS> std::unordered_set<BankTypes> banks_set() diff --git a/main/include/MDFProvider.h b/main/include/MDFProvider.h index 56bf8c11392..4a0603e06a1 100644 --- a/main/include/MDFProvider.h +++ b/main/include/MDFProvider.h @@ -274,10 +274,11 @@ public: auto const& banks = std::get<0>(tup); auto const offsets = std::get<2>(tup); auto const offsets_size = std::get<3>(tup); + auto const subdetectors_banks = std::get<4>(tup); gsl::span<char const> b {banks[0].data(), offsets[offsets_size - 1]}; gsl::span<unsigned int const> o {offsets.data(), static_cast<::offsets_size>(offsets_size)}; - return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), m_banks_version[ib]}; + return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), m_banks_version[ib], subdetectors_banks}; } /** diff --git a/main/include/TESProvider.h b/main/include/TESProvider.h index 6c887fa9b73..7feecb5712c 100644 --- a/main/include/TESProvider.h +++ b/main/include/TESProvider.h @@ -67,9 +67,10 @@ public: // bank content auto data_size = static_cast<span_size_t<char const>>(banks.size()); gsl::span<char const> b {banks.data(), data_size}; + std::vector<std::vector<unsigned char>> subdetectors_banks; m_banks_and_offsets[allen_bank_index] = { - {std::move(b)}, static_cast<std::size_t>(data_size), {offsets.data(), 2u}, version}; + {std::move(b)}, static_cast<std::size_t>(data_size), {offsets.data(), 2u}, version, subdetectors_banks}; } return 0; diff --git a/main/include/TransposeTypes.h b/main/include/TransposeTypes.h index 106b45b480f..0894e832672 100644 --- a/main/include/TransposeTypes.h +++ b/main/include/TransposeTypes.h @@ -32,7 +32,9 @@ namespace Allen { // A slice contains transposed bank data, offsets to the start of each // set of banks and the number of sets of banks - using Slice = std::tuple<std::vector<gsl::span<char>>, size_t, gsl::span<unsigned int>, size_t>; + //adding vector<vector> for subdetectors banks to access error banks from BankAndOffsets object + using Slice = std::tuple<std::vector<gsl::span<char>>, size_t, gsl::span<unsigned int>, size_t, std::vector<std::vector<unsigned char>>>; + using BankSlices = std::vector<Slice>; using Slices = std::array<BankSlices, NBankTypes>; diff --git a/main/src/Provider.cpp b/main/src/Provider.cpp index cd6c80f6541..a03f4a8c1a4 100644 --- a/main/src/Provider.cpp +++ b/main/src/Provider.cpp @@ -28,7 +28,7 @@ namespace { std::unordered_set<BankTypes> 
Allen::configured_bank_types(std::string const& json_file) { // Bank types - std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN}; + std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN, BankTypes::MUON, BankTypes::ECal, BankTypes::HCal}; ConfigurationReader configuration_reader {json_file}; auto const& configuration = configuration_reader.params(); for (auto const& [key, props] : configuration) { diff --git a/main/src/SliceUtils.cpp b/main/src/SliceUtils.cpp index 439116e97af..e0451224952 100644 --- a/main/src/SliceUtils.cpp +++ b/main/src/SliceUtils.cpp @@ -29,7 +29,7 @@ void reset_slice( // "Reset" the slice for (auto bank_type : bank_types) { auto ib = to_integral(bank_type); - auto& [banks, data_size, offsets, offsets_size] = slices[ib][slice_index]; + auto& [banks, data_size, offsets, offsets_size, subdetectors_banks] = slices[ib][slice_index]; std::fill(offsets.begin(), offsets.end(), 0); offsets_size = 1; if (mep) { @@ -66,8 +66,10 @@ Allen::Slices allocate_slices( if (n_bytes) { spans.emplace_back(events_mem, n_bytes); } + //subdetector vector to access error banks + std::vector<std::vector<unsigned char>> subdetectors_banks; bank_slices.emplace_back( - std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1); + std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1, subdetectors_banks); } } return slices; diff --git a/main/src/Transpose.cpp b/main/src/Transpose.cpp index c3839036d28..a5f4d7e2dcc 100644 --- a/main/src/Transpose.cpp +++ b/main/src/Transpose.cpp @@ -223,7 +223,7 @@ std::tuple<bool, bool, bool> transpose_event( // little space to fit this event for (auto allen_type : bank_types) { auto const ia = to_integral(allen_type); - const auto& [slice, slice_size, slice_offsets, offsets_size] = slices[ia][slice_index]; + const auto& [slice, slice_size, slice_offsets, offsets_size, subdetectors_banks] = slices[ia][slice_index]; // Use the event size of the next event here instead of the // per bank size because that's not yet known for the next // event @@ -442,8 +442,10 @@ Allen::Slices allocate_slices( if (n_bytes) { spans.emplace_back(events_mem, n_bytes); } + //subdetector vector to access error banks + std::vector<std::vector<unsigned char>> subdetectors_banks; bank_slices.emplace_back( - std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1); + std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1, subdetectors_banks); } } return slices; diff --git a/mdf/test/test_mdf_transpose.cpp b/mdf/test/test_mdf_transpose.cpp index ed5c567877a..6c36aa4f726 100644 --- a/mdf/test/test_mdf_transpose.cpp +++ b/mdf/test/test_mdf_transpose.cpp @@ -300,7 +300,7 @@ TEST_CASE("MDF slice full", "[MDF slice]") // comparing event and run numbers from ODIN size_t i = 0; auto oi = to_integral(BankTypes::ODIN); - for (auto const& [banks, _, event_offsets, n_offsets] : slices[oi]) { + for (auto const& [banks, _, event_offsets, n_offsets, subdetectors_banks] : slices[oi]) { for (size_t j = 0; j < n_offsets - 1; ++j) { auto const& read_odin = odins[i]; auto const* odin_data = diff --git a/mdf/test/test_providers.cpp b/mdf/test/test_providers.cpp index c3741811982..5e15b04fddb 100644 --- a/mdf/test/test_providers.cpp +++ b/mdf/test/test_providers.cpp @@ -64,7 +64,7 @@ namespace { BanksAndOffsets mep_banks(Slices& slices, BankTypes bank_type, size_t slice_index) { auto ib = to_integral<BankTypes>(bank_type); - 
auto const& [banks, banks_size, offsets, offsets_size] = slices[ib][slice_index]; + auto const& [banks, banks_size, offsets, offsets_size, subdetectors_banks] = slices[ib][slice_index]; span<char const> b {banks[0].data(), offsets[offsets_size - 1]}; span<unsigned int const> o {offsets.data(), static_cast<::offsets_size>(offsets_size)}; return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), banks_version[ib]}; -- GitLab From c95b878fb41823e65966dfe614ee352eedcd371b Mon Sep 17 00:00:00 2001 From: ascarabo <alessandro.scarabotto@cern.ch> Date: Mon, 7 Feb 2022 17:55:49 +0100 Subject: [PATCH 5/7] hlt1 error banks cut line --- configuration/python/AllenConf/HLT1.py | 19 ++++- configuration/python/AllenConf/utils.py | 8 +- configuration/sequences/passthrough.py | 31 ++++--- .../include/HostErrorBanksCut.h | 41 ++++++++++ .../src/HostErrorBanksCut.cpp | 81 +++++++++++++++++++ main/include/BankMapping.h | 43 ++++++---- main/include/TESProvider.h | 4 +- main/src/Provider.cpp | 3 +- mdf/test/test_providers.cpp | 2 +- 9 files changed, 203 insertions(+), 29 deletions(-) create mode 100644 host/global_event_cut/include/HostErrorBanksCut.h create mode 100644 host/global_event_cut/src/HostErrorBanksCut.cpp diff --git a/configuration/python/AllenConf/HLT1.py b/configuration/python/AllenConf/HLT1.py index 0225d71825f..e28a68f1f20 100644 --- a/configuration/python/AllenConf/HLT1.py +++ b/configuration/python/AllenConf/HLT1.py @@ -1,7 +1,7 @@ ############################################################################### # (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration # ############################################################################### -from AllenConf.utils import initialize_number_of_events, mep_layout, gec +from AllenConf.utils import initialize_number_of_events, mep_layout, gec, error_banks_cut from AllenConf.hlt1_reconstruction import hlt1_reconstruction, validator_node from AllenConf.hlt1_inclusive_hadron_lines import make_track_mva_line, make_two_track_mva_line, make_kstopipi_line, make_two_track_line_ks from AllenConf.hlt1_charm_lines import make_d2kk_line, make_d2pipi_line, make_two_ks_line @@ -27,6 +27,14 @@ def make_line_composite_node_with_gec(line_name, NodeLogic.LAZY_AND, force_order=True) +def make_line_composite_node_with_ebc(line_name, + line_algorithm, + ebc_name="ebc"): + return CompositeNode( + line_name, [error_banks_cut(name=ebc_name), line_algorithm], + NodeLogic.LAZY_AND, + force_order=True) + @configurable def line_maker(line_name, line_algorithm, enableGEC=True): @@ -36,6 +44,15 @@ def line_maker(line_name, line_algorithm, enableGEC=True): node = line_algorithm return line_algorithm, node +@configurable +def line_maker_ebc(line_name, line_algorithm, enableEBC=True): + if (enableEBC): + node = make_line_composite_node_with_ebc(line_name, line_algorithm) + else: + node = line_algorithm + + return line_algorithm, node + def default_physics_lines(velo_tracks, forward_tracks, kalman_velo_only, secondary_vertices, calo_matching_objects): diff --git a/configuration/python/AllenConf/utils.py b/configuration/python/AllenConf/utils.py index 26eedea7094..5b1fd72e44d 100644 --- a/configuration/python/AllenConf/utils.py +++ b/configuration/python/AllenConf/utils.py @@ -4,7 +4,7 @@ from AllenCore.generator import make_algorithm from AllenConf.algorithms import (host_init_number_of_events_t, host_data_provider_t, - host_global_event_cut_t, layout_provider_t) + host_global_event_cut_t, layout_provider_t, host_error_banks_cut_t) def 
initialize_number_of_events(): @@ -40,6 +40,12 @@ def gec(name="gec", min_scifi_ut_clusters=0, max_scifi_ut_clusters=9750): return gec +def error_banks_cut(name="error_banks_cut"): + number_of_events = initialize_number_of_events() + error_banks_cut = make_algorithm(host_error_banks_cut_t, name=name) + return error_banks_cut + + def mep_layout(): layout = make_algorithm(layout_provider_t, name="mep_layout") diff --git a/configuration/sequences/passthrough.py b/configuration/sequences/passthrough.py index 09ae23b7f1a..5dcacdd3281 100644 --- a/configuration/sequences/passthrough.py +++ b/configuration/sequences/passthrough.py @@ -7,7 +7,7 @@ from AllenConf.hlt1_calibration_lines import make_passthrough_line from AllenConf.persistency import make_global_decision from AllenConf.odin import decode_odin from AllenConf.algorithms import data_provider_t -from AllenConf.HLT1 import line_maker +from AllenConf.HLT1 import line_maker, line_maker_ebc from AllenConf.validators import rate_validation bank_providers = [decode_odin()['dev_odin_raw_input'].producer] @@ -18,23 +18,36 @@ bank_providers = [decode_odin()['dev_odin_raw_input'].producer] # bank_providers.append( # make_algorithm(data_provider_t, name=det + "_banks", bank_type=bt)) -passthrough_line = line_maker( - "Hlt1Passthrough", +# passthrough_line = line_maker( +# "Hlt1Passthrough", +# make_passthrough_line( +# name="Hlt1Passthrough", +# pre_scaler_hash_string="passthrough_line_pre", +# post_scaler_hash_string="passthrough_line_post"), +# enableGEC=False) + +#passthrough line cutting events with error banks +passthrough_error_banks_cut_line = line_maker_ebc( + "Hlt1PassthroughErrorBanksCut", make_passthrough_line( - name="Hlt1Passthrough", - pre_scaler_hash_string="passthrough_line_pre", - post_scaler_hash_string="passthrough_line_post"), - enableGEC=False) + name="Hlt1PassthroughErrorBanksCut", + pre_scaler_hash_string="passthrough_error_banks_line_pre", + post_scaler_hash_string="passthrough_error_banks_line_post"), enableEBC=True) + + +# line_algorithms = [passthrough_line[0]] +line_algorithms = [passthrough_error_banks_cut_line[0]] -line_algorithms = [passthrough_line[0]] global_decision = make_global_decision(lines=line_algorithms) providers = CompositeNode( "Providers", bank_providers, NodeLogic.NONLAZY_AND, force_order=False) +# lines = CompositeNode( +# "AllLines", [passthrough_line[1]], NodeLogic.NONLAZY_OR, force_order=False) lines = CompositeNode( - "AllLines", [passthrough_line[1]], NodeLogic.NONLAZY_OR, force_order=False) + "AllLines", [passthrough_error_banks_cut_line[1]], NodeLogic.NONLAZY_OR, force_order=False) passthrough_sequence = CompositeNode( "Passthrough", diff --git a/host/global_event_cut/include/HostErrorBanksCut.h b/host/global_event_cut/include/HostErrorBanksCut.h new file mode 100644 index 00000000000..a60f8e6497b --- /dev/null +++ b/host/global_event_cut/include/HostErrorBanksCut.h @@ -0,0 +1,41 @@ +/*****************************************************************************\ +* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * +\*****************************************************************************/ +#pragma once + +#include "Common.h" +#include "AlgorithmTypes.cuh" +#include "InputProvider.h" +#include "BankMapping.h" +#include <gsl/span> + +namespace host_error_banks_cut { + struct Parameters { + HOST_OUTPUT(host_event_list_output_t, unsigned) host_event_list; + HOST_OUTPUT(host_number_of_events_t, unsigned) host_number_of_events; + HOST_OUTPUT(host_number_of_selected_events_t, 
unsigned) host_number_of_selected_events; + DEVICE_OUTPUT(dev_number_of_events_t, unsigned) dev_number_of_events; + MASK_OUTPUT(dev_event_list_output_t) dev_event_list; + }; + + // Algorithm + struct host_error_banks_cut_t : public HostAlgorithm, Parameters { + void set_arguments_size( + ArgumentReferences<Parameters> arguments, + const RuntimeOptions& runtime_options, + const Constants&, + const HostBuffers&) const; + + void operator()( + const ArgumentReferences<Parameters>& arguments, + const RuntimeOptions& runtime_options, + const Constants&, + HostBuffers& host_buffers, + const Allen::Context& context) const; + + }; +} // namespace host_error_banks_cut + + + + diff --git a/host/global_event_cut/src/HostErrorBanksCut.cpp b/host/global_event_cut/src/HostErrorBanksCut.cpp new file mode 100644 index 00000000000..7b4c7f866e4 --- /dev/null +++ b/host/global_event_cut/src/HostErrorBanksCut.cpp @@ -0,0 +1,81 @@ +/*****************************************************************************\ +* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * +\*****************************************************************************/ +#include "HostErrorBanksCut.h" + +INSTANTIATE_ALGORITHM(host_error_banks_cut::host_error_banks_cut_t) + + +void host_error_banks_cut::host_error_banks_cut_t::set_arguments_size( + ArgumentReferences<Parameters> arguments, + const RuntimeOptions& runtime_options, + const Constants&, + const HostBuffers&) const +{ + const auto number_of_events = + std::get<1>(runtime_options.event_interval) - std::get<0>(runtime_options.event_interval); + + set_size<host_number_of_selected_events_t>(arguments, 1); + set_size<host_number_of_events_t>(arguments, 1); + set_size<host_event_list_output_t>(arguments, number_of_events); + set_size<dev_number_of_events_t>(arguments, 1); + set_size<dev_event_list_output_t>(arguments, number_of_events); +} + +void host_error_banks_cut::host_error_banks_cut_t::operator()( + const ArgumentReferences<Parameters>& arguments, + const RuntimeOptions& runtime_options, + const Constants&, + HostBuffers& host_buffers, + const Allen::Context& context) const +{ + const auto event_start = std::get<0>(runtime_options.event_interval); + const auto event_end = std::get<1>(runtime_options.event_interval); + const auto number_of_events = event_end - event_start; + + // Initialize number of events + data<host_number_of_events_t>(arguments)[0] = number_of_events; + + unsigned size_of_list = 0; + std::vector<bool> events_with_error_banks(number_of_events, false); + + for(auto bank_type: {BankTypes::MUON,BankTypes::ECal,BankTypes::HCal}) { + + auto bno = runtime_options.input_provider->banks(bank_type, runtime_options.slice_index); + auto events_raw_banks = std::get<4>(bno); + + for (unsigned index_event = 0; index_event < event_end; ++index_event) { + if (index_event >= event_start) { + //looping over mfps of that subdetector + for(auto raw_banks: events_raw_banks) { + auto lhcb_bank = raw_banks[index_event]; + //list of error banks + if (std::count(Allen::ErrorBanksList.begin(), Allen::ErrorBanksList.end(), static_cast<int>(lhcb_bank))){ + events_with_error_banks[index_event] = true;} + }//loop mfps + }//events range + }//loop events + + }//loop subdetectors + + for (unsigned index_event = 0; index_event < event_end; ++index_event) { + if(index_event >= event_start && events_with_error_banks[index_event] == false){ + data<host_event_list_output_t>(arguments)[size_of_list++] = index_event - event_start;}} + + 
data<host_number_of_selected_events_t>(arguments)[0] = size_of_list; + + // Reduce the size of the event lists to the selected events + reduce_size<host_event_list_output_t>(arguments, first<host_number_of_selected_events_t>(arguments)); + reduce_size<dev_event_list_output_t>(arguments, first<host_number_of_selected_events_t>(arguments)); + + // Copy data to the device + Allen::copy_async<dev_number_of_events_t, host_number_of_events_t>(arguments, context); + Allen::copy_async<dev_event_list_output_t, host_event_list_output_t>(arguments, context); + + if (runtime_options.fill_extra_host_buffers) { + host_buffers.host_number_of_selected_events = first<host_number_of_selected_events_t>(arguments); + for (unsigned i = 0; i < size<host_event_list_output_t>(arguments); ++i) { + host_buffers.host_event_list[i] = event_start + data<host_event_list_output_t>(arguments)[i]; + } + } +} diff --git a/main/include/BankMapping.h b/main/include/BankMapping.h index e61dc9a6efe..9876ebe0fb1 100644 --- a/main/include/BankMapping.h +++ b/main/include/BankMapping.h @@ -22,20 +22,35 @@ namespace Allen { {LHCb::RawBank::OTError, BankTypes::OTError}, // used for PV MC info {LHCb::RawBank::OTRaw, BankTypes::OTRaw}}; // used for track MC info - const std::unordered_map<SourceIdSys, BankTypes> subdetectors = { - {SourceIdSys::SourceIdSys_ODIN, BankTypes::ODIN}, - {SourceIdSys::SourceIdSys_VELO_A, BankTypes::VP}, - {SourceIdSys::SourceIdSys_VELO_C, BankTypes::VP}, - {SourceIdSys::SourceIdSys_VELO_A, BankTypes::VPRetinaCluster}, - {SourceIdSys::SourceIdSys_VELO_C, BankTypes::VPRetinaCluster}, - {SourceIdSys::SourceIdSys_UT_A, BankTypes::UT}, - {SourceIdSys::SourceIdSys_UT_C, BankTypes::UT}, - {SourceIdSys::SourceIdSys_SCIFI_A, BankTypes::FT}, - {SourceIdSys::SourceIdSys_SCIFI_C, BankTypes::FT}, - {SourceIdSys::SourceIdSys_MUON_A, BankTypes::MUON}, - {SourceIdSys::SourceIdSys_MUON_C, BankTypes::MUON}, - {SourceIdSys::SourceIdSys_HCAL, BankTypes::HCal}, - {SourceIdSys::SourceIdSys_ECAL, BankTypes::ECal}}; + //known error raw banks from https://edms.cern.ch/ui/#!master/navigator/document?P:100034379:100699586:subDocs + //to be updated if changes + const std::vector<int> ErrorBanksList { + 29, //VeloError + 50, //MuonError + 68, //UTError (still not available) + 78, //CaloError + 81, //RichError + 82, //FTSpecial + 192, //DaqErrorBase + 250, //DaqErrorFragmentThrottled + 251, //DaqErrorBXIDCorrupted + 252, //DaqErrorBXIDJump + 253, //DaqErrorFragmentMissing + 254, //DaqErrorFragmentTruncated + 255 //DaqErrorInvalid + }; + + const std::unordered_map<SourceIdSys, BankTypes> subdetectors = {{SourceIdSys::SourceIdSys_ODIN, BankTypes::ODIN}, + {SourceIdSys::SourceIdSys_VELO_A, BankTypes::VP}, + {SourceIdSys::SourceIdSys_VELO_C, BankTypes::VP}, + {SourceIdSys::SourceIdSys_UT_A, BankTypes::UT}, + {SourceIdSys::SourceIdSys_UT_C, BankTypes::UT}, + {SourceIdSys::SourceIdSys_SCIFI_A, BankTypes::FT}, + {SourceIdSys::SourceIdSys_SCIFI_C, BankTypes::FT}, + {SourceIdSys::SourceIdSys_MUON_A, BankTypes::MUON}, + {SourceIdSys::SourceIdSys_MUON_C, BankTypes::MUON}, + {SourceIdSys::SourceIdSys_HCAL, BankTypes::HCal}, + {SourceIdSys::SourceIdSys_ECAL, BankTypes::ECal}}; const unsigned NSourceIdSys = to_integral(SourceIdSys::SourceIdSys_TDET) + 1; } // namespace Allen diff --git a/main/include/TESProvider.h b/main/include/TESProvider.h index 7feecb5712c..0d403fbc8e4 100644 --- a/main/include/TESProvider.h +++ b/main/include/TESProvider.h @@ -67,10 +67,10 @@ public: // bank content auto data_size = static_cast<span_size_t<char 
const>>(banks.size()); gsl::span<char const> b {banks.data(), data_size}; - std::vector<std::vector<unsigned char>> subdetectors_banks; + std::vector<std::vector<unsigned char>> subdetector_banks; m_banks_and_offsets[allen_bank_index] = { - {std::move(b)}, static_cast<std::size_t>(data_size), {offsets.data(), 2u}, version, subdetectors_banks}; + {std::move(b)}, static_cast<std::size_t>(data_size), {offsets.data(), 2u}, version, subdetector_banks}; } return 0; diff --git a/main/src/Provider.cpp b/main/src/Provider.cpp index a03f4a8c1a4..72d398b6047 100644 --- a/main/src/Provider.cpp +++ b/main/src/Provider.cpp @@ -28,7 +28,7 @@ namespace { std::unordered_set<BankTypes> Allen::configured_bank_types(std::string const& json_file) { // Bank types - std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN, BankTypes::MUON, BankTypes::ECal, BankTypes::HCal}; + std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN, BankTypes::MUON,BankTypes::ECal,BankTypes::HCal}; ConfigurationReader configuration_reader {json_file}; auto const& configuration = configuration_reader.params(); for (auto const& [key, props] : configuration) { @@ -41,6 +41,7 @@ std::unordered_set<BankTypes> Allen::configured_bank_types(std::string const& js } else { bank_types.emplace(bt); + std::cout<<"bank type added: "<<static_cast<int>(bt)<<std::endl; } } } diff --git a/mdf/test/test_providers.cpp b/mdf/test/test_providers.cpp index 5e15b04fddb..df0f0d4049a 100644 --- a/mdf/test/test_providers.cpp +++ b/mdf/test/test_providers.cpp @@ -67,7 +67,7 @@ BanksAndOffsets mep_banks(Slices& slices, BankTypes bank_type, size_t slice_inde auto const& [banks, banks_size, offsets, offsets_size, subdetectors_banks] = slices[ib][slice_index]; span<char const> b {banks[0].data(), offsets[offsets_size - 1]}; span<unsigned int const> o {offsets.data(), static_cast<::offsets_size>(offsets_size)}; - return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), banks_version[ib]}; + return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), banks_version[ib], subdetectors_banks}; } size_t transpose_mep( -- GitLab From 9a23388280308e84876d2160db20c3099a3856bd Mon Sep 17 00:00:00 2001 From: ascarabo <alessandro.scarabotto@cern.ch> Date: Mon, 21 Mar 2022 18:18:33 +0100 Subject: [PATCH 6/7] remove files --- .../include/SciFiCalculateClusterCountV6.cuh | 34 --------------- .../include/SciFiPreDecodeV6.cuh | 36 ---------------- .../include/SciFiRawBankDecoderV4.cuh | 42 ------------------- 3 files changed, 112 deletions(-) delete mode 100644 device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh delete mode 100644 device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh delete mode 100644 device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh diff --git a/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh b/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh deleted file mode 100644 index 9d48c0abbcd..00000000000 --- a/device/SciFi/preprocessing/include/SciFiCalculateClusterCountV6.cuh +++ /dev/null @@ -1,34 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * -\*****************************************************************************/ -#pragma once - -#include "SciFiDefinitions.cuh" -#include "SciFiRaw.cuh" -#include "SciFiEventModel.cuh" -#include "AlgorithmTypes.cuh" - -namespace scifi_calculate_cluster_count_v6 { - struct Parameters { - 
HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; - MASK_INPUT(dev_event_list_t) dev_event_list; - DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; - DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; - DEVICE_OUTPUT(dev_scifi_hit_count_t, unsigned) dev_scifi_hit_count; - }; - - struct scifi_calculate_cluster_count_v6_t : public DeviceAlgorithm, Parameters { - void set_arguments_size( - ArgumentReferences<Parameters> arguments, - const RuntimeOptions&, - const Constants&, - const HostBuffers&) const; - - void operator()( - const ArgumentReferences<Parameters>& arguments, - const RuntimeOptions& runtime_options, - const Constants& constants, - HostBuffers&, - const Allen::Context& context) const; - }; -} // namespace scifi_calculate_cluster_count_v6 diff --git a/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh b/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh deleted file mode 100644 index e9c91f7943f..00000000000 --- a/device/SciFi/preprocessing/include/SciFiPreDecodeV6.cuh +++ /dev/null @@ -1,36 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * -\*****************************************************************************/ -#pragma once - -#include "SciFiDefinitions.cuh" -#include "SciFiRaw.cuh" -#include "SciFiEventModel.cuh" -#include "AlgorithmTypes.cuh" - -namespace scifi_pre_decode_v6 { - struct Parameters { - HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; - HOST_INPUT(host_accumulated_number_of_scifi_hits_t, unsigned) host_accumulated_number_of_scifi_hits; - DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; - DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; - MASK_INPUT(dev_event_list_t) dev_event_list; - DEVICE_INPUT(dev_scifi_hit_offsets_t, unsigned) dev_scifi_hit_offsets; - DEVICE_OUTPUT(dev_cluster_references_t, unsigned) dev_cluster_references; - }; - - struct scifi_pre_decode_v6_t : public DeviceAlgorithm, Parameters { - void set_arguments_size( - ArgumentReferences<Parameters> arguments, - const RuntimeOptions&, - const Constants&, - const HostBuffers&) const; - - void operator()( - const ArgumentReferences<Parameters>& arguments, - const RuntimeOptions& runtime_options, - const Constants& constants, - HostBuffers&, - const Allen::Context& context) const; - }; -} // namespace scifi_pre_decode_v6 \ No newline at end of file diff --git a/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh b/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh deleted file mode 100644 index e44bfae4bf3..00000000000 --- a/device/SciFi/preprocessing/include/SciFiRawBankDecoderV4.cuh +++ /dev/null @@ -1,42 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration * -\*****************************************************************************/ -#pragma once - -#include "SciFiDefinitions.cuh" -#include "SciFiRaw.cuh" -#include "SciFiEventModel.cuh" -#include "AlgorithmTypes.cuh" - -namespace scifi_raw_bank_decoder_v4 { - struct Parameters { - HOST_INPUT(host_number_of_events_t, unsigned) host_number_of_events; - HOST_INPUT(host_accumulated_number_of_scifi_hits_t, unsigned) host_accumulated_number_of_scifi_hits; - DEVICE_INPUT(dev_scifi_raw_input_t, char) dev_scifi_raw_input; - 
DEVICE_INPUT(dev_scifi_raw_input_offsets_t, unsigned) dev_scifi_raw_input_offsets; - DEVICE_INPUT(dev_scifi_hit_offsets_t, unsigned) dev_scifi_hit_offsets; - DEVICE_INPUT(dev_cluster_references_t, unsigned) dev_cluster_references; - MASK_INPUT(dev_event_list_t) dev_event_list; - DEVICE_INPUT(dev_number_of_events_t, unsigned) dev_number_of_events; - DEVICE_OUTPUT(dev_scifi_hits_t, char) dev_scifi_hits; - PROPERTY(block_dim_t, "block_dim", "block dimensions", DeviceDimensions) block_dim; - }; - - struct scifi_raw_bank_decoder_v4_t : public DeviceAlgorithm, Parameters { - void set_arguments_size( - ArgumentReferences<Parameters> arguments, - const RuntimeOptions&, - const Constants&, - const HostBuffers&) const; - - void operator()( - const ArgumentReferences<Parameters>& arguments, - const RuntimeOptions& runtime_options, - const Constants& constants, - HostBuffers&, - const Allen::Context& context) const; - - private: - Property<block_dim_t> m_block_dim {this, {{256, 1, 1}}}; - }; -} // namespace scifi_raw_bank_decoder_v4 \ No newline at end of file -- GitLab From 2cc03f93f04a81ce37f497051d36b188c50c71d6 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Mon, 21 Mar 2022 17:30:17 +0000 Subject: [PATCH 7/7] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Allen/-/jobs/20457597 --- configuration/python/AllenConf/HLT1.py | 2 ++ configuration/python/AllenConf/utils.py | 8 ++--- configuration/sequences/passthrough.py | 9 ++--- .../include/HostErrorBanksCut.h | 5 --- .../src/HostErrorBanksCut.cpp | 36 ++++++++++--------- host/global_event_cut/test/mep_gec.cpp | 3 +- main/include/BankMapping.h | 30 ++++++++-------- main/include/BankTypes.h | 7 +++- main/include/MDFProvider.h | 3 +- main/include/TransposeTypes.h | 9 +++-- main/src/Provider.cpp | 4 +-- main/src/SliceUtils.cpp | 8 +++-- main/src/Transpose.cpp | 8 +++-- mdf/test/test_providers.cpp | 3 +- 14 files changed, 78 insertions(+), 57 deletions(-) diff --git a/configuration/python/AllenConf/HLT1.py b/configuration/python/AllenConf/HLT1.py index e28a68f1f20..066a9ea0670 100644 --- a/configuration/python/AllenConf/HLT1.py +++ b/configuration/python/AllenConf/HLT1.py @@ -27,6 +27,7 @@ def make_line_composite_node_with_gec(line_name, NodeLogic.LAZY_AND, force_order=True) + def make_line_composite_node_with_ebc(line_name, line_algorithm, ebc_name="ebc"): @@ -44,6 +45,7 @@ def line_maker(line_name, line_algorithm, enableGEC=True): node = line_algorithm return line_algorithm, node + @configurable def line_maker_ebc(line_name, line_algorithm, enableEBC=True): if (enableEBC): diff --git a/configuration/python/AllenConf/utils.py b/configuration/python/AllenConf/utils.py index 5b1fd72e44d..8d1969c3e82 100644 --- a/configuration/python/AllenConf/utils.py +++ b/configuration/python/AllenConf/utils.py @@ -2,9 +2,9 @@ # (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration # ############################################################################### from AllenCore.generator import make_algorithm -from AllenConf.algorithms import (host_init_number_of_events_t, - host_data_provider_t, - host_global_event_cut_t, layout_provider_t, host_error_banks_cut_t) +from AllenConf.algorithms import ( + host_init_number_of_events_t, host_data_provider_t, + host_global_event_cut_t, layout_provider_t, host_error_banks_cut_t) def initialize_number_of_events(): @@ -40,13 +40,13 @@ def gec(name="gec", min_scifi_ut_clusters=0, max_scifi_ut_clusters=9750): return gec + def error_banks_cut(name="error_banks_cut"): number_of_events = 
initialize_number_of_events() error_banks_cut = make_algorithm(host_error_banks_cut_t, name=name) return error_banks_cut - def mep_layout(): layout = make_algorithm(layout_provider_t, name="mep_layout") return { diff --git a/configuration/sequences/passthrough.py b/configuration/sequences/passthrough.py index 5dcacdd3281..96ad187206d 100644 --- a/configuration/sequences/passthrough.py +++ b/configuration/sequences/passthrough.py @@ -32,13 +32,12 @@ passthrough_error_banks_cut_line = line_maker_ebc( make_passthrough_line( name="Hlt1PassthroughErrorBanksCut", pre_scaler_hash_string="passthrough_error_banks_line_pre", - post_scaler_hash_string="passthrough_error_banks_line_post"), enableEBC=True) - + post_scaler_hash_string="passthrough_error_banks_line_post"), + enableEBC=True) # line_algorithms = [passthrough_line[0]] line_algorithms = [passthrough_error_banks_cut_line[0]] - global_decision = make_global_decision(lines=line_algorithms) providers = CompositeNode( @@ -47,7 +46,9 @@ providers = CompositeNode( # lines = CompositeNode( # "AllLines", [passthrough_line[1]], NodeLogic.NONLAZY_OR, force_order=False) lines = CompositeNode( - "AllLines", [passthrough_error_banks_cut_line[1]], NodeLogic.NONLAZY_OR, force_order=False) + "AllLines", [passthrough_error_banks_cut_line[1]], + NodeLogic.NONLAZY_OR, + force_order=False) passthrough_sequence = CompositeNode( "Passthrough", diff --git a/host/global_event_cut/include/HostErrorBanksCut.h b/host/global_event_cut/include/HostErrorBanksCut.h index a60f8e6497b..4f1f6f59310 100644 --- a/host/global_event_cut/include/HostErrorBanksCut.h +++ b/host/global_event_cut/include/HostErrorBanksCut.h @@ -32,10 +32,5 @@ namespace host_error_banks_cut { const Constants&, HostBuffers& host_buffers, const Allen::Context& context) const; - }; } // namespace host_error_banks_cut - - - - diff --git a/host/global_event_cut/src/HostErrorBanksCut.cpp b/host/global_event_cut/src/HostErrorBanksCut.cpp index 7b4c7f866e4..4b39e2fd4fd 100644 --- a/host/global_event_cut/src/HostErrorBanksCut.cpp +++ b/host/global_event_cut/src/HostErrorBanksCut.cpp @@ -5,7 +5,6 @@ INSTANTIATE_ALGORITHM(host_error_banks_cut::host_error_banks_cut_t) - void host_error_banks_cut::host_error_banks_cut_t::set_arguments_size( ArgumentReferences<Parameters> arguments, const RuntimeOptions& runtime_options, @@ -39,29 +38,32 @@ void host_error_banks_cut::host_error_banks_cut_t::operator()( unsigned size_of_list = 0; std::vector<bool> events_with_error_banks(number_of_events, false); - for(auto bank_type: {BankTypes::MUON,BankTypes::ECal,BankTypes::HCal}) { + for (auto bank_type : {BankTypes::MUON, BankTypes::ECal, BankTypes::HCal}) { auto bno = runtime_options.input_provider->banks(bank_type, runtime_options.slice_index); auto events_raw_banks = std::get<4>(bno); for (unsigned index_event = 0; index_event < event_end; ++index_event) { if (index_event >= event_start) { - //looping over mfps of that subdetector - for(auto raw_banks: events_raw_banks) { - auto lhcb_bank = raw_banks[index_event]; - //list of error banks - if (std::count(Allen::ErrorBanksList.begin(), Allen::ErrorBanksList.end(), static_cast<int>(lhcb_bank))){ - events_with_error_banks[index_event] = true;} - }//loop mfps - }//events range - }//loop events - - }//loop subdetectors - + // looping over mfps of that subdetector + for (auto raw_banks : events_raw_banks) { + auto lhcb_bank = raw_banks[index_event]; + // list of error banks + if (std::count(Allen::ErrorBanksList.begin(), Allen::ErrorBanksList.end(), static_cast<int>(lhcb_bank))) { 
+ events_with_error_banks[index_event] = true; + } + } // loop mfps + } // events range + } // loop events + + } // loop subdetectors + for (unsigned index_event = 0; index_event < event_end; ++index_event) { - if(index_event >= event_start && events_with_error_banks[index_event] == false){ - data<host_event_list_output_t>(arguments)[size_of_list++] = index_event - event_start;}} - + if (index_event >= event_start && events_with_error_banks[index_event] == false) { + data<host_event_list_output_t>(arguments)[size_of_list++] = index_event - event_start; + } + } + data<host_number_of_selected_events_t>(arguments)[0] = size_of_list; // Reduce the size of the event lists to the selected events diff --git a/host/global_event_cut/test/mep_gec.cpp b/host/global_event_cut/test/mep_gec.cpp index e83c20613ef..5cd3cae8d2c 100644 --- a/host/global_event_cut/test/mep_gec.cpp +++ b/host/global_event_cut/test/mep_gec.cpp @@ -121,7 +121,8 @@ int main(int argc, char* argv[]) slices, 0, bank_ids, {BankTypes::UT, BankTypes::FT}, banks_count, events, mep_header, blocks, {0, interval}); auto scifi_allen_type = to_integral(BankTypes::FT); - auto const& [scifi_data, scifi_data_size, scifi_offsets, scifi_offsets_size, subdetectors_banks] = slices[scifi_allen_type][0]; + auto const& [scifi_data, scifi_data_size, scifi_offsets, scifi_offsets_size, subdetectors_banks] = + slices[scifi_allen_type][0]; auto n_scifi_fragments = scifi_block_ids.size(); diff --git a/main/include/BankMapping.h b/main/include/BankMapping.h index 9876ebe0fb1..bccce90e3e1 100644 --- a/main/include/BankMapping.h +++ b/main/include/BankMapping.h @@ -22,22 +22,22 @@ namespace Allen { {LHCb::RawBank::OTError, BankTypes::OTError}, // used for PV MC info {LHCb::RawBank::OTRaw, BankTypes::OTRaw}}; // used for track MC info - //known error raw banks from https://edms.cern.ch/ui/#!master/navigator/document?P:100034379:100699586:subDocs - //to be updated if changes + // known error raw banks from https://edms.cern.ch/ui/#!master/navigator/document?P:100034379:100699586:subDocs + // to be updated if changes const std::vector<int> ErrorBanksList { - 29, //VeloError - 50, //MuonError - 68, //UTError (still not available) - 78, //CaloError - 81, //RichError - 82, //FTSpecial - 192, //DaqErrorBase - 250, //DaqErrorFragmentThrottled - 251, //DaqErrorBXIDCorrupted - 252, //DaqErrorBXIDJump - 253, //DaqErrorFragmentMissing - 254, //DaqErrorFragmentTruncated - 255 //DaqErrorInvalid + 29, // VeloError + 50, // MuonError + 68, // UTError (still not available) + 78, // CaloError + 81, // RichError + 82, // FTSpecial + 192, // DaqErrorBase + 250, // DaqErrorFragmentThrottled + 251, // DaqErrorBXIDCorrupted + 252, // DaqErrorBXIDJump + 253, // DaqErrorFragmentMissing + 254, // DaqErrorFragmentTruncated + 255 // DaqErrorInvalid }; const std::unordered_map<SourceIdSys, BankTypes> subdetectors = {{SourceIdSys::SourceIdSys_ODIN, BankTypes::ODIN}, diff --git a/main/include/BankTypes.h b/main/include/BankTypes.h index b57d3c3eb0c..3b70ea5a64c 100644 --- a/main/include/BankTypes.h +++ b/main/include/BankTypes.h @@ -59,7 +59,12 @@ constexpr auto to_integral(ENUM e) -> typename std::underlying_type<ENUM>::type return static_cast<typename std::underlying_type<ENUM>::type>(e); } -using BanksAndOffsets = std::tuple<std::vector<gsl::span<const char>>, size_t, gsl::span<const unsigned int>, int, std::vector<std::vector<unsigned char>>>; +using BanksAndOffsets = std::tuple< + std::vector<gsl::span<const char>>, + size_t, + gsl::span<const unsigned int>, + int, + 
std::vector<std::vector<unsigned char>>>; template<BankTypes... BANKS> std::unordered_set<BankTypes> banks_set() diff --git a/main/include/MDFProvider.h b/main/include/MDFProvider.h index 4a0603e06a1..3b2fa45be2a 100644 --- a/main/include/MDFProvider.h +++ b/main/include/MDFProvider.h @@ -278,7 +278,8 @@ public: gsl::span<char const> b {banks[0].data(), offsets[offsets_size - 1]}; gsl::span<unsigned int const> o {offsets.data(), static_cast<::offsets_size>(offsets_size)}; - return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), m_banks_version[ib], subdetectors_banks}; + return BanksAndOffsets { + {std::move(b)}, offsets[offsets_size - 1], std::move(o), m_banks_version[ib], subdetectors_banks}; } /** diff --git a/main/include/TransposeTypes.h b/main/include/TransposeTypes.h index 0894e832672..c4905045c1d 100644 --- a/main/include/TransposeTypes.h +++ b/main/include/TransposeTypes.h @@ -32,8 +32,13 @@ namespace Allen { // A slice contains transposed bank data, offsets to the start of each // set of banks and the number of sets of banks - //adding vector<vector> for subdetectors banks to access error banks from BankAndOffsets object - using Slice = std::tuple<std::vector<gsl::span<char>>, size_t, gsl::span<unsigned int>, size_t, std::vector<std::vector<unsigned char>>>; + // adding vector<vector> for subdetectors banks to access error banks from BankAndOffsets object + using Slice = std::tuple< + std::vector<gsl::span<char>>, + size_t, + gsl::span<unsigned int>, + size_t, + std::vector<std::vector<unsigned char>>>; using BankSlices = std::vector<Slice>; using Slices = std::array<BankSlices, NBankTypes>; diff --git a/main/src/Provider.cpp b/main/src/Provider.cpp index 72d398b6047..da08ad4d846 100644 --- a/main/src/Provider.cpp +++ b/main/src/Provider.cpp @@ -28,7 +28,7 @@ namespace { std::unordered_set<BankTypes> Allen::configured_bank_types(std::string const& json_file) { // Bank types - std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN, BankTypes::MUON,BankTypes::ECal,BankTypes::HCal}; + std::unordered_set<BankTypes> bank_types = {BankTypes::ODIN, BankTypes::MUON, BankTypes::ECal, BankTypes::HCal}; ConfigurationReader configuration_reader {json_file}; auto const& configuration = configuration_reader.params(); for (auto const& [key, props] : configuration) { @@ -41,7 +41,7 @@ std::unordered_set<BankTypes> Allen::configured_bank_types(std::string const& js } else { bank_types.emplace(bt); - std::cout<<"bank type added: "<<static_cast<int>(bt)<<std::endl; + std::cout << "bank type added: " << static_cast<int>(bt) << std::endl; } } } diff --git a/main/src/SliceUtils.cpp b/main/src/SliceUtils.cpp index e0451224952..cafc3ba5633 100644 --- a/main/src/SliceUtils.cpp +++ b/main/src/SliceUtils.cpp @@ -66,10 +66,14 @@ Allen::Slices allocate_slices( if (n_bytes) { spans.emplace_back(events_mem, n_bytes); } - //subdetector vector to access error banks + // subdetector vector to access error banks std::vector<std::vector<unsigned char>> subdetectors_banks; bank_slices.emplace_back( - std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1, subdetectors_banks); + std::move(spans), + n_bytes, + offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, + 1, + subdetectors_banks); } } return slices; diff --git a/main/src/Transpose.cpp b/main/src/Transpose.cpp index a5f4d7e2dcc..87898b2949b 100644 --- a/main/src/Transpose.cpp +++ b/main/src/Transpose.cpp @@ -442,10 +442,14 @@ Allen::Slices allocate_slices( if (n_bytes) 
{ spans.emplace_back(events_mem, n_bytes); } - //subdetector vector to access error banks + // subdetector vector to access error banks std::vector<std::vector<unsigned char>> subdetectors_banks; bank_slices.emplace_back( - std::move(spans), n_bytes, offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, 1, subdetectors_banks); + std::move(spans), + n_bytes, + offsets_span {offsets_mem, static_cast<offsets_size>(n_offsets + 1)}, + 1, + subdetectors_banks); } } return slices; diff --git a/mdf/test/test_providers.cpp b/mdf/test/test_providers.cpp index df0f0d4049a..c965bcbf3f9 100644 --- a/mdf/test/test_providers.cpp +++ b/mdf/test/test_providers.cpp @@ -67,7 +67,8 @@ BanksAndOffsets mep_banks(Slices& slices, BankTypes bank_type, size_t slice_inde auto const& [banks, banks_size, offsets, offsets_size, subdetectors_banks] = slices[ib][slice_index]; span<char const> b {banks[0].data(), offsets[offsets_size - 1]}; span<unsigned int const> o {offsets.data(), static_cast<::offsets_size>(offsets_size)}; - return BanksAndOffsets {{std::move(b)}, offsets[offsets_size - 1], std::move(o), banks_version[ib], subdetectors_banks}; + return BanksAndOffsets { + {std::move(b)}, offsets[offsets_size - 1], std::move(o), banks_version[ib], subdetectors_banks}; } size_t transpose_mep( -- GitLab
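
The host_error_banks_cut algorithm added in this series loops over the MUON, ECal and HCal slices, reads the per-MFP bank types carried in the new fifth element of BanksAndOffsets, and keeps only events that contain none of the bank types in Allen::ErrorBanksList. Below is a minimal, self-contained sketch of that selection logic, with plain std:: containers standing in for the Allen argument store and input provider; the SubdetectorBanks layout, the select_clean_events helper and the toy data in main are illustrative assumptions, not Allen code.

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// Error bank type identifiers, as introduced in BankMapping.h by this patch series.
const std::vector<int> ErrorBanksList {29, 50, 68, 78, 81, 82, 192, 250, 251, 252, 253, 254, 255};

// Hypothetical, simplified stand-in for the fifth element of BanksAndOffsets:
// one vector per MFP, holding the bank type observed for each event.
using SubdetectorBanks = std::vector<std::vector<unsigned char>>;

// Return the offsets (relative to event_start) of events whose banks match
// none of the known error bank types, mirroring the selection in operator().
std::vector<unsigned> select_clean_events(
  const std::vector<SubdetectorBanks>& subdetectors,
  unsigned event_start,
  unsigned event_end)
{
  const unsigned number_of_events = event_end - event_start;
  std::vector<bool> has_error_bank(number_of_events, false);

  // Flag every event that carries at least one error bank in any subdetector.
  for (const auto& mfps : subdetectors) {
    for (const auto& raw_banks : mfps) {
      for (unsigned event = event_start; event < event_end; ++event) {
        const int bank_type = static_cast<int>(raw_banks[event]);
        if (std::count(ErrorBanksList.begin(), ErrorBanksList.end(), bank_type)) {
          has_error_bank[event - event_start] = true;
        }
      }
    }
  }

  // Build the surviving event list, offsets relative to event_start.
  std::vector<unsigned> selected;
  for (unsigned event = event_start; event < event_end; ++event) {
    if (!has_error_bank[event - event_start]) {
      selected.push_back(event - event_start);
    }
  }
  return selected;
}

int main()
{
  // Three toy subdetectors (MUON, ECal, HCal), one MFP each, four events.
  // Event 2 carries a DaqErrorFragmentMissing (253) bank and should be dropped.
  std::vector<SubdetectorBanks> subdetectors {
    {{13, 13, 253, 13}}, // MUON
    {{11, 11, 11, 11}},  // ECal
    {{12, 12, 12, 12}}   // HCal
  };

  for (auto event : select_clean_events(subdetectors, 0, 4)) {
    std::cout << "selected event " << event << std::endl;
  }
}
```

The algorithm in HostErrorBanksCut.cpp additionally shrinks host_event_list_output_t and dev_event_list_output_t to the selected count via reduce_size and copies the result to the device with Allen::copy_async, as shown in the diff above.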