Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (88)
Showing changes with 931 additions and 474 deletions
......@@ -17,9 +17,9 @@ option(THOR_BUILD_TEST_FUNCTOR_CACHE "Build functor cache for THOR functors" ON)
if(THOR_BUILD_TEST_FUNCTOR_CACHE)
# Import the cache creation module
include(LoKiFunctorsCache)
# need to make sure the FunctorFactory is built
set(cache_deps FunctorCore)
# make sure GaudiConfig2 database has been correctly completed
......@@ -38,7 +38,10 @@ if(THOR_BUILD_TEST_FUNCTOR_CACHE)
endforeach()
endforeach()
# Also make sure that FunctorFactory is available
# Also make sure that FunctorFactory is available. FIXME: since the new
# FunctorFactory in Rec!2699 it is unclear whether this directory check is
# still needed, but the original comment never said *why* it was required in
# the first place, so it is hard to judge. (Same comment for SelAlgorithms...)
if(NOT EXISTS ${PROJECT_SOURCE_DIR}/Phys/FunctorCore/CMakeLists.txt)
message(FATAL_ERROR "Functor test cache can be build only if Phys/FunctorCore is present in the current project too")
endif()
......@@ -53,27 +56,15 @@ if(THOR_BUILD_TEST_FUNCTOR_CACHE)
# For now there is no need for a ThOr-specific alternative.
set(LOKI_FUNCTORS_CACHE_POST_ACTION_OPTS)
loki_functors_cache(FunctorVectorTestCache
options/FlagInsideCacheGeneration.py
options/DisableLoKiCacheFunctors.py
loki_functors_cache(FunctorDatahandleTest
options/SilenceErrors.py
options/SuppressLogMessages.py
${PROJECT_SOURCE_DIR}/Phys/FunctorCore/tests/options/test_vector_functors.py
options/ThOr_create_cache_opts.py
${PROJECT_SOURCE_DIR}/Phys/FunctorCore/tests/options/functor_datahandle_test.py
FACTORIES FunctorFactory
LINK_LIBRARIES Rec::FunctorCoreLib
DEPENDS ${cache_deps}
SPLIT 75
)
loki_functors_cache(FunctorTestCache
options/DisableLoKiCacheFunctors.py
options/SilenceErrors.py
options/SuppressLogMessages.py
${PROJECT_SOURCE_DIR}/Phys/FunctorCore/tests/options/test_functors.py
FACTORIES FunctorFactory
LINK_LIBRARIES Rec::FunctorCoreLib
DEPENDS ${cache_deps}
SPLIT 75
SPLIT 2
)
endif(THOR_BUILD_TEST_FUNCTOR_CACHE)
###############################################################################
# (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration #
# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
......@@ -9,5 +9,8 @@
# or submit itself to any jurisdiction. #
###############################################################################
from Configurables import ApplicationMgr
ApplicationMgr().Environment['LOKI_DISABLE_CACHE'] = '1'
ApplicationMgr().Environment['LOKI_DISABLE_CLING'] = '1'
ApplicationMgr().Environment['THOR_DISABLE_JIT'] = '1'
ApplicationMgr().Environment['THOR_DISABLE_CACHE'] = '1'
ApplicationMgr().Environment['THOR_JIT_EXTRA_ARGS'] = ''
ApplicationMgr().Environment['THOR_JIT_LIBDIR'] = '.'
......@@ -52,7 +52,6 @@ gaudi_add_module(FunctorCore
FunctorCoreLib
Gaudi::GaudiAlgLib
Gaudi::GaudiKernel
ROOT::Core
)
gaudi_add_executable(TestFunctors
......@@ -74,7 +73,170 @@ gaudi_add_executable(InstantiateFunctors
TEST
)
# This target only exists to try to provide a reliable way to figure out when
# to rebuild the preprocessed header
gaudi_add_executable(JIT_INCLUDES_TEST
SOURCES src/functor_jit_dummy/test_includes.cpp
LINK FunctorCoreLib
LHCb::PhysEvent
LHCb::TrackEvent
LHCb::MCEvent
Rec::ParticleCombinersLib
)
gaudi_install(PYTHON)
gaudi_add_tests(QMTest)
gaudi_add_tests(pytest ${CMAKE_CURRENT_SOURCE_DIR}/python)
string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
set(preprocessed_header_name "preprocessed_functorfactory_header.ii")
# When building the FunctorCache or when using a monobuild there is no install
# directory, so we need to support running both from the build directory and
# from the InstallArea. Executables (like the functor_jitter script below) and
# libraries are easy, because `CMAKE_CURRENT_BINARY_DIR` and the InstallArea
# are automatically in the `PATH` and `LD_LIBRARY_PATH` env variables. To find
# the preprocessed header we need to play a small trick: the lhcb_env command
# sets an env variable for both the build environment and the installed
# project, so we do that first and point to the InstallArea. Then we issue the
# command again with the PRIVATE flag, which only sets the variable for the
# build environment, overwriting the previously set value there.
lhcb_env(SET
FUNCTORFACTORY_PREPROCESSED_HEADER
"${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/${preprocessed_header_name}"
)
lhcb_env(PRIVATE SET
FUNCTORFACTORY_PREPROCESSED_HEADER
"${CMAKE_CURRENT_BINARY_DIR}/${preprocessed_header_name}"
)
# Generate a temporary file, because handling whitespace in generator
# expressions and lists directly in CMake is too awkward to be worth it.
file(GENERATE
OUTPUT "tmp_preprocessor.sh"
CONTENT "# auto generated
exec ${CMAKE_CXX_COMPILER} -x c++ -std=c++${GAUDI_CXX_STANDARD} \
-D$<JOIN:$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:TestFunctors,COMPILE_DEFINITIONS>>, -D> \
${CMAKE_CXX_FLAGS} \
${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}} \
-I$<JOIN:$<FILTER:$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:FunctorCoreLib,INCLUDE_DIRECTORIES>>,INCLUDE,/Rec/>, -I> \
-isystem $<JOIN:$<FILTER:$<FILTER:$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:FunctorCoreLib,INCLUDE_DIRECTORIES>>,EXCLUDE,/usr/include>,EXCLUDE,/Rec/>, -isystem > \
-E ${CMAKE_CURRENT_SOURCE_DIR}/include/Functors/JIT_includes.h \
-o ${preprocessed_header_name}"
)
# generate the preprocessed header, which depends on JIT_INCLUDES_TEST
add_custom_command(OUTPUT ${preprocessed_header_name}
COMMAND sh tmp_preprocessor.sh
DEPENDS ${generated_header_name} "tmp_preprocessor.sh"
JIT_INCLUDES_TEST
)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${preprocessed_header_name}" TYPE INCLUDE)
# avoid "warning: style of line directive is a GCC extension" because we
# include a preprocessed header. Are there better solutions? We could first
# precompile the preprocessed header in initalize() and then use that pch...
# something for later
string(REPLACE " -pedantic" "" cxx_flags_without_pedantic ${CMAKE_CXX_FLAGS})
# The logic below assumes that `CMAKE_CXX_COMPILER` points to a compiler
# wrapper; these wrappers are created by the CMake logic defined in
# lcg-toolchains.
file(READ "${CMAKE_CXX_COMPILER}" CMAKE_CXX_COMPILER_CONTENT)
string(REPLACE " \"$@\"" "" CMAKE_CXX_COMPILER_CONTENT ${CMAKE_CXX_COMPILER_CONTENT})
string(STRIP ${CMAKE_CXX_COMPILER_CONTENT} CMAKE_CXX_COMPILER_CONTENT)
# Specify the libraries a JIT-compiled functor library will link against. The
# list here is defined based on the includes present in
# `FunctorCore/include/Functors/JIT_includes.h`.
set(JIT_LINK_LIBS "-lFunctorCore -lParticleCombiners -lTrackEvent -lPhysEvent -lMCEvent -lRecEvent -lHltEvent")
file(GENERATE
OUTPUT "functor_jitter_tmp"
CONTENT "#!/usr/bin/env python
# Auto-generated script to create a jitter for the FunctorFactory
import os
import subprocess as sp
import sys
from multiprocessing import Pool
header = os.environ['FUNCTORFACTORY_PREPROCESSED_HEADER']
if len(sys.argv) != 4:
    raise Exception(
        'expected 3 arguments, e.g. functor_jitter {N_jobs} {source_directory} {output_lib_name}'
    )
n_jobs = None if sys.argv[1] == '-1' else int(sys.argv[1])
source_dir = sys.argv[2]
lib_name = sys.argv[3]
files = os.listdir(source_dir)
os.chdir(source_dir)
# Debug info is only needed for debugging or for the throughput tests; those
# jobs should set this env var. If it is not set, we explicitly force the
# debug level to zero, which reduces the memory and compilation-time overhead
# of the JIT by a factor of >2.
extra_args = os.environ.get('THOR_JIT_EXTRA_ARGS', '-g0')
cmd = '''
${CMAKE_CXX_COMPILER_CONTENT}'''
my_pool = Pool(n_jobs)
return_codes = []
for file in files:
    compile_cmd = cmd + ' -std=c++${GAUDI_CXX_STANDARD} \
-D$<JOIN:$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:FunctorCoreLib,COMPILE_DEFINITIONS>>, -D> \
${cxx_flags_without_pedantic} \
${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}} -fPIC \
{2} -include {0} -c {1}'.format(header, file, extra_args)
    res = my_pool.apply_async(sp.call, (compile_cmd, ), {'shell': True})
    return_codes.append(res)
my_pool.close()
my_pool.join()
if not all([r.successful() for r in return_codes]):
    print('Nonzero exit codes in compilation jobs')
    exit(1)
# we know all our libs will be on the LD_LIBRARY_PATH so just point the linker there
my_env = os.environ.copy()
my_env['LIBRARY_PATH'] = my_env['LD_LIBRARY_PATH']
# Include the CXX_FLAGS here again. This is needed, for example, when linking
# with clang, as those flags contain the --gcc-toolchain= flag pointing clang
# to the gcc installation.
link_cmd = cmd + ' ${cxx_flags_without_pedantic} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}} \
-fPIC -shared {1} ${JIT_LINK_LIBS} -o {0} *.o'.format(lib_name, extra_args)
exit(sp.call(link_cmd, shell=True, env=my_env))
")
# We don't yet have CMake 3.20, so file(GENERATE) does not accept permissions
# yet; thus we add a proxy command that copies the generated file and makes it
# executable.
add_custom_command(OUTPUT "functor_jitter" DEPENDS "functor_jitter_tmp"
COMMAND cp "functor_jitter_tmp" "functor_jitter"
COMMAND chmod a+x "functor_jitter"
)
install(PROGRAMS "${CMAKE_CURRENT_BINARY_DIR}/functor_jitter" TYPE BIN)
add_custom_target(FunctorCoreJit ALL DEPENDS ${preprocessed_header_name} "functor_jitter")
# This is only here to handle dependencies of a FunctorCache outside Rec, e.g.
# in Moore. TODO: this is technically a hack, since `FunctorCoreJit` is only a
# runtime dependency, but at the moment there is no appropriate target for
# runtime dependencies to which `FunctorCoreJit` could be added.
add_dependencies(FunctorCore FunctorCoreJit)
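The comment above describes how FUNCTORFACTORY_PREPROCESSED_HEADER is set once pointing at the InstallArea and once (PRIVATE) at the build directory, so that the build-directory value wins in the build environment. A minimal sketch, purely for illustration, of how a consumer could resolve the header path from that variable at runtime; the real consumers here are the FunctorFactory and the generated functor_jitter script, and this helper is hypothetical:

#include <cstdlib>
#include <stdexcept>
#include <string>

// Resolve the preprocessed header path from the environment; whichever value
// the runtime environment provides (build directory or InstallArea) wins.
std::string preprocessed_header_path() {
  if ( const char* p = std::getenv( "FUNCTORFACTORY_PREPROCESSED_HEADER" ) ) { return std::string{p}; }
  throw std::runtime_error( "FUNCTORFACTORY_PREPROCESSED_HEADER is not set" );
}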
......@@ -29,5 +29,6 @@ namespace Functors::Cache {
/** @brief Generate a Gaudi component name from a hash.
*/
std::string id( HashType hash );
std::string hashToStr( HashType hash );
std::string hashToFuncName( HashType hash );
} // namespace Functors::Cache
......@@ -495,11 +495,5 @@ namespace Functors {
if ( !m_functor ) { throw std::runtime_error( "Empty Functor<Out(In...)> return type queried" ); }
return m_functor->rtype();
}
/** This is useful for the functor cache.
*
* @todo Give a more useful description...
*/
using Factory = typename ::Gaudi::PluginService::Factory<AnyFunctor*()>;
};
} // namespace Functors
......@@ -29,83 +29,69 @@ namespace Functors {
* @brief Interface for turning strings into Functor<Out(In)> instances.
*/
struct IFactory : extend_interfaces<IInterface> {
protected:
enum CompilationBehaviourBit { TryCache = 0x1, TryJIT = 0x2, ExceptionOnFailure = 0x4 };
public:
/** Enum to flag whether the requested functor should be obtained from the
* functor cache, by JIT compilation, or either (the default). Also
* specifies whether or not failure should result in an exception or a
* null functor object being returned. Note that if all backends are
* disabled then a null functor object will always be returned and no
* exception will be raised.
/**
*/
enum CompilationBehaviour {
CacheOrJIT = TryCache | TryJIT | ExceptionOnFailure,
CacheOnly = TryCache | ExceptionOnFailure,
JITOnly = TryJIT | ExceptionOnFailure,
QuietCacheOnly = TryCache,
QuietJITOnly = TryJIT
};
/** Default combination behaviour
*/
static constexpr CompilationBehaviour DefaultCompilationBehaviour = CacheOrJIT;
protected:
using functor_base_t = std::unique_ptr<Functors::AnyFunctor>;
constexpr static auto functor_base_t_str = "std::unique_ptr<Functors::AnyFunctor>";
/** Implementation method that gets an input/output-type-agnostic std::unique_ptr<AnyFunctor>
* object from either cling or the cache.
/**
* @brief internal implementation method to register an input and
* output-type-agnostic std::unique_ptr<AnyFunctor> object with the
* factory
*
* @param do_copy lambda created in register_functor, used to first
* cast and then copy the created functor into its
* registered destination location.
* @param functor_type string representation of the type of the functor
* @param desc ThOr::FunctorDesc object holding the functor code
* and "pretty" representation.
*/
virtual functor_base_t get_impl( Gaudi::Algorithm* owner, std::string_view functor_type,
ThOr::FunctorDesc const& desc, CompilationBehaviour ) = 0;
virtual void do_register( std::function<void( std::unique_ptr<Functors::AnyFunctor> )> do_copy,
std::string_view functor_type, ThOr::FunctorDesc const& desc ) = 0;
public:
DeclareInterfaceID( IFactory, 1, 0 );
DeclareInterfaceID( IFactory, 2, 0 );
/** Factory method to get a C++ functor object from a string, either from
* the cache or using JIT compilation.
/** Factory method to register a C++ functor object to be created by this service.
*
* @param owner The algorithm that owns the functor, this is needed to
* set up the functor's data dependencies correctly.
* @param desc ThOr::FunctorDesc object holding the functor code, list
* of headers required to compile it and "pretty"
* representation.
* @param compile CompilationBehaviour enum value specifying what should
* be tried when compiling this functor. By default the
* functor cache will be tried first, and the factory will
* fall back on JIT compilation. This does not override the
* global settings of the factory.
* @tparam FType Functor<Out(In)> type that will be returned. This
* specifies precisely how the functor is instantiated.
* @return Functor<Out(In)> object of the given type, may be empty.
* @param functor Functor of type FType that will be set in
* the FunctorFactory's start() call.
* Note: the functor is not usable until after Factory->start()!
* @param desc ThOr::FunctorDesc object holding the functor code
* and "pretty" representation.
*/
template <typename FType>
FType get( Gaudi::Algorithm* owner, ThOr::FunctorDesc const& desc,
CompilationBehaviour compile = DefaultCompilationBehaviour ) {
auto any_functor = get_impl( owner, System::typeinfoName( typeid( FType ) ), desc, compile );
if ( any_functor ) { // check the unique_ptr<AnyFunctor> isn't empty
auto ftype_ptr = dynamic_cast<FType*>( any_functor.get() ); // cast AnyFunctor* -> FType* (base -> derived)
if ( ftype_ptr ) { // check the AnyFunctor -> Functor conversion was OK
return std::move( *ftype_ptr ); // move the contents into the FType we return by value
} else {
void register_functor( Gaudi::Algorithm* owner, FType& functor, ThOr::FunctorDesc const& desc ) {
// This lambda is a helper that performs the actual initialization of the
// algorithm's functor, given the AnyFunctor pointer created by the
// FunctorFactory. It does two things:
// 1. It remembers the concrete type, e.g. Functors::Functor<bool()>, that we
// need to cast the pointer to before we can perform the copy. Otherwise we
// would run into the classic object slicing problem, as we would only invoke
// the base class' move constructor.
// 2. It remembers the address we need to copy into (functor&).
// Note: we take ownership of the passed-in pointer, which is important
// because we are going to move the guts of the passed-in object into the
// registered algorithm's functor.
auto do_copy = [owner, &functor]( std::unique_ptr<Functors::AnyFunctor> b ) {
auto ftype_ptr = dynamic_cast<FType*>( b.get() ); // cast AnyFunctor* -> FType* (base -> derived)
if ( !ftype_ptr ) {
// This should only happen if you have a bug (e.g. you used a
// SIMDWrapper type that has a different meaning depending on the
// compilation flags in the stack/cling). We can't fix that at
// runtime so let's just fail hard.
throw GaudiException{"Failed to cast factory return type (" +
System::typeinfoName( typeid( decltype( *any_functor.get() ) ) ) +
") to desired type (" + System::typeinfoName( typeid( FType ) ) + "), rtype is (" +
System::typeinfoName( any_functor->rtype() ) + ") and it " +
( any_functor->wasJITCompiled() ? "was" : "was not" ) + " JIT compiled",
"Functors::IFactory::get<FType>( owner, desc, compile )", StatusCode::FAILURE};
System::typeinfoName( typeid( decltype( *b ) ) ) + ") to desired type (" +
System::typeinfoName( typeid( FType ) ) + "), rtype is (" +
System::typeinfoName( b->rtype() ) + ") ",
"Functors::IFactory::register_functor( owner, functor, desc)", StatusCode::FAILURE};
}
}
// Return an empty FType object. This can happen if e.g. you disabled
// both cling and the cache, as is done during cache generation, so we
// should not abort the application...
return {};
functor = std::move( *ftype_ptr );
functor.bind( owner );
};
do_register( do_copy, System::typeinfoName( typeid( FType ) ), desc );
}
};
} // namespace Functors
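A minimal usage sketch of the registration flow described above, modelled on the FunctorExampleAlg change further down in this diff. The class name MyCutAlg, the member names and the header paths are illustrative assumptions, not code from Rec:

#include "Functors/Function.h"
#include "Functors/FunctorDesc.h"
#include "Functors/IFactory.h"
#include "Gaudi/Algorithm.h"
#include "GaudiKernel/ServiceHandle.h"

class MyCutAlg final : public Gaudi::Algorithm {
public:
  using Gaudi::Algorithm::Algorithm;

  StatusCode initialize() override {
    auto sc = Gaudi::Algorithm::initialize();
    if ( !sc ) return sc;
    sc = m_factory.retrieve();
    if ( !sc ) return sc;
    // Registration only records where the decoded functor has to be copied;
    // m_pred is not usable until after the FunctorFactory's start().
    m_factory->register_functor( this, m_pred, m_pred_desc );
    return StatusCode::SUCCESS;
  }

  StatusCode execute( EventContext const& ) const override {
    // By now the factory's start() has run and m_pred can be invoked.
    return StatusCode::SUCCESS;
  }

private:
  ServiceHandle<Functors::IFactory> m_factory{this, "FunctorFactory", "FunctorFactory"};
  // In the real algorithms this description is filled from a property by PyConf.
  ThOr::FunctorDesc         m_pred_desc;
  Functors::Functor<bool()> m_pred;
};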
/*****************************************************************************\
* (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration *
* *
* This software is distributed under the terms of the GNU General Public *
* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". *
* *
* In applying this licence, CERN does not waive the privileges and immunities *
* granted to it by virtue of its status as an Intergovernmental Organization *
* or submit itself to any jurisdiction. *
\*****************************************************************************/
/*
*
* This header only exists to capture all includes which are necessary for JIT
* compilation via the FunctorFactory.
*
* Do NOT include this in any cpp files!!
*
*/
#include <any>
// Functors
#include "Functors/Adapters.h"
#include "Functors/Combination.h"
#include "Functors/Composite.h"
#include "Functors/Example.h"
#include "Functors/Filter.h"
#include "Functors/Function.h"
#include "Functors/MVA.h"
#include "Functors/NeutralLike.h"
#include "Functors/Particle.h"
#include "Functors/Simulation.h"
#include "Functors/TES.h"
#include "Functors/TrackLike.h"
// PhysEvent
#include "Event/Particle.h"
#include "Event/Particle_v2.h"
#include "Event/Vertex.h"
// TrackEvent
#include "Event/PrFittedForwardTracks.h"
#include "Event/Track_v1.h"
#include "Event/Track_v2.h"
#include "Event/Track_v3.h"
// TrackKernel
#include "TrackKernel/TrackCompactVertex.h"
// SelTools
#include "SelTools/MatrixNet.h"
#include "SelTools/SigmaNet.h"
// SelKernel
#include "SelKernel/ParticleCombination.h"
#include "SelKernel/VertexRelation.h"
// PrKernel
#include "PrKernel/PrSelection.h"
......@@ -12,10 +12,7 @@
#pragma once
#include "Functors/Function.h"
#include "Functors/Utilities.h"
#include "Kernel/IParticlePropertySvc.h"
#include "Kernel/ParticleProperty.h"
#include "LHCbMath/MatVec.h"
#include "SelKernel/Utilities.h"
/** @file Particle.h
......@@ -71,7 +68,7 @@ namespace Functors::Particle {
template <typename Particle>
auto operator()( Particle const& particle ) const {
using Sel::Utils::pid;
return pid( particle ) == m_id_value;
return pid( Sel::Utils::deref_if_ptr( particle ) ) == m_id_value;
}
private:
......@@ -88,13 +85,15 @@ namespace Functors::Particle {
IsAbsParticleID( IDInput id_input ) : ParticlePropertyUser<IDInput>( id_input ) {}
void bind( TopLevelInfo& top_level ) {
using std::abs;
m_id_value = abs( ParticlePropertyUser<IDInput>::get_particle_id( top_level ) );
}
template <typename Particle>
auto operator()( Particle const& particle ) const {
using Sel::Utils::pid;
return abs( pid( particle ) ) == m_id_value;
using std::abs;
return abs( pid( Sel::Utils::deref_if_ptr( particle ) ) ) == m_id_value;
}
private:
......
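The change above passes the particle through Sel::Utils::deref_if_ptr before calling pid(), so the functor can be fed either an object or a pointer to one. A stand-in sketch of such a helper for raw pointers (not the SelKernel implementation, just an illustration of the idea):

#include <type_traits>
#include <utility>

template <typename T>
decltype( auto ) deref_if_ptr( T&& x ) {
  if constexpr ( std::is_pointer_v<std::remove_reference_t<T>> ) {
    return *x; // dereference raw pointers
  } else {
    return std::forward<T>( x ); // pass references through unchanged
  }
}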
......@@ -112,7 +112,52 @@ namespace Functors::detail {
void bind_helper( Algorithm* alg, std::index_sequence<Is...> ) {
static_assert( std::is_base_of_v<IDataHandleHolder, Algorithm>,
"You must include the full declaration of the owning algorithm type!" );
( std::get<Is>( m_handles ).emplace( m_tes_locs[Is], alg ), ... );
if ( alg->msgLevel( MSG::DEBUG ) ) {
alg->debug() << "Init of DataHandles of Functor: " + get_name( m_f ) << endmsg;
}
( init_data_handle( std::get<Is>( m_handles ).emplace( m_tes_locs[Is], alg ), alg ), ... );
}
/**
* @brief Initialize a TES DataHandle and check that the owning algorithm
* was configured correctly and already holds our input in ExtraInputs
*
* For more info on the logic please see the detailed explanation of how
* functors obtain their data dependencies in the doc of the FunctorFactory.
*
* @param handle This handle will be initialized
* @param alg Algorithm/Tool which owns this functor
*/
template <typename T, typename Algorithm>
void init_data_handle( DataObjectReadHandle<T>& handle, Algorithm* alg ) {
if ( alg->msgLevel( MSG::DEBUG ) ) {
alg->debug() << " + " << handle.objKey()
<< " (will call init(): " << ( alg->FSMState() == Gaudi::StateMachine::INITIALIZED ) << ")"
<< endmsg;
}
if ( alg->extraInputDeps().count( handle.objKey() ) == 0 ) {
throw GaudiException{"Usage of DataHandle[\"" + handle.objKey() + "\"] in Functor: " + get_name( m_f ) +
", requires that owning algorithm " + alg->name() +
" contains this TES location inside the ExtraInputs property. This is likely a "
"Configuration/PyConf bug!",
get_name( m_f ), StatusCode::FAILURE};
}
// DataObjectReadHandle has a protected `init()`, so we need to call it
// through its base class. This is the same thing Gaudi::Algorithm does in
// sysInitialize(). We do it here because this DataHandle is created inside
// start(), at which point the step of initializing the handles of an
// algorithm has already happened.
// !! Exception !! If we are getting this functor from the cache then we are
// already creating it in initialize(), and we need to skip the init() call:
// it is also done in the sysInitialize() of the algorithm, and it is
// forbidden to call init() twice on a DataHandle (this is checked via an
// assert in DataObjectHandleBase::init()). So we only run init() here if the
// algorithm is already in an INITIALIZED state, which means this
// construction is happening inside start()
if ( alg->FSMState() == Gaudi::StateMachine::INITIALIZED ) { static_cast<Gaudi::DataHandle*>( &handle )->init(); }
}
/** Make a tuple of references to the result of dereferencing each
......
......@@ -36,21 +36,12 @@ class with_functor_maps : public Functors::detail::with_functor_factory<base_t>
template <std::size_t Idx>
void decode() {
using TagType = boost::mp11::mp_at_c<functor_map_tag_types, Idx>;
using FunctorType = boost::mp11::mp_at_c<functor_types, Idx>;
// This is the {nickname: decoded_functor} map we want to populate
auto& functor_map = std::get<Idx>( m_functors );
// Clean it up each time in case we re-decode
functor_map.clear();
for ( auto const& [func_name, func_desc] : m_properties.template get<Idx>() ) {
// Local copy we can add extra headers to
ThOr::FunctorDesc proxy{func_desc};
if constexpr ( Functors::detail::has_extra_headers_v<TagType> ) {
for ( auto const& h : TagType::ExtraHeaders ) { proxy.headers.emplace_back( h ); }
}
// Decode the functor
functor_map[func_name] = this->getFunctorFactory().template get<FunctorType>(
this, proxy, Functors::detail::get_compilation_behaviour_v<TagType> );
this->getFunctorFactory().register_functor( this, functor_map[func_name], func_desc );
}
}
......
......@@ -31,21 +31,6 @@ namespace Functors::detail {
template <typename T>
inline constexpr bool has_extra_headers_v = has_extra_headers<T>::value;
/** @brief Check if the given type has a static member called Compilation
*/
template <typename, typename U = int>
struct get_compilation_behaviour {
static constexpr auto value = IFactory::DefaultCompilationBehaviour;
};
template <typename T>
struct get_compilation_behaviour<T, decltype( (void)T::Compilation, 0 )> {
static constexpr auto value = T::Compilation;
};
template <typename T>
inline constexpr auto get_compilation_behaviour_v = get_compilation_behaviour<T>::value;
/** Type that we use to tag whether or not the functor factory service handle
* has been added to a class.
*/
......@@ -147,16 +132,9 @@ private:
template <std::size_t Idx>
void decode() {
using TagType = std::tuple_element_t<Idx, functor_tag_types>;
using FunctorType = std::tuple_element_t<Idx, FunctorsTuple>;
// Make a copy, as we might need to add headers to it
ThOr::FunctorDesc proxy = m_properties.template get<Idx>();
// Add extra headers if needed
if constexpr ( Functors::detail::has_extra_headers_v<TagType> ) {
for ( auto const& h : TagType::ExtraHeaders ) { proxy.headers.emplace_back( h ); }
}
std::get<Idx>( m_functors ) = this->getFunctorFactory().template get<FunctorType>(
this, proxy, Functors::detail::get_compilation_behaviour_v<TagType> );
// FIXME note for cleanup of REGISTER_HEADER and similar header bookkeeping
this->getFunctorFactory().register_functor( this, std::get<Idx>( m_functors ), m_properties.template get<Idx>() );
}
// Storage for the decoded functors
......
......@@ -173,17 +173,21 @@ struct with_output_tree : public Functors::detail::with_output_tree::mixin_base<
using mixin_base::mixin_base;
StatusCode initialize() override {
// Delegate to the base class method, this makes sure our functors are
// available.
auto sc = mixin_base::initialize();
// Open the ROOT file and create the TTree
m_root_file.reset( TFile::Open( m_root_file_name.value().c_str(), "recreate" ) );
m_root_file->cd();
// m_root_tree is managed by m_root_file, so this isn't a dangling pointer
m_root_tree = new TTree( m_root_tree_name.value().c_str(), "" );
return mixin_base::initialize();
}
StatusCode start() override {
// Set up our vectors of branch-filling helpers that go with those functors.
// We cannot call this in initialize() because the current implementation
// relies on calling functor->rtype(), so we need to wait until after the
// FunctorFactory's start() call.
( initialize<Tags>(), ... );
return sc;
return mixin_base::start();
}
StatusCode finalize() override {
......
###############################################################################
# (c) Copyright 2020 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
def pack_dict(input_dict, wrap=None):
"""Given a string-keyed BoundFunctor-valued dictionary, pack that into a
dictionary-of-vectors property added by the with_functor_maps C++ mixin.
This is basically a workaround for missing support for
Gaudi::Property<std::map<std::string, ThOr::FunctorProxy>>
The optional `wrap` argument is an extra functor adapter that will be used
to wrap all of the functors in the dictionary. The canonical usage of this
feature is to add a wrapping `POD` functor to ensure plain, scalar data
types are returned.
"""
if input_dict is None: return {}
if wrap is not None:
input_dict = {k: wrap(v) for k, v in input_dict.items()}
return {
k: (v.code(), v.headers(), v.code_repr())
for k, v in input_dict.items()
}
......@@ -25,9 +25,11 @@ namespace Functors::Cache {
HashType makeHash( std::string_view data ) { return sv_hash( data ); }
// Functions used to generate the Gaudi component names from the hashes
std::string id( HashType hash ) {
std::string hashToStr( HashType hash ) {
std::ostringstream oss;
oss << "functor_" << std::showbase << std::hex << hash;
oss << std::showbase << std::hex << hash;
return oss.str();
}
std::string hashToFuncName( HashType hash ) { return "functor_" + hashToStr( hash ); }
} // namespace Functors::Cache
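A standalone illustration of the naming scheme above; it uses a plain uint64_t in place of Functors::Cache::HashType, which is an assumption made only to keep the snippet self-contained:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Mirror of hashToStr/hashToFuncName above for a 64-bit hash value.
std::string hashToStr( std::uint64_t hash ) {
  std::ostringstream oss;
  oss << std::showbase << std::hex << hash;
  return oss.str();
}

int main() {
  // Prints "functor_0x1a2b": the Gaudi component name for a functor whose hash is 0x1a2b.
  std::cout << "functor_" + hashToStr( 0x1a2b ) << '\n';
}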
......@@ -69,7 +69,7 @@ private:
void decode() {
m_factory.retrieve().ignore();
m_pred = m_factory->get<Predicate>( this, m_functorproxy );
m_factory->register_functor( this, m_pred, m_functorproxy );
}
};
......@@ -118,7 +118,7 @@ private:
void decode() {
m_factory.retrieve().ignore();
m_pred = m_factory->get<Predicate>( this, m_functorproxy );
m_factory->register_functor( this, m_pred, m_functorproxy );
}
};
DECLARE_COMPONENT_WITH_ID( FunctorExampleAlg<>, "FunctorExampleAlg" )
......
......@@ -11,11 +11,26 @@
#include "Functors/FunctorDesc.h"
namespace std {
/**
* @brief operator<< overload for a Functor Description (FunctorDesc)
*
* Output should match the Python repr result, e.g. for the PT Functor:
* "('::Functors::Track::TransverseMomentum{}', ['Functors/TrackLike.h'], 'PT')"
*
* @param o stream to output into
* @param f FunctorDesc to stream into o
* @return ostream& filled with the string representation of f
*/
std::ostream& operator<<( std::ostream& o, ThOr::FunctorDesc const& f ) {
return GaudiUtils::details::ostream_joiner( o << "\"(" << std::quoted( f.code, '\'' ) << ", "
<< "['",
f.headers, "', '" )
<< "']"
<< ", " << std::quoted( f.repr, '\'' ) << ")\"";
// We can't use the default operator<< for the std::vector of headers,
// because we need single quotes around each header to match the Python
// repr output of a Functor, see above.
o << "\"(" << std::quoted( f.code, '\'' ) << ", [";
if ( !f.headers.empty() ) {
// this check avoids printing [''] instead of [] when f.headers is empty
GaudiUtils::details::ostream_joiner( o << "'", f.headers, "', '" ) << "'";
}
o << "], " << std::quoted( f.repr, '\'' ) << ")\"";
return o;
}
} // namespace std
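A short usage sketch of the streaming operator above; the aggregate initialization order {code, headers, repr} and the header path are assumptions made for illustration:

#include <iostream>
#include "Functors/FunctorDesc.h"

int main() {
  ThOr::FunctorDesc pt{"::Functors::Track::TransverseMomentum{}", {"Functors/TrackLike.h"}, "PT"};
  // Expected to print the Python-style repr quoted in the doc comment:
  // "('::Functors::Track::TransverseMomentum{}', ['Functors/TrackLike.h'], 'PT')"
  std::cout << pt << '\n';
}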
/*****************************************************************************\
* (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration *
* *
* This software is distributed under the terms of the GNU General Public *
* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". *
* *
* In applying this licence, CERN does not waive the privileges and immunities *
* granted to it by virtue of its status as an Intergovernmental Organization *
* or submit itself to any jurisdiction. *
\*****************************************************************************/
#include "Functors/JIT_includes.h"
int main() { return 0; }
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
import os
from Gaudi.Configuration import ApplicationMgr, VERBOSE
from Configurables import Gaudi__Monitoring__MessageSvcSink as MessageSvcSink
from Configurables import EvtStoreSvc
from Functors import SIZE
# algorithms are coming from PyConf because we need to use DataHandles etc.
from PyConf.Algorithms import FunctorExampleAlg as FEA, Gaudi__Examples__VectorDataProducer as VDP
from PyConf.dataflow import dataflow_config
# use VERBOSE so we can see the DH registration in the output
app = ApplicationMgr(OutputLevel=VERBOSE)
# FEA has counters so we need a sink
app.ExtSvc.append(MessageSvcSink())
# why does the default EventDataSvc not work? good question -> Gaudi#218
whiteboard = EvtStoreSvc("EventDataSvc", EventSlots=1)
app.ExtSvc.append(whiteboard)
vdp = VDP(name="VDP")
# This env var is set from inside the test_functor_string_datahandle.qmt file.
# For that test we pass a string instead of the DataHandle into the functor,
# which works in PyConf as long as we allow strings as DataHandles.
# This behaviour is not supported for functors, and via this test we check
# that it is correctly caught on the C++ side and throws:
# ERROR TES::Size Usage of DataHandle["/Event/VDP/OutputLocation"]
# in Functor (TES::Size) requires that owning algorithm FEA contains this TES
# location inside the ExtraInputs property. This is likely a Configuration/PyConf bug!
if os.environ.get("TEST_FUNCTORS_DH_USE_STRING", False):
fea = FEA(name="FEA", Cut=SIZE(vdp.OutputLocation.location) < 5)
else:
fea = FEA(name="FEA", Cut=SIZE(vdp.OutputLocation) < 5)
c = dataflow_config()
c.update(fea.configuration())
algs, _ = c.apply()
app.TopAlg = algs
# - Event
app.EvtMax = 1
app.EvtSel = "NONE"
app.HistogramPersistency = "NONE"
......@@ -14,11 +14,10 @@
# @author Saverio Mariani
##
# =============================================================================
import Configurables
from PyConf import Algorithms
from Configurables import (ApplicationMgr, LHCbApp)
from Functors import *
from Functors.tests.categories import DUMMY_DATA_DEP
from Functors.utils import pack_dict
import Functors.math as fmath
from GaudiKernel.SystemOfUnits import GeV
......@@ -192,13 +191,16 @@ particle_functors = [
def test_functors(alg_name_suffix, functors_to_test, SkipCut=False):
    algo = getattr(Configurables, 'InstantiateFunctors__' + alg_name_suffix)
    test = algo('Test' + alg_name_suffix)
    test.Functions = pack_dict(
        {functor.code_repr(): functor
         for functor in functors_to_test})
    if not SkipCut: test.Cut = FILTER(ALL)
    ApplicationMgr().TopAlg.append(test)
    algo = getattr(Algorithms, 'InstantiateFunctors__' + alg_name_suffix)
    test = algo(
        name='Test' + alg_name_suffix,
        Functions={
            functor.code_repr(): functor
            for functor in functors_to_test
        },
        Cut=FILTER(ALL) if not SkipCut else None)
    algs, _ = test.configuration().apply()
    ApplicationMgr().TopAlg.append(algs[-1])
def test_pr(prname, functors, only_unwrapped_functors=[]):
......@@ -213,7 +215,6 @@ app.EvtMax = 0
# these options, so if we *didn't* disable the cache then the test that cling
# can handle all of these functors would be bypassed.
from Configurables import FunctorFactory
FunctorFactory().UseCache = False
# Simple instantiation test: are the templates working?
#
......