diff --git a/Control/AthenaServices/test/AthenaOutputStream_test.cxx b/Control/AthenaServices/test/AthenaOutputStream_test.cxx index ec7891337355a36a787593514921af4711c20cc2..8425a140a0383d455cfbe52d19212527911f8c43 100644 --- a/Control/AthenaServices/test/AthenaOutputStream_test.cxx +++ b/Control/AthenaServices/test/AthenaOutputStream_test.cxx @@ -93,7 +93,7 @@ int main() { baz4lfc->setStore (bazaux4lfc.get()); SG::AuxElement::Accessor<float> foo ("foo"); SG::AuxElement::Accessor<double> bar ("bar"); - SG::AuxElement::Accessor<float> zzz ("zzz"); + SG::AuxElement::Accessor<std::vector<float>> zzz ("zzz"); foo (*baz4lfc); bar (*baz4lfc); zzz (*baz4lfc); diff --git a/DataQuality/DataQualityInterfaces/DataQualityInterfaces/HanConfig.h b/DataQuality/DataQualityInterfaces/DataQualityInterfaces/HanConfig.h index 8c655f6fed6c2839a143b1732a636003730ed0e9..b0d0d6426017efaa524f13efb8138738a8a91458 100644 --- a/DataQuality/DataQualityInterfaces/DataQualityInterfaces/HanConfig.h +++ b/DataQuality/DataQualityInterfaces/DataQualityInterfaces/HanConfig.h @@ -47,7 +47,9 @@ public: virtual ~HanConfig(); - virtual void AssembleAndSave( std::string infileName, std::string outfileName, std::string connectionString, long runNumber, bool bulk); + virtual void AssembleAndSave( std::string infileName, std::string outfileName, + std::string connectionString="sqlite://;schema=/afs/cern.ch/user/a/atlasdqm/dqmdisk1/cherrypy-devel/RefDB.db;dbname=REFDB", + long runNumber=2147483646, bool bulk=false); virtual void BuildMonitors( std::string configName, dqm_core::Input& input, HanOutput& output ); #ifndef __CINT__ diff --git a/Event/EventPrimitives/EventPrimitives/AmgMatrixBasePlugin.h b/Event/EventPrimitives/EventPrimitives/AmgMatrixBasePlugin.h index c214458188497563cf308caa1120ce29ab492675..36f2f2c9c95c68a5e1d7bd53e8729462f2d659f1 100644 --- a/Event/EventPrimitives/EventPrimitives/AmgMatrixBasePlugin.h +++ b/Event/EventPrimitives/EventPrimitives/AmgMatrixBasePlugin.h @@ -1,5 +1,5 @@ /* - Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration + Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration */ /////////////////////////////////////////////////////////////////// @@ -73,14 +73,14 @@ return -std::log(std::tan(this->theta()*.5)); //TODO: slow } //old method with safeguards, new method is in SymmetricMatrixHelpers.h - inline Scalar deltaR(const MatrixBase<Derived>& vec){ + inline Scalar deltaR(const MatrixBase<Derived>& vec) const { if (this->rows() < 2) return 0.; double a = this->eta() - vec.eta(); double b = this->deltaPhi(vec); return std::sqrt ( a*a + b*b ); } - inline Scalar deltaPhi(const MatrixBase<Derived>& vec){ + inline Scalar deltaPhi(const MatrixBase<Derived>& vec) const { if (this->rows() < 2) return 0.; double dphi = vec.phi() - this->phi(); if ( dphi > M_PI ) { diff --git a/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.cxx b/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.cxx index 4a7f7f574aef191ad3ae81bb77fdb64383acdf7e..e54b7185d03ed59c8f9a2eed99c804d708cc7937 100644 --- a/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.cxx +++ b/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.cxx @@ -23,46 +23,46 @@ namespace xAOD { /// Explicit Instantiation of Template const SG::AuxElement::Accessor< float >* - showerShapeAccessorV1( xAOD::EgammaParameters::ShowerShapeType type ) - { + showerShapeAccessorV1( xAOD::EgammaParameters::ShowerShapeType type ) + { switch( type ) { - DEFINE_ACCESSOR( float, e011 ); - DEFINE_ACCESSOR( float, e033 ); - DEFINE_ACCESSOR( float, e132 ); - 
DEFINE_ACCESSOR( float, e1152 ); - DEFINE_ACCESSOR( float, ethad1 ); - DEFINE_ACCESSOR( float, ethad ); - DEFINE_ACCESSOR( float, ehad1 ); - DEFINE_ACCESSOR( float, f1 ); - DEFINE_ACCESSOR( float, f3 ); - DEFINE_ACCESSOR( float, f1core ); - DEFINE_ACCESSOR( float, f3core ); - DEFINE_ACCESSOR( float, e233 ); - DEFINE_ACCESSOR( float, e235 ); - DEFINE_ACCESSOR( float, e255 ); - DEFINE_ACCESSOR( float, e237 ); - DEFINE_ACCESSOR( float, e277 ); - DEFINE_ACCESSOR( float, e333 ); - DEFINE_ACCESSOR( float, e335 ); - DEFINE_ACCESSOR( float, e337 ); - DEFINE_ACCESSOR( float, e377 ); - DEFINE_ACCESSOR( float, weta1 ); - DEFINE_ACCESSOR( float, weta2 ); - DEFINE_ACCESSOR( float, e2ts1 ); - DEFINE_ACCESSOR( float, e2tsts1 ); - DEFINE_ACCESSOR( float, fracs1 ); - DEFINE_ACCESSOR( float, widths1 ); - DEFINE_ACCESSOR( float, widths2 ); - DEFINE_ACCESSOR( float, poscs1 ); - DEFINE_ACCESSOR( float, poscs2 ); - DEFINE_ACCESSOR( float, asy1 ); - DEFINE_ACCESSOR( float, pos ); - DEFINE_ACCESSOR( float, pos7 ); - DEFINE_ACCESSOR( float, barys1 ); - DEFINE_ACCESSOR( float, wtots1 ); - DEFINE_ACCESSOR( float, emins1 ); - DEFINE_ACCESSOR( float, emaxs1 ); - DEFINE_ACCESSOR( float, r33over37allcalo ); + DEFINE_ACCESSOR( float, e011 ); + DEFINE_ACCESSOR( float, e033 ); + DEFINE_ACCESSOR( float, e132 ); + DEFINE_ACCESSOR( float, e1152 ); + DEFINE_ACCESSOR( float, ethad1 ); + DEFINE_ACCESSOR( float, ethad ); + DEFINE_ACCESSOR( float, ehad1 ); + DEFINE_ACCESSOR( float, f1 ); + DEFINE_ACCESSOR( float, f3 ); + DEFINE_ACCESSOR( float, f1core ); + DEFINE_ACCESSOR( float, f3core ); + DEFINE_ACCESSOR( float, e233 ); + DEFINE_ACCESSOR( float, e235 ); + DEFINE_ACCESSOR( float, e255 ); + DEFINE_ACCESSOR( float, e237 ); + DEFINE_ACCESSOR( float, e277 ); + DEFINE_ACCESSOR( float, e333 ); + DEFINE_ACCESSOR( float, e335 ); + DEFINE_ACCESSOR( float, e337 ); + DEFINE_ACCESSOR( float, e377 ); + DEFINE_ACCESSOR( float, weta1 ); + DEFINE_ACCESSOR( float, weta2 ); + DEFINE_ACCESSOR( float, e2ts1 ); + DEFINE_ACCESSOR( float, e2tsts1 ); + DEFINE_ACCESSOR( float, fracs1 ); + DEFINE_ACCESSOR( float, widths1 ); + DEFINE_ACCESSOR( float, widths2 ); + DEFINE_ACCESSOR( float, poscs1 ); + DEFINE_ACCESSOR( float, poscs2 ); + DEFINE_ACCESSOR( float, asy1 ); + DEFINE_ACCESSOR( float, pos ); + DEFINE_ACCESSOR( float, pos7 ); + DEFINE_ACCESSOR( float, barys1 ); + DEFINE_ACCESSOR( float, wtots1 ); + DEFINE_ACCESSOR( float, emins1 ); + DEFINE_ACCESSOR( float, emaxs1 ); + DEFINE_ACCESSOR( float, r33over37allcalo ); DEFINE_ACCESSOR( float, ecore ); DEFINE_ACCESSOR( float, Reta ); DEFINE_ACCESSOR( float, Rphi ); @@ -70,48 +70,12 @@ namespace xAOD { DEFINE_ACCESSOR( float, Rhad ); DEFINE_ACCESSOR( float, Rhad1 ); DEFINE_ACCESSOR( float, DeltaE ); - default: + default: std::cerr << "xAOD::Egamma ERROR Unknown float ShowerShapeType (" << type << ") requested" << std::endl; return nullptr; } - - } - const SG::AuxElement::Accessor< char >* - selectionMenuAccessorV1( xAOD::EgammaParameters::SelectionMenu menu ) - { - switch( menu ) { - DEFINE_ACCESSOR( char, Loose); - DEFINE_ACCESSOR( char, Medium); - DEFINE_ACCESSOR( char, Tight); - DEFINE_ACCESSOR( char, LHLoose); - DEFINE_ACCESSOR( char, LHMedium); - DEFINE_ACCESSOR( char, LHTight); - DEFINE_ACCESSOR( char, MultiLepton); - default: - std::cerr << "xAOD::Egamma ERROR Unknown char ElectronSelectionMenu (" - << menu << ") requested" << std::endl; - return nullptr; - } - } - - const SG::AuxElement::Accessor< unsigned int >* - selectionisEMAccessorV1( xAOD::EgammaParameters::SelectionisEM isEM ) - { - switch( 
isEM ) { - DEFINE_ACCESSOR( unsigned int, isEMLoose); - DEFINE_ACCESSOR( unsigned int, isEMMedium); - DEFINE_ACCESSOR( unsigned int, isEMTight); - DEFINE_ACCESSOR( unsigned int, isEMLHLoose); - DEFINE_ACCESSOR( unsigned int, isEMLHMedium); - DEFINE_ACCESSOR( unsigned int, isEMLHTight); - DEFINE_ACCESSOR( unsigned int, isEMMultiLepton); - default: - std::cerr << "xAOD::Egamma ERROR Unknown unsigned int ElectronSelectionisEM (" - << isEM << ") requested" << std::endl; - return nullptr; - } } diff --git a/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.h b/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.h index 1e4c4157929efe66e4e98fdc3e37882f4ad5c3bf..f2d33c270fade0282e0f775f1a69e2c655beb232 100644 --- a/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.h +++ b/Event/xAOD/xAODEgamma/Root/EgammaAccessors_v1.h @@ -24,13 +24,6 @@ namespace xAOD { const SG::AuxElement::Accessor< float >* showerShapeAccessorV1( xAOD::EgammaParameters::ShowerShapeType type ); - const SG::AuxElement::Accessor< char >* - selectionMenuAccessorV1( xAOD::EgammaParameters::SelectionMenu menu ) ; - - const SG::AuxElement::Accessor< unsigned int >* - selectionisEMAccessorV1( xAOD::EgammaParameters::SelectionisEM isEM ) ; - - } // namespace xAOD #endif // XAOD_EGAMMAACCESSORS_V1_H diff --git a/Event/xAOD/xAODEgamma/Root/Egamma_v1.cxx b/Event/xAOD/xAODEgamma/Root/Egamma_v1.cxx index 9f5c2e0304077bd374354b6e98220d7dbe734bbe..28c9e56dec8889d3f5582f7f61c9daa466de24e0 100644 --- a/Event/xAOD/xAODEgamma/Root/Egamma_v1.cxx +++ b/Event/xAOD/xAODEgamma/Root/Egamma_v1.cxx @@ -9,7 +9,7 @@ #include "xAODEgamma/EgammaContainer.h" #include "EgammaAccessors_v1.h" #include "xAODPrimitives/tools/getIsolationAccessor.h" -#include "xAODPrimitives/tools/getIsolationCorrectionAccessor.h" +#include "xAODPrimitives/tools/getIsolationCorrectionAccessor.h" #include "EventPrimitives/EventPrimitivesHelpers.h" #include <stdexcept> @@ -53,8 +53,8 @@ Egamma_v1::Egamma_v1(const Egamma_v1& eg) : IParticle(eg) { Egamma_v1& Egamma_v1::operator=(const Egamma_v1& eg ){ - if (this != &eg){ // protect against invalid self-assignment - if (!this->container() && !this->hasStore() ) { + if (this != &eg){ // protect against invalid self-assignment + if (!this->container() && !this->hasStore() ) { makePrivateStore(); } this->IParticle::operator=( eg ); @@ -89,17 +89,17 @@ Egamma_v1::GenVecFourMom_t Egamma_v1::genvecP4() const { } double Egamma_v1::e() const{ - return genvecP4().E(); + return genvecP4().E(); } double Egamma_v1::rapidity() const { - return genvecP4().Rapidity(); + return genvecP4().Rapidity(); } Egamma_v1::FourMom_t Egamma_v1::p4() const { FourMom_t p4; - p4.SetPtEtaPhiM( pt(), eta(), phi(),m()); - return p4; + p4.SetPtEtaPhiM( pt(), eta(), phi(),m()); + return p4; } void Egamma_v1::setP4(float pt, float eta, float phi, float m){ @@ -139,15 +139,15 @@ Egamma_v1::EgammaCovMatrix_t Egamma_v1::covMatrix() const{ EgammaCovMatrix_t cov; cov.setZero(); - if(!acc.isAvailable(*this) ) { + if(!acc.isAvailable(*this) ) { return cov; } - const std::vector<float>& v = acc(*this); - size_t size= v.size(); + const std::vector<float>& v = acc(*this); + size_t size= v.size(); if(size==16){ //up to 21.0.11 cov = Eigen::Map<const EgammaCovMatrix_t> (v.data()); - } + } else { //from >21.0.11 EgammaCovMatrix_t cov; @@ -163,7 +163,7 @@ void Egamma_v1::setCovMatrix(const Egamma_v1::EgammaCovMatrix_t& cov){ MatrixHelpers::compress(cov,acc(*this)); } -///egamma author +///egamma author uint16_t Egamma_v1::author(uint16_t mask) const { static const Accessor< uint16_t > acc( "author" 
); uint16_t author = acc(*this); @@ -210,7 +210,7 @@ bool Egamma_v1::showerShapeValue(float& value, const EgammaParameters::ShowerSha if( !acc ) { return false; } - if(!acc->isAvailable(*this) ) { + if(!acc->isAvailable(*this) ) { return false; } // Retrieve the value: @@ -279,7 +279,7 @@ bool Egamma_v1::setIsolation(float value, const Iso::IsolationType information) } ///Isolation corrections -bool Egamma_v1::isolationCaloCorrection(float& value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, +bool Egamma_v1::isolationCaloCorrection(float& value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param) const{ const SG::AuxElement::Accessor< float > acc = getIsolationCorrectionAccessor(flavour,corr,param); if(!acc.isAvailable(*this) ) { @@ -290,7 +290,7 @@ bool Egamma_v1::isolationCaloCorrection(float& value, const Iso::IsolationFlavou return true; } -float Egamma_v1::isolationCaloCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, +float Egamma_v1::isolationCaloCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param) const{ const SG::AuxElement::Accessor< float > acc = getIsolationCorrectionAccessor(flavour,corr,param); @@ -298,7 +298,7 @@ float Egamma_v1::isolationCaloCorrection(const Iso::IsolationFlavour flavour, co return acc(*this); } -bool Egamma_v1::setIsolationCaloCorrection(float value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, +bool Egamma_v1::setIsolationCaloCorrection(float value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param){ const SG::AuxElement::Accessor< float > acc = getIsolationCorrectionAccessor(flavour,corr,param); // Set the value: @@ -338,7 +338,7 @@ bool Egamma_v1::isolationTrackCorrection(float& value, const Iso::IsolationFlavo return true; } -float Egamma_v1::isolationTrackCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationTrackCorrection corr) const{ +float Egamma_v1::isolationTrackCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationTrackCorrection corr) const{ const SG::AuxElement::Accessor< float > acc = getIsolationCorrectionAccessor(flavour,corr); if( !acc.isAvailable(*this) ) {throw std::runtime_error( "Unknown/Unavailable Isolation correction requested" );} return acc(*this); @@ -411,58 +411,10 @@ Egamma_v1::caloClusterLink( size_t index ) const { AUXSTORE_OBJECT_SETTER_AND_GETTER( Egamma_v1, Egamma_v1::CLELVec_t, caloClusterLinks, setCaloClusterLinks) -///Selectors / isEM and all that -///First with enums (static accessor no lookup => faster but less flexible) -bool Egamma_v1::passSelection(bool& value, const xAOD::EgammaParameters::SelectionMenu menu ) const { - const SG::AuxElement::Accessor< char >* acc = selectionMenuAccessorV1( menu ); - if( !acc ) { - return false; - } - if(!acc->isAvailable(*this) ) { - return false; - } - value= (*acc)(*this); - return true; -} - -bool Egamma_v1::passSelection(const xAOD::EgammaParameters::SelectionMenu menu ) const { - const SG::AuxElement::Accessor< char >* acc = selectionMenuAccessorV1( menu ); - if(!acc ) throw std::runtime_error( "Unknown/Unavailable bool selection menu requested" ); - return (*acc)(*this); -} - -void Egamma_v1::setPassSelection(bool value, const xAOD::EgammaParameters::SelectionMenu menu){ - const SG::AuxElement::Accessor< char >* acc = 
selectionMenuAccessorV1( menu ); - ( *acc )(*this)=value; -} - -bool Egamma_v1::selectionisEM(unsigned int& value, const xAOD::EgammaParameters::SelectionisEM isEM) const { - const SG::AuxElement::Accessor< unsigned int >* acc = selectionisEMAccessorV1( isEM ); - if(!acc ) { - return false; - } - if(!acc->isAvailable(*this) ) { - return false; - } - value= (*acc)(*this); - return true; -} - -unsigned int Egamma_v1::selectionisEM(const xAOD::EgammaParameters::SelectionisEM isEM) const { - const SG::AuxElement::Accessor< unsigned int >* acc = selectionisEMAccessorV1( isEM ); - if(!acc ) throw std::runtime_error( "Unknown/Unavailable unsigned int isEM requested" ); - return (*acc)(*this); -} - -void Egamma_v1::setSelectionisEM(unsigned int value, const xAOD::EgammaParameters::SelectionisEM isEM){ - const SG::AuxElement::Accessor< unsigned int >* acc = selectionisEMAccessorV1( isEM ); - ( *acc )(*this)=value; -} - ///Then with strings (full flexibility when adding new menus dynamically) bool Egamma_v1::passSelection(bool& value, const std::string& menu ) const { const SG::AuxElement::Accessor< char > acc( menu ); - if(!acc.isAvailable(*this) ) { + if(!acc.isAvailable(*this) ) { return false; } value= acc(*this); @@ -481,7 +433,7 @@ void Egamma_v1::setPassSelection(bool value, const std::string& menu){ bool Egamma_v1::selectionisEM(unsigned int& value, const std::string& isEM) const{ const SG::AuxElement::Accessor< unsigned int > acc( isEM ); - if(!acc.isAvailable(*this) ) { + if(!acc.isAvailable(*this) ) { return false; } value= acc(*this); @@ -500,7 +452,7 @@ void Egamma_v1::setSelectionisEM(unsigned int value, const std::string& isEM){ bool Egamma_v1::likelihoodValue(float& value, const std::string& LHValue/*=std::string("LHValue")*/) const{ const SG::AuxElement::Accessor<float> acc( LHValue ); - if(!acc.isAvailable(*this) ) { + if(!acc.isAvailable(*this) ) { return false; } value= acc(*this); diff --git a/Event/xAOD/xAODEgamma/xAODEgamma/EgammaDefs.h b/Event/xAOD/xAODEgamma/xAODEgamma/EgammaDefs.h index 68c6b440af5f082c290e62bfd923946a0ce4c877..30aeeb6e12271001ff3ef00d93162b65e0db32f7 100644 --- a/Event/xAOD/xAODEgamma/xAODEgamma/EgammaDefs.h +++ b/Event/xAOD/xAODEgamma/xAODEgamma/EgammaDefs.h @@ -29,7 +29,7 @@ namespace xAOD { /** @brief Electron reconstructed by the Forward cluster-based algorithm */ const uint16_t AuthorFwdElectron=0x8; /** @brief Object Reconstructed by standard cluster-based algorithm. 
Ambiguous ==> can be either Electron or Photon */ - const uint16_t AuthorAmbiguous=0x10; + const uint16_t AuthorAmbiguous=0x10; /** @brief trigger Electrons */ const uint16_t AuthorTrigElectron=0x20; /** @brief trigger Photons */ @@ -112,24 +112,24 @@ namespace xAOD { }; /** @brief */ - - const uint32_t BADCLUSELECTRON = + + const uint32_t BADCLUSELECTRON = 0x1 << MaskedCellCore | 0x1 << MissingFEBCellCore | 0x1 << MissingFEBCellEdgeS1 | 0x1 << MissingFEBCellEdgeS2 | - 0x1 << DeadHVS1S2S3Core | - 0x1 << DeadHVS1S2S3Edge ; - - const uint32_t BADCLUSPHOTON = + 0x1 << DeadHVS1S2S3Core | + 0x1 << DeadHVS1S2S3Edge ; + + const uint32_t BADCLUSPHOTON = 0x1 << MaskedCellCore | 0x1 << MissingFEBCellCore | 0x1 << MissingFEBCellEdgeS1 | 0x1 << MissingFEBCellEdgeS2 | - 0x1 << DeadHVS1S2S3Core | + 0x1 << DeadHVS1S2S3Core | 0x1 << DeadHVS1S2S3Edge | - 0x1 << BadS1Core; - + 0x1 << BadS1Core; + const uint32_t ALLOQ= 0xFFFFFFFF; diff --git a/Event/xAOD/xAODEgamma/xAODEgamma/EgammaEnums.h b/Event/xAOD/xAODEgamma/xAODEgamma/EgammaEnums.h index 733fc80244ee04eb12748f4a061d605ef0d06dfd..eae00a33c09d7f9f9514350f3a4a6bea1651fb8c 100644 --- a/Event/xAOD/xAODEgamma/xAODEgamma/EgammaEnums.h +++ b/Event/xAOD/xAODEgamma/xAODEgamma/EgammaEnums.h @@ -8,264 +8,288 @@ #ifndef XAODEGAMMA_EGAMMAENUMS_H #define XAODEGAMMA_EGAMMAENUMS_H - namespace xAOD { - namespace EgammaParameters { - - /// @name egamma types - enum EgammaType { - electron = 0, - unconvertedPhoton = 1, - convertedPhoton = 2, - NumberOfEgammaTypes = 3 - }; - +namespace EgammaParameters { - /// @name Shower shape types - /// @{ - enum ShowerShapeType{ - /// @brief uncalibrated energy (sum of cells) in presampler in a 1x1 window in cells in eta X phi - e011 = 0, - /// @brief uncalibrated energy (sum of cells) in presampler in a 3x3 window in cells in eta X phi - e033 = 1, - /// @brief uncalibrated energy (sum of cells) in strips in a 3x2 window in cells in eta X phi - e132 = 2, - /// @brief uncalibrated energy (sum of cells) in strips in a 15x2 window in cells in eta X phi - e1152 = 3, - /// @brief transverse energy in the first sampling of the hadronic calorimeters behind the cluster calculated from ehad1 - ethad1 = 4, - /// @brief ET leakage into hadronic calorimeter with exclusion of energy in CaloSampling::TileGap3 - ethad = 5, - /// @brief E leakage into 1st sampling of had calo (CaloSampling::HEC0 + CaloSampling::TileBar0 + CaloSampling::TileExt0) - ehad1 = 6, - /// @brief E1/E = fraction of energy reconstructed in the first sampling, where E1 is energy in all strips belonging to the cluster and E is the total energy reconstructed in the electromagnetic calorimeter cluster - f1 = 7, - /// @brief fraction of energy reconstructed in 3rd sampling - f3 = 8, - /// @brief E1(3x1)/E = fraction of the energy reconstructed in the first longitudinal compartment of the electromagnetic calorimeter, where E1(3x1) the energy reconstructed in +/-3 strips in eta, centered around the maximum energy strip and E is the energy reconstructed in the electromagnetic calorimeter - f1core = 9, - /// @brief E3(3x3)/E fraction of the energy reconstructed in the third compartment of the electromagnetic calorimeter, where E3(3x3), energy in the back sampling, is the sum of the energy contained in a 3x3 window around the maximum energy cell - f3core = 10, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 3x3 (in cell units eta X phi) - e233 = 11, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 
3x5 - e235 = 12, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 5x5 - e255 = 13, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 3x7 - e237 = 14, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 7x7 - e277 = 15, - /// @brief uncalibrated energy (sum of cells) of the third sampling in a rectangle of size 3x3 - e333 = 16, - /// @brief uncalibrated energy (sum of cells) of the third sampling in a rectangle of size 3x5 - e335 = 17, - /// @brief uncalibrated energy (sum of cells) of the third sampling in a rectangle of size 3x7 - e337 = 18, - /// @brief uncalibrated energy (sum of cells) of the middle sampling in a rectangle of size 7x7 - e377 = 19, - /// @brief shower width using +/-3 strips around the one with the maximal energy deposit: - /// w3 strips = sqrt{sum(Ei)x(i-imax)^2/sum(Ei)}, where i is the number of the strip and imax the strip number of the most energetic one - weta1 = 20, - /// @brief the lateral width is calculated with a window of 3x5 cells using the energy weighted sum over all cells, which depends on the particle impact point inside the cell: weta2 = - /// sqrt(sum Ei x eta^2)/(sum Ei) -((sum Ei x eta)/(sum Ei))^2, where Ei is the energy of the i-th cell - weta2 = 21, - /// @brief 2nd max in strips calc by summing 3 strips - e2ts1 = 22, - /// @brief energy of the cell corresponding to second energy maximum in the first sampling - e2tsts1 = 23, - /// @brief shower shape in the shower core : [E(+/-3)-E(+/-1)]/E(+/-1), where E(+/-n) is the energy in ± n strips around the strip with highest energy - fracs1 = 24, - /// @brief same as egammaParameters::weta1 but without corrections on particle impact point inside the cell - widths1 = 25, - /// @brief same as egammaParameters::weta2 but without corrections on particle impact point inside the cell - widths2 = 26, - /// @brief relative position in eta within cell in 1st sampling - poscs1 = 27, - /// @brief relative position in eta within cell in 2nd sampling - poscs2= 28, - /// @brief uncorr asymmetry in 3 strips in the 1st sampling - asy1 = 29, - /// @brief difference between shower cell and predicted track in +/- 1 cells - pos = 30, - /// @brief Difference between the track and the shower positions: - /// sum_{i=i_m-7}^{i=i_m+7}E_i x (i-i_m) / sum_{i=i_m-7}^{i=i_m+7}E_i, - /// The difference between the track and the shower positions measured - /// in units of distance between the strips, where i_m is the impact cell - /// for the track reconstructed in the inner detector and E_i is the energy - /// reconstructed in the i-th cell in the eta direction for constant phi given by the track parameters - pos7 = 31, - /// @brief barycentre in sampling 1 calculated in 3 strips - barys1 =32, - /// @brief shower width is determined in a window detaxdphi = 0,0625 ×~0,2, corresponding typically to 20 strips in - ///eta : wtot1=sqrt{sum Ei x ( i-imax)^2 / sum Ei}, where i is the strip number and imax the strip number of the first local maximum - wtots1 = 33, - /// @brief energy reconstructed in the strip with the minimal value between the first and second maximum - emins1 = 34, - /// @brief energy of strip with maximal energy deposit - emaxs1 = 35, - /// @brief 1-ratio of energy in 3x3 over 3x7 cells; - /// E(3x3) = E0(1x1) + E1(3x1) + E2(3x3) + E3(3x3); E(3x7) = E0(3x3) + E1(15x3) + E2(3x7) + E3(3x7) - r33over37allcalo = 36, - /// @brief core energy in em calo E(core) = E0(3x3) + E1(15x2) + E2(5x5) + 
E3(3x5) - ecore = 37, - /// @brief e237/e277 - Reta = 38, - /// @brief e233/e237 - Rphi = 39, - /// @brief (emaxs1-e2tsts1)/(emaxs1+e2tsts1) - Eratio = 40, - /// @brief ethad/et - Rhad = 41, - /// @brief ethad1/et - Rhad1 = 42, - /// @brief e2tsts1-emins1 - DeltaE =43, - ///maximum number of enums - NumberOfShowerShapes = 44 - }; - /// @} - - - - /// @name Track Match variable types - /// @{ - enum TrackCaloMatchType{ - /// @brief difference between the cluster eta (presampler) and - ///the eta of the track extrapolated to the presampler - deltaEta0 = 0, - ///@brief difference between the cluster eta (first sampling) and the eta of the track extrapolated to the - ///first sampling: |eta_stripscluster -eta_ID|, where eta_stripscluster is computed - ///in the first sampling of the electromagnetic calorimeter, where the granularity is very fine, and eta_ID is the pseudo-rapidity - ///of the track extrapolated to the calorimeter - deltaEta1 = 1, - /// @brief difference between the cluster eta (second sampling) and the eta of the track extrapolated to the second sampling - deltaEta2 = 2, - /// @brief difference between the cluster eta (3rd sampling) and - /// the eta of the track extrapolated to the 3rd sampling - deltaEta3 = 3, - /// @brief difference between the cluster phi (presampler) and - /// the eta of the track extrapolated to the presampler - deltaPhi0 = 4, - /// @brief difference between the cluster eta (1st sampling) and - /// the eta of the track extrapolated to the 1st sampling (strips) - deltaPhi1 = 5, - /// @brief difference between the cluster phi (second sampling) and the phi of the track - /// extrapolated to the second sampling : |phi_middlecluster -phi_ID|, where phi_middlecluster - /// is computed in the second compartment of the electromagnetic calorimeter and phi_ID is the - /// azimuth of the track extrapolated to the calorimeter - deltaPhi2 = 6, - ///@brief difference between the cluster eta (3rd sampling) and - /// the eta of the track extrapolated to the 3rd sampling - deltaPhi3 = 7, - /// @brief difference between the cluster phi (sampling 2) and the - /// eta of the track extrapolated from the last measurement point. - deltaPhiFromLastMeasurement = 8, - /// @brief difference between the cluster phi (presampler) and - /// the eta of the track extrapolated to the presampler from the perigee with a rescaled - /// momentum. - deltaPhiRescaled0 = 9, - /// @brief difference between the cluster eta (1st sampling) and - /// the eta of the track extrapolated to the 1st sampling (strips) from the perigee with a rescaled - /// momentum. - deltaPhiRescaled1 = 10, - /// @brief difference between the cluster phi (second sampling) and the phi of the track - /// extrapolated to the second sampling from the perigee with a rescaled - /// momentum. - deltaPhiRescaled2 = 11, - ///@brief difference between the cluster eta (3rd sampling) and - /// the eta of the track extrapolated to the 3rd sampling from the perigee with a rescaled - /// momentum. 
- deltaPhiRescaled3 = 12, - ///maximum number of enums - NumberOfTrackMatchProperties= 13 - }; - ///@} +/// @name egamma types +enum EgammaType +{ + electron = 0, + unconvertedPhoton = 1, + convertedPhoton = 2, + NumberOfEgammaTypes = 3 +}; +/// @name Shower shape types +/// @{ +enum ShowerShapeType +{ + /// @brief uncalibrated energy (sum of cells) in presampler in a 1x1 window in + /// cells in eta X phi + e011 = 0, + /// @brief uncalibrated energy (sum of cells) in presampler in a 3x3 window in + /// cells in eta X phi + e033 = 1, + /// @brief uncalibrated energy (sum of cells) in strips in a 3x2 window in + /// cells in eta X phi + e132 = 2, + /// @brief uncalibrated energy (sum of cells) in strips in a 15x2 window in + /// cells in eta X phi + e1152 = 3, + /// @brief transverse energy in the first sampling of the hadronic + /// calorimeters behind the cluster calculated from ehad1 + ethad1 = 4, + /// @brief ET leakage into hadronic calorimeter with exclusion of energy in + /// CaloSampling::TileGap3 + ethad = 5, + /// @brief E leakage into 1st sampling of had calo (CaloSampling::HEC0 + + /// CaloSampling::TileBar0 + CaloSampling::TileExt0) + ehad1 = 6, + /// @brief E1/E = fraction of energy reconstructed in the first sampling, + /// where E1 is energy in all strips belonging to the cluster and E is the + /// total energy reconstructed in the electromagnetic calorimeter cluster + f1 = 7, + /// @brief fraction of energy reconstructed in 3rd sampling + f3 = 8, + /// @brief E1(3x1)/E = fraction of the energy reconstructed in the first + /// longitudinal compartment of the electromagnetic calorimeter, where E1(3x1) + /// the energy reconstructed in +/-3 strips in eta, centered around the + /// maximum energy strip and E is the energy reconstructed in the + /// electromagnetic calorimeter + f1core = 9, + /// @brief E3(3x3)/E fraction of the energy reconstructed in the third + /// compartment of the electromagnetic calorimeter, where E3(3x3), energy in + /// the back sampling, is the sum of the energy contained in a 3x3 window + /// around the maximum energy cell + f3core = 10, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 3x3 (in cell units eta X phi) + e233 = 11, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 3x5 + e235 = 12, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 5x5 + e255 = 13, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 3x7 + e237 = 14, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 7x7 + e277 = 15, + /// @brief uncalibrated energy (sum of cells) of the third sampling in a + /// rectangle of size 3x3 + e333 = 16, + /// @brief uncalibrated energy (sum of cells) of the third sampling in a + /// rectangle of size 3x5 + e335 = 17, + /// @brief uncalibrated energy (sum of cells) of the third sampling in a + /// rectangle of size 3x7 + e337 = 18, + /// @brief uncalibrated energy (sum of cells) of the middle sampling in a + /// rectangle of size 7x7 + e377 = 19, + /// @brief shower width using +/-3 strips around the one with the maximal + /// energy deposit: + /// w3 strips = sqrt{sum(Ei)x(i-imax)^2/sum(Ei)}, where i is the number of + ///the strip and imax the strip number of the most energetic one + weta1 = 20, + /// @brief the lateral width is calculated with a window of 3x5 cells using + /// the energy weighted sum 
over all cells, which depends on the particle + /// impact point inside the cell: weta2 = + /// sqrt(sum Ei x eta^2)/(sum Ei) -((sum Ei x eta)/(sum Ei))^2, where Ei is + ///the energy of the i-th cell + weta2 = 21, + /// @brief 2nd max in strips calc by summing 3 strips + e2ts1 = 22, + /// @brief energy of the cell corresponding to second energy maximum in the + /// first sampling + e2tsts1 = 23, + /// @brief shower shape in the shower core : [E(+/-3)-E(+/-1)]/E(+/-1), where + /// E(+/-n) is the energy in ± n strips around the strip with highest energy + fracs1 = 24, + /// @brief same as egammaParameters::weta1 but without corrections on + /// particle impact point inside the cell + widths1 = 25, + /// @brief same as egammaParameters::weta2 but without corrections on particle + /// impact point inside the cell + widths2 = 26, + /// @brief relative position in eta within cell in 1st sampling + poscs1 = 27, + /// @brief relative position in eta within cell in 2nd sampling + poscs2 = 28, + /// @brief uncorr asymmetry in 3 strips in the 1st sampling + asy1 = 29, + /// @brief difference between shower cell and predicted track in +/- 1 cells + pos = 30, + /// @brief Difference between the track and the shower positions: + /// sum_{i=i_m-7}^{i=i_m+7}E_i x (i-i_m) / sum_{i=i_m-7}^{i=i_m+7}E_i, + /// The difference between the track and the shower positions measured + /// in units of distance between the strips, where i_m is the impact cell + /// for the track reconstructed in the inner detector and E_i is the energy + /// reconstructed in the i-th cell in the eta direction for constant phi + ///given by the track parameters + pos7 = 31, + /// @brief barycentre in sampling 1 calculated in 3 strips + barys1 = 32, + /// @brief shower width is determined in a window detaxdphi = 0,0625 ×~0,2, + /// corresponding typically to 20 strips in + /// eta : wtot1=sqrt{sum Ei x ( i-imax)^2 / sum Ei}, where i is the strip + /// number and imax the strip number of the first local maximum + wtots1 = 33, + /// @brief energy reconstructed in the strip with the minimal value between + /// the first and second maximum + emins1 = 34, + /// @brief energy of strip with maximal energy deposit + emaxs1 = 35, + /// @brief 1-ratio of energy in 3x3 over 3x7 cells; + /// E(3x3) = E0(1x1) + E1(3x1) + E2(3x3) + E3(3x3); E(3x7) = E0(3x3) + + ///E1(15x3) + E2(3x7) + E3(3x7) + r33over37allcalo = 36, + /// @brief core energy in em calo E(core) = E0(3x3) + E1(15x2) + E2(5x5) + + /// E3(3x5) + ecore = 37, + /// @brief e237/e277 + Reta = 38, + /// @brief e233/e237 + Rphi = 39, + /// @brief (emaxs1-e2tsts1)/(emaxs1+e2tsts1) + Eratio = 40, + /// @brief ethad/et + Rhad = 41, + /// @brief ethad1/et + Rhad1 = 42, + /// @brief e2tsts1-emins1 + DeltaE = 43, + /// maximum number of enums + NumberOfShowerShapes = 44 +}; +/// @} - /// @name Vertex Match variable types - /// @{ - enum VertexCaloMatchType{ - /// @brief difference between the cluster eta and - ///the eta of the first track of the vertex extrapolated to the second sampling. 
- convMatchDeltaEta1 = 0, +/// @name Track Match variable types +/// @{ +enum TrackCaloMatchType +{ + /// @brief difference between the cluster eta (presampler) and + /// the eta of the track extrapolated to the presampler + deltaEta0 = 0, + ///@brief difference between the cluster eta (first sampling) and the eta of + ///the track extrapolated to the + /// first sampling: |eta_stripscluster -eta_ID|, where eta_stripscluster is + /// computed + /// in the first sampling of the electromagnetic calorimeter, where the + /// granularity is very fine, and eta_ID is the pseudo-rapidity + /// of the track extrapolated to the calorimeter + deltaEta1 = 1, + /// @brief difference between the cluster eta (second sampling) and the eta of + /// the track extrapolated to the second sampling + deltaEta2 = 2, + /// @brief difference between the cluster eta (3rd sampling) and + /// the eta of the track extrapolated to the 3rd sampling + deltaEta3 = 3, + /// @brief difference between the cluster phi (presampler) and + /// the eta of the track extrapolated to the presampler + deltaPhi0 = 4, + /// @brief difference between the cluster eta (1st sampling) and + /// the eta of the track extrapolated to the 1st sampling (strips) + deltaPhi1 = 5, + /// @brief difference between the cluster phi (second sampling) and the phi of + /// the track + /// extrapolated to the second sampling : |phi_middlecluster -phi_ID|, + ///where phi_middlecluster + /// is computed in the second compartment of the electromagnetic + ///calorimeter and phi_ID is the + /// azimuth of the track extrapolated to the calorimeter + deltaPhi2 = 6, + ///@brief difference between the cluster eta (3rd sampling) and + /// the eta of the track extrapolated to the 3rd sampling + deltaPhi3 = 7, + /// @brief difference between the cluster phi (sampling 2) and the + /// eta of the track extrapolated from the last measurement point. + deltaPhiFromLastMeasurement = 8, + /// @brief difference between the cluster phi (presampler) and + /// the eta of the track extrapolated to the presampler from the perigee + ///with a rescaled + /// momentum. + deltaPhiRescaled0 = 9, + /// @brief difference between the cluster eta (1st sampling) and + /// the eta of the track extrapolated to the 1st sampling (strips) from + ///the perigee with a rescaled + /// momentum. + deltaPhiRescaled1 = 10, + /// @brief difference between the cluster phi (second sampling) and the phi of + /// the track + /// extrapolated to the second sampling from the perigee with a rescaled + /// momentum. + deltaPhiRescaled2 = 11, + ///@brief difference between the cluster eta (3rd sampling) and + /// the eta of the track extrapolated to the 3rd sampling from the perigee + /// with a rescaled + /// momentum. + deltaPhiRescaled3 = 12, + /// maximum number of enums + NumberOfTrackMatchProperties = 13 +}; +///@} - /// @brief difference between the cluster eta and - ///the eta of the second track of the vertex extrapolated to the second sampling. - convMatchDeltaEta2 = 1, - - /// @brief difference between the cluster phi and - ///the phi of the first track of the vertex extrapolated to the second sampling. - convMatchDeltaPhi1 = 2, - - /// @brief difference between the cluster phi and - ///the phi of the second track of the vertex extrapolated to the second sampling. 
- convMatchDeltaPhi2 = 3, +/// @name Vertex Match variable types +/// @{ +enum VertexCaloMatchType +{ + /// @brief difference between the cluster eta and + /// the eta of the first track of the vertex extrapolated to the second + /// sampling. + convMatchDeltaEta1 = 0, - ///maximum number of enums - NumberOfVertexMatchProperties= 4 - }; - ///@} + /// @brief difference between the cluster eta and + /// the eta of the second track of the vertex extrapolated to the second + /// sampling. + convMatchDeltaEta2 = 1, - /// @name Selection Menus - /// @{ - enum SelectionMenu{ - Loose = 0, - Medium = 1, - Tight = 2, - LHLoose = 3, - LHMedium = 4, - LHTight = 5, - MultiLepton = 6, - NumberOfMenus = 7 - }; - ///@} + /// @brief difference between the cluster phi and + /// the phi of the first track of the vertex extrapolated to the second + /// sampling. + convMatchDeltaPhi1 = 2, + /// @brief difference between the cluster phi and + /// the phi of the second track of the vertex extrapolated to the second + /// sampling. + convMatchDeltaPhi2 = 3, - /// @name Selection isEM - /// @{ - enum SelectionisEM{ - isEMLoose = 0, - isEMMedium = 1, - isEMTight = 2, - isEMLHLoose = 3, - isEMLHMedium = 4, - isEMLHTight = 5, - isEMMultiLepton = 6, - NumberOfisEMs = 7 - }; - ///@} + /// maximum number of enums + NumberOfVertexMatchProperties = 4 +}; +///@} +/// @name Conversion types +/// @{ +enum ConversionType +{ + /// @brief unconverted photon + unconverted = 0, - /// @name Conversion types - /// @{ - enum ConversionType{ - /// @brief unconverted photon - unconverted = 0, + /// @brief one track only, with Si hits + singleSi = 1, - /// @brief one track only, with Si hits - singleSi = 1, + /// @brief one track only, no Si hits (TRT only) + singleTRT = 2, - /// @brief one track only, no Si hits (TRT only) - singleTRT = 2, - - /// @brief two tracks, both with Si hits - doubleSi = 3, - - /// @brief two tracks, none with Si hits (TRT only) - doubleTRT = 4, + /// @brief two tracks, both with Si hits + doubleSi = 3, - /// @brief two tracks, only one with Si hits - doubleSiTRT = 5, + /// @brief two tracks, none with Si hits (TRT only) + doubleTRT = 4, - ///maximum number of types - NumberOfVertexConversionTypes - }; - ///@} + /// @brief two tracks, only one with Si hits + doubleSiTRT = 5, + /// maximum number of types + NumberOfVertexConversionTypes +}; +///@} - }// End namespace EgammaParameters +} // End namespace EgammaParameters -}// End namespace xAOD +} // End namespace xAOD #endif // XAODEGAMMA_EGAMMAENUMS_H diff --git a/Event/xAOD/xAODEgamma/xAODEgamma/versions/Egamma_v1.h b/Event/xAOD/xAODEgamma/xAODEgamma/versions/Egamma_v1.h index f0d1f4724d0a052316c68c90ec8a0248d33f3827..a855583e1a48d722c2dedaddd9f7858ceee70083 100644 --- a/Event/xAOD/xAODEgamma/xAODEgamma/versions/Egamma_v1.h +++ b/Event/xAOD/xAODEgamma/xAODEgamma/versions/Egamma_v1.h @@ -18,8 +18,8 @@ #include "xAODEgamma/EgammaEnums.h" //CaloCluster include -#include "xAODCaloEvent/CaloCluster.h" -#include "xAODCaloEvent/CaloClusterContainer.h" +#include "xAODCaloEvent/CaloCluster.h" +#include "xAODCaloEvent/CaloClusterContainer.h" //xAOD Primitives #include "xAODPrimitives/IsolationCorrection.h" @@ -42,7 +42,7 @@ namespace xAOD { /// @class xAOD::Egamma - /// @brief Class describing an e/gamma + /// @brief Class describing an e/gamma /// @name xAOD::Egamma provides a public interface. 
/// @name xAOD::Electron and xAOD::Photon inherit from this class /// @@ -58,7 +58,7 @@ namespace xAOD { protected: /// @name xAOD::Egamma constructors /// The xAOD::Egamma is not supposed to be created directly, only via xAOD::Electron and xAOD::Photon. - /// xAOD::Egamma is an abstract class. It does not define the type() pure virtual function from IParticle + /// xAOD::Egamma is an abstract class. It does not define the type() pure virtual function from IParticle /// The default constructors is protected /// @{ @@ -84,7 +84,7 @@ namespace xAOD { /// @name xAOD::IParticle functions /// These are already virtual due to IParticle /// @{ - + /// @brief The transverse momentum (\f$p_T\f$) of the particle virtual double pt() const final; @@ -102,7 +102,7 @@ namespace xAOD { /// @brief The true rapidity (y) of the particle virtual double rapidity() const final; - + /// @brief Definition of the 4-momentum type typedef IParticle::FourMom_t FourMom_t; @@ -110,8 +110,8 @@ namespace xAOD { virtual FourMom_t p4() const final; /// @brief The type of the object as a simple enumeration, remains pure virtual in e/gamma. - virtual Type::ObjectType type() const override =0 ; - + virtual Type::ObjectType type() const override =0 ; + /// @} @@ -122,8 +122,8 @@ namespace xAOD { typedef ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<double> > GenVecFourMom_t; /// The full 4-momentum of the particle : internal egamma type. - GenVecFourMom_t genvecP4() const; - + GenVecFourMom_t genvecP4() const; + /// @brief set the 4-vec void setP4(float pt, float eta, float phi, float m); @@ -138,28 +138,28 @@ namespace xAOD { /// @brief set the Mass void setM(float m); - + /// @} - + /// @name xAOD::Egamma 4x4 Covariance Matrix - /// @{ + /// @{ ///4x4 Covariance Matrix in EtEtaPhiM (needs decision) typedef Eigen::Matrix<float,4,4> EgammaCovMatrix_t; /// Returns the 4x4 symmetric covariance matrix . - EgammaCovMatrix_t covMatrix() const; + EgammaCovMatrix_t covMatrix() const; /// set the 4x4 symmetric covariance matrix . void setCovMatrix(const EgammaCovMatrix_t& cov); - /// @} + /// @} /// @name xAOD::Egamma Pointer to CaloClusters - /// @{ + /// @{ /// @brief Return the number of xAOD::CaloClusters that define the electron /// candidate - size_t nCaloClusters() const; + size_t nCaloClusters() const; /// @brief Pointer to the xAOD::CaloCluster/s that define the electron /// candidate @@ -168,14 +168,14 @@ namespace xAOD { /// @brief ElementLink to the xAOD::CaloCluster/s that match the electron /// candidate const ElementLink< CaloClusterContainer >& - caloClusterLink( size_t index = 0 ) const; + caloClusterLink( size_t index = 0 ) const; /// Helper type definition typedef std::vector< ElementLink< CaloClusterContainer > > CLELVec_t; /// @brief Get all cluster links const CLELVec_t& caloClusterLinks() const; - /// @brief set Pointer to the xAOD::CaloCluster + /// @brief set Pointer to the xAOD::CaloCluster void setCaloClusterLinks( const CLELVec_t& links ); /// @} @@ -184,13 +184,13 @@ namespace xAOD { /// @name xAOD::Egamma author (i.e. which reco algorithm was used) /// @{ - /// @brief Get author + /// @brief Get author uint16_t author(uint16_t bitmask=EgammaParameters::AuthorALL) const; - /// @brief add author + /// @brief add author void addAuthor( uint16_t ); - /// @brief set author + /// @brief set author void setAuthor( uint16_t ); /// @} @@ -198,16 +198,16 @@ namespace xAOD { /// as the same cluster leads to creation of both. 
/// @{ - /// @brief Get ambiguous + /// @brief Get ambiguous const Egamma_v1* ambiguousObject() const; /// @} - - + + /// @name xAOD::Egamma Shower shape Accesors /// If 'information' is stored in this xAOD::Egamma and is of the correct type, /// then the function fills 'value' and returns 'true', otherwise it returns 'false', and does not touch 'value'. /// - /// @{ + /// @{ /// @brief Accessor for ShowerShape values. bool showerShapeValue(float& value,const EgammaParameters::ShowerShapeType information) const; @@ -222,9 +222,9 @@ namespace xAOD { /// @} - /// @name xAOD::Egamma object quality of the calorimeter cluster - /// @{ - + /// @name xAOD::Egamma object quality of the calorimeter cluster + /// @{ + /// @brief Check object quality. Return True is it is Good Object Quality bool isGoodOQ(uint32_t mask ) const; @@ -233,14 +233,14 @@ namespace xAOD { /// @brief Set the object quality void setOQ(uint32_t newOQ); - + ///@} /// @name xAOD::Egamma Isolation value Accesors /// If 'information' is stored in this xAOD::Egamma and is of the correct type, /// then the function fills 'value' and returns 'true', otherwise it returns 'false', and does not touch 'value'. /// - /// @{ + /// @{ /// @brief Accessor for Isolation values. bool isolation(float& value, const Iso::IsolationType information) const; @@ -275,19 +275,19 @@ namespace xAOD { /// If 'information' is stored in this xAOD::Egamma and is of the correct type, /// then the function fills 'value' and returns 'true', otherwise it returns 'false', and does not touch 'value'. /// - /// @{ + /// @{ /// @brief Accessor for flavour and type depended Isolation Calo correction. - bool isolationCaloCorrection(float& value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, + bool isolationCaloCorrection(float& value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param) const; /// @brief Accessor for flavour and type depended Isolation Calo corrections , this just returns the correction without internaly checking if it exists. /// Will lead to an exception if the information is not available - float isolationCaloCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, + float isolationCaloCorrection(const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param) const; /// @brief set method for flavour and type depended Isolation Calo Corrections. - bool setIsolationCaloCorrection(float value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, + bool setIsolationCaloCorrection(float value, const Iso::IsolationFlavour flavour, const Iso::IsolationCaloCorrection corr, const Iso::IsolationCorrectionParameter param); /// @brief Accessor for type depended Isolation Calo correction. @@ -323,52 +323,16 @@ namespace xAOD { /// @} - - - /// @name xAOD::Egamma selector / isEM methods using enums - /// @{ - /// @name xAOD::Egamma selector methods with enums - /// @brief Check if the egamma object pass a selection menu - ///If the menu decision is stored in this xAOD::Egamma, - ///then the function fills 'value' with the decision (reference) - ///and returns 'true', otherwise it returns 'false', - ///and does not touch 'value'. 
- bool passSelection(bool& value, const xAOD::EgammaParameters::SelectionMenu menu ) const; - - /// @brief Check if the egamma object pass a selection menu - /// If the particular menu decision is not stored in this xAOD::Egamma, - /// an exception will occur - bool passSelection( const xAOD::EgammaParameters::SelectionMenu menu ) const; - - /// @brief Set the selection decision for a menu - void setPassSelection(bool value, const xAOD::EgammaParameters::SelectionMenu menu); - - /// @brief Return the isEM word for a selection menu - ///If the menu isEM is stored in this xAOD::Egamma, - ///then the function fills 'value' with the isEM (reference) - ///and returns 'true', otherwise it returns 'false', - ///and does not touch 'value'. - bool selectionisEM(unsigned int& value, const xAOD::EgammaParameters::SelectionisEM isEM) const; - - /// @brief Return the isEM word for a selection menu - /// If the particular isEM word is not stored in this xAOD::Egamma, - /// an exception will occur - unsigned int selectionisEM(const xAOD::EgammaParameters::SelectionisEM isEM) const; - - /// @brief Set the isEM word for a selection menu - void setSelectionisEM(unsigned int value, const xAOD::EgammaParameters::SelectionisEM isEM); - - ///@} /// @name xAOD::Egamma selector / isEM methods using the menu name - /// @{ + /// @{ /// @brief Check if the egamma object pass a selection menu (using the name) ///If the menu decision is stored in this xAOD::Egamma, - ///then the function fills 'value' with the decision (reference) - ///and returns 'true', otherwise it returns 'false', + ///then the function fills 'value' with the decision (reference) + ///and returns 'true', otherwise it returns 'false', /// and does not touch 'value'. bool passSelection(bool& value, const std::string& menu ) const; @@ -382,8 +346,8 @@ namespace xAOD { /// @brief Return the isEM word for a selection menu ///If the menu isEM is stored in this xAOD::Egamma, - ///then the function fills 'value' with the isEM (reference) - ///and returns 'true', otherwise it returns 'false', + ///then the function fills 'value' with the isEM (reference) + ///and returns 'true', otherwise it returns 'false', ///and does not touch 'value'. bool selectionisEM(unsigned int& value, const std::string& isEM) const; @@ -397,8 +361,8 @@ namespace xAOD { /// @brief Return the LH value as float ///If the LH decision is stored in this xAOD::Egamma, - ///then the function fills 'value' with the decision (reference) - ///and returns 'true', otherwise it returns 'false', + ///then the function fills 'value' with the decision (reference) + ///and returns 'true', otherwise it returns 'false', ///and does not touch 'value'. 
bool likelihoodValue(float& value, const std::string& LHValue=std::string("LHValue")) const; @@ -407,12 +371,12 @@ namespace xAOD { /// an exception will occur float likelihoodValue(const std::string& LHValue=std::string("LHValue")) const; - /// @brief Set the LHValue as float + /// @brief Set the LHValue as float void setLikelihoodValue(float value, const std::string& LHValue=std::string("LHValue")); ///@} - + }; // class Egamma diff --git a/Event/xAOD/xAODEgamma/xAODEgamma/versions/Electron_v1.h b/Event/xAOD/xAODEgamma/xAODEgamma/versions/Electron_v1.h index 5a8c4afd76b796dd7c49b2d9bf1e161c89fb24bf..970e3e625d33f301d62140299d4f00a3eed0ca8b 100644 --- a/Event/xAOD/xAODEgamma/xAODEgamma/versions/Electron_v1.h +++ b/Event/xAOD/xAODEgamma/xAODEgamma/versions/Electron_v1.h @@ -6,7 +6,7 @@ #ifndef XAODEGAMMA_VERSIONS_ELECTRON_V1_H #define XAODEGAMMA_VERSIONS_ELECTRON_V1_H - + // Core include(s): #include "AthLinks/ElementLink.h" // xAOD include(s): @@ -25,13 +25,11 @@ namespace xAOD { /// @class xAOD::Electron - /// @brief Class describing an electron + /// @brief Class describing an electron /// /// @author Christos Anastopoulos /// @author Anthony Morley /// - /// $Revision: 636327 $ - /// $Date: 2014-12-16 17:34:24 +0100 (Tue, 16 Dec 2014) $ /// class Electron_v1 :public xAOD::Egamma_v1{ @@ -76,12 +74,12 @@ namespace xAOD { /// @} /// @name xAOD::Electron Pointers to TrackParticles - /// @{ + /// @{ /// @brief Return the number xAOD::TrackParticles that match the electron /// candidate size_t nTrackParticles() const; - + /// @brief Pointer to the xAOD::TrackParticle/s that match the electron /// candidate const xAOD::TrackParticle* trackParticle( size_t index = 0 ) const; @@ -89,7 +87,7 @@ namespace xAOD { /// @brief ElementLink to the xAOD::TrackParticle/s that match the electron /// candidate const ElementLink< TrackParticleContainer >& - trackParticleLink( size_t index = 0 ) const; + trackParticleLink( size_t index = 0 ) const; /// Helper type definition typedef std::vector< ElementLink< TrackParticleContainer > > TPELVec_t; @@ -103,16 +101,16 @@ namespace xAOD { /// @} /// @name xAOD::Electron Track to Calo matching values. - /// @{ + /// @{ - ///@brief Accessor for Track to Calo Match Values + ///@brief Accessor for Track to Calo Match Values bool trackCaloMatchValue( float& value, const EgammaParameters::TrackCaloMatchType information ) const; ///@brief Accessor for Track to Calo Match values , this just returns the value without internaly checking if it exists. /// Will lead to an exception if the information is not available. float trackCaloMatchValue( const EgammaParameters::TrackCaloMatchType information ) const; - + ///@brief Set method for Track to Calo Match values. bool setTrackCaloMatchValue( const float value, const EgammaParameters::TrackCaloMatchType information ); diff --git a/LArCalorimeter/LArCellRec/share/LArCellDeadOTXCorr_test.ref b/LArCalorimeter/LArCellRec/share/LArCellDeadOTXCorr_test.ref index dbe55ea5b6c01fade83b275a0de3012ca95e850a..689ae26514e0a6926cc42dddce99c5230ee11d2a 100644 --- a/LArCalorimeter/LArCellRec/share/LArCellDeadOTXCorr_test.ref +++ b/LArCalorimeter/LArCellRec/share/LArCellDeadOTXCorr_test.ref @@ -306,6 +306,7 @@ LArOnlineIDDetD... 
INFO in createObj: creating a LArOnlineID helper object in LArOnlineID INFO initialize_from_dictionary AtlasDetectorID INFO initialize_from_dictionary - OK ClassIDSvc INFO getRegistryEntries: read 68 CLIDRegistry entries for module ALL +CaloMgrDetDescrCnvWARNING Attempting to create a Calo Detector Manager object outside of the event loop. Geometry may not be aligned. CaloMgrDetDescrCnv INFO in createObj: creating a Calo Detector Manager object in the detector store DetectorStore WARNING retrieve(default): No valid proxy for default object of type TileDetDescrManager(CLID 2941) diff --git a/LumiBlock/LumiCalc/CMakeLists.txt b/LumiBlock/LumiCalc/CMakeLists.txt index ed5dec99e7e1399cec274cec877cf5b4c1885369..2b67db6530d5534d8a25e430d2f5fe7114d8ba09 100644 --- a/LumiBlock/LumiCalc/CMakeLists.txt +++ b/LumiBlock/LumiCalc/CMakeLists.txt @@ -15,7 +15,7 @@ atlas_add_library( LumiBlockCoolQuery src/ReplicaSorter.cxx src/LumiBlockCollectionConverter.cxx PUBLIC_HEADERS LumiCalc - INCLUDE_DIRS ${COOL_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS} ${CORAL_INCLUDE_DIRS} + INCLUDE_DIRS ${COOL_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS} LINK_LIBRARIES ${COOL_LIBRARIES} ${ROOT_LIBRARIES} AthenaKernel GoodRunsListsLib LumiBlockData xAODLuminosity ) atlas_add_executable( iLumiCalc diff --git a/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonErrorOptimisationTool.h b/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonErrorOptimisationTool.h index e4780b929d0cc7afb63676242f5146322673d7a9..8b490fb86677f14a9679b83218cb38dfc9a8a3d7 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonErrorOptimisationTool.h +++ b/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonErrorOptimisationTool.h @@ -23,7 +23,7 @@ namespace Muon { static const InterfaceID& interfaceID(); /** optimise errors on a track to maximize the momentum resolution */ - virtual Trk::Track* optimiseErrors(Trk::Track& track ) const = 0; + virtual std::unique_ptr<Trk::Track> optimiseErrors(Trk::Track* track ) const = 0; }; diff --git a/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonRefitTool.h b/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonRefitTool.h index e4c5b72ae0fbb274616ffe6e733430c39d96e45e..72ba99d9746f9c64dadd8a7ea83b094058210ee4 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonRefitTool.h +++ b/MuonSpectrometer/MuonReconstruction/MuonRecTools/MuonRecToolInterfaces/MuonRecToolInterfaces/IMuonRefitTool.h @@ -53,10 +53,10 @@ namespace Muon { static const InterfaceID& interfaceID(); /** refit a track */ - virtual const Trk::Track* refit( const Trk::Track& track, const Settings* settings = 0 ) const = 0; + virtual std::unique_ptr<Trk::Track> refit( Trk::Track* track, const Settings* settings = 0 ) const = 0; /** refit and back extrapolate a vector of track pairs */ - virtual std::vector<const Trk::Track*> refit( const std::vector<const Trk::Track*>& tracks, const Settings* settings = 0 ) const = 0; + virtual std::vector<std::unique_ptr<Trk::Track> > refit( std::vector<Trk::Track*>& tracks, const Settings* settings = 0 ) const = 0; }; diff --git a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.cxx 
b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.cxx index 100e3a5bb4386d12bde3fda75a8001a68d068600..7679206d8f0a891a690c98135eada94e374b30a9 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.cxx +++ b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.cxx @@ -63,24 +63,22 @@ namespace Muon { return StatusCode::SUCCESS; } - Trk::Track* MuonErrorOptimisationTool::optimiseErrors( Trk::Track& track ) const { + std::unique_ptr<Trk::Track> MuonErrorOptimisationTool::optimiseErrors( Trk::Track* track ) const { - if( m_refitTool.empty() ) return 0; - const Trk::Perigee* pp = track.perigeeParameters(); + if( m_refitTool.empty() ) return std::unique_ptr<Trk::Track>(); + const Trk::Perigee* pp = track->perigeeParameters(); bool isLowPt = false; if( pp && pp->momentum().mag() < m_lowPtThreshold ) isLowPt = true; if( isLowPt ) ++m_nrefitAllLowPt; else ++m_nrefitAll; - - const Trk::Track* refittedTrack = 0; - const Trk::Track* result1 = 0; - const Trk::Track* result2 = 0; + std::unique_ptr<Trk::Track> result1; + std::unique_ptr<Trk::Track> result2; // first refit with precise errors IMuonRefitTool::Settings settings = m_refitSettings; settings.broad = false; - refittedTrack = m_refitTool->refit(track,&settings); + std::unique_ptr<Trk::Track> refittedTrack = m_refitTool->refit(track,&settings); if( refittedTrack ){ // check whether it is ok @@ -88,8 +86,7 @@ namespace Muon { ATH_MSG_VERBOSE("Precise fit bad " << std::endl << m_printer->print(*refittedTrack) << std::endl << m_printer->printStations(*refittedTrack)); // if not delete track - result1 = refittedTrack != &track ? refittedTrack : 0; - refittedTrack = 0; + result1.swap(refittedTrack); }else{ ATH_MSG_VERBOSE("Precise fit ok " << std::endl << m_printer->print(*refittedTrack) << std::endl << m_printer->printStations(*refittedTrack)); if( isLowPt ) ++m_nrefitPreciseLowPt; @@ -110,8 +107,7 @@ namespace Muon { if( !m_edmHelperSvc->goodTrack(*refittedTrack,m_chi2NdofCutRefit) ) { ATH_MSG_VERBOSE("Loose fit bad " << std::endl << m_printer->print(*refittedTrack) << std::endl << m_printer->printStations(*refittedTrack)); // if not delete track - result2 = refittedTrack != &track ? refittedTrack : 0; - refittedTrack = 0; + result2.swap(refittedTrack); }else{ ATH_MSG_VERBOSE("Loose fit ok " << std::endl << m_printer->print(*refittedTrack) << std::endl << m_printer->printStations(*refittedTrack)); if( isLowPt ) ++m_nrefitLowPt; @@ -124,13 +120,13 @@ namespace Muon { } // if failed to refit or refit returned original track, return 0 - if( !refittedTrack || refittedTrack == &track ){ + if( !refittedTrack || *refittedTrack->perigeeParameters() == *track->perigeeParameters() ){ // check if any refit succeeded - if( !result1 && !result2 ) return 0; + if( !result1 && !result2 ) return std::unique_ptr<Trk::Track>(); // now compare chi2 - const Trk::FitQuality* fq0 = track.fitQuality(); + const Trk::FitQuality* fq0 = track->fitQuality(); const Trk::FitQuality* fq1 = result1 ? result1->fitQuality() : 0; const Trk::FitQuality* fq2 = result2 ? 
result2->fitQuality() : 0; @@ -147,56 +143,56 @@ namespace Muon { doSelection = false; // ugly bit of code to get the hit counts for the three tracks int nhits0 = -1; - Trk::TrackSummary* summary0 = track.trackSummary(); + Trk::TrackSummary* summary0 = track->trackSummary(); Trk::MuonTrackSummary* muonSummary0 = 0; if( summary0 ){ if( summary0->muonTrackSummary() ) { muonSummary0 = summary0->muonTrackSummary(); if( muonSummary0 ) nhits0 = muonSummary0->netaHits()+ muonSummary0->nphiHits(); - }else{ + }else{ Trk::TrackSummary tmpSum(*summary0); - m_trackSummaryTool->addDetailedTrackSummary(track,tmpSum); + m_trackSummaryTool->addDetailedTrackSummary(*track,tmpSum); if( tmpSum.muonTrackSummary() ) nhits0 = muonSummary0->netaHits()+ muonSummary0->nphiHits(); } }else{ Trk::TrackSummary tmpSummary; - m_trackSummaryTool->addDetailedTrackSummary(track,tmpSummary); + m_trackSummaryTool->addDetailedTrackSummary(*track,tmpSummary); if( tmpSummary.muonTrackSummary() ) muonSummary0 = tmpSummary.muonTrackSummary(); if( muonSummary0 ) nhits0 = muonSummary0->netaHits()+ muonSummary0->nphiHits(); } int nhits1 = -1; - Trk::TrackSummary* summary1 = track.trackSummary(); + Trk::TrackSummary* summary1 = track->trackSummary(); Trk::MuonTrackSummary* muonSummary1 = 0; if( summary1 ){ if( summary1->muonTrackSummary() ) muonSummary1 = summary1->muonTrackSummary(); else{ Trk::TrackSummary* tmpSum = summary1; - if( tmpSum ) m_trackSummaryTool->addDetailedTrackSummary(track,*tmpSum); + if( tmpSum ) m_trackSummaryTool->addDetailedTrackSummary(*track,*tmpSum); if( tmpSum->muonTrackSummary() ) muonSummary1 = tmpSum->muonTrackSummary(); } if( muonSummary1 ) nhits1 = muonSummary1->netaHits()+ muonSummary1->nphiHits(); }else{ Trk::TrackSummary tmpSummary; - m_trackSummaryTool->addDetailedTrackSummary(track,tmpSummary); + m_trackSummaryTool->addDetailedTrackSummary(*track,tmpSummary); if( tmpSummary.muonTrackSummary() ) muonSummary1 = tmpSummary.muonTrackSummary(); if( muonSummary1 ) nhits1 = muonSummary1->netaHits()+ muonSummary1->nphiHits(); } int nhits2 = -1; - Trk::TrackSummary* summary2 = track.trackSummary(); + Trk::TrackSummary* summary2 = track->trackSummary(); Trk::MuonTrackSummary* muonSummary2 = 0; if( summary2 ){ if( summary2->muonTrackSummary() ) muonSummary2 = summary2->muonTrackSummary(); else{ Trk::TrackSummary* tmpSum = summary2; - if( tmpSum ) m_trackSummaryTool->addDetailedTrackSummary(track,*tmpSum); + if( tmpSum ) m_trackSummaryTool->addDetailedTrackSummary(*track,*tmpSum); if( tmpSum->muonTrackSummary() ) muonSummary2 = tmpSum->muonTrackSummary(); } if( muonSummary2 ) nhits2 = muonSummary2->netaHits()+ muonSummary2->nphiHits(); }else{ Trk::TrackSummary tmpSummary; - m_trackSummaryTool->addDetailedTrackSummary(track,tmpSummary); + m_trackSummaryTool->addDetailedTrackSummary(*track,tmpSummary); if( tmpSummary.muonTrackSummary() ) muonSummary2 = tmpSummary.muonTrackSummary(); if( muonSummary2 ) nhits2 = muonSummary2->netaHits()+ muonSummary2->nphiHits(); } @@ -220,27 +216,19 @@ namespace Muon { if( chi2Refit < fq0->chiSquared() ){ if( firstIsBest ) { ATH_MSG_DEBUG("Keeping precise refit"); - delete result2; ++m_nbetterPreciseFit; - return const_cast<Trk::Track*>(result1); + return result1; }else{ ATH_MSG_DEBUG("Keeping loose refit"); - delete result1; ++m_nbetterFit; - return const_cast<Trk::Track*>(result2); + return result2; } } } - // clean up - delete result1; - delete result2; - return 0; + return std::unique_ptr<Trk::Track>(); } - // clean up - if( result1 && result1 != refittedTrack ) delete 
result1; -// if( result2 && result2 != refittedTrack ) delete result2; - return const_cast<Trk::Track*>(refittedTrack); + return refittedTrack; } } diff --git a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.h b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.h index e144fa27f1242588efeadfcd5649f9f48a59f4c4..a1e46f516764420c5888655d04897935dc5b92fc 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.h +++ b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonErrorOptimisationTool.h @@ -30,7 +30,7 @@ namespace Muon { virtual StatusCode finalize() override; /** optimise the error strategy used for the track */ - virtual Trk::Track* optimiseErrors( Trk::Track& track ) const override; + virtual std::unique_ptr<Trk::Track> optimiseErrors( Trk::Track* track ) const override; private: ServiceHandle<IMuonEDMHelperSvc> m_edmHelperSvc {this, "edmHelper", "Muon::MuonEDMHelperSvc/MuonEDMHelperSvc", "Handle to the service providing the IMuonEDMHelperSvc interface" }; diff --git a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.cxx b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.cxx index 28a5a250145ab33eb98ca51ec4d2ee0435b68ba5..1d3969d285ea748d2e4385495deb748196057e10 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.cxx +++ b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.cxx @@ -155,255 +155,247 @@ namespace Muon { return StatusCode::SUCCESS; } - const Trk::Track* MuonRefitTool::refit( const Trk::Track& track, const IMuonRefitTool::Settings* set ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::refit( Trk::Track* track, const IMuonRefitTool::Settings* set ) const { const IMuonRefitTool::Settings& settings = set ? *set : m_defaultSettings; + //to keep track of the latest track + std::unique_ptr<Trk::Track> newTrack; ++m_nrefits; - const Trk::Track* newTrack = &track; if( settings.removeOutliers ){ - const Trk::Track* cleanedTrack = removeOutliers(*newTrack,settings); + std::unique_ptr<Trk::Track> cleanedTrack = removeOutliers(track,settings); if( !cleanedTrack ){ ATH_MSG_DEBUG("Track lost during outlier removal"); ++m_failedOutlierRemoval; - return newTrack; + return std::make_unique<Trk::Track>(*track); } - if( cleanedTrack != newTrack ){ + if( cleanedTrack->perigeeParameters() != track->perigeeParameters() ){ ATH_MSG_DEBUG("Outlier removal removed hits from track"); - if( newTrack != &track ) delete newTrack; - } - newTrack = cleanedTrack; + } + newTrack.swap(cleanedTrack); } + else newTrack=std::make_unique<Trk::Track>(*track); if( settings.updateErrors ){ - Trk::Track* updateErrorTrack = m_alignmentErrors ? updateAlignmentErrors(*newTrack,settings) : updateErrors(*newTrack,settings); + std::unique_ptr<Trk::Track> updateErrorTrack = m_alignmentErrors ? 
updateAlignmentErrors(newTrack.get(),settings) : updateErrors(newTrack.get(),settings); if( !updateErrorTrack ) { ATH_MSG_WARNING("Failed to update errors"); ++m_failedErrorUpdate; return newTrack; } - if( updateErrorTrack != newTrack && newTrack != &track ) delete newTrack; - newTrack = updateErrorTrack; + newTrack.swap(updateErrorTrack); } if( settings.refit ){ - ATH_MSG_DEBUG("Original track" << m_printer->print(*&track) ); + ATH_MSG_DEBUG("Original track" << m_printer->print(*track) ); -// do not put AEOTs on extremely bad chi2 tracks and do not refit them + // do not put AEOTs on extremely bad chi2 tracks and do not refit them - Trk::Track* refittedTrack = track.fitQuality()&&track.fitQuality()->chiSquared()<10000*track.fitQuality()->numberDoF() ? m_trackFitter->fit(*newTrack,false,Trk::muon) : 0 ; + std::unique_ptr<Trk::Track> refittedTrack; + if(track->fitQuality() && track->fitQuality()->chiSquared()<10000*track->fitQuality()->numberDoF()) + refittedTrack= std::unique_ptr<Trk::Track>(m_trackFitter->fit(*newTrack,false,Trk::muon)); if( !refittedTrack ){ ATH_MSG_DEBUG("Failed to refit track"); ++m_failedRefit; -// BUG fix Peter - delete newTrack; - newTrack = &track; - return newTrack; + // BUG fix Peter + return std::make_unique<Trk::Track>(*track); } ATH_MSG_DEBUG("Refitted track" << m_printer->print(*refittedTrack) ); ATH_MSG_DEBUG("Refitted track" << m_printer->printMeasurements(*refittedTrack) ); - if( refittedTrack != newTrack && newTrack != &track ) delete newTrack; - newTrack = refittedTrack; + newTrack.swap(refittedTrack); } if( settings.extrapolateToMuonEntry ){ - Trk::Track* extrapolatedTrack = m_muonEntryTrackExtrapolator->extrapolate(*newTrack); + std::unique_ptr<Trk::Track> extrapolatedTrack(m_muonEntryTrackExtrapolator->extrapolate(*newTrack)); if( !extrapolatedTrack ){ ATH_MSG_WARNING("Failed to back-extrapolate track"); ++m_failedExtrapolationMuonEntry; return newTrack; } ATH_MSG_DEBUG("Entry track " << m_printer->print(*extrapolatedTrack) ); - if( extrapolatedTrack != newTrack && newTrack != &track ) delete newTrack; - newTrack = extrapolatedTrack; + newTrack.swap(extrapolatedTrack); } ++m_ngoodRefits; return newTrack; } - std::vector<const Trk::Track*> MuonRefitTool::refit( const std::vector<const Trk::Track*>& tracks, const IMuonRefitTool::Settings* set ) const { + std::vector<std::unique_ptr<Trk::Track> > MuonRefitTool::refit( std::vector<Trk::Track*>& tracks, const IMuonRefitTool::Settings* set ) const { - std::vector<const Trk::Track*> refittedTracks; + std::vector<std::unique_ptr<Trk::Track> > refittedTracks; refittedTracks.reserve(tracks.size()); - std::vector<const Trk::Track*>::const_iterator it = tracks.begin(); - std::vector<const Trk::Track*>::const_iterator it_end = tracks.end(); + std::vector<Trk::Track*>::const_iterator it = tracks.begin(); + std::vector<Trk::Track*>::const_iterator it_end = tracks.end(); for( ;it!=it_end;++it ){ - const Trk::Track* refittedTrack = refit(**it,set); - if( refittedTrack == *it ) refittedTrack = new Trk::Track(**it); - - if(refittedTrack) refittedTracks.push_back(refittedTrack); + refittedTracks.push_back(refit(*it,set)); } return refittedTracks; } - Trk::Track* MuonRefitTool::updateAlignmentErrors( const Trk::Track& track, const IMuonRefitTool::Settings& settings ) const { - + std::unique_ptr<Trk::Track> MuonRefitTool::updateAlignmentErrors( Trk::Track* track, const IMuonRefitTool::Settings& settings ) const { -// first scale the Mdt errors + // first scale the Mdt errors - const Trk::Track* inputTrack = &track; - 
Trk::Track* updatedTrack = updateMdtErrors(*inputTrack,settings); - - Trk::Track* updatedAEOTsTrack = m_simpleAEOTs ? makeSimpleAEOTs(*updatedTrack) : makeAEOTs(*updatedTrack); - if( updatedAEOTsTrack != updatedTrack ) delete updatedTrack; + std::unique_ptr<Trk::Track> updatedTrack = updateMdtErrors(track,settings); + + std::unique_ptr<Trk::Track> updatedAEOTsTrack = m_simpleAEOTs ? makeSimpleAEOTs(updatedTrack.get()) : makeAEOTs(updatedTrack.get()); - return updatedAEOTsTrack; + return updatedAEOTsTrack; } - Trk::Track* MuonRefitTool::makeAEOTs( const Trk::Track& track ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::makeAEOTs( Trk::Track* track ) const { -// -// use the new AlignmentEffectsOnTrack class and alignmentErrorTool -// + // + // use the new AlignmentEffectsOnTrack class and alignmentErrorTool + // if( m_alignErrorTool.empty() ) { - Trk::Track* newTrack = new Trk::Track( track ); + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( *track ); return newTrack; } -// -// Use the alignmentErrorTool and store a list of hits with error on position and angle -// + // + // Use the alignmentErrorTool and store a list of hits with error on position and angle + // std::map< std::vector<Identifier>, std::pair <double, double> > alignerrmap; - std::vector<Trk::AlignmentDeviation*> align_deviations; - m_alignErrorTool->makeAlignmentDeviations(track, align_deviations); - - int iok = 0; - bool isSmallChamber = false; - bool isLargeChamber = false; - bool isEndcap = false; - bool isBarrel = false; - std::vector <int> usedRotations; - - // loop on deviations - for(auto it : align_deviations){ - double angleError = 0.; - double translationError = 0.; - bool differentChambers = false; - int jdifferent = -1; - isSmallChamber = false; - isLargeChamber = false; - isEndcap = false; - isBarrel = false; - - if( dynamic_cast<MuonAlign::AlignmentTranslationDeviation*>(it) ) { - translationError = std::sqrt(it->getCovariance(0,0)); - // vector to store hit id - std::vector<Identifier> hitids; - std::vector<const Trk::RIO_OnTrack*> vec_riowithdev; - it->getListOfHits(vec_riowithdev); - // bool to decide if deviation should be skipped (if it's for more than 1 station) - for(auto riowithdev : vec_riowithdev){ - Identifier id_riowithdev = riowithdev->identify(); - if(m_idHelperSvc->isEndcap(id_riowithdev)) { - isEndcap = true; - } else { - isBarrel = true; - } - if(m_idHelperSvc->isSmallChamber(id_riowithdev)) { - isSmallChamber = true; - } else { - isLargeChamber = true; - } - hitids.push_back(id_riowithdev); - if( hitids.size()>1 && m_idHelperSvc->chamberId(id_riowithdev) != m_idHelperSvc->chamberId(hitids[0]) ) { - differentChambers = true; - jdifferent = hitids.size()-1; - } - } - bool matchFound = false; - if( hitids.size()>0) { - int iRot = -1; - for(auto itRot : align_deviations){ - iRot++; - if( dynamic_cast<MuonAlign::AlignmentRotationDeviation*>(itRot) ) { - if( itRot->hasValidHashOfHits() && it->hasValidHashOfHits() ) { - if( itRot->getHashOfHits() == it->getHashOfHits() ){ - angleError = std::sqrt(itRot->getCovariance(0,0)); - matchFound = true; - usedRotations.push_back(iRot); - } - } else { - ATH_MSG_ERROR("One of the alignment deviations has an invalid hash created from the hits."); - } - } - if(matchFound) break; - } - } - // if deviation is accepted (i.e. 
only on one station) store the hit IDs associated with the deviation and the error - -// store (all) translationError with or without a matched angleError - iok++; - alignerrmap.insert( std::pair < std::vector<Identifier>, std::pair < double, double > > ( hitids, std::pair < double, double > (translationError,angleError) ) ); - - if(matchFound) ATH_MSG_DEBUG(" AlignmentMap entry " << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); - if(!matchFound) ATH_MSG_DEBUG(" AlignmentMap entry No angleError" << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); - if(isEndcap) ATH_MSG_DEBUG(" AlignmentMap Endcap Chamber "); - if(isBarrel) ATH_MSG_DEBUG(" AlignmentMap Barrel Chamber "); - if(isSmallChamber) ATH_MSG_DEBUG(" AlignmentMap Small Chamber "); - if(isLargeChamber) ATH_MSG_DEBUG(" AlignmentMap Large Chamber "); - if(differentChambers) ATH_MSG_DEBUG(" AlignmentMap entry " << iok << " for different Chamber " << m_idHelperSvc->toString(hitids[jdifferent]) ); - } + std::vector<Trk::AlignmentDeviation*> align_deviations; + m_alignErrorTool->makeAlignmentDeviations(*track, align_deviations); + + int iok = 0; + bool isSmallChamber = false; + bool isLargeChamber = false; + bool isEndcap = false; + bool isBarrel = false; + std::vector <int> usedRotations; + + // loop on deviations + for(auto it : align_deviations){ + double angleError = 0.; + double translationError = 0.; + bool differentChambers = false; + int jdifferent = -1; + isSmallChamber = false; + isLargeChamber = false; + isEndcap = false; + isBarrel = false; + + if( dynamic_cast<MuonAlign::AlignmentTranslationDeviation*>(it) ) { + translationError = std::sqrt(it->getCovariance(0,0)); + // vector to store hit id + std::vector<Identifier> hitids; + std::vector<const Trk::RIO_OnTrack*> vec_riowithdev; + it->getListOfHits(vec_riowithdev); + // bool to decide if deviation should be skipped (if it's for more than 1 station) + for(auto riowithdev : vec_riowithdev){ + Identifier id_riowithdev = riowithdev->identify(); + if(m_idHelperSvc->isEndcap(id_riowithdev)) { + isEndcap = true; + } else { + isBarrel = true; + } + if(m_idHelperSvc->isSmallChamber(id_riowithdev)) { + isSmallChamber = true; + } else { + isLargeChamber = true; + } + hitids.push_back(id_riowithdev); + if( hitids.size()>1 && m_idHelperSvc->chamberId(id_riowithdev) != m_idHelperSvc->chamberId(hitids[0]) ) { + differentChambers = true; + jdifferent = hitids.size()-1; + } + } + bool matchFound = false; + if( hitids.size()>0) { + int iRot = -1; + for(auto itRot : align_deviations){ + iRot++; + if( dynamic_cast<MuonAlign::AlignmentRotationDeviation*>(itRot) ) { + if( itRot->hasValidHashOfHits() && it->hasValidHashOfHits() ) { + if( itRot->getHashOfHits() == it->getHashOfHits() ){ + angleError = std::sqrt(itRot->getCovariance(0,0)); + matchFound = true; + usedRotations.push_back(iRot); + } + } else { + ATH_MSG_ERROR("One of the alignment deviations has an invalid hash created from the hits."); + } + } + if(matchFound) break; + } + } + // if deviation is accepted (i.e. 
only on one station) store the hit IDs associated with the deviation and the error + + // store (all) translationError with or without a matched angleError + iok++; + alignerrmap.insert( std::pair < std::vector<Identifier>, std::pair < double, double > > ( hitids, std::pair < double, double > (translationError,angleError) ) ); + + if(matchFound) ATH_MSG_DEBUG(" AlignmentMap entry " << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); + if(!matchFound) ATH_MSG_DEBUG(" AlignmentMap entry No angleError" << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); + if(isEndcap) ATH_MSG_DEBUG(" AlignmentMap Endcap Chamber "); + if(isBarrel) ATH_MSG_DEBUG(" AlignmentMap Barrel Chamber "); + if(isSmallChamber) ATH_MSG_DEBUG(" AlignmentMap Small Chamber "); + if(isLargeChamber) ATH_MSG_DEBUG(" AlignmentMap Large Chamber "); + if(differentChambers) ATH_MSG_DEBUG(" AlignmentMap entry " << iok << " for different Chamber " << m_idHelperSvc->toString(hitids[jdifferent]) ); } + } -// now add the angleErrors that were NOT matched to a translationError - - int iRot = -1; - for(auto itRot : align_deviations){ - iRot++; - isSmallChamber = false; - isLargeChamber = false; - isEndcap = false; - isBarrel = false; - if( dynamic_cast<MuonAlign::AlignmentRotationDeviation*>(itRot) ) { - bool used = false; - for (unsigned int i = 0; i < usedRotations.size(); i++) { - if(iRot == usedRotations[i]) used = true; - } - if(used) continue; - ATH_MSG_ERROR("This following code should not be reached anymore!"); - std::vector<const Trk::RIO_OnTrack*> vec_riowithdev; - itRot->getListOfHits(vec_riowithdev); - - std::vector<Identifier> hitids; - // bool to decide if deviation should be skipped (if it's for more than 1 station) - for(auto riowithdev : vec_riowithdev){ - Identifier id_riowithdev = riowithdev->identify(); - if(m_idHelperSvc->isEndcap(id_riowithdev)) { - isEndcap = true; - } else { - isBarrel = true; - } - if(m_idHelperSvc->isSmallChamber(id_riowithdev)) { - isSmallChamber = true; - } else { - isLargeChamber = true; - } - hitids.push_back(id_riowithdev); - } - - double translationError = 0.; - double angleError = std::sqrt(itRot->getCovariance(0,0)); - - iok++; - alignerrmap.insert( std::pair < std::vector<Identifier>, std::pair < double, double > > ( hitids, std::pair < double, double > (translationError,angleError) ) ); - ATH_MSG_DEBUG(" AlignmentMap entry No Translation Error " << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); - if(isEndcap) ATH_MSG_DEBUG(" AlignmentMap Endcap Chamber"); - if(isBarrel) ATH_MSG_DEBUG(" AlignmentMap Barrel Chamber"); - if(isSmallChamber) ATH_MSG_DEBUG(" AlignmentMap Small Chamber "); - if(isLargeChamber) ATH_MSG_DEBUG(" AlignmentMap Large Chamber "); - } + // now add the angleErrors that were NOT matched to a translationError + + int iRot = -1; + for(auto itRot : align_deviations){ + iRot++; + isSmallChamber = false; + isLargeChamber = false; + isEndcap = false; + isBarrel = false; + if( dynamic_cast<MuonAlign::AlignmentRotationDeviation*>(itRot) ) { + bool used = false; + for (unsigned int i = 0; i < usedRotations.size(); i++) { + if(iRot == usedRotations[i]) used = true; + } + if(used) continue; + ATH_MSG_ERROR("This 
following code should not be reached anymore!"); + std::vector<const Trk::RIO_OnTrack*> vec_riowithdev; + itRot->getListOfHits(vec_riowithdev); + + std::vector<Identifier> hitids; + // bool to decide if deviation should be skipped (if it's for more than 1 station) + for(auto riowithdev : vec_riowithdev){ + Identifier id_riowithdev = riowithdev->identify(); + if(m_idHelperSvc->isEndcap(id_riowithdev)) { + isEndcap = true; + } else { + isBarrel = true; + } + if(m_idHelperSvc->isSmallChamber(id_riowithdev)) { + isSmallChamber = true; + } else { + isLargeChamber = true; + } + hitids.push_back(id_riowithdev); + } + + double translationError = 0.; + double angleError = std::sqrt(itRot->getCovariance(0,0)); + + iok++; + alignerrmap.insert( std::pair < std::vector<Identifier>, std::pair < double, double > > ( hitids, std::pair < double, double > (translationError,angleError) ) ); + ATH_MSG_DEBUG(" AlignmentMap entry No Translation Error " << iok << " filled with nr hitids " << hitids.size() << " " << m_idHelperSvc->toString(hitids[0]) << " translationError " << translationError << " angleError " << angleError ); + if(isEndcap) ATH_MSG_DEBUG(" AlignmentMap Endcap Chamber"); + if(isBarrel) ATH_MSG_DEBUG(" AlignmentMap Barrel Chamber"); + if(isSmallChamber) ATH_MSG_DEBUG(" AlignmentMap Small Chamber "); + if(isLargeChamber) ATH_MSG_DEBUG(" AlignmentMap Large Chamber "); } + } - // clean-up of alignment deviations - for(auto it : align_deviations) delete it; - align_deviations.clear(); + // clean-up of alignment deviations + for(auto it : align_deviations) delete it; + align_deviations.clear(); - const DataVector<const Trk::TrackStateOnSurface>* states = track.trackStateOnSurfaces(); + const DataVector<const Trk::TrackStateOnSurface>* states = track->trackStateOnSurfaces(); if( !states ){ ATH_MSG_WARNING(" track without states, discarding track "); return 0; @@ -439,7 +431,7 @@ namespace Muon { if( m_idHelperSvc->isMdt(id) ) stationIds.insert( m_idHelperSvc->chamberIndex(id) ); -// make Alignment Effect using the surface of the TSOS + // make Alignment Effect using the surface of the TSOS if(idMiddle==id) { double deltaError = itAli.second.first; @@ -457,9 +449,9 @@ namespace Muon { if(!found) ATH_MSG_WARNING(" This should not happen Identifier from AlignmentErrorTool is not found"); } -// -// clone the TSOSs and add the tsosAEOTs -// + // + // clone the TSOSs and add the tsosAEOTs + // DataVector<const Trk::TrackStateOnSurface>* trackStateOnSurfaces = new DataVector<const Trk::TrackStateOnSurface>(); trackStateOnSurfaces->reserve(states->size()+indexAEOTs.size()); tsit = states->begin(); @@ -482,7 +474,7 @@ namespace Muon { if(indexAEOTs.size()==0 && stationIds.size() > 1) ATH_MSG_WARNING(" Track without AEOT "); - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfaces, track.fitQuality() ? track.fitQuality()->clone():0 ); + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfaces, track->fitQuality() ? 
track->fitQuality()->clone():0 ); ATH_MSG_DEBUG(m_printer->print(*newTrack)); ATH_MSG_DEBUG(m_printer->printMeasurements(*newTrack)); @@ -491,22 +483,22 @@ namespace Muon { } - Trk::Track* MuonRefitTool::makeSimpleAEOTs( const Trk::Track& track ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::makeSimpleAEOTs( Trk::Track* track ) const { -// use the new AlignmentEffectsOnTrack class + // use the new AlignmentEffectsOnTrack class - const DataVector<const Trk::TrackStateOnSurface>* states = track.trackStateOnSurfaces(); + const DataVector<const Trk::TrackStateOnSurface>* states = track->trackStateOnSurfaces(); if( !states ){ ATH_MSG_WARNING(" track without states, discarding track "); - return 0; + return std::unique_ptr<Trk::Track>(); } DataVector<const Trk::TrackStateOnSurface>::const_iterator tsit = states->begin(); DataVector<const Trk::TrackStateOnSurface>::const_iterator tsit_end = states->end(); -// -// first clone the TSOSs -// + // + // first clone the TSOSs + // DataVector<const Trk::TrackStateOnSurface>* trackStateOnSurfaces = new DataVector<const Trk::TrackStateOnSurface>(); trackStateOnSurfaces->reserve(states->size()+1); for( ;tsit!=tsit_end ; ++tsit ){ @@ -514,7 +506,7 @@ namespace Muon { } -// loop over TSOSs and look for EM or BM chambers + // loop over TSOSs and look for EM or BM chambers tsit = trackStateOnSurfaces->begin(); tsit_end = trackStateOnSurfaces->end(); std::vector<const Trk::TrackStateOnSurface*> indicesOfAffectedTSOS; @@ -524,7 +516,6 @@ namespace Muon { int index = -1; int indexFirst = -1; int indexFirstInner = -1; -// const Trk::Surface *surf = 0; for( ; tsit!=tsit_end ; ++tsit ){ index++; if( !*tsit ) continue; //sanity check @@ -548,30 +539,28 @@ namespace Muon { // Not a ROT, else it would have had an identifier. Keep the TSOS. if( !id.is_valid() || !m_idHelperSvc->isMuon(id) ) continue; MuonStationIndex::StIndex stIndex = m_idHelperSvc->stationIndex(id); -// skip phi measurements + // skip phi measurements if( (m_idHelperSvc->isTrigger(id)&&m_idHelperSvc->measuresPhi(id)) || (m_idHelperSvc->isCsc(id)&&m_idHelperSvc->measuresPhi(id) ) ) continue; if(m_addAll) { -// skip RPC and TGC eta (to avoid code crashes) + // skip RPC and TGC eta (to avoid code crashes) if( m_idHelperSvc->isTrigger(id)) continue; if(indexFirst==-1) indexFirst = index; indicesOfAffectedTSOS.push_back(*tsit); indicesOfAffectedIds.push_back(id); } else { -// skip trigger hits and CSC phi measurements and select precision hits + // skip trigger hits and CSC phi measurements and select precision hits if( m_idHelperSvc->isTrigger(id)) continue; if( stIndex == MuonStationIndex::BM || stIndex == MuonStationIndex::EM) { if(indexFirst==-1) indexFirst = index; indicesOfAffectedTSOS.push_back(*tsit); indicesOfAffectedIds.push_back(id); -// two alignment discontinuities + // two alignment discontinuities if(m_addTwo) { if(indexFirstInner==-1) indexFirstInner = index; indicesOfAffectedTSOSInner.push_back(*tsit); indicesOfAffectedIdsInner.push_back(id); } } -// if( stIndex == MuonStationIndex::BO || stIndex == MuonStationIndex::EO || -// stIndex == MuonStationIndex::BI || stIndex == MuonStationIndex::EI) { if( stIndex == MuonStationIndex::BI || stIndex == MuonStationIndex::EI) { if(indexFirstInner==-1) indexFirstInner = index; indicesOfAffectedTSOSInner.push_back(*tsit); @@ -581,7 +570,7 @@ namespace Muon { } if(indicesOfAffectedTSOS.size()==0&&indicesOfAffectedTSOSInner.size()==0) { - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfaces, track.fitQuality() ? 
track.fitQuality()->clone():0 ); + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfaces, track->fitQuality() ? track->fitQuality()->clone():0 ); return newTrack; } @@ -601,7 +590,6 @@ namespace Muon { if(indicesOfAffectedTSOSInner.size()>0&&(m_addInner||m_addTwo)) { int middle = indicesOfAffectedTSOSInner.size()/2; const Trk::TrackStateOnSurface* tsosInner = indicesOfAffectedTSOSInner[middle]; -// const Trk::Surface *surfInner = dynamic_cast <Trk::Surface*> ((tsosInner->measurementOnTrack()->associatedSurface()).clone()); Trk::AlignmentEffectsOnTrack* aEOTInner = new Trk::AlignmentEffectsOnTrack(m_alignmentDelta,m_alignmentDeltaError,m_alignmentAngle,m_alignmentAngleError,indicesOfAffectedIdsInner,&(tsosInner->measurementOnTrack()->associatedSurface())); tsosAEOTInner = new Trk::TrackStateOnSurface(0,tsosInner->trackParameters()->clone(),0,0,typePattern,aEOTInner); } @@ -629,19 +617,7 @@ namespace Muon { if(!m_addInner&&!m_addTwo&&tsosAEOTInner) delete tsosAEOTInner; if(!m_addMiddle&&!m_addAll&&tsosAEOT) delete tsosAEOT; -// trackStateOnSurfacesAEOT->push_back(tsosAEOT); -// if(m_addInner) trackStateOnSurfacesAEOT->push_back(tsosAEOTInner); - - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfacesAEOT, track.fitQuality() ? track.fitQuality()->clone():0 ); - -// dump it -// const DataVector<const Trk::TrackStateOnSurface>* trackStateOnSurfacesNew = newTrack->trackStateOnSurfaces(); -// tsit = trackStateOnSurfacesNew->begin(); -// tsit_end = trackStateOnSurfacesNew->end(); -// for( ; tsit!=tsit_end ; ++tsit ){ -// if((*tsit)->alignmentEffectsOnTrack()) std::cout << " Peter alignmentEffectsOnTrack found with track pars " << (*tsit)->trackParameters() << std::endl; -// } - + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfacesAEOT, track->fitQuality() ? track->fitQuality()->clone():0 ); if(aEOT) ATH_MSG_DEBUG(" AlignmentEffectsOnTrack on surface " << aEOT->associatedSurface() << " nr of tsos affected " << indicesOfAffectedTSOS.size()); ATH_MSG_DEBUG(m_printer->print(*newTrack)); @@ -650,13 +626,13 @@ namespace Muon { return newTrack; } - Trk::Track* MuonRefitTool::updateErrors( const Trk::Track& track, const IMuonRefitTool::Settings& settings ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::updateErrors( Trk::Track* track, const IMuonRefitTool::Settings& settings ) const { // loop over track and calculate residuals - const DataVector<const Trk::TrackStateOnSurface>* states = track.trackStateOnSurfaces(); + const DataVector<const Trk::TrackStateOnSurface>* states = track->trackStateOnSurfaces(); if( !states ){ ATH_MSG_WARNING(" track without states, discarding track "); - return 0; + return std::unique_ptr<Trk::Track>(); } // vector to store states, the boolean indicated whether the state was create in this routine (true) or belongs to the track (false) @@ -707,11 +683,11 @@ namespace Muon { } if( !startPars ){ - if( !track.trackParameters() || track.trackParameters()->empty() ){ + if( !track->trackParameters() || track->trackParameters()->empty() ){ ATH_MSG_WARNING("Track without parameters, cannot update errors"); - return 0; + return std::unique_ptr<Trk::Track>(); } - startPars = track.trackParameters()->front(); + startPars = track->trackParameters()->front(); ATH_MSG_VERBOSE("Did not find fit starting parameters, using first parameters " << m_printer->print(*startPars)); } @@ -1025,20 +1001,20 @@ namespace Muon { // add states. 
If nit->first is true we have a new state. If it is false the state is from the old track and has to be cloned trackStateOnSurfaces->push_back( nit->first ? nit->second : nit->second->clone() ); } - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfaces, track.fitQuality() ? track.fitQuality()->clone():0 ); + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfaces, track->fitQuality() ? track->fitQuality()->clone():0 ); return newTrack; } - Trk::Track* MuonRefitTool::updateMdtErrors( const Trk::Track& track, const IMuonRefitTool::Settings& settings ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::updateMdtErrors( Trk::Track* track, const IMuonRefitTool::Settings& settings ) const { -// uses the muonErrorStrategy + // uses the muonErrorStrategy // loop over track and calculate residuals - const DataVector<const Trk::TrackStateOnSurface>* states = track.trackStateOnSurfaces(); + const DataVector<const Trk::TrackStateOnSurface>* states = track->trackStateOnSurfaces(); if( !states ){ ATH_MSG_WARNING(" track without states, discarding track "); - return 0; + return std::unique_ptr<Trk::Track>(); } // vector to store states, the boolean indicated whether the state was create in this routine (true) or belongs to the track (false) @@ -1085,11 +1061,11 @@ namespace Muon { } if( !startPars ){ - if( !track.trackParameters() || track.trackParameters()->empty() ){ + if( !track->trackParameters() || track->trackParameters()->empty() ){ ATH_MSG_WARNING("Track without parameters, cannot update errors"); - return 0; + return std::unique_ptr<Trk::Track>(); } - startPars = track.trackParameters()->front(); + startPars = track->trackParameters()->front(); ATH_MSG_VERBOSE("Did not find fit starting parameters, using first parameters " << m_printer->print(*startPars)); } @@ -1175,7 +1151,7 @@ namespace Muon { } bool hasT0Fit = false; - if( mdt->errorStrategy().creationParameter(Muon::MuonDriftCircleErrorStrategy::T0Refit)) hasT0Fit = true; + if( mdt->errorStrategy().creationParameter(Muon::MuonDriftCircleErrorStrategy::T0Refit)) hasT0Fit = true; const Trk::RIO_OnTrack* rot = 0; Trk::TrackStateOnSurface::TrackStateOnSurfaceType type = (*tsit)->type(Trk::TrackStateOnSurface::Outlier) ? @@ -1183,7 +1159,7 @@ namespace Muon { stIndex = m_idHelperSvc->stationIndex(id); -// use the muonErrorStrategy + // use the muonErrorStrategy MuonDriftCircleErrorStrategy strat(m_muonErrorStrategy); if( hasT0Fit ) strat.setParameter(MuonDriftCircleErrorStrategy::T0Refit,true); if( settings.broad ) strat.setParameter(MuonDriftCircleErrorStrategy::BroadError,true); @@ -1240,7 +1216,6 @@ namespace Muon { } - }else{ if( settings.updateTriggerErrors ){ @@ -1274,18 +1249,18 @@ namespace Muon { // add states. If nit->first is true we have a new state. If it is false the state is from the old track and has to be cloned trackStateOnSurfaces->push_back( nit->first ? nit->second : nit->second->clone() ); } - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfaces, track.fitQuality() ? track.fitQuality()->clone():0 ); + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfaces, track->fitQuality() ? 
track->fitQuality()->clone():0 ); return newTrack; } - const Trk::Track* MuonRefitTool::removeOutliers( const Trk::Track& track, const IMuonRefitTool::Settings& settings ) const { + std::unique_ptr<Trk::Track> MuonRefitTool::removeOutliers( Trk::Track* track, const IMuonRefitTool::Settings& settings ) const { // loop over track and calculate residuals - const DataVector<const Trk::TrackStateOnSurface>* states = track.trackStateOnSurfaces(); + const DataVector<const Trk::TrackStateOnSurface>* states = track->trackStateOnSurfaces(); if( !states ){ ATH_MSG_WARNING(" track without states, discarding track "); - return 0; + return std::unique_ptr<Trk::Track>(); } Identifier currentMdtChId; @@ -1340,7 +1315,7 @@ namespace Muon { if( chamberPars ){ if( !removeMdtOutliers(*chamberPars,mdts,removedIdentifiers,settings) ){ if( mdts.size() > 4 ) ATH_MSG_WARNING("Problem removing outliers in chamber " << m_idHelperSvc->toStringChamber(currentMdtChId) << " hits " << mdts.size()); - if( settings.discardNotCleanedTracks ) return 0; + if( settings.discardNotCleanedTracks ) return std::unique_ptr<Trk::Track>(); } } // update to new chamber @@ -1357,13 +1332,13 @@ namespace Muon { if( chamberPars ){ if( !removeMdtOutliers(*chamberPars,mdts,removedIdentifiers,settings) ){ if( mdts.size() > 4 ) ATH_MSG_WARNING("Problem removing outliers in chamber " << m_idHelperSvc->toStringChamber(currentMdtChId) << " hits " << mdts.size()); - if( settings.discardNotCleanedTracks ) return 0; + if( settings.discardNotCleanedTracks ) return std::unique_ptr<Trk::Track>(); } } if( removedIdentifiers.empty() ){ ATH_MSG_DEBUG("No hits remove, returning original track"); - return &track; + return std::make_unique<Trk::Track>(*track); } // states were added, create a new track @@ -1394,8 +1369,7 @@ namespace Muon { trackStateOnSurfaces->push_back( (*tsit)->clone() ); } - Trk::Track* newTrack = new Trk::Track( track.info(), trackStateOnSurfaces, track.fitQuality() ? track.fitQuality()->clone():0 ); - + std::unique_ptr<Trk::Track> newTrack = std::make_unique<Trk::Track>( track->info(), trackStateOnSurfaces, track->fitQuality() ? 
track->fitQuality()->clone():0 ); return newTrack; } diff --git a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.h b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.h index bc6c2a819d1947b18d57787250731e3587ff64e5..e6a23a411f6ed5348f6d91a78dced4c73e73ed0e 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.h +++ b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackFinderTools/src/MuonRefitTool.h @@ -41,25 +41,25 @@ namespace Muon { virtual StatusCode finalize() override; /** refit a track */ - const Trk::Track* refit( const Trk::Track& track, const Settings* settings = 0 ) const override; + std::unique_ptr<Trk::Track> refit( Trk::Track* track, const Settings* settings = 0 ) const override; /** refit and back extrapolate a vector of track pairs */ - std::vector<const Trk::Track*> refit( const std::vector<const Trk::Track*>& tracks, const Settings* settings = 0 ) const override; + std::vector<std::unique_ptr<Trk::Track> > refit( std::vector<Trk::Track*>& tracks, const Settings* settings = 0 ) const override; protected: /** update errors on a muon track */ - Trk::Track* updateErrors( const Trk::Track& track, const Settings& settings ) const; + std::unique_ptr<Trk::Track> updateErrors( Trk::Track* track, const Settings& settings ) const; - Trk::Track* updateMdtErrors( const Trk::Track& track, const Settings& settings ) const; + std::unique_ptr<Trk::Track> updateMdtErrors( Trk::Track* track, const Settings& settings ) const; - Trk::Track* updateAlignmentErrors( const Trk::Track& track, const Settings& settings ) const; + std::unique_ptr<Trk::Track> updateAlignmentErrors( Trk::Track* track, const Settings& settings ) const; - Trk::Track* makeAEOTs( const Trk::Track& track ) const; + std::unique_ptr<Trk::Track> makeAEOTs( Trk::Track* track ) const; - Trk::Track* makeSimpleAEOTs( const Trk::Track& track ) const; + std::unique_ptr<Trk::Track> makeSimpleAEOTs( Trk::Track* track ) const; - const Trk::Track* removeOutliers( const Trk::Track& track,const Settings& settings ) const; + std::unique_ptr<Trk::Track> removeOutliers( Trk::Track* track,const Settings& settings ) const; bool removeMdtOutliers( const Trk::TrackParameters& pars, const std::vector<const MdtDriftCircleOnTrack*>& hits, std::set<Identifier>& removedIdentifiers, const Settings& settings ) const; diff --git a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackSteeringTools/src/MooTrackBuilder.cxx b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackSteeringTools/src/MooTrackBuilder.cxx index cf21aacec5ce9643cbb3ef1e910ab44e1857af61..9e11681eec6c7f775c6c5f6ebc78242ff32a94e8 100644 --- a/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackSteeringTools/src/MooTrackBuilder.cxx +++ b/MuonSpectrometer/MuonReconstruction/MuonTrackMakers/MuonTrackMakerTools/MuonTrackSteeringTools/src/MooTrackBuilder.cxx @@ -94,7 +94,9 @@ namespace Muon { // if not refit tool specified do a pure refit if( m_errorOptimisationTool.empty() ) return m_fitter->refit(track); - return m_errorOptimisationTool->optimiseErrors(track); + std::unique_ptr<Trk::Track> optTrack=m_errorOptimisationTool->optimiseErrors(&track); + //have to use release until the whole tool uses unique_ptr + return optTrack.release(); } MuPatTrack* MooTrackBuilder::refine( 
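The MooTrackBuilder hunk above shows the usual bridging idiom while a migration like this is still partial: the unique_ptr returned by the updated tool is stored locally and release() hands the raw pointer, together with ownership, back to a caller that still expects a plain Trk::Track*. A generic sketch of that idiom, with stand-in types and names rather than the real classes:

    #include <memory>

    struct Track { int id = 0; };  // stand-in for Trk::Track

    // New-style API: ownership is explicit in the signature.
    std::unique_ptr<Track> makeRefittedTrack() {
      return std::make_unique<Track>();
    }

    // Legacy entry point that still traffics in raw pointers. release() detaches
    // the object from the local unique_ptr so it is not deleted here; the caller
    // becomes responsible for deleting it, exactly as before the migration.
    Track* legacyRefit() {
      std::unique_ptr<Track> refitted = makeRefittedTrack();
      if (!refitted) return nullptr;  // refit failed
      return refitted.release();
    }

    int main() {
      Track* t = legacyRefit();
      delete t;  // the legacy caller still has to clean up explicitly
      return 0;
    }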
MuPatTrack& track ) const { diff --git a/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.cxx b/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.cxx index e1f56e38c2224213fb6446552ec142e8296a88db..719eaac1fa41c5c854d437cd0d05a9d756a77b38 100644 --- a/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.cxx +++ b/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.cxx @@ -3,19 +3,19 @@ */ #include "TElectronLikelihoodTool.h" -#include "TFile.h" // for TFile -#include "TH1.h" // for TH1F +#include "TFile.h" // for TFile +#include "TH1.h" // for TH1F #include "TROOT.h" -#include "TString.h" // for TString +#include "TString.h" // for TString #include "TSystem.h" -#include <algorithm> // for min +#include <algorithm> // for min #include <cmath> -#include <cstdio> // for sprintf -#include <fstream> // for char_traits +#include <cstdio> // for sprintf +#include <fstream> // for char_traits #include "ElectronPhotonSelectorTools/ElectronSelectorHelpers.h" -/** +/** Author : Kurt Brendlinger <kurb@sas.upenn.edu> Please see TElectronLikelihoodTool.h for usage. */ @@ -25,40 +25,40 @@ Please see TElectronLikelihoodTool.h for usage. //============================================================================= //---------------------------------------------------------------------------------------- -Root::TElectronLikelihoodTool::TElectronLikelihoodTool(const char* name) : - asg::AsgMessaging(std::string(name)), - m_doRemoveF3AtHighEt(false), - m_doRemoveTRTPIDAtHighEt(false), - m_doSmoothBinInterpolation(false), - m_useOneExtraHighETLHBin(false), - m_highETBinThreshold(125), - m_doPileupTransform(false), - m_doCentralityTransform(false), - m_discMaxForPileupTransform(2.0), - m_pileupMaxForPileupTransform(50), - m_variableNames(""), - m_pdfFileName(""), - m_name(name), - m_variableBitMask(0x0), - m_ipBinning(""), - m_pdfFile(nullptr), - m_cutPosition_kinematic(-9), - m_cutPosition_NSilicon(-9), - m_cutPosition_NPixel(-9), - m_cutPosition_NBlayer(-9), - m_cutPosition_ambiguity(-9), - m_cutPosition_LH(-9), - m_cutPositionTrackA0(-9), - m_cutPositionTrackMatchEta(-9), - m_cutPositionTrackMatchPhiRes(-9), - m_cutPositionWstotAtHighET(-9), - m_cutPositionEoverPAtHighET(-9) +Root::TElectronLikelihoodTool::TElectronLikelihoodTool(const char* name) + : asg::AsgMessaging(std::string(name)) + , m_doRemoveF3AtHighEt(false) + , m_doRemoveTRTPIDAtHighEt(false) + , m_doSmoothBinInterpolation(false) + , m_useOneExtraHighETLHBin(false) + , m_highETBinThreshold(125) + , m_doPileupTransform(false) + , m_doCentralityTransform(false) + , m_discMaxForPileupTransform(2.0) + , m_pileupMaxForPileupTransform(50) + , m_variableNames("") + , m_pdfFileName("") + , m_name(name) + , m_variableBitMask(0x0) + , m_ipBinning("") + , m_pdfFile(nullptr) + , m_cutPosition_kinematic(-9) + , m_cutPosition_NSilicon(-9) + , m_cutPosition_NPixel(-9) + , m_cutPosition_NBlayer(-9) + , m_cutPosition_ambiguity(-9) + , m_cutPosition_LH(-9) + , m_cutPositionTrackA0(-9) + , m_cutPositionTrackMatchEta(-9) + , m_cutPositionTrackMatchPhiRes(-9) + , m_cutPositionWstotAtHighET(-9) + , m_cutPositionEoverPAtHighET(-9) { - for(unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++){ - for(unsigned int s_or_b = 0; s_or_b < 2; s_or_b++){ - for (unsigned int ip = 0; ip < IP_BINS; ip++){ - for(unsigned int et = 0; et < s_fnEtBinsHist; et++){ - for(unsigned int eta = 0; eta < s_fnEtaBins; eta++){ + 
for (unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++) { + for (unsigned int s_or_b = 0; s_or_b < 2; s_or_b++) { + for (unsigned int ip = 0; ip < IP_BINS; ip++) { + for (unsigned int et = 0; et < s_fnEtBinsHist; et++) { + for (unsigned int eta = 0; eta < s_fnEtaBins; eta++) { fPDFbins[s_or_b][ip][et][eta][varIndex] = nullptr; } } @@ -67,20 +67,17 @@ Root::TElectronLikelihoodTool::TElectronLikelihoodTool(const char* name) : } } - - - //============================================================================= // Destructor //============================================================================= Root::TElectronLikelihoodTool::~TElectronLikelihoodTool() { - for(unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++){ - for(unsigned int s_or_b = 0; s_or_b < 2; s_or_b++){ - for (unsigned int ip = 0; ip < IP_BINS; ip++){ - for(unsigned int et = 0; et < s_fnEtBinsHist; et++){ - for(unsigned int eta = 0; eta < s_fnEtaBins; eta++){ - if (fPDFbins[s_or_b][ip][et][eta][varIndex]){ + for (unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++) { + for (unsigned int s_or_b = 0; s_or_b < 2; s_or_b++) { + for (unsigned int ip = 0; ip < IP_BINS; ip++) { + for (unsigned int et = 0; et < s_fnEtBinsHist; et++) { + for (unsigned int eta = 0; eta < s_fnEtaBins; eta++) { + if (fPDFbins[s_or_b][ip][et][eta][varIndex]) { delete fPDFbins[s_or_b][ip][et][eta][varIndex]; fPDFbins[s_or_b][ip][et][eta][varIndex] = nullptr; } @@ -91,139 +88,170 @@ Root::TElectronLikelihoodTool::~TElectronLikelihoodTool() } } - -StatusCode Root::TElectronLikelihoodTool::initialize() +StatusCode +Root::TElectronLikelihoodTool::initialize() { - ATH_MSG_DEBUG( "TElectronLikelihoodTool initialize."); + ATH_MSG_DEBUG("TElectronLikelihoodTool initialize."); // use an int as a StatusCode StatusCode sc(StatusCode::SUCCESS); // Check that all needed variables are setup - if ( m_pdfFileName.empty() ) - { - ATH_MSG_WARNING("You need to specify the input PDF file name before you call initialize() with setPDFFileName('your/file/name.root') "); + if (m_pdfFileName.empty()) { + ATH_MSG_WARNING("You need to specify the input PDF file name before you " + "call initialize() with " + "setPDFFileName('your/file/name.root') "); sc = StatusCode::FAILURE; } - unsigned int number_of_expected_bin_combinedLH ; - if(m_useOneExtraHighETLHBin) number_of_expected_bin_combinedLH = s_fnDiscEtBinsOneExtra*s_fnEtaBins ; - else number_of_expected_bin_combinedLH = s_fnDiscEtBins*s_fnEtaBins ; - unsigned int number_of_expected_bin_combinedOther = s_fnDiscEtBins*s_fnEtaBins ; + unsigned int number_of_expected_bin_combinedLH; + if (m_useOneExtraHighETLHBin) + number_of_expected_bin_combinedLH = s_fnDiscEtBinsOneExtra * s_fnEtaBins; + else + number_of_expected_bin_combinedLH = s_fnDiscEtBins * s_fnEtaBins; + unsigned int number_of_expected_bin_combinedOther = s_fnDiscEtBins * s_fnEtaBins; - - if( m_cutLikelihood.size() != number_of_expected_bin_combinedLH){ - ATH_MSG_ERROR("Configuration issue : cutLikelihood expected size " << number_of_expected_bin_combinedLH << - " input size " << m_cutLikelihood.size()); + if (m_cutLikelihood.size() != number_of_expected_bin_combinedLH) { + ATH_MSG_ERROR("Configuration issue : cutLikelihood expected size " + << number_of_expected_bin_combinedLH << " input size " << m_cutLikelihood.size()); sc = StatusCode::FAILURE; - } + } - if( !m_discHardCutForPileupTransform.empty() ) { - if( m_discHardCutForPileupTransform.size() != number_of_expected_bin_combinedLH){ - ATH_MSG_ERROR("Configuration issue : 
DiscHardCutForPileupTransform expected size " << number_of_expected_bin_combinedLH << - " input size " << m_discHardCutForPileupTransform.size()); + if (!m_discHardCutForPileupTransform.empty()) { + if (m_discHardCutForPileupTransform.size() != number_of_expected_bin_combinedLH) { + ATH_MSG_ERROR("Configuration issue : DiscHardCutForPileupTransform expected size " + << number_of_expected_bin_combinedLH << " input size " + << m_discHardCutForPileupTransform.size()); sc = StatusCode::FAILURE; - } + } } - if(!m_discHardCutSlopeForPileupTransform.empty() ) { - if(m_discHardCutSlopeForPileupTransform.size() != number_of_expected_bin_combinedLH){ - ATH_MSG_ERROR("Configuration issue : DiscHardCutSlopeForPileupTransform expected size " << number_of_expected_bin_combinedLH << - " input size " << m_discHardCutSlopeForPileupTransform.size()); + if (!m_discHardCutSlopeForPileupTransform.empty()) { + if (m_discHardCutSlopeForPileupTransform.size() != number_of_expected_bin_combinedLH) { + ATH_MSG_ERROR("Configuration issue : " + "DiscHardCutSlopeForPileupTransform expected size " + << number_of_expected_bin_combinedLH << " input size " + << m_discHardCutSlopeForPileupTransform.size()); sc = StatusCode::FAILURE; - } + } } - if(!m_discLooseForPileupTransform.empty()) { - if( m_discLooseForPileupTransform.size() != number_of_expected_bin_combinedLH){ - ATH_MSG_ERROR("Configuration issue : DiscLooseForPileupTransform expected size " << number_of_expected_bin_combinedLH << - " input size " << m_discLooseForPileupTransform.size()); + if (!m_discLooseForPileupTransform.empty()) { + if (m_discLooseForPileupTransform.size() != number_of_expected_bin_combinedLH) { + ATH_MSG_ERROR("Configuration issue : DiscLooseForPileupTransform expected size " + << number_of_expected_bin_combinedLH << " input size " + << m_discLooseForPileupTransform.size()); sc = StatusCode::FAILURE; - } + } } // d0 cut - if (!m_cutA0.empty()){ - if (m_cutA0.size() != number_of_expected_bin_combinedOther){ - ATH_MSG_ERROR("Configuration issue : CutA0 expected size " << number_of_expected_bin_combinedOther << - " input size " << m_cutA0.size()); + if (!m_cutA0.empty()) { + if (m_cutA0.size() != number_of_expected_bin_combinedOther) { + ATH_MSG_ERROR("Configuration issue : CutA0 expected size " + << number_of_expected_bin_combinedOther << " input size " << m_cutA0.size()); sc = StatusCode::FAILURE; } } // deltaEta cut - if (!m_cutDeltaEta.empty()){ - if (m_cutDeltaEta.size() != number_of_expected_bin_combinedOther){ - ATH_MSG_ERROR("Configuration issue : CutDeltaEta expected size " << number_of_expected_bin_combinedOther << - " input size " << m_cutDeltaEta.size()); + if (!m_cutDeltaEta.empty()) { + if (m_cutDeltaEta.size() != number_of_expected_bin_combinedOther) { + ATH_MSG_ERROR("Configuration issue : CutDeltaEta expected size " + << number_of_expected_bin_combinedOther << " input size " + << m_cutDeltaEta.size()); sc = StatusCode::FAILURE; } } // deltaPhiRes cut - if (!m_cutDeltaPhiRes.empty()){ - if (m_cutDeltaPhiRes.size() != number_of_expected_bin_combinedOther ){ - ATH_MSG_ERROR("Configuration issue : CutDeltaPhiRes expected size " << number_of_expected_bin_combinedOther << - " input size " << m_cutDeltaPhiRes.size()); + if (!m_cutDeltaPhiRes.empty()) { + if (m_cutDeltaPhiRes.size() != number_of_expected_bin_combinedOther) { + ATH_MSG_ERROR("Configuration issue : CutDeltaPhiRes expected size " + << number_of_expected_bin_combinedOther << " input size " + << m_cutDeltaPhiRes.size()); sc = StatusCode::FAILURE; } } - if ( sc == 
StatusCode::FAILURE ){ + if (sc == StatusCode::FAILURE) { ATH_MSG_ERROR("Could NOT initialize! Please fix the errors mentioned above..."); return sc; } // -------------------------------------------------------------------------- // Register the cuts and check that the registration worked: - // NOTE: THE ORDER IS IMPORTANT!!! Cut0 corresponds to bit 0, Cut1 to bit 1,... - // if ( m_cutPosition_nSCTMin < 0 ) sc == StatusCode::FAILURE; // Exceeded the number of allowed cuts (32) - - // Cut position for the kineatic pre-selection - m_cutPosition_kinematic = m_acceptInfo.addCut( "kinematic", "pass kinematic" ); - if ( m_cutPosition_kinematic < 0 ) {sc = StatusCode::FAILURE;} + // NOTE: THE ORDER IS IMPORTANT!!! Cut0 corresponds to bit 0, Cut1 to bit + // 1,... Cut position for the kineatic pre-selection + m_cutPosition_kinematic = m_acceptInfo.addCut("kinematic", "pass kinematic"); + if (m_cutPosition_kinematic < 0) { + sc = StatusCode::FAILURE; + } // NSilicon - m_cutPosition_NSilicon = m_acceptInfo.addCut( "NSilicon", "pass NSilicon" ); - if ( m_cutPosition_NSilicon < 0 ) {sc = StatusCode::FAILURE;} + m_cutPosition_NSilicon = m_acceptInfo.addCut("NSilicon", "pass NSilicon"); + if (m_cutPosition_NSilicon < 0) { + sc = StatusCode::FAILURE; + } // NPixel - m_cutPosition_NPixel = m_acceptInfo.addCut( "NPixel", "pass NPixel" ); - if ( m_cutPosition_NPixel < 0 ) {sc = StatusCode::FAILURE;} + m_cutPosition_NPixel = m_acceptInfo.addCut("NPixel", "pass NPixel"); + if (m_cutPosition_NPixel < 0) { + sc = StatusCode::FAILURE; + } // NBlayer - m_cutPosition_NBlayer = m_acceptInfo.addCut( "NBlayer", "pass NBlayer" ); - if ( m_cutPosition_NBlayer < 0 ) {sc = StatusCode::FAILURE;} + m_cutPosition_NBlayer = m_acceptInfo.addCut("NBlayer", "pass NBlayer"); + if (m_cutPosition_NBlayer < 0) { + sc = StatusCode::FAILURE; + } // Ambiguity - m_cutPosition_ambiguity = m_acceptInfo.addCut( "ambiguity", "pass ambiguity" ); - if ( m_cutPosition_ambiguity < 0 ) {sc = StatusCode::FAILURE;} - + m_cutPosition_ambiguity = m_acceptInfo.addCut("ambiguity", "pass ambiguity"); + if (m_cutPosition_ambiguity < 0) { + sc = StatusCode::FAILURE; + } // Cut position for the likelihood selection - DO NOT CHANGE ORDER! 
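The cut registration above relies on ordering: each addCut() call hands back the next bit position, and the final decision word sets bit i when cut i passes, which is why the NOTE insists the order must not change and why every negative return value flips the StatusCode to FAILURE. A self-contained sketch of that bit-position pattern; CutRegistry is a stand-in written for illustration, not the real asg::AcceptInfo API:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in cut registry: addCut() hands out consecutive bit positions, so
    // the registration order fixes the meaning of each bit in the decision word.
    class CutRegistry {
    public:
      int addCut(const std::string& name, const std::string& description) {
        if (m_cuts.size() >= 32) return -1;          // out of bits
        m_cuts.push_back(name + " : " + description);
        return static_cast<int>(m_cuts.size()) - 1;  // bit position of this cut
      }
    private:
      std::vector<std::string> m_cuts;
    };

    int main() {
      CutRegistry reg;
      const int posKinematic = reg.addCut("kinematic", "pass kinematic");
      const int posLH        = reg.addCut("passLH", "pass Likelihood");
      if (posKinematic < 0 || posLH < 0) {
        std::cerr << "cut registration failed\n";
        return 1;
      }
      uint32_t decision = 0;
      decision |= (1u << posKinematic);  // the kinematic cut passed
      // the likelihood bit stays unset: that cut failed
      std::cout << "decision word = 0x" << std::hex << decision << '\n';  // 0x1
      return 0;
    }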
- m_cutPosition_LH = m_acceptInfo.addCut( "passLH", "pass Likelihood" ); - if ( m_cutPosition_LH < 0 ) {sc = StatusCode::FAILURE;} + m_cutPosition_LH = m_acceptInfo.addCut("passLH", "pass Likelihood"); + if (m_cutPosition_LH < 0) { + sc = StatusCode::FAILURE; + } // D0 - m_cutPositionTrackA0 = m_acceptInfo.addCut( "TrackA0", "A0 (aka d0) wrt beam spot < Cut" ); - if ( m_cutPositionTrackA0 < 0 ) {sc = StatusCode::FAILURE;} + m_cutPositionTrackA0 = m_acceptInfo.addCut("TrackA0", "A0 (aka d0) wrt beam spot < Cut"); + if (m_cutPositionTrackA0 < 0) { + sc = StatusCode::FAILURE; + } // deltaeta - m_cutPositionTrackMatchEta = m_acceptInfo.addCut("TrackMatchEta", "Track match deta in 1st sampling < Cut"); - if ( m_cutPositionTrackMatchEta < 0 ) {sc = StatusCode::FAILURE;} + m_cutPositionTrackMatchEta = + m_acceptInfo.addCut("TrackMatchEta", "Track match deta in 1st sampling < Cut"); + if (m_cutPositionTrackMatchEta < 0) { + sc = StatusCode::FAILURE; + } // deltaphi - m_cutPositionTrackMatchPhiRes = m_acceptInfo.addCut( "TrackMatchPhiRes", "Track match dphi in 2nd sampling, rescaled < Cut" ); - if ( m_cutPositionTrackMatchPhiRes < 0 ) {sc = StatusCode::FAILURE;} + m_cutPositionTrackMatchPhiRes = + m_acceptInfo.addCut("TrackMatchPhiRes", "Track match dphi in 2nd sampling, rescaled < Cut"); + if (m_cutPositionTrackMatchPhiRes < 0) { + sc = StatusCode::FAILURE; + } // Wstot - m_cutPositionWstotAtHighET = m_acceptInfo.addCut( "WstotAtHighET", "Above HighETBinThreshold, Wstot < Cut" ); - if ( m_cutPositionWstotAtHighET < 0 ) {sc = StatusCode::FAILURE;} + m_cutPositionWstotAtHighET = + m_acceptInfo.addCut("WstotAtHighET", "Above HighETBinThreshold, Wstot < Cut"); + if (m_cutPositionWstotAtHighET < 0) { + sc = StatusCode::FAILURE; + } // EoverP - m_cutPositionEoverPAtHighET = m_acceptInfo.addCut( "EoverPAtHighET", "Above HighETBinThreshold, EoverP < Cut" ); - if ( m_cutPositionEoverPAtHighET < 0 ) {sc = StatusCode::FAILURE;} + m_cutPositionEoverPAtHighET = + m_acceptInfo.addCut("EoverPAtHighET", "Above HighETBinThreshold, EoverP < Cut"); + if (m_cutPositionEoverPAtHighET < 0) { + sc = StatusCode::FAILURE; + } // Check that we got everything OK - if ( sc == StatusCode::FAILURE ){ - ATH_MSG_ERROR("! Something went wrong with the setup of the decision objects..."); + if (sc == StatusCode::FAILURE) { + ATH_MSG_ERROR("! Something went wrong with the setup of the decision objects..."); return sc; } @@ -236,28 +264,28 @@ StatusCode Root::TElectronLikelihoodTool::initialize() TString tmpString(m_pdfFileName); gSystem->ExpandPathName(tmpString); std::string fname(tmpString.Data()); - m_pdfFile = TFile::Open( fname.c_str(), "READ" ); + m_pdfFile = TFile::Open(fname.c_str(), "READ"); // Check that we could load the ROOT file - if ( !m_pdfFile ) - { + if (!m_pdfFile) { ATH_MSG_ERROR(" No ROOT file found here: " << m_pdfFileName); return StatusCode::FAILURE; } - //Load the histograms - for(unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++){ + // Load the histograms + for (unsigned int varIndex = 0; varIndex < s_fnVariables; varIndex++) { const std::string& vstr = fVariables[varIndex]; - // Skip the loading of PDFs for variables we don't care about for this operating point. - // If the string is empty (which is true in the default 2012 case), load all of them. - if(m_variableNames.find(vstr) == std::string::npos && !m_variableNames.empty()){ + // Skip the loading of PDFs for variables we don't care about for this + // operating point. 
If the string is empty (which is true in the default + // 2012 case), load all of them. + if (m_variableNames.find(vstr) == std::string::npos && !m_variableNames.empty()) { continue; } - loadVarHistograms(vstr,varIndex); + loadVarHistograms(vstr, varIndex); } - //TFile close does not free the memory + // TFile close does not free the memory m_pdfFile->Close(); - //We need the destructor to be called + // We need the destructor to be called delete m_pdfFile; //----------End File/Histo operation------------------------------------ @@ -265,138 +293,172 @@ StatusCode Root::TElectronLikelihoodTool::initialize() << "\n - pdfFileName : " << m_pdfFileName << "\n - Variable bitmask : " << m_variableBitMask); - ATH_MSG_DEBUG( "\n - VariableNames : " << m_variableNames - << "\n - (bool)CutBL (yes/no) : " << (!m_cutBL.empty() ? "yes" : "no") - << "\n - (bool)CutPi (yes/no) : " << (!m_cutPi.empty() ? "yes" : "no") - << "\n - (bool)CutSi (yes/no) : " << (!m_cutSi.empty() ? "yes" : "no") - << "\n - (bool)CutAmbiguity (yes/no) : " << (!m_cutAmbiguity.empty() ? "yes" : "no") - << "\n - (bool)doRemoveF3AtHighEt (yes/no) : " << (m_doRemoveF3AtHighEt ? "yes" : "no") - << "\n - (bool)doRemoveTRTPIDAtHighEt (yes/no) : " << (m_doRemoveTRTPIDAtHighEt ? "yes" : "no") - << "\n - (bool)doSmoothBinInterpolation (yes/no) : " << (m_doSmoothBinInterpolation ? "yes" : "no") - << "\n - (bool)useOneExtraHighETLHBin(yes/no) : " << (m_useOneExtraHighETLHBin ? "yes" : "no") - << "\n - (double)HighETBinThreshold : " << m_highETBinThreshold - << "\n - (bool)doPileupTransform (yes/no) : " << (m_doPileupTransform ? "yes" : "no") - << "\n - (bool)doCentralityTransform (yes/no) : " << (m_doCentralityTransform ? "yes" : "no") - << "\n - (bool)CutLikelihood (yes/no) : " << (!m_cutLikelihood.empty() ? "yes" : "no") - << "\n - (bool)CutLikelihoodPileupCorrection (yes/no) : " << (!m_cutLikelihoodPileupCorrection.empty() ? "yes" : "no") - << "\n - (bool)CutA0 (yes/no) : " << (!m_cutA0.empty() ? "yes" : "no") - << "\n - (bool)CutDeltaEta (yes/no) : " << (!m_cutDeltaEta.empty() ? "yes" : "no") - << "\n - (bool)CutDeltaPhiRes (yes/no) : " << (!m_cutDeltaPhiRes.empty() ? "yes" : "no") - << "\n - (bool)CutWstotAtHighET (yes/no) : " << (!m_cutWstotAtHighET.empty() ? "yes" : "no") - << "\n - (bool)CutEoverPAtHighET (yes/no) : " << (!m_cutEoverPAtHighET.empty() ? "yes" : "no") - ); + ATH_MSG_DEBUG( + "\n - VariableNames : " + << m_variableNames + << "\n - (bool)CutBL (yes/no) : " << (!m_cutBL.empty() ? "yes" : "no") + << "\n - (bool)CutPi (yes/no) : " << (!m_cutPi.empty() ? "yes" : "no") + << "\n - (bool)CutSi (yes/no) : " << (!m_cutSi.empty() ? "yes" : "no") + << "\n - (bool)CutAmbiguity (yes/no) : " + << (!m_cutAmbiguity.empty() ? "yes" : "no") + << "\n - (bool)doRemoveF3AtHighEt (yes/no) : " + << (m_doRemoveF3AtHighEt ? "yes" : "no") + << "\n - (bool)doRemoveTRTPIDAtHighEt (yes/no) : " + << (m_doRemoveTRTPIDAtHighEt ? "yes" : "no") + << "\n - (bool)doSmoothBinInterpolation (yes/no) : " + << (m_doSmoothBinInterpolation ? "yes" : "no") + << "\n - (bool)useOneExtraHighETLHBin(yes/no) : " + << (m_useOneExtraHighETLHBin ? "yes" : "no") + << "\n - (double)HighETBinThreshold : " << m_highETBinThreshold + << "\n - (bool)doPileupTransform (yes/no) : " + << (m_doPileupTransform ? "yes" : "no") + << "\n - (bool)doCentralityTransform (yes/no) : " + << (m_doCentralityTransform ? "yes" : "no") + << "\n - (bool)CutLikelihood (yes/no) : " + << (!m_cutLikelihood.empty() ? 
"yes" : "no") + << "\n - (bool)CutLikelihoodPileupCorrection (yes/no) : " + << (!m_cutLikelihoodPileupCorrection.empty() ? "yes" : "no") + << "\n - (bool)CutA0 (yes/no) : " << (!m_cutA0.empty() ? "yes" : "no") + << "\n - (bool)CutDeltaEta (yes/no) : " + << (!m_cutDeltaEta.empty() ? "yes" : "no") + << "\n - (bool)CutDeltaPhiRes (yes/no) : " + << (!m_cutDeltaPhiRes.empty() ? "yes" : "no") + << "\n - (bool)CutWstotAtHighET (yes/no) : " + << (!m_cutWstotAtHighET.empty() ? "yes" : "no") + << "\n - (bool)CutEoverPAtHighET (yes/no) : " + << (!m_cutEoverPAtHighET.empty() ? "yes" : "no")); return sc; } -int Root::TElectronLikelihoodTool::loadVarHistograms(const std::string& vstr,unsigned int varIndex){ - for(unsigned int s_or_b = 0; s_or_b < 2; s_or_b++){ - for (unsigned int ip = 0; ip < IP_BINS; ip++){ - for(unsigned int et = 0; et < s_fnEtBinsHist; et++){ - for(unsigned int eta = 0; eta < s_fnEtaBins; eta++){ - - std::string sig_bkg = (s_or_b==0) ? "sig" : "bkg" ; - // Because eta bins in the root file don't match up exactly with cut menu - // definitions, the second eta bin is an exact copy of the first, +int +Root::TElectronLikelihoodTool::loadVarHistograms(const std::string& vstr, unsigned int varIndex) +{ + for (unsigned int s_or_b = 0; s_or_b < 2; s_or_b++) { + for (unsigned int ip = 0; ip < IP_BINS; ip++) { + for (unsigned int et = 0; et < s_fnEtBinsHist; et++) { + for (unsigned int eta = 0; eta < s_fnEtaBins; eta++) { + + std::string sig_bkg = (s_or_b == 0) ? "sig" : "bkg"; + // Because eta bins in the root file don't match up exactly with cut + // menu definitions, the second eta bin is an exact copy of the first, // and all subsequent eta bins are pushed back by one. - unsigned int eta_tmp = (eta > 0) ? eta-1 : eta ; - // The 7-10 GeV, crack bin uses the 10-15 Gev pdfs. WE DO NOT DO THIS ANYMORE! - //unsigned int et_tmp = (eta == 5 && et == 1) ? 1 : et; + unsigned int eta_tmp = (eta > 0) ? eta - 1 : eta; + // The 7-10 GeV, crack bin uses the 10-15 Gev pdfs. WE DO NOT DO THIS + // ANYMORE! unsigned int et_tmp = (eta == 5 && et == 1) ? 
1 : et; unsigned int et_tmp = et; char binname[200]; - getBinName( binname, et_tmp, eta_tmp, ip, m_ipBinning ); + getBinName(binname, et_tmp, eta_tmp, ip, m_ipBinning); - if (((std::string(binname).find("2.37") != std::string::npos)) && (vstr.find("el_f3") != std::string::npos)) + if (((std::string(binname).find("2.37") != std::string::npos)) && + (vstr.find("el_f3") != std::string::npos)) continue; - if (((std::string(binname).find("2.01") != std::string::npos) || (std::string(binname).find("2.37") != std::string::npos)) - && (vstr.find("TRT") != std::string::npos)) + if (((std::string(binname).find("2.01") != std::string::npos) || + (std::string(binname).find("2.37") != std::string::npos)) && + (vstr.find("TRT") != std::string::npos)) continue; char pdfdir[500]; - snprintf(pdfdir,500,"%s/%s",vstr.c_str(),sig_bkg.c_str()); + snprintf(pdfdir, 500, "%s/%s", vstr.c_str(), sig_bkg.c_str()); char pdf[500]; - snprintf(pdf,500,"%s_%s_smoothed_hist_from_KDE_%s",vstr.c_str(),sig_bkg.c_str(),binname); + snprintf( + pdf, 500, "%s_%s_smoothed_hist_from_KDE_%s", vstr.c_str(), sig_bkg.c_str(), binname); char pdf_newname[500]; - snprintf(pdf_newname,500,"%s_%s_%s_LHtool_copy_%s", m_name.c_str(),vstr.c_str(),sig_bkg.c_str(),binname); - - if (!m_pdfFile->GetListOfKeys()->Contains(vstr.c_str())){ - ATH_MSG_INFO("Warning: skipping variable " << vstr << " because the folder does not exist."); + snprintf(pdf_newname, + 500, + "%s_%s_%s_LHtool_copy_%s", + m_name.c_str(), + vstr.c_str(), + sig_bkg.c_str(), + binname); + + if (!m_pdfFile->GetListOfKeys()->Contains(vstr.c_str())) { + ATH_MSG_INFO("Warning: skipping variable " << vstr + << " because the folder does not exist."); return 1; } - if (!((TDirectory*)m_pdfFile->Get(vstr.c_str()))->GetListOfKeys()->Contains(sig_bkg.c_str())){ - ATH_MSG_INFO("Warning: skipping variable " << vstr << " because the folder does not exist."); + if (!((TDirectory*)m_pdfFile->Get(vstr.c_str())) + ->GetListOfKeys() + ->Contains(sig_bkg.c_str())) { + ATH_MSG_INFO("Warning: skipping variable " << vstr + << " because the folder does not exist."); return 1; } - // We only need to load PDFs + // We only need to load PDFs // up to a certain ET value (40 GeV) - if(et > s_fnEtBinsHist-1){ + if (et > s_fnEtBinsHist - 1) { continue; } - // If the 0th et bin (4-7 GeV) histogram does not exist in the root file, - // then just use the 7-10 GeV bin histogram. - // This should preserve backward compatibility + // If the 0th et bin (4-7 GeV) histogram does not exist in the root + // file, then just use the 7-10 GeV bin histogram. This should + // preserve backward compatibility if (et == 0 && !((TDirectory*)m_pdfFile->Get(pdfdir))->GetListOfKeys()->Contains(pdf)) { - //std::cout << "Info: using 7 GeV bin in place of 4 GeV bin." 
<< std::endl; - getBinName( binname, et_tmp+1, eta_tmp, ip, m_ipBinning ); - snprintf(pdf,500,"%s_%s_smoothed_hist_from_KDE_%s",vstr.c_str(),sig_bkg.c_str(),binname); - snprintf(pdf_newname,500,"%s_%s_%s_LHtool_copy4GeV_%s", m_name.c_str(),vstr.c_str(),sig_bkg.c_str(),binname); + getBinName(binname, et_tmp + 1, eta_tmp, ip, m_ipBinning); + snprintf( + pdf, 500, "%s_%s_smoothed_hist_from_KDE_%s", vstr.c_str(), sig_bkg.c_str(), binname); + snprintf(pdf_newname, + 500, + "%s_%s_%s_LHtool_copy4GeV_%s", + m_name.c_str(), + vstr.c_str(), + sig_bkg.c_str(), + binname); } if (((TDirectory*)m_pdfFile->Get(pdfdir))->GetListOfKeys()->Contains(pdf)) { TH1F* hist = (TH1F*)(((TDirectory*)m_pdfFile->Get(pdfdir))->Get(pdf)); fPDFbins[s_or_b][ip][et][eta][varIndex] = new EGSelectors::SafeTH1(hist); delete hist; - } - else { + } else { ATH_MSG_INFO("Warning: Object " << pdf << " does not exist."); ATH_MSG_INFO("Skipping all other histograms with this variable."); return 1; } } } - } + } } return 1; } asg::AcceptData -Root::TElectronLikelihoodTool::accept( double likelihood, - double eta, - double eT, - int nSiHitsPlusDeadSensors, - int nPixHitsPlusDeadSensors, - bool passBLayerRequirement, - uint8_t ambiguityBit, - double d0, - double deltaEta, - double deltaphires, - double wstot, - double EoverP, - double ip - ) const +Root::TElectronLikelihoodTool::accept(double likelihood, + double eta, + double eT, + int nSiHitsPlusDeadSensors, + int nPixHitsPlusDeadSensors, + bool passBLayerRequirement, + uint8_t ambiguityBit, + double d0, + double deltaEta, + double deltaphires, + double wstot, + double EoverP, + double ip) const { LikeEnum::LHAcceptVars_t vars; - vars.likelihood = likelihood; - vars.eta = eta; - vars.eT = eT; - vars.nSiHitsPlusDeadSensors = nSiHitsPlusDeadSensors; + vars.likelihood = likelihood; + vars.eta = eta; + vars.eT = eT; + vars.nSiHitsPlusDeadSensors = nSiHitsPlusDeadSensors; vars.nPixHitsPlusDeadSensors = nPixHitsPlusDeadSensors; - vars.passBLayerRequirement = passBLayerRequirement; - vars.ambiguityBit = ambiguityBit; - vars.d0 = d0; - vars.deltaEta = deltaEta; - vars.deltaphires = deltaphires; - vars.wstot = wstot; - vars.EoverP = EoverP; - vars.ip = ip; + vars.passBLayerRequirement = passBLayerRequirement; + vars.ambiguityBit = ambiguityBit; + vars.d0 = d0; + vars.deltaEta = deltaEta; + vars.deltaphires = deltaphires; + vars.wstot = wstot; + vars.EoverP = EoverP; + vars.ip = ip; return accept(vars); } -// This method calculates if the current electron passes the requested likelihood cut +// This method calculates if the current electron passes the requested +// likelihood cut asg::AcceptData -Root::TElectronLikelihoodTool::accept( LikeEnum::LHAcceptVars_t& vars_struct ) const +Root::TElectronLikelihoodTool::accept(LikeEnum::LHAcceptVars_t& vars_struct) const { // Setup return accept with AcceptInfo asg::AcceptData acceptData(&m_acceptInfo); @@ -414,140 +476,151 @@ Root::TElectronLikelihoodTool::accept( LikeEnum::LHAcceptVars_t& vars_struct ) c bool passWstotAtHighET(true); bool passEoverPAtHighET(true); - if (fabs(vars_struct.eta) > 2.47) { - ATH_MSG_DEBUG("This electron is fabs(eta)>2.47 Returning False."); + if (std::abs(vars_struct.eta) > 2.47) { + ATH_MSG_DEBUG("This electron is std::abs(eta)>2.47 Returning False."); passKine = false; } - unsigned int etbinLH = getLikelihoodEtDiscBin(vars_struct.eT,true); - unsigned int etbinOther = getLikelihoodEtDiscBin(vars_struct.eT,false); + unsigned int etbinLH = getLikelihoodEtDiscBin(vars_struct.eT, true); + unsigned int etbinOther = 
getLikelihoodEtDiscBin(vars_struct.eT, false); unsigned int etabin = getLikelihoodEtaBin(vars_struct.eta); - //unsigned int ipbin = 0; // sanity - if (etbinLH >= s_fnDiscEtBinsOneExtra) { - ATH_MSG_WARNING( "Cannot evaluate likelihood for Et " << vars_struct.eT<< ". Returning false.."); + if (etbinLH >= s_fnDiscEtBinsOneExtra) { + ATH_MSG_WARNING("Cannot evaluate likelihood for Et " << vars_struct.eT + << ". Returning false.."); passKine = false; } // sanity - if (etbinOther >= s_fnDiscEtBins) { - ATH_MSG_WARNING( "Cannot evaluate likelihood for Et " << vars_struct.eT<< ". Returning false.."); + if (etbinOther >= s_fnDiscEtBins) { + ATH_MSG_WARNING("Cannot evaluate likelihood for Et " << vars_struct.eT + << ". Returning false.."); passKine = false; } - // Return if the kinematic requirements are not fulfilled - acceptData.setCutResult( m_cutPosition_kinematic, passKine ); - if ( !passKine ){ return acceptData; } + acceptData.setCutResult(m_cutPosition_kinematic, passKine); + if (!passKine) { + return acceptData; + } // ambiguity bit if (!m_cutAmbiguity.empty()) { - if ( !ElectronSelectorHelpers::passAmbiguity((xAOD::AmbiguityTool::AmbiguityType)vars_struct.ambiguityBit, - m_cutAmbiguity[etabin]) - ) { - ATH_MSG_DEBUG("Likelihood macro: ambiguity Bit Failed." ); + if (!ElectronSelectorHelpers::passAmbiguity( + (xAOD::AmbiguityTool::AmbiguityType)vars_struct.ambiguityBit, m_cutAmbiguity[etabin])) { + ATH_MSG_DEBUG("Likelihood macro: ambiguity Bit Failed."); passAmbiguity = false; } } // blayer cut - if (!m_cutBL.empty() ) { - if(m_cutBL[etabin] == 1 && !vars_struct.passBLayerRequirement) { + if (!m_cutBL.empty()) { + if (m_cutBL[etabin] == 1 && !vars_struct.passBLayerRequirement) { ATH_MSG_DEBUG("Likelihood macro: Blayer cut failed."); passNBlayer = false; } } // pixel cut - if (!m_cutPi.empty()){ - if (vars_struct.nPixHitsPlusDeadSensors < m_cutPi[etabin]){ + if (!m_cutPi.empty()) { + if (vars_struct.nPixHitsPlusDeadSensors < m_cutPi[etabin]) { ATH_MSG_DEBUG("Likelihood macro: Pixels Failed."); passNPixel = false; } } // silicon cut - if (!m_cutSi.empty()){ - if (vars_struct.nSiHitsPlusDeadSensors < m_cutSi[etabin]){ - ATH_MSG_DEBUG( "Likelihood macro: Silicon Failed."); + if (!m_cutSi.empty()) { + if (vars_struct.nSiHitsPlusDeadSensors < m_cutSi[etabin]) { + ATH_MSG_DEBUG("Likelihood macro: Silicon Failed."); passNSilicon = false; } } double cutDiscriminant; - unsigned int ibin_combinedLH = etbinLH*s_fnEtaBins+etabin; // Must change if number of eta bins changes!. Also starts from 7-10 GeV bin. - unsigned int ibin_combinedOther = etbinOther*s_fnEtaBins+etabin; // Must change if number of eta bins changes!. Also starts from 7-10 GeV bin. - - if(!m_cutLikelihood.empty()){ + unsigned int ibin_combinedLH = + etbinLH * s_fnEtaBins + etabin; // Must change if number of eta bins + // changes!. Also starts from 7-10 GeV bin. + unsigned int ibin_combinedOther = + etbinOther * s_fnEtaBins + etabin; // Must change if number of eta bins changes!. Also + // starts from 7-10 GeV bin. 
+ + if (!m_cutLikelihood.empty()) { // To protect against a binning mismatch, which should never happen - if(ibin_combinedLH>=m_cutLikelihood.size()){ - ATH_MSG_ERROR("The desired eta/pt bin " << ibin_combinedLH - << " is outside of the range specified by the input" << m_cutLikelihood.size() << "This should never happen!"); - return acceptData; + if (ibin_combinedLH >= m_cutLikelihood.size()) { + ATH_MSG_ERROR("The desired eta/pt bin " + << ibin_combinedLH << " is outside of the range specified by the input" + << m_cutLikelihood.size() << "This should never happen!"); + return acceptData; } - if (m_doSmoothBinInterpolation){ - cutDiscriminant = InterpolateCuts(m_cutLikelihood,m_cutLikelihood4GeV,vars_struct.eT,vars_struct.eta); - if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection.empty() && !m_cutLikelihoodPileupCorrection4GeV.empty()) - cutDiscriminant += vars_struct.ip*InterpolateCuts(m_cutLikelihoodPileupCorrection,m_cutLikelihoodPileupCorrection4GeV,vars_struct.eT,vars_struct.eta); + if (m_doSmoothBinInterpolation) { + cutDiscriminant = + InterpolateCuts(m_cutLikelihood, m_cutLikelihood4GeV, vars_struct.eT, vars_struct.eta); + if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection.empty() && + !m_cutLikelihoodPileupCorrection4GeV.empty()) + cutDiscriminant += vars_struct.ip * InterpolateCuts(m_cutLikelihoodPileupCorrection, + m_cutLikelihoodPileupCorrection4GeV, + vars_struct.eT, + vars_struct.eta); } else { - if (vars_struct.eT > 7000. || m_cutLikelihood4GeV.empty()){ + if (vars_struct.eT > 7000. || m_cutLikelihood4GeV.empty()) { cutDiscriminant = m_cutLikelihood[ibin_combinedLH]; - // If doPileupTransform, then correct the discriminant itself instead of the cut value - if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection.empty()) - cutDiscriminant += vars_struct.ip*m_cutLikelihoodPileupCorrection[ibin_combinedLH]; - } - else { + // If doPileupTransform, then correct the discriminant itself instead of + // the cut value + if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection.empty()) { + cutDiscriminant += vars_struct.ip * m_cutLikelihoodPileupCorrection[ibin_combinedLH]; + } + } else { cutDiscriminant = m_cutLikelihood4GeV[etabin]; - if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection4GeV.empty()) - cutDiscriminant += vars_struct.ip*m_cutLikelihoodPileupCorrection4GeV[etabin]; + if (!m_doPileupTransform && !m_cutLikelihoodPileupCorrection4GeV.empty()) + cutDiscriminant += vars_struct.ip * m_cutLikelihoodPileupCorrection4GeV[etabin]; } } // Determine if the calculated likelihood value passes the cut ATH_MSG_DEBUG("Likelihood macro: Discriminant: "); - if ( vars_struct.likelihood < cutDiscriminant ) - { + if (vars_struct.likelihood < cutDiscriminant) { ATH_MSG_DEBUG("Likelihood macro: Disciminant Cut Failed."); passLH = false; } } // d0 cut - if (!m_cutA0.empty()){ - if (fabs(vars_struct.d0) > m_cutA0[ibin_combinedOther]){ + if (!m_cutA0.empty()) { + if (std::abs(vars_struct.d0) > m_cutA0[ibin_combinedOther]) { ATH_MSG_DEBUG("Likelihood macro: D0 Failed."); passTrackA0 = false; } } // deltaEta cut - if (!m_cutDeltaEta.empty()){ - if ( fabs(vars_struct.deltaEta) > m_cutDeltaEta[ibin_combinedOther]){ + if (!m_cutDeltaEta.empty()) { + if (std::abs(vars_struct.deltaEta) > m_cutDeltaEta[ibin_combinedOther]) { ATH_MSG_DEBUG("Likelihood macro: deltaEta Failed."); passDeltaEta = false; } } // deltaPhiRes cut - if (!m_cutDeltaPhiRes.empty()){ - if ( fabs(vars_struct.deltaphires) > m_cutDeltaPhiRes[ibin_combinedOther]){ + if (!m_cutDeltaPhiRes.empty()) { + 
if (std::abs(vars_struct.deltaphires) > m_cutDeltaPhiRes[ibin_combinedOther]) { ATH_MSG_DEBUG("Likelihood macro: deltaphires Failed."); passDeltaPhiRes = false; } } // Only do this above HighETBinThreshold [in GeV] - if(vars_struct.eT > m_highETBinThreshold*1000){ + if (vars_struct.eT > m_highETBinThreshold * 1000) { // wstot cut - if (!m_cutWstotAtHighET.empty()){ - if ( fabs(vars_struct.wstot) > m_cutWstotAtHighET[etabin]){ + if (!m_cutWstotAtHighET.empty()) { + if (std::abs(vars_struct.wstot) > m_cutWstotAtHighET[etabin]) { ATH_MSG_DEBUG("Likelihood macro: wstot Failed."); passWstotAtHighET = false; } } // EoverP cut - if (!m_cutEoverPAtHighET.empty()){ - if ( fabs(vars_struct.EoverP) > m_cutEoverPAtHighET[etabin]){ + if (!m_cutEoverPAtHighET.empty()) { + if (std::abs(vars_struct.EoverP) > m_cutEoverPAtHighET[etabin]) { ATH_MSG_DEBUG("Likelihood macro: EoverP Failed."); passEoverPAtHighET = false; } @@ -555,94 +628,116 @@ Root::TElectronLikelihoodTool::accept( LikeEnum::LHAcceptVars_t& vars_struct ) c } // Set the individual cut bits in the return object - acceptData.setCutResult( m_cutPosition_NSilicon, passNSilicon ); - acceptData.setCutResult( m_cutPosition_NPixel, passNPixel ); - acceptData.setCutResult( m_cutPosition_NBlayer, passNBlayer ); - acceptData.setCutResult( m_cutPosition_ambiguity, passAmbiguity ); - acceptData.setCutResult( m_cutPosition_LH, passLH ); - acceptData.setCutResult( m_cutPositionTrackA0, passTrackA0 ); - acceptData.setCutResult( m_cutPositionTrackMatchEta, passDeltaEta ); - acceptData.setCutResult( m_cutPositionTrackMatchPhiRes, passDeltaPhiRes ); - acceptData.setCutResult( m_cutPositionWstotAtHighET, passWstotAtHighET ); - acceptData.setCutResult( m_cutPositionEoverPAtHighET, passEoverPAtHighET ); + acceptData.setCutResult(m_cutPosition_NSilicon, passNSilicon); + acceptData.setCutResult(m_cutPosition_NPixel, passNPixel); + acceptData.setCutResult(m_cutPosition_NBlayer, passNBlayer); + acceptData.setCutResult(m_cutPosition_ambiguity, passAmbiguity); + acceptData.setCutResult(m_cutPosition_LH, passLH); + acceptData.setCutResult(m_cutPositionTrackA0, passTrackA0); + acceptData.setCutResult(m_cutPositionTrackMatchEta, passDeltaEta); + acceptData.setCutResult(m_cutPositionTrackMatchPhiRes, passDeltaPhiRes); + acceptData.setCutResult(m_cutPositionWstotAtHighET, passWstotAtHighET); + acceptData.setCutResult(m_cutPositionEoverPAtHighET, passEoverPAtHighET); return acceptData; } double -Root::TElectronLikelihoodTool::calculate( double eta, double eT,double f3, double rHad, double rHad1, - double Reta, double w2, double f1, double eratio, - double deltaEta, double d0, double d0sigma, double rphi, - double deltaPoverP ,double deltaphires, double TRT_PID, - double ip ) const +Root::TElectronLikelihoodTool::calculate(double eta, + double eT, + double f3, + double rHad, + double rHad1, + double Reta, + double w2, + double f1, + double eratio, + double deltaEta, + double d0, + double d0sigma, + double rphi, + double deltaPoverP, + double deltaphires, + double TRT_PID, + double ip) const { LikeEnum::LHCalcVars_t vars; - vars.eta = eta ; - vars.eT = eT ; - vars.f3 = f3 ; - vars.rHad = rHad ; - vars.rHad1 = rHad1 ; - vars.Reta = Reta ; - vars.w2 = w2 ; - vars.f1 = f1 ; - vars.eratio = eratio ; - vars.deltaEta = deltaEta ; - vars.d0 = d0 ; - vars.d0sigma = d0sigma ; - vars.rphi = rphi ; + vars.eta = eta; + vars.eT = eT; + vars.f3 = f3; + vars.rHad = rHad; + vars.rHad1 = rHad1; + vars.Reta = Reta; + vars.w2 = w2; + vars.f1 = f1; + vars.eratio = eratio; + vars.deltaEta = 
deltaEta; + vars.d0 = d0; + vars.d0sigma = d0sigma; + vars.rphi = rphi; vars.deltaPoverP = deltaPoverP; vars.deltaphires = deltaphires; - vars.TRT_PID = TRT_PID ; - vars.ip = ip ; + vars.TRT_PID = TRT_PID; + vars.ip = ip; return calculate(vars); } // The main public method to actually calculate the likelihood value double -Root::TElectronLikelihoodTool::calculate(LikeEnum::LHCalcVars_t& vars_struct) const +Root::TElectronLikelihoodTool::calculate(LikeEnum::LHCalcVars_t& vars_struct) const { // Reset the results to defaul values double result = -999; unsigned int etabin = getLikelihoodEtaBin(vars_struct.eta); double rhad_corr; - if(etabin == 3 || etabin == 4) rhad_corr = vars_struct.rHad; - else rhad_corr = vars_struct.rHad1; - double d0significance = vars_struct.d0sigma == 0 ? 0. : fabs(vars_struct.d0)/vars_struct.d0sigma; - - std::vector<double> vec ={d0significance,vars_struct.eratio,vars_struct.deltaEta - ,vars_struct.f1,vars_struct.f3 - ,vars_struct.Reta,rhad_corr,vars_struct.rphi - ,vars_struct.d0,vars_struct.w2 - ,vars_struct.deltaPoverP,vars_struct.deltaphires - ,vars_struct.TRT_PID}; + if (etabin == 3 || etabin == 4) { + rhad_corr = vars_struct.rHad; + } else { + rhad_corr = vars_struct.rHad1; + } + double d0significance = + vars_struct.d0sigma == 0 ? 0. : std::abs(vars_struct.d0) / vars_struct.d0sigma; + + std::vector<double> vec = { d0significance, vars_struct.eratio, vars_struct.deltaEta, + vars_struct.f1, vars_struct.f3, vars_struct.Reta, + rhad_corr, vars_struct.rphi, vars_struct.d0, + vars_struct.w2, vars_struct.deltaPoverP, vars_struct.deltaphires, + vars_struct.TRT_PID }; // Calculate the actual likelihood value and fill the return object - result = this->evaluateLikelihood(vec,vars_struct.eT,vars_struct.eta,vars_struct.ip); + result = this->evaluateLikelihood(vec, vars_struct.eT, vars_struct.eta, vars_struct.ip); return result; } -double Root::TElectronLikelihoodTool::evaluateLikelihood(std::vector<float> varVector,double et,double eta,double ip) const +double +Root::TElectronLikelihoodTool::evaluateLikelihood(const std::vector<float>& varVector, + double et, + double eta, + double ip) const { std::vector<double> vec; - for(unsigned int var = 0; var < s_fnVariables; var++){ + for (unsigned int var = 0; var < s_fnVariables; var++) { vec.push_back(varVector[var]); } - return evaluateLikelihood(vec,et,eta,ip);//,mask); + return evaluateLikelihood(vec, et, eta, ip); } -double Root::TElectronLikelihoodTool::evaluateLikelihood(std::vector<double> varVector,double et,double eta,double ip) const +double +Root::TElectronLikelihoodTool::evaluateLikelihood(const std::vector<double>& varVector, + double et, + double eta, + double ip) const { const double GeV = 1000; unsigned int etbin = getLikelihoodEtHistBin(et); // hist binning unsigned int etabin = getLikelihoodEtaBin(eta); - unsigned int ipbin = getIpBin(ip); + unsigned int ipbin = getIpBin(ip); - ATH_MSG_DEBUG("et: " << et << " eta: " << eta - << " etbin: " << etbin << " etabin: " << etabin); + ATH_MSG_DEBUG("et: " << et << " eta: " << eta << " etbin: " << etbin << " etabin: " << etabin); if (etbin >= s_fnEtBinsHist) { ATH_MSG_WARNING("skipping etbin " << etbin << ", et " << et); @@ -653,8 +748,9 @@ double Root::TElectronLikelihoodTool::evaluateLikelihood(std::vector<double> var return -999.; } - if (varVector.size() != s_fnVariables) - ATH_MSG_WARNING("Error! Variable vector size mismatch! Check your vector!" ); + if (varVector.size() != s_fnVariables) { + ATH_MSG_WARNING("Error! Variable vector size mismatch! 
Check your vector!"); + } double SigmaS = 1.; double SigmaB = 1.; @@ -664,39 +760,40 @@ double Root::TElectronLikelihoodTool::evaluateLikelihood(std::vector<double> var const std::string el_f3_string = "el_f3"; const std::string el_TRT_PID_string = "el_TRT_PID"; - for(unsigned int var = 0; var < s_fnVariables; var++){ + for (unsigned int var = 0; var < s_fnVariables; var++) { const std::string& varstr = fVariables[var]; // Skip variables that are masked off (not used) in the likelihood - if (!(m_variableBitMask & (0x1 << var))){ + if (!(m_variableBitMask & (0x1 << var))) { continue; } // Don't use TRT for outer eta bins (2.01,2.37) - if (((etabin == 8) || (etabin == 9)) && (varstr.find(TRT_string) != std::string::npos)){ + if (((etabin == 8) || (etabin == 9)) && (varstr.find(TRT_string) != std::string::npos)) { continue; } // Don't use f3 for outer eta bin (2.37) - if ((etabin == 9) && (varstr.find(el_f3_string) != std::string::npos)){ + if ((etabin == 9) && (varstr.find(el_f3_string) != std::string::npos)) { continue; } // Don't use f3 for high et (>80 GeV) - if (m_doRemoveF3AtHighEt && (et > 80*GeV) && (varstr.find(el_f3_string) != std::string::npos)){ + if (m_doRemoveF3AtHighEt && (et > 80 * GeV) && + (varstr.find(el_f3_string) != std::string::npos)) { continue; } // Don't use TRTPID for high et (>80 GeV) - if (m_doRemoveTRTPIDAtHighEt && (et > 80*GeV) && (varstr.find(el_TRT_PID_string) != std::string::npos)){ + if (m_doRemoveTRTPIDAtHighEt && (et > 80 * GeV) && + (varstr.find(el_TRT_PID_string) != std::string::npos)) { continue; } - for (unsigned int s_or_b=0; s_or_b<2;s_or_b++) { + for (unsigned int s_or_b = 0; s_or_b < 2; s_or_b++) { int bin = fPDFbins[s_or_b][ipbin][etbin][etabin][var]->FindBin(varVector[var]); double prob = 0; if (m_doSmoothBinInterpolation) { - prob = InterpolatePdfs(s_or_b,ipbin,et,eta,bin,var); - } - else { + prob = InterpolatePdfs(s_or_b, ipbin, et, eta, bin, var); + } else { double integral = double(fPDFbins[s_or_b][ipbin][etbin][etabin][var]->Integral()); if (integral == 0) { ATH_MSG_WARNING("Error! PDF integral == 0!"); @@ -706,35 +803,50 @@ double Root::TElectronLikelihoodTool::evaluateLikelihood(std::vector<double> var prob = double(fPDFbins[s_or_b][ipbin][etbin][etabin][var]->GetBinContent(bin)) / integral; } - if (s_or_b == 0) SigmaS *= prob; - else if (s_or_b == 1) SigmaB *= prob; + if (s_or_b == 0) { + SigmaS *= prob; + } else if (s_or_b == 1) { + SigmaB *= prob; + } } } - return TransformLikelihoodOutput( SigmaS, SigmaB, ip, et, eta ); + return TransformLikelihoodOutput(SigmaS, SigmaB, ip, et, eta); } // -------------------------------------------- -double Root::TElectronLikelihoodTool::TransformLikelihoodOutput(double ps,double pb, double ip, double et, double eta) const { +double +Root::TElectronLikelihoodTool::TransformLikelihoodOutput(double ps, + double pb, + double ip, + double et, + double eta) const +{ // returns transformed or non-transformed output // (Taken mostly from TMVA likelihood code) double fEpsilon = 1e-99; // If both signal and bkg are 0, we want it to fail. - if (ps < fEpsilon) ps = 0; - if (pb < fEpsilon) pb = fEpsilon; - double disc = ps/double(ps + pb); - - if (disc >= 1.0) disc = 1. - 1.e-15; - else if (disc <= 0.0) disc = fEpsilon; + if (ps < fEpsilon) + ps = 0; + if (pb < fEpsilon) + pb = fEpsilon; + double disc = ps / double(ps + pb); + + if (disc >= 1.0) { + disc = 1. 
- 1.e-15; + } else if (disc <= 0.0) { + disc = fEpsilon; + } double tau = 15.0; - disc = - log(1.0/disc - 1.0)*(1./double(tau)); + disc = -log(1.0 / disc - 1.0) * (1. / double(tau)); // Linearly transform the discriminant as a function of pileup, rather than - // the old scheme of changing the cut value based on pileup. This is simpler for - // the tuning, as well as ensuring subsets / making discriminants more transparent. - // In the HI case, a quadratic centrality transform is applied instead. - if(m_doPileupTransform){ + // the old scheme of changing the cut value based on pileup. This is simpler + // for the tuning, as well as ensuring subsets / making discriminants more + // transparent. In the HI case, a quadratic centrality transform is applied + // instead. + if (m_doPileupTransform) { // The variables used by the transform: // @@ -746,107 +858,123 @@ double Root::TElectronLikelihoodTool::TransformLikelihoodOutput(double ps,double // - pileup_max = max nvtx or mu for calculating the transform. Any larger // pileup values will use this maximum value in the transform. - if( m_discHardCutForPileupTransform.empty() || m_discHardCutSlopeForPileupTransform.empty() || m_discLooseForPileupTransform.empty()){ - ATH_MSG_WARNING("Vectors needed for pileup-dependent transform not correctly filled! Skipping the transform."); + if (m_discHardCutForPileupTransform.empty() || m_discHardCutSlopeForPileupTransform.empty() || + m_discLooseForPileupTransform.empty()) { + ATH_MSG_WARNING("Vectors needed for pileup-dependent transform not correctly filled! " + "Skipping the transform."); return disc; } - if(m_doCentralityTransform && m_discHardCutQuadForPileupTransform.empty()){ - ATH_MSG_WARNING("Vectors needed for centrality-dependent transform not correctly filled! Skipping the transform."); + if (m_doCentralityTransform && m_discHardCutQuadForPileupTransform.empty()) { + ATH_MSG_WARNING("Vectors needed for centrality-dependent transform not " + "correctly filled! 
" + "Skipping the transform."); return disc; } unsigned int etabin = getLikelihoodEtaBin(eta); - double disc_hard_cut_ref = 0; + double disc_hard_cut_ref = 0; double disc_hard_cut_ref_slope = 0; - double disc_hard_cut_ref_quad = 0; // only used for heavy ion implementation of the LH - double disc_loose_ref = 0; - double disc_max = m_discMaxForPileupTransform; - double pileup_max = m_pileupMaxForPileupTransform; - - if (m_doSmoothBinInterpolation){ - disc_hard_cut_ref = InterpolateCuts(m_discHardCutForPileupTransform,m_discHardCutForPileupTransform4GeV,et,eta); - disc_hard_cut_ref_slope = InterpolateCuts(m_discHardCutSlopeForPileupTransform,m_discHardCutSlopeForPileupTransform4GeV,et,eta); - if (m_doCentralityTransform) disc_hard_cut_ref_quad = InterpolateCuts(m_discHardCutQuadForPileupTransform,m_discHardCutQuadForPileupTransform4GeV,et,eta); - disc_loose_ref = InterpolateCuts(m_discLooseForPileupTransform,m_discLooseForPileupTransform4GeV,et,eta); + double disc_hard_cut_ref_quad = 0; // only used for heavy ion implementation of the LH + double disc_loose_ref = 0; + double disc_max = m_discMaxForPileupTransform; + double pileup_max = m_pileupMaxForPileupTransform; + + if (m_doSmoothBinInterpolation) { + disc_hard_cut_ref = InterpolateCuts( + m_discHardCutForPileupTransform, m_discHardCutForPileupTransform4GeV, et, eta); + disc_hard_cut_ref_slope = InterpolateCuts( + m_discHardCutSlopeForPileupTransform, m_discHardCutSlopeForPileupTransform4GeV, et, eta); + if (m_doCentralityTransform) + disc_hard_cut_ref_quad = InterpolateCuts( + m_discHardCutQuadForPileupTransform, m_discHardCutQuadForPileupTransform4GeV, et, eta); + disc_loose_ref = + InterpolateCuts(m_discLooseForPileupTransform, m_discLooseForPileupTransform4GeV, et, eta); } else { // default situation, in the case where 4-7 GeV bin is not defined - if (et > 7000. || m_discHardCutForPileupTransform4GeV.empty()){ - unsigned int etfinebinLH = getLikelihoodEtDiscBin(et,true); - unsigned int ibin_combined = etfinebinLH*s_fnEtaBins+etabin; - disc_hard_cut_ref = m_discHardCutForPileupTransform[ibin_combined]; + if (et > 7000. || m_discHardCutForPileupTransform4GeV.empty()) { + unsigned int etfinebinLH = getLikelihoodEtDiscBin(et, true); + unsigned int ibin_combined = etfinebinLH * s_fnEtaBins + etabin; + disc_hard_cut_ref = m_discHardCutForPileupTransform[ibin_combined]; disc_hard_cut_ref_slope = m_discHardCutSlopeForPileupTransform[ibin_combined]; - if (m_doCentralityTransform) disc_hard_cut_ref_quad = m_discHardCutQuadForPileupTransform[ibin_combined]; - disc_loose_ref = m_discLooseForPileupTransform[ibin_combined]; + if (m_doCentralityTransform) + disc_hard_cut_ref_quad = m_discHardCutQuadForPileupTransform[ibin_combined]; + disc_loose_ref = m_discLooseForPileupTransform[ibin_combined]; } else { - if( m_discHardCutForPileupTransform4GeV.empty() || m_discHardCutSlopeForPileupTransform4GeV.empty() || m_discLooseForPileupTransform4GeV.empty()){ - ATH_MSG_WARNING("Vectors needed for pileup-dependent transform not correctly filled for 4-7 GeV bin! Skipping the transform."); + if (m_discHardCutForPileupTransform4GeV.empty() || + m_discHardCutSlopeForPileupTransform4GeV.empty() || + m_discLooseForPileupTransform4GeV.empty()) { + ATH_MSG_WARNING("Vectors needed for pileup-dependent transform not " + "correctly filled for 4-7 GeV " + "bin! 
Skipping the transform."); return disc; } - if(m_doCentralityTransform && m_discHardCutQuadForPileupTransform4GeV.empty()){ - ATH_MSG_WARNING("Vectors needed for centrality-dependent transform not correctly filled for 4-7 GeV bin! Skipping the transform."); + if (m_doCentralityTransform && m_discHardCutQuadForPileupTransform4GeV.empty()) { + ATH_MSG_WARNING("Vectors needed for centrality-dependent transform " + "not correctly filled for 4-7 " + "GeV bin! Skipping the transform."); return disc; } - disc_hard_cut_ref = m_discHardCutForPileupTransform4GeV[etabin]; + disc_hard_cut_ref = m_discHardCutForPileupTransform4GeV[etabin]; disc_hard_cut_ref_slope = m_discHardCutSlopeForPileupTransform4GeV[etabin]; - if (m_doCentralityTransform) disc_hard_cut_ref_quad = m_discHardCutQuadForPileupTransform4GeV[etabin]; - disc_loose_ref = m_discLooseForPileupTransform4GeV[etabin]; + if (m_doCentralityTransform) + disc_hard_cut_ref_quad = m_discHardCutQuadForPileupTransform4GeV[etabin]; + disc_loose_ref = m_discLooseForPileupTransform4GeV[etabin]; } } - double ip_for_corr = std::min(ip,pileup_max); // turn off correction for values > pileup_max - double disc_hard_cut_ref_prime = disc_hard_cut_ref + disc_hard_cut_ref_slope * ip_for_corr + disc_hard_cut_ref_quad * ip_for_corr * ip_for_corr; + double ip_for_corr = std::min(ip, pileup_max); // turn off correction for values > pileup_max + double disc_hard_cut_ref_prime = disc_hard_cut_ref + disc_hard_cut_ref_slope * ip_for_corr + + disc_hard_cut_ref_quad * ip_for_corr * ip_for_corr; - if(disc <= disc_loose_ref){ + if (disc <= disc_loose_ref) { // Below threshold for applying pileup correction - //disc = disc; - } - else if(disc <= disc_hard_cut_ref_prime){ + } else if (disc <= disc_hard_cut_ref_prime) { // Between the loose and hard cut reference points for pileup correction double denom = double(disc_hard_cut_ref_prime - disc_loose_ref); - if(denom < 0.001) denom = 0.001; - disc = disc_loose_ref + (disc - disc_loose_ref) * (disc_hard_cut_ref - disc_loose_ref) / denom; - } - else if(disc_hard_cut_ref_prime < disc && disc <= disc_max){ + if (denom < 0.001) + denom = 0.001; + disc = + disc_loose_ref + (disc - disc_loose_ref) * (disc_hard_cut_ref - disc_loose_ref) / denom; + } else if (disc_hard_cut_ref_prime < disc && disc <= disc_max) { // Between the hard cut and max reference points for pileup correction double denom = double(disc_max - disc_hard_cut_ref_prime); - if(denom < 0.001) denom = 0.001; - disc = disc_hard_cut_ref + (disc - disc_hard_cut_ref_prime) * (disc_max - disc_hard_cut_ref) / denom; - } - else{ - // Above threshold where pileup correction necessary - //disc = disc; + if (denom < 0.001) + denom = 0.001; + disc = disc_hard_cut_ref + + (disc - disc_hard_cut_ref_prime) * (disc_max - disc_hard_cut_ref) / denom; } } - ATH_MSG_DEBUG( "disc is " << disc ); + ATH_MSG_DEBUG("disc is " << disc); return disc; } - - -const double Root::TElectronLikelihoodTool::fIpBounds[IP_BINS+1] = {0.,500.}; +const double Root::TElectronLikelihoodTool::fIpBounds[IP_BINS + 1] = { 0., 500. 
}; //--------------------------------------------------------------------------------------- // Gets the IP bin -unsigned int Root::TElectronLikelihoodTool::getIpBin(double ip) const{ - for(unsigned int ipBin = 0; ipBin < IP_BINS; ++ipBin){ - if(ip < fIpBounds[ipBin+1]) +unsigned int +Root::TElectronLikelihoodTool::getIpBin(double ip) const +{ + for (unsigned int ipBin = 0; ipBin < IP_BINS; ++ipBin) { + if (ip < fIpBounds[ipBin + 1]) return ipBin; } return 0; } - //--------------------------------------------------------------------------------------- // Gets the Eta bin [0-9] given the eta -unsigned int Root::TElectronLikelihoodTool::getLikelihoodEtaBin(double eta) const{ +unsigned int +Root::TElectronLikelihoodTool::getLikelihoodEtaBin(double eta) const +{ const unsigned int nEtaBins = s_fnEtaBins; - const double etaBins[nEtaBins] = {0.1,0.6,0.8,1.15,1.37,1.52,1.81,2.01,2.37,2.47}; + const double etaBins[nEtaBins] = { 0.1, 0.6, 0.8, 1.15, 1.37, 1.52, 1.81, 2.01, 2.37, 2.47 }; - for(unsigned int etaBin = 0; etaBin < nEtaBins; ++etaBin){ - if(fabs(eta) < etaBins[etaBin]) + for (unsigned int etaBin = 0; etaBin < nEtaBins; ++etaBin) { + if (std::abs(eta) < etaBins[etaBin]) return etaBin; } @@ -854,69 +982,90 @@ unsigned int Root::TElectronLikelihoodTool::getLikelihoodEtaBin(double eta) cons } //--------------------------------------------------------------------------------------- // Gets the histogram Et bin given the et (MeV) -- corrresponds to fnEtBinsHist -unsigned int Root::TElectronLikelihoodTool::getLikelihoodEtHistBin(double eT) const { +unsigned int +Root::TElectronLikelihoodTool::getLikelihoodEtHistBin(double eT) const +{ const double GeV = 1000; const unsigned int nEtBins = s_fnEtBinsHist; - const double eTBins[nEtBins] = {7*GeV,10*GeV,15*GeV,20*GeV,30*GeV,40*GeV,50*GeV}; + const double eTBins[nEtBins] = { 7 * GeV, 10 * GeV, 15 * GeV, 20 * GeV, + 30 * GeV, 40 * GeV, 50 * GeV }; - for(unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin){ - if(eT < eTBins[eTBin]) + for (unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin) { + if (eT < eTBins[eTBin]) { return eTBin; + } } - return nEtBins-1; // Return the last bin if > the last bin. + return nEtBins - 1; // Return the last bin if > the last bin. } //--------------------------------------------------------------------------------------- // Gets the Et bin [0-10] given the et (MeV) -unsigned int Root::TElectronLikelihoodTool::getLikelihoodEtDiscBin(double eT, const bool isLHbinning) const{ +unsigned int +Root::TElectronLikelihoodTool::getLikelihoodEtDiscBin(double eT, const bool isLHbinning) const +{ const double GeV = 1000; - if(m_useOneExtraHighETLHBin && isLHbinning){ + if (m_useOneExtraHighETLHBin && isLHbinning) { const unsigned int nEtBins = s_fnDiscEtBinsOneExtra; - const double eTBins[nEtBins] = {10*GeV,15*GeV,20*GeV,25*GeV,30*GeV,35*GeV,40*GeV,45*GeV,m_highETBinThreshold*GeV,6000*GeV}; + const double eTBins[nEtBins] = { 10 * GeV, 15 * GeV, 20 * GeV, + 25 * GeV, 30 * GeV, 35 * GeV, + 40 * GeV, 45 * GeV, m_highETBinThreshold * GeV, + 6000 * GeV }; - for(unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin){ - if(eT < eTBins[eTBin]) + for (unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin) { + if (eT < eTBins[eTBin]) return eTBin; } - return nEtBins-1; // Return the last bin if > the last bin. - + return nEtBins - 1; // Return the last bin if > the last bin. 
} - - const unsigned int nEtBins = s_fnDiscEtBins; - const double eTBins[nEtBins] = {10*GeV,15*GeV,20*GeV,25*GeV,30*GeV,35*GeV,40*GeV,45*GeV,50*GeV}; - for(unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin){ - if(eT < eTBins[eTBin]) - return eTBin; - } + const unsigned int nEtBins = s_fnDiscEtBins; + const double eTBins[nEtBins] = { 10 * GeV, 15 * GeV, 20 * GeV, 25 * GeV, 30 * GeV, + 35 * GeV, 40 * GeV, 45 * GeV, 50 * GeV }; + + for (unsigned int eTBin = 0; eTBin < nEtBins; ++eTBin) { + if (eT < eTBins[eTBin]) + return eTBin; + } - return nEtBins-1; // Return the last bin if > the last bin. - + return nEtBins - 1; // Return the last bin if > the last bin. } //--------------------------------------------------------------------------------------- // Gets the bin name. Given the HISTOGRAM binning (fnEtBinsHist) -void Root::TElectronLikelihoodTool::getBinName(char* buffer, int etbin,int etabin, int ipbin, const std::string& iptype) const{ - double eta_bounds[9] = {0.0,0.6,0.8,1.15,1.37,1.52,1.81,2.01,2.37}; - int et_bounds[s_fnEtBinsHist] = {4,7,10,15,20,30,40}; - if (!iptype.empty()){ - snprintf(buffer, 200,"%s%det%02deta%0.2f", iptype.c_str(), int(fIpBounds[ipbin]), et_bounds[etbin], eta_bounds[etabin]); - } - else{ - snprintf(buffer, 200,"et%deta%0.2f", et_bounds[etbin], eta_bounds[etabin]); - } +void +Root::TElectronLikelihoodTool::getBinName(char* buffer, + int etbin, + int etabin, + int ipbin, + const std::string& iptype) const +{ + double eta_bounds[9] = { 0.0, 0.6, 0.8, 1.15, 1.37, 1.52, 1.81, 2.01, 2.37 }; + int et_bounds[s_fnEtBinsHist] = { 4, 7, 10, 15, 20, 30, 40 }; + if (!iptype.empty()) { + snprintf(buffer, + 200, + "%s%det%02deta%0.2f", + iptype.c_str(), + int(fIpBounds[ipbin]), + et_bounds[etbin], + eta_bounds[etabin]); + } else { + snprintf(buffer, 200, "et%deta%0.2f", et_bounds[etbin], eta_bounds[etabin]); } +} //---------------------------------------------------------------------------------------- -unsigned int Root::TElectronLikelihoodTool::getLikelihoodBitmask(const std::string& vars) const{ +unsigned int +Root::TElectronLikelihoodTool::getLikelihoodBitmask(const std::string& vars) const +{ unsigned int mask = 0x0; - ATH_MSG_DEBUG ("Variables to be used: "); - for(unsigned int var = 0; var < s_fnVariables; var++){ - if (vars.find(fVariables[var]) != std::string::npos){ - ATH_MSG_DEBUG( fVariables[var] ); + ATH_MSG_DEBUG("Variables to be used: "); + for (unsigned int var = 0; var < s_fnVariables; var++) { + if (vars.find(fVariables[var]) != std::string::npos) { + ATH_MSG_DEBUG(fVariables[var]); mask = mask | 0x1 << var; } } @@ -925,112 +1074,158 @@ unsigned int Root::TElectronLikelihoodTool::getLikelihoodBitmask(const std::stri } //---------------------------------------------------------------------------------------- -// Note that this will only perform the cut interpolation up to ~45 GeV, so +// Note that this will only perform the cut interpolation up to ~45 GeV, so // no smoothing is done above this for the high ET LH binning yet -double Root::TElectronLikelihoodTool::InterpolateCuts(const std::vector<double>& cuts,const std::vector<double>& cuts_4gev,double et,double eta) const{ - int etbinLH = getLikelihoodEtDiscBin(et,true); +double +Root::TElectronLikelihoodTool::InterpolateCuts(const std::vector<double>& cuts, + const std::vector<double>& cuts_4gev, + double et, + double eta) const +{ + int etbinLH = getLikelihoodEtDiscBin(et, true); int etabin = getLikelihoodEtaBin(eta); - unsigned int ibin_combinedLH = etbinLH*s_fnEtaBins+etabin; + unsigned int ibin_combinedLH 
= etbinLH * s_fnEtaBins + etabin; double cut = cuts.at(ibin_combinedLH); - if (!cuts_4gev.empty() && et < 7000.) {cut = cuts_4gev.at(etabin);} - if (et > 47500.) {return cut;} // interpolation stops here. - if (cuts_4gev.empty() && et < 8500.) {return cut;} // stops here - if (!cuts_4gev.empty() && et < 6000.) {return cut;} // stops here + if (!cuts_4gev.empty() && et < 7000.) { + cut = cuts_4gev.at(etabin); + } + if (et > 47500.) { + return cut; + } // interpolation stops here. + if (cuts_4gev.empty() && et < 8500.) { + return cut; + } // stops here + if (!cuts_4gev.empty() && et < 6000.) { + return cut; + } // stops here double bin_width = 5000.; - if (7000. < et && et < 10000.) {bin_width = 3000.;} - if (et < 7000.) {bin_width = 2000.;} - const double GeV = 1000; - const double eTBins[9] = {8.5*GeV,12.5*GeV,17.5*GeV,22.5*GeV,27.5*GeV,32.5*GeV,37.5*GeV,42.5*GeV,47.5*GeV}; + if (7000. < et && et < 10000.) { + bin_width = 3000.; + } + if (et < 7000.) { + bin_width = 2000.; + } + const double GeV = 1000; + const double eTBins[9] = { 8.5 * GeV, 12.5 * GeV, 17.5 * GeV, 22.5 * GeV, 27.5 * GeV, + 32.5 * GeV, 37.5 * GeV, 42.5 * GeV, 47.5 * GeV }; double bin_center = eTBins[etbinLH]; if (et > bin_center) { double cut_next = cut; - if (etbinLH+1<=8) cut_next = cuts.at((etbinLH+1)*s_fnEtaBins+etabin); - return cut+(cut_next-cut)*(et-bin_center)/(bin_width); + if (etbinLH + 1 <= 8) + cut_next = cuts.at((etbinLH + 1) * s_fnEtaBins + etabin); + return cut + (cut_next - cut) * (et - bin_center) / (bin_width); } // or else if et < bin_center : double cut_before = cut; - if (etbinLH-1>=0) {cut_before = cuts.at((etbinLH-1)*s_fnEtaBins+etabin);} - else if (etbinLH == 0 && !cuts_4gev.empty()){cut_before = cuts_4gev.at(etabin);} + if (etbinLH - 1 >= 0) { + cut_before = cuts.at((etbinLH - 1) * s_fnEtaBins + etabin); + } else if (etbinLH == 0 && !cuts_4gev.empty()) { + cut_before = cuts_4gev.at(etabin); + } - return cut-(cut-cut_before)*(bin_center-et)/(bin_width); + return cut - (cut - cut_before) * (bin_center - et) / (bin_width); } //---------------------------------------------------------------------------------------- -// Note that this will only perform the PDF interpolation up to ~45 GeV, so +// Note that this will only perform the PDF interpolation up to ~45 GeV, so // no smoothing is done above this for the high ET LH binning yet -double Root::TElectronLikelihoodTool::InterpolatePdfs(unsigned int s_or_b,unsigned int ipbin,double et,double eta,int bin,unsigned int var) const{ +double +Root::TElectronLikelihoodTool::InterpolatePdfs(unsigned int s_or_b, + unsigned int ipbin, + double et, + double eta, + int bin, + unsigned int var) const +{ // histograms exist for the following bins: 4, 7, 10, 15, 20, 30, 40. - // Interpolation between histograms must follow fairly closely the interpolation - // scheme between cuts - so be careful! + // Interpolation between histograms must follow fairly closely the + // interpolation scheme between cuts - so be careful! int etbin = getLikelihoodEtHistBin(et); // hist binning int etabin = getLikelihoodEtaBin(eta); double integral = double(fPDFbins[s_or_b][ipbin][etbin][etabin][var]->Integral()); double prob = double(fPDFbins[s_or_b][ipbin][etbin][etabin][var]->GetBinContent(bin)) / integral; - int Nbins = fPDFbins[s_or_b][ipbin][etbin][etabin][var]->GetNbinsX(); - if (et > 42500.) return prob; // interpolation stops here. - if (et < 6000.) return prob; // interpolation stops here. - if (22500. < et && et < 27500.) 
return prob; // region of non-interpolation for pdfs - if (32500. < et && et < 37500.) return prob; // region of non-interpolation for pdfs + int Nbins = fPDFbins[s_or_b][ipbin][etbin][etabin][var]->GetNbinsX(); + if (et > 42500.) { + return prob; // interpolation stops here. + } + if (et < 6000.) { + return prob; // interpolation stops here. + } + if (22500. < et && et < 27500.) { + return prob; // region of non-interpolation for pdfs + } + if (32500. < et && et < 37500.) { + return prob; // region of non-interpolation for pdfs + } double bin_width = 5000.; - if (7000. < et && et < 10000.) bin_width = 3000.; - if (et < 7000.) bin_width = 2000.; - const double GeV = 1000; - const double eTHistBins[7] = {6.*GeV,8.5*GeV,12.5*GeV,17.5*GeV,22.5*GeV,32.5*GeV,42.5*GeV}; + if (7000. < et && et < 10000.) { + bin_width = 3000.; + } + if (et < 7000.) { + bin_width = 2000.; + } + const double GeV = 1000; + const double eTHistBins[7] = { 6. * GeV, 8.5 * GeV, 12.5 * GeV, 17.5 * GeV, + 22.5 * GeV, 32.5 * GeV, 42.5 * GeV }; double bin_center = eTHistBins[etbin]; - if (etbin == 4 && et >= 27500.) bin_center = 27500.; // special: interpolate starting from 27.5 here - if (etbin == 5 && et >= 37500.) bin_center = 37500.; // special: interpolate starting from 37.5 here - if (et > bin_center){ + if (etbin == 4 && et >= 27500.) { + bin_center = 27500.; // special: interpolate starting from 27.5 here + } + if (etbin == 5 && et >= 37500.) { + bin_center = 37500.; // special: interpolate starting from 37.5 here + } + if (et > bin_center) { double prob_next = prob; - if (etbin+1<=6) { + if (etbin + 1 <= 6) { // account for potential histogram bin inequalities - int NbinsPlus = fPDFbins[s_or_b][ipbin][etbin+1][etabin][var]->GetNbinsX(); + int NbinsPlus = fPDFbins[s_or_b][ipbin][etbin + 1][etabin][var]->GetNbinsX(); int binplus = bin; - if (Nbins < NbinsPlus){ - binplus = int(round(bin*(Nbins/NbinsPlus))); - } - else if (Nbins > NbinsPlus){ - binplus = int(round(bin*(NbinsPlus/Nbins))); + if (Nbins < NbinsPlus) { + binplus = int(round(bin * (Nbins / NbinsPlus))); + } else if (Nbins > NbinsPlus) { + binplus = int(round(bin * (NbinsPlus / Nbins))); } // do interpolation - double integral_next = double(fPDFbins[s_or_b][ipbin][etbin+1][etabin][var]->Integral()); - prob_next = double(fPDFbins[s_or_b][ipbin][etbin+1][etabin][var]->GetBinContent(binplus)) / integral_next; - return prob+(prob_next-prob)*(et-bin_center)/(bin_width); + double integral_next = double(fPDFbins[s_or_b][ipbin][etbin + 1][etabin][var]->Integral()); + prob_next = double(fPDFbins[s_or_b][ipbin][etbin + 1][etabin][var]->GetBinContent(binplus)) / + integral_next; + return prob + (prob_next - prob) * (et - bin_center) / (bin_width); } } // or else if et < bin_center : double prob_before = prob; - if (etbin-1>=0) { + if (etbin - 1 >= 0) { // account for potential histogram bin inequalities - int NbinsMinus = fPDFbins[s_or_b][ipbin][etbin-1][etabin][var]->GetNbinsX(); + int NbinsMinus = fPDFbins[s_or_b][ipbin][etbin - 1][etabin][var]->GetNbinsX(); int binminus = bin; - if (Nbins < NbinsMinus){ - binminus = int(round(bin*(Nbins/NbinsMinus))); - } - else if (Nbins > NbinsMinus){ - binminus = int(round(bin*(NbinsMinus/Nbins))); + if (Nbins < NbinsMinus) { + binminus = int(round(bin * (Nbins / NbinsMinus))); + } else if (Nbins > NbinsMinus) { + binminus = int(round(bin * (NbinsMinus / Nbins))); } - double integral_before = double(fPDFbins[s_or_b][ipbin][etbin-1][etabin][var]->Integral()); - prob_before = 
double(fPDFbins[s_or_b][ipbin][etbin-1][etabin][var]->GetBinContent(binminus)) / integral_before; + double integral_before = double(fPDFbins[s_or_b][ipbin][etbin - 1][etabin][var]->Integral()); + prob_before = double(fPDFbins[s_or_b][ipbin][etbin - 1][etabin][var]->GetBinContent(binminus)) / + integral_before; } - return prob-(prob-prob_before)*(bin_center-et)/(bin_width); + return prob - (prob - prob_before) * (bin_center - et) / (bin_width); } //---------------------------------------------------------------------------------------- // These are the variables availalble in the likelihood. const std::string Root::TElectronLikelihoodTool::fVariables[s_fnVariables] = { - "el_d0significance" - ,"el_eratio" - ,"el_deltaeta1" - ,"el_f1" - ,"el_f3" - ,"el_reta" - ,"el_rhad" - ,"el_rphi" - ,"el_trackd0pvunbiased" - ,"el_weta2" - ,"el_DeltaPoverP" - ,"el_deltaphiRescaled" - ,"el_TRT_PID" + "el_d0significance", + "el_eratio", + "el_deltaeta1", + "el_f1", + "el_f3", + "el_reta", + "el_rhad", + "el_rphi", + "el_trackd0pvunbiased", + "el_weta2", + "el_DeltaPoverP", + "el_deltaphiRescaled", + "el_TRT_PID" }; diff --git a/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.h b/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.h index 970810afb5e43bf2f0286f616202f3eb0606ff2a..f8b4137ab614ef94e3ef5213e233ded86ca833b9 100644 --- a/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.h +++ b/PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/Root/TElectronLikelihoodTool.h @@ -3,7 +3,6 @@ */ // Dear emacs, this is -*-c++-*- -//---------------------------------------------------------------------------------------- /** Author: Kurt Brendlinger <kurb@sas.upenn.edu> @@ -14,93 +13,29 @@ Includes the following operating points: "Medium" - Same signal efficiency as current (2012) mediumpp menu "VeryLoose" - Same background rejection as current (2012) multilepton menu "Loose" - Same signal efficiency as current (2012) multilepton menu - -Usage: -In order to compile this outside the Athena framework, you also need to get PATCore from svn. -You need to include the header where you want to use it: -#include "ElectronPhotonSelectorTools/TElectronLikelihoodTool.h" - -Then, before the event loop starts, you need to create an instance of this tool: -Root::TElectronLikelihoodTool* myElLHTool = new TElectronLikelihoodTool(); - -configure it: -myElLHTool->setPDFFileName( "path/to/package/data/ElectronLikelihoodPdfs.root" ); - -and initialize it: -myElLHTool->initialize(); - -To get the likelihood value for this electron, do: -double likelihood = double( myElLHTool->calculate(...) ); - -To see if an electron passes this selection (in this example, the "VeryLoose" selection), do: -bool passThisElectron = bool( myElLHTool->accept( likelihood, ... ) ); - -See below which variables you have to use. - - -In order to correctly apply the macro, you must use the following (d3pd) variables as inputs: -eta : el_etas2 -eT (in MeV) : energy-rescaled (using egammaAnalysisUtils' EnergyRescalerUpgrade) pt, where pt is defined in -twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/ElectronsEnergyDirection -so el_cl_pt if nSiHits < 4, or else -el_cl_E/cosh(el_etas2) THIS IS PROBABLY NOT CORRECT!!! Don't want to use rescaled energies. To be verified... -f3 : el_f3 -rHad : el_Ethad / pt (see above for pt) -rHad1 : el_Ethad1/ pt -Reta : el_reta -w2 : el_weta2 -f1 : el_f1 -wstot : el_wstot -eratio : (el_emaxs1+el_Emax2 == 0.) 
? 0. : (el_emaxs1-el_Emax2)/(el_emaxs1+el_Emax2) -(i.e. Eratio) -deltaEta : el_deltaeta1 -d0 : el_trackd0pvunbiased -TRratio : el_TRTHighTHitsRatio -eOverP : el_cl_E * el_trackqoverp (where el_cl_E is energy-rescaled) -deltaPhi : el_deltaphi2 -d0sigma : el_tracksigd0pvunbiased -rphi : el_rphi -nTRT : el_nTRTHits -nTRTOutliers : el_nTRTOutliers -nSi : el_nSiHits -nSiOutliers : el_nPixelOutliers + el_nSCTOutliers -nPix : el_nPixHits -nPixOutliers : el_nPixelOutliers -nBlayer : el_nBLHits -nBlayerOutliers : el_nBLayerOutliers -expectBlayer : el_expectHitInBLayer -nNextToInnerMostLayer : next to the inner most -nNextToInnerMostLayerOutliers : next to the inner most -expectNextToInnerMostLayer : next to the inner most -convBit : el_isEM & (0x1 << egammaPID::ConversionMatch_Electron) -ambiguityBit : cut on the ambiguity type -ip : Count number of vertices in vxp_n with >= 2 tracks in vxp_trk_n - -Created: +reated: June 2011 - */ -//------------------------------------------------------------------------------------------------------- #ifndef TELECTRONLIKELIHOODTOOL_H #define TELECTRONLIKELIHOODTOOL_H // Include the return objects and the base class #include "AsgTools/AsgMessaging.h" -#include "PATCore/AcceptInfo.h" #include "PATCore/AcceptData.h" +#include "PATCore/AcceptInfo.h" // -#include <string> // for string -#include <vector> // for vector #include "SafeTH1.h" +#include <string> // for string +#include <vector> // for vector class TFile; - -namespace{ -const unsigned int IP_BINS=1; +namespace { +const unsigned int IP_BINS = 1; } namespace LikeEnum { -struct LHAcceptVars_t{ +struct LHAcceptVars_t +{ double likelihood; double eta; double eT; @@ -113,27 +48,28 @@ struct LHAcceptVars_t{ double deltaphires; double wstot; double EoverP; - double ip; + double ip; }; -struct LHCalcVars_t{ +struct LHCalcVars_t +{ double eta; double eT; - double f3; - double rHad; + double f3; + double rHad; double rHad1; double Reta; - double w2; - double f1; + double w2; + double f1; double eratio; - double deltaEta; - double d0; + double deltaEta; + double d0; double d0sigma; - double rphi; + double rphi; double deltaPoverP; double deltaphires; double TRT_PID; - double ip; + double ip; }; } @@ -141,7 +77,7 @@ namespace Root { class TElectronLikelihoodTool : public asg::AsgMessaging { -public: +public: /// Standard constructor TElectronLikelihoodTool(const char* name = "TElectronLikelihoodTool"); @@ -159,62 +95,87 @@ public: /// The main accept method: the actual cuts are applied here asg::AcceptData accept(LikeEnum::LHAcceptVars_t& vars_struct) const; - asg::AcceptData accept( double likelihood, - double eta, double eT, - int nSiHitsPlusDeadSensors, int nPixHitsPlusDeadSensors, - bool passBLayerRequirement, - uint8_t ambiguityBit, double d0, double deltaEta, double deltaphires, - double wstot, double EoverP, double ip ) const; + asg::AcceptData accept(double likelihood, + double eta, + double eT, + int nSiHitsPlusDeadSensors, + int nPixHitsPlusDeadSensors, + bool passBLayerRequirement, + uint8_t ambiguityBit, + double d0, + double deltaEta, + double deltaphires, + double wstot, + double EoverP, + double ip) const; /** Return dummy accept with only info */ asg::AcceptData accept() const { return asg::AcceptData(&m_acceptInfo); } - double calculate(LikeEnum::LHCalcVars_t& vars_struct) const ; - double calculate( double eta, double eT,double f3, double rHad, double rHad1, - double Reta, double w2, double f1, double eratio, - double deltaEta, double d0, double d0sigma, double rphi, - double deltaPoverP ,double 
deltaphires, double TRT_PID, - double ip) const; - + double calculate(LikeEnum::LHCalcVars_t& vars_struct) const; + double calculate(double eta, + double eT, + double f3, + double rHad, + double rHad1, + double Reta, + double w2, + double f1, + double eratio, + double deltaEta, + double d0, + double d0sigma, + double rphi, + double deltaPoverP, + double deltaphires, + double TRT_PID, + double ip) const; /// Add an input file that holds the PDFs - inline void setPDFFileName ( const std::string& val ) { m_pdfFileName = val; } + inline void setPDFFileName(const std::string& val) { m_pdfFileName = val; } /// Define the variable names - inline void setVariableNames ( const std::string& val ) { - m_variableNames = val; + inline void setVariableNames(const std::string& val) + { + m_variableNames = val; m_variableBitMask = getLikelihoodBitmask(val); } /// Load the variable histograms from the pdf file. int loadVarHistograms(const std::string& vstr, unsigned int varIndex); - /// Define the binning - inline void setBinning ( const std::string& val ) { m_ipBinning = val; } + /// Define the binning + inline void setBinning(const std::string& val) { m_ipBinning = val; } - unsigned int getBitmask(void) const { return m_variableBitMask;} + unsigned int getBitmask(void) const { return m_variableBitMask; } inline void setBitmask(unsigned int val) { m_variableBitMask = val; }; // Private methods private: - // For every input "varVector", make sure elements of vector are // in the same order as prescribed in fVariables - /// Description??? - double evaluateLikelihood(std::vector<double> varVector,double et,double eta,double ip=0) const; + double evaluateLikelihood(const std::vector<double>& varVector, + double et, + double eta, + double ip = 0) const; - /// Description??? - double evaluateLikelihood(std::vector<float> varVector,double et,double eta,double ip=0) const; + double evaluateLikelihood(const std::vector<float>& varVector, + double et, + double eta, + double ip = 0) const; - - // To concoct a bitmask on your own, use the - // variable names prescribed in fVariables. - - /// Description??? 
unsigned int getLikelihoodBitmask(const std::string& vars) const; - double InterpolateCuts(const std::vector<double>& cuts,const std::vector<double>& cuts_4gev,double et,double eta) const; - double InterpolatePdfs(unsigned int s_or_b,unsigned int ipbin,double et,double eta,int bin,unsigned int var) const; + double InterpolateCuts(const std::vector<double>& cuts, + const std::vector<double>& cuts_4gev, + double et, + double eta) const; + double InterpolatePdfs(unsigned int s_or_b, + unsigned int ipbin, + double et, + double eta, + int bin, + unsigned int var) const; public: /** @brief cut min on b-layer hits*/ @@ -259,19 +220,26 @@ public: std::vector<double> m_cutLikelihoodPileupCorrection4GeV; /** @brief reference disc for very hard cut; used by pileup transform */ std::vector<double> m_discHardCutForPileupTransform; - /** @brief reference slope on disc for very hard cut; used by pileup transform */ + /** @brief reference slope on disc for very hard cut; used by pileup transform + */ std::vector<double> m_discHardCutSlopeForPileupTransform; - /** @brief reference quadratic apr on disc for very hard cut; used by centrality transform */ + /** @brief reference quadratic apr on disc for very hard cut; used by + * centrality transform */ std::vector<double> m_discHardCutQuadForPileupTransform; - /** @brief reference disc for a pileup independent loose menu; used by pileup transform */ + /** @brief reference disc for a pileup independent loose menu; used by pileup + * transform */ std::vector<double> m_discLooseForPileupTransform; - /** @brief reference disc for very hard cut; used by pileup transform - 4-7 GeV */ + /** @brief reference disc for very hard cut; used by pileup transform - 4-7 + * GeV */ std::vector<double> m_discHardCutForPileupTransform4GeV; - /** @brief reference slope on disc for very hard cut; used by pileup transform - 4-7 GeV */ + /** @brief reference slope on disc for very hard cut; used by pileup transform + * - 4-7 GeV */ std::vector<double> m_discHardCutSlopeForPileupTransform4GeV; - /** @brief reference quadratic par on disc for very hard cut; used by centrality transform - 4-7 GeV */ + /** @brief reference quadratic par on disc for very hard cut; used by + * centrality transform - 4-7 GeV */ std::vector<double> m_discHardCutQuadForPileupTransform4GeV; - /** @brief reference disc for a pileup independent loose menu; used by pileup transform - 4-7 GeV */ + /** @brief reference disc for a pileup independent loose menu; used by pileup + * transform - 4-7 GeV */ std::vector<double> m_discLooseForPileupTransform4GeV; /** @brief max discriminant for which pileup transform is to be used */ double m_discMaxForPileupTransform; @@ -282,37 +250,42 @@ public: /** Name of the pdf file*/ std::string m_pdfFileName; - // Private methods private: - /// Apply a transform to zoom into the LH output peaks. Optionally do pileup correction too - double TransformLikelihoodOutput(double ps,double pb, double ip, double et, double eta) const; + /// Apply a transform to zoom into the LH output peaks. Optionally do pileup + /// correction too + double TransformLikelihoodOutput(double ps, + double pb, + double ip, + double et, + double eta) const; /// Eta binning for pdfs and discriminant cuts. - unsigned int getLikelihoodEtaBin(double eta) const ; + unsigned int getLikelihoodEtaBin(double eta) const; /// Coarse Et binning. Used for the likelihood pdfs. - unsigned int getLikelihoodEtHistBin(double eT) const ; + unsigned int getLikelihoodEtHistBin(double eT) const; /// Fine Et binning. 
Used for the likelihood discriminant cuts. - unsigned int getLikelihoodEtDiscBin(double eT , const bool isLHbinning) const; + unsigned int getLikelihoodEtDiscBin(double eT, const bool isLHbinning) const; // Private member variables private: /// tool name - std::string m_name; + std::string m_name; /// Accept info - asg::AcceptInfo m_acceptInfo; + asg::AcceptInfo m_acceptInfo; - /// The bitmask corresponding to the variables in the likelihood. For internal use. - unsigned int m_variableBitMask; + /// The bitmask corresponding to the variables in the likelihood. For internal + /// use. + unsigned int m_variableBitMask; /// Deprecated. - std::string m_ipBinning; + std::string m_ipBinning; /// Pointer to the opened TFile that holds the PDFs - TFile* m_pdfFile; + TFile* m_pdfFile; /// The position of the kinematic cut bit in the AcceptInfo return object int m_cutPosition_kinematic; @@ -338,29 +311,41 @@ private: /// The position of the deltaeta cut bit in the AcceptInfo return object int m_cutPositionTrackMatchEta; - // /// The position of the deltaphi cut bit in the AcceptInfo return object + /// The position of the deltaphi cut bit in the AcceptInfo return object int m_cutPositionTrackMatchPhiRes; - // /// The position of the high ET wstot cut bit in the AcceptInfo return object + /// The position of the high ET wstot cut bit in the AcceptInfo return + /// object int m_cutPositionWstotAtHighET; - // /// The position of the high ET EoverP cut bit in the AcceptInfo return object + /// The position of the high ET EoverP cut bit in the AcceptInfo return + /// object int m_cutPositionEoverPAtHighET; - static const double fIpBounds[IP_BINS+1]; - static const unsigned int s_fnEtBinsHist = 7; // number of hists stored for original LH, including 4GeV bin (for backwards compatibility) - static const unsigned int s_fnDiscEtBins = 9; // number of discs stored for original LH, excluding 4GeV bin (for backwards compatibility) - static const unsigned int s_fnDiscEtBinsOneExtra = 10; // number of discs stored for original LH plus one for HighETBinThreshold (useOneExtraHighETLHBin), excluding 4GeV bin - static const unsigned int s_fnEtaBins = 10; - static const unsigned int s_fnVariables = 13; - EGSelectors::SafeTH1* fPDFbins [2][IP_BINS][s_fnEtBinsHist][s_fnEtaBins][s_fnVariables]; // [sig(0)/bkg(1)][ip][et][eta][variable] - static const std::string fVariables [s_fnVariables]; + static const double fIpBounds[IP_BINS + 1]; + // number of hists stored for original LH, including 4GeV bin (for backwards + // compatibility) + static const unsigned int s_fnEtBinsHist = 7; + // number of discs stored for original LH, excluding 4GeV bin (for + // backwards compatibility) + static const unsigned int s_fnDiscEtBins = 9; + // number of discs stored for original LH plus one for + // HighETBinThreshold (useOneExtraHighETLHBin), excluding 4GeV bin + static const unsigned int s_fnDiscEtBinsOneExtra = 10; + static const unsigned int s_fnEtaBins = 10; + static const unsigned int s_fnVariables = 13; + // 5D array of ptr to SafeTH1 // [sig(0)/bkg(1)][ip][et][eta][variable] + EGSelectors::SafeTH1* fPDFbins[2][IP_BINS][s_fnEtBinsHist][s_fnEtaBins][s_fnVariables]; + static const std::string fVariables[s_fnVariables]; unsigned int getIpBin(double ip) const; - void getBinName(char* buffer, int etbin,int etabin, int ipbin, const std::string& iptype) const; + void getBinName(char* buffer, + int etbin, + int etabin, + int ipbin, + const std::string& iptype) const; }; } // End: namespace Root 
-//---------------------------------------------------------------------------------------- #endif diff --git a/Reconstruction/MuonIdentification/MuidTrackBuilder/src/CombinedMuonTrackBuilder.cxx b/Reconstruction/MuonIdentification/MuidTrackBuilder/src/CombinedMuonTrackBuilder.cxx index 6b05614defd116d398e2e214293a2d51328cc728..bf23fea72aea1662a1191fa558f8cdc6d7c0e772 100755 --- a/Reconstruction/MuonIdentification/MuidTrackBuilder/src/CombinedMuonTrackBuilder.cxx +++ b/Reconstruction/MuonIdentification/MuidTrackBuilder/src/CombinedMuonTrackBuilder.cxx @@ -2212,46 +2212,45 @@ CombinedMuonTrackBuilder::standaloneRefit(const Trk::Track& combinedTrack, float return nullptr; } - if (refittedTrack) { - if (!refittedTrack->fitQuality()) { - delete refittedTrack; - delete vertex; - return nullptr; + //eventually this whole tool will use unique_ptrs + //in the meantime, this allows the MuonErrorOptimisationTool and MuonRefitTool to use them + std::unique_ptr<Trk::Track> refittedTrackUnique(refittedTrack); + if (refittedTrackUnique) { + if (!refittedTrackUnique->fitQuality()) { + delete vertex; + return nullptr; } - if (!m_trackQuery->isCaloAssociated(*refittedTrack)) { + if (!m_trackQuery->isCaloAssociated(*refittedTrackUnique)) { // fail as calo incorrectly described m_messageHelper->printWarning(28); - delete refittedTrack; delete vertex; return nullptr; } - countAEOTs(refittedTrack, " standaloneRefit final refittedTrack "); + countAEOTs(refittedTrackUnique.get(), " standaloneRefit final refittedTrack "); // fit with optimized spectrometer errors // this should also be inside the "if(refittedTrack) statement - if (!m_muonErrorOptimizer.empty() && !refittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack) - && countAEOTs(refittedTrack, " before optimize ") == 0) + if (!m_muonErrorOptimizer.empty() && !refittedTrackUnique->info().trackProperties(Trk::TrackInfo::StraightTrack) + && countAEOTs(refittedTrackUnique.get(), " before optimize ") == 0) { ATH_MSG_VERBOSE(" perform spectrometer error optimization after cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*refittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(refittedTrackUnique.get()); if (optimizedTrack) { - if (checkTrack("standaloneRefitOpt", optimizedTrack, refittedTrack)) { - delete refittedTrack; - refittedTrack = optimizedTrack; - countAEOTs(refittedTrack, " standaloneRefit alignment errors Track "); - } else { - delete optimizedTrack; - } + if (checkTrack("standaloneRefitOpt", optimizedTrack.get(), refittedTrackUnique.get())) { + refittedTrackUnique.swap(optimizedTrack); + countAEOTs(refittedTrackUnique.get(), " standaloneRefit alignment errors Track "); + } } } } delete vertex; - return refittedTrack; + //have to release it until the whole tool is migrated to unique_ptr + return refittedTrackUnique.release(); } /** refit a track */ @@ -2381,42 +2380,42 @@ CombinedMuonTrackBuilder::fit(Trk::Track& track, const Trk::RunOutlierRemoval ru return nullptr; } + //eventually this whole tool will use unique_ptrs + //in the meantime, this allows the MuonErrorOptimisationTool and MuonRefitTool to use them + std::unique_ptr<Trk::Track> fittedTrackUnique(fittedTrack); // track cleaning if (runOutlier) { - // fit with optimized spectrometer errors + // fit with optimized spectrometer errors - if (!m_muonErrorOptimizer.empty() && !fittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack) - && optimizeErrors(fittedTrack)) + if 
(!m_muonErrorOptimizer.empty() && !fittedTrackUnique->info().trackProperties(Trk::TrackInfo::StraightTrack) + && optimizeErrors(fittedTrackUnique.get())) { ATH_MSG_VERBOSE(" perform spectrometer error optimization after cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrackUnique.get()); if (optimizedTrack) { - if (checkTrack("fitInterface1Opt", optimizedTrack, fittedTrack)) { - delete fittedTrack; - fittedTrack = optimizedTrack; - countAEOTs(fittedTrack, " re fit scaled errors Track "); - } else { - delete optimizedTrack; - } + if (checkTrack("fitInterface1Opt", optimizedTrack.get(), fittedTrackUnique.get())) { + fittedTrackUnique.swap(optimizedTrack); + countAEOTs(fittedTrackUnique.get(), " re fit scaled errors Track "); + } } } // chi2 before clean - double chi2Before = normalizedChi2(*fittedTrack); + double chi2Before = normalizedChi2(*fittedTrackUnique); // muon cleaner - ATH_MSG_VERBOSE(" perform track cleaning... " << m_printer->print(*fittedTrack) << std::endl - << m_printer->printStations(*fittedTrack)); + ATH_MSG_VERBOSE(" perform track cleaning... " << m_printer->print(*fittedTrackUnique) << std::endl + << m_printer->printStations(*fittedTrackUnique)); - if (fittedTrack) countAEOTs(fittedTrack, " refit: fitted track before cleaning "); + if (fittedTrackUnique) countAEOTs(fittedTrackUnique.get(), " refit: fitted track before cleaning "); - std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrack); + std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrackUnique); if (cleanTrack) countAEOTs(cleanTrack.get(), " refit: after cleaning"); - if (cleanTrack && !checkTrack("fitInterface1Cleaner", cleanTrack.get(), fittedTrack)) { + if (cleanTrack && !checkTrack("fitInterface1Cleaner", cleanTrack.get(), fittedTrackUnique.get())) { cleanTrack.reset(); } @@ -2424,32 +2423,30 @@ CombinedMuonTrackBuilder::fit(Trk::Track& track, const Trk::RunOutlierRemoval ru if (m_allowCleanerVeto && chi2Before > m_badFitChi2) { ATH_MSG_DEBUG(" cleaner veto A "); ++m_countStandaloneCleanerVeto; - delete fittedTrack; - fittedTrack = nullptr; + fittedTrackUnique.reset(); } else { ATH_MSG_DEBUG(" keep original standalone track despite cleaner veto "); } - } else if (!(*cleanTrack->perigeeParameters() == *fittedTrack->perigeeParameters())) { + } else if (!(*cleanTrack->perigeeParameters() == *fittedTrackUnique->perigeeParameters())) { double chi2After = normalizedChi2(*cleanTrack); if (chi2After < m_badFitChi2 || chi2After < chi2Before) { ATH_MSG_VERBOSE(" found and removed spectrometer outlier(s) "); - delete fittedTrack; - // using release until the entire code can be migrated to use smart pointers - fittedTrack = cleanTrack.release(); + fittedTrackUnique.swap(cleanTrack); } else { ATH_MSG_VERBOSE(" keep original track despite cleaning "); } } // FIXME: provide indet cleaner - if (fittedTrack) { - ATH_MSG_VERBOSE(" finished track cleaning... " << m_printer->print(*fittedTrack) << std::endl - << m_printer->printStations(*fittedTrack)); + if (fittedTrackUnique) { + ATH_MSG_VERBOSE(" finished track cleaning... 
" << m_printer->print(*fittedTrackUnique) << std::endl + << m_printer->printStations(*fittedTrackUnique)); } } - return fittedTrack; + //have to use release until the whole tool uses unique_ptr + return fittedTrackUnique.release(); } /** @@ -2519,40 +2516,40 @@ CombinedMuonTrackBuilder::fit(const Trk::MeasurementSet& measurementSet, const T return nullptr; } + //eventually this whole tool will use unique_ptrs + //in the meantime, this allows the MuonErrorOptimisationTool and MuonRefitTool to use them + std::unique_ptr<Trk::Track> fittedTrackUnique(fittedTrack); // track cleaning if (runOutlier) { // fit with optimized spectrometer errors - if (!m_muonErrorOptimizer.empty() && !fittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack) - && optimizeErrors(fittedTrack)) + if (!m_muonErrorOptimizer.empty() && !fittedTrackUnique->info().trackProperties(Trk::TrackInfo::StraightTrack) + && optimizeErrors(fittedTrackUnique.get())) { ATH_MSG_VERBOSE(" perform spectrometer error optimization after cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrackUnique.get()); if (optimizedTrack) { - if (checkTrack("fitInterface2Opt", optimizedTrack, fittedTrack)) { - delete fittedTrack; - fittedTrack = optimizedTrack; - countAEOTs(fittedTrack, " fit mstSet scaled errors Track "); - } else { - delete optimizedTrack; - } + if (checkTrack("fitInterface2Opt", optimizedTrack.get(), fittedTrackUnique.get())) { + fittedTrackUnique.swap(optimizedTrack); + countAEOTs(fittedTrackUnique.get(), " fit mstSet scaled errors Track "); + } } } // chi2 before clean - double chi2Before = normalizedChi2(*fittedTrack); + double chi2Before = normalizedChi2(*fittedTrackUnique); // muon cleaner ATH_MSG_VERBOSE(" perform track cleaning... 
"); - if (fittedTrack) countAEOTs(fittedTrack, " fit mstSet before cleaning "); + if (fittedTrackUnique) countAEOTs(fittedTrackUnique.get(), " fit mstSet before cleaning "); - std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrack); + std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrackUnique); if (cleanTrack) countAEOTs(cleanTrack.get(), " fit mstSet clean Track "); - if (cleanTrack && !checkTrack("fitInterface2Cleaner", cleanTrack.get(), fittedTrack)) { + if (cleanTrack && !checkTrack("fitInterface2Cleaner", cleanTrack.get(), fittedTrackUnique.get())) { cleanTrack.reset(); } @@ -2560,19 +2557,15 @@ CombinedMuonTrackBuilder::fit(const Trk::MeasurementSet& measurementSet, const T if (m_allowCleanerVeto && chi2Before > m_badFitChi2) { ATH_MSG_DEBUG(" cleaner veto B"); ++m_countExtensionCleanerVeto; - delete fittedTrack; - fittedTrack = nullptr; + fittedTrackUnique.reset(); } else { ATH_MSG_DEBUG(" keep original extension track despite cleaner veto "); } - } else if (!(*cleanTrack->perigeeParameters() == *fittedTrack->perigeeParameters())) { + } else if (!(*cleanTrack->perigeeParameters() == *fittedTrackUnique->perigeeParameters())) { double chi2After = normalizedChi2(*cleanTrack); if (chi2After < m_badFitChi2 || chi2After < chi2Before) { ATH_MSG_VERBOSE(" found and removed spectrometer outlier(s) "); - - delete fittedTrack; - // using release until the entire code can be migrated to use smart pointers - fittedTrack = cleanTrack.release(); + fittedTrackUnique.swap(cleanTrack); } else { ATH_MSG_VERBOSE(" keep original track despite cleaning "); } @@ -2581,8 +2574,8 @@ CombinedMuonTrackBuilder::fit(const Trk::MeasurementSet& measurementSet, const T // FIXME: provide indet cleaner ATH_MSG_VERBOSE(" finished cleaning"); } - - return fittedTrack; + //have to use release until the whole code uses unique_ptr + return fittedTrackUnique.release(); } @@ -2644,34 +2637,37 @@ CombinedMuonTrackBuilder::fit(const Trk::Track& indetTrack, Trk::Track& extrapol if (!fittedTrack) return nullptr; + //eventually this whole tool will use unique_ptrs + //in the meantime, this allows the MuonErrorOptimisationTool and MuonRefitTool to use them + std::unique_ptr<Trk::Track> fittedTrackUnique(fittedTrack); + // track cleaning if (runOutlier) { // fit with optimized spectrometer errors if (!m_muonErrorOptimizer.empty() && !fittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack) - && optimizeErrors(fittedTrack)) + && optimizeErrors(fittedTrackUnique.get())) { ATH_MSG_VERBOSE(" perform spectrometer error optimization after cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrackUnique.get()); if (optimizedTrack) { - delete fittedTrack; - fittedTrack = optimizedTrack; - countAEOTs(fittedTrack, " cbfit scaled errors Track "); + fittedTrackUnique.swap(optimizedTrack); + countAEOTs(fittedTrackUnique.get(), " cbfit scaled errors Track "); } } // chi2 before clean - double chi2Before = normalizedChi2(*fittedTrack); + double chi2Before = normalizedChi2(*fittedTrackUnique.get()); // muon cleaner - ATH_MSG_VERBOSE(" perform track cleaning... " << m_printer->print(*fittedTrack) << std::endl - << m_printer->printStations(*fittedTrack)); + ATH_MSG_VERBOSE(" perform track cleaning... 
" << m_printer->print(*fittedTrackUnique) << std::endl + << m_printer->printStations(*fittedTrackUnique)); - if (fittedTrack) { - countAEOTs(fittedTrack, " cb before clean Track "); + if (fittedTrackUnique) { + countAEOTs(fittedTrackUnique.get(), " cb before clean Track "); } - std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrack); + std::unique_ptr<Trk::Track> cleanTrack = m_cleaner->clean(*fittedTrackUnique); if (cleanTrack) { countAEOTs(cleanTrack.get(), " cb after clean Track "); } @@ -2680,19 +2676,15 @@ CombinedMuonTrackBuilder::fit(const Trk::Track& indetTrack, Trk::Track& extrapol if (m_allowCleanerVeto && chi2Before > m_badFitChi2) { ATH_MSG_DEBUG(" cleaner veto C"); ++m_countCombinedCleanerVeto; - delete fittedTrack; - fittedTrack = nullptr; + fittedTrackUnique.reset(); } else { ATH_MSG_DEBUG(" keep original combined track despite cleaner veto "); } - } else if (!(*cleanTrack->perigeeParameters() == *fittedTrack->perigeeParameters())) { + } else if (!(*cleanTrack->perigeeParameters() == *fittedTrackUnique->perigeeParameters())) { double chi2After = normalizedChi2(*cleanTrack); if (chi2After < m_badFitChi2 || chi2After < chi2Before) { ATH_MSG_VERBOSE(" found and removed spectrometer outlier(s) "); - - delete fittedTrack; - // using release until the entire code can be migrated to use smart pointers - fittedTrack = cleanTrack.release(); + fittedTrackUnique.swap(cleanTrack); } else { ATH_MSG_VERBOSE(" keep original track despite cleaning "); } @@ -2701,8 +2693,8 @@ CombinedMuonTrackBuilder::fit(const Trk::Track& indetTrack, Trk::Track& extrapol // FIXME: provide indet cleaner ATH_MSG_VERBOSE(" finished cleaning"); } - - return fittedTrack; + //have to use release until the whole code uses unique_ptr + return fittedTrackUnique.release(); } /* private methods follow */ @@ -4285,21 +4277,25 @@ CombinedMuonTrackBuilder::finalTrackBuild(Trk::Track*& track) const ATH_MSG_VERBOSE(" finished hole recovery procedure "); } + //eventually this whole tool will use unique_ptrs + //in the meantime, this allows the MuonErrorOptimisationTool and MuonRefitTool to use them + std::unique_ptr<Trk::Track> trackUnique(track); // final fit with optimized spectrometer errors - if (!m_muonErrorOptimizer.empty() && !track->info().trackProperties(Trk::TrackInfo::StraightTrack) - && countAEOTs(track, " before optimize ") == 0) + if (!m_muonErrorOptimizer.empty() && !trackUnique->info().trackProperties(Trk::TrackInfo::StraightTrack) + && countAEOTs(trackUnique.get(), " before optimize ") == 0) { ATH_MSG_VERBOSE(" perform spectrometer error optimization... 
"); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*track); - if (optimizedTrack && checkTrack("finalTrackBuild2", optimizedTrack, track)) { - delete track; - track = optimizedTrack; - countAEOTs(track, " finalTrackBuilt alignment errors Track "); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(trackUnique.get()); + if (optimizedTrack && checkTrack("finalTrackBuild2", optimizedTrack.get(), trackUnique.get())) { + trackUnique.swap(optimizedTrack); + countAEOTs(track, " finalTrackBuilt alignment errors Track "); } } // add the track summary - m_trackSummary->updateTrack(*track); + m_trackSummary->updateTrack(*trackUnique); + //have to use release until the whole code uses unique_ptr + track=trackUnique.release(); } Trk::Track* diff --git a/Reconstruction/MuonIdentification/MuidTrackBuilder/src/OutwardsCombinedMuonTrackBuilder.cxx b/Reconstruction/MuonIdentification/MuidTrackBuilder/src/OutwardsCombinedMuonTrackBuilder.cxx index ee5260697a83b281de418d4850c13f2242af5946..516bbe8ffe6231ec465a5e874cbf767f9974bb4e 100755 --- a/Reconstruction/MuonIdentification/MuidTrackBuilder/src/OutwardsCombinedMuonTrackBuilder.cxx +++ b/Reconstruction/MuonIdentification/MuidTrackBuilder/src/OutwardsCombinedMuonTrackBuilder.cxx @@ -386,10 +386,11 @@ OutwardsCombinedMuonTrackBuilder::fit(Trk::Track& track, const Trk::RunOutlierRe if (!m_muonErrorOptimizer.empty() && !fittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack)) { ATH_MSG_VERBOSE(" perform spectrometer error optimization before cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrack); if (optimizedTrack) { delete fittedTrack; - fittedTrack = optimizedTrack; + //until code is updated to use unique_ptr or removed + fittedTrack = optimizedTrack.release(); } } @@ -467,10 +468,11 @@ OutwardsCombinedMuonTrackBuilder::fit(const Trk::Track& indetTrack, const Trk::T // fit with optimized spectrometer errors if (!m_muonErrorOptimizer.empty() && !fittedTrack->info().trackProperties(Trk::TrackInfo::StraightTrack)) { ATH_MSG_VERBOSE(" perform spectrometer error optimization before cleaning "); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrack); if (optimizedTrack) { delete fittedTrack; - fittedTrack = optimizedTrack; + //until code is updated to use unique_ptr or removed + fittedTrack = optimizedTrack.release(); } } // muon cleaner @@ -580,10 +582,11 @@ OutwardsCombinedMuonTrackBuilder::fit(const Trk::Track& indetTrack, const Trk::T ATH_MSG_VERBOSE(" perform spectrometer error optimization... 
"); - Trk::Track* optimizedTrack = m_muonErrorOptimizer->optimiseErrors(*fittedTrack); + std::unique_ptr<Trk::Track> optimizedTrack = m_muonErrorOptimizer->optimiseErrors(fittedTrack); if (optimizedTrack) { delete fittedTrack; - fittedTrack = optimizedTrack; + //until the code uses unique ptrs (or is removed since it's deprecated) + fittedTrack = optimizedTrack.release(); } } return fittedTrack; diff --git a/Tools/PyUtils/python/MetaReader.py b/Tools/PyUtils/python/MetaReader.py index 3d6992790a3104f43876cd607c0f1139bb2770ba..ee7fb225fcc45ccc427ec4813528c517f8ef2d28 100644 --- a/Tools/PyUtils/python/MetaReader.py +++ b/Tools/PyUtils/python/MetaReader.py @@ -13,6 +13,8 @@ regexEventStreamInfo = re.compile(r'^EventStreamInfo(_p\d+)?$') regexIOVMetaDataContainer = re.compile(r'^IOVMetaDataContainer(_p\d+)?$') regexByteStreamMetadataContainer = re.compile(r'^ByteStreamMetadataContainer(_p\d+)?$') regexXAODEventFormat = re.compile(r'^xAOD::EventFormat(_v\d+)?$') +regexXAODTriggerMenu = re.compile(r'^DataVector<xAOD::TriggerMenu(_v\d+)?>$') +regexXAODTriggerMenuAux = re.compile(r'^xAOD::TriggerMenuAuxContainer(_v\d+)?$') regex_cppname = re.compile(r'^([\w:]+)(<.*>)?$') # regex_persistent_class = re.compile(r'^([a-zA-Z]+_p\d+::)*[a-zA-Z]+_p\d+$') regex_persistent_class = re.compile(r'^([a-zA-Z]+(_[pv]\d+)?::)*[a-zA-Z]+_[pv]\d+$') @@ -146,6 +148,8 @@ def read_metadata(filenames, file_type = None, mode = 'lite', promote = None, me '/Simulation/Parameters': 'IOVMetaDataContainer_p1', '/Digitization/Parameters': 'IOVMetaDataContainer_p1', '/EXT/DCS/MAGNETS/SENSORDATA': 'IOVMetaDataContainer_p1', + 'TriggerMenu': 'DataVector<xAOD::TriggerMenu_v1>', + 'TriggerMenuAux.': 'xAOD::TriggerMenuAuxContainer_v1', '*': 'EventStreamInfo_p*' } @@ -158,6 +162,7 @@ def read_metadata(filenames, file_type = None, mode = 'lite', promote = None, me branch = metadata_branches.At(i) name = branch.GetName() + class_name = branch.GetClassName() if regexIOVMetaDataContainer.match(class_name): @@ -192,6 +197,10 @@ def read_metadata(filenames, file_type = None, mode = 'lite', promote = None, me persistent_instances[name] = ROOT.IOVMetaDataContainer_p1() elif regexXAODEventFormat.match(class_name): persistent_instances[name] = ROOT.xAOD.EventFormat_v1() + elif regexXAODTriggerMenu.match(class_name): + persistent_instances[name] = ROOT.xAOD.TriggerMenuContainer_v1() + elif regexXAODTriggerMenuAux.match(class_name): + persistent_instances[name] = ROOT.xAOD.TriggerMenuAuxContainer_v1() if name in persistent_instances: branch.SetAddress(ROOT.AddressOf(persistent_instances[name])) @@ -209,8 +218,13 @@ def read_metadata(filenames, file_type = None, mode = 'lite', promote = None, me if hasattr(content, 'm_folderName'): key = getattr(content, 'm_folderName') - meta_dict[filename][key] = _convert_value(content) + aux = None + if key == 'TriggerMenu' and 'TriggerMenuAux.' 
in persistent_instances: + aux = persistent_instances['TriggerMenuAux.'] + elif key == 'TriggerMenuAux.': + continue + meta_dict[filename][key] = _convert_value(content, aux) # This is a required workaround which will temporarily be fixing ATEAM-560 originated from ATEAM-531 @@ -423,7 +437,7 @@ def _extract_fields(obj): return result -def _convert_value(value): +def _convert_value(value, aux = None): if hasattr(value, '__cppname__'): result = regex_cppname.match(value.__cppname__) @@ -452,6 +466,9 @@ def _convert_value(value): elif value.__cppname__ == 'xAOD::EventFormat_v1': return _extract_fields_ef(value) + elif value.__cppname__ == 'DataVector<xAOD::TriggerMenu_v1>' : + return _extract_fields_triggermenu(interface=value, aux=aux) + elif (value.__cppname__ == 'EventStreamInfo_p2' or value.__cppname__ == 'EventStreamInfo_p3'): return _extract_fields_esi(value) @@ -576,6 +593,29 @@ def _extract_fields_ef(value): return result +def _extract_fields_triggermenu(interface, aux): + L1Items = [] + HLTChains = [] + + try: + interface.setStore( aux ) + if interface.size() > 0: + # We make the assumption that the first stored SMK is + # representative of all events in the input collection. + firstMenu = interface.at(0) + L1Items = [ item for item in firstMenu.itemNames() ] + HLTChains = [ chain for chain in firstMenu.chainNames() ] + except Exception as err: + msg.warn('Problem reading xAOD::TriggerMenu:') + msg.warn(err) + + result = {} + result['L1Items'] = L1Items + result['HLTChains'] = HLTChains + + return result + + def _convert_event_type_bitmask(value): types = None diff --git a/Tracking/TrkExtrapolation/TrkExTools/TrkExTools/Navigator.h b/Tracking/TrkExtrapolation/TrkExTools/TrkExTools/Navigator.h index ce3a1fbb1d7bce42f0d308b786ae82bbfdc4b798..b06cd208d6cf3d24cbce2f0c595831365a0c2136 100755 --- a/Tracking/TrkExtrapolation/TrkExTools/TrkExTools/Navigator.h +++ b/Tracking/TrkExtrapolation/TrkExTools/TrkExTools/Navigator.h @@ -19,13 +19,16 @@ #include "TrkVolumes/BoundarySurface.h" #include "TrkGeometry/MagneticFieldProperties.h" #include "TrkParameters/TrackParameters.h" +#include "StoreGate/ReadCondHandleKey.h" +#include "TrkGeometry/TrackingGeometry.h" + + // STD #include <cstring> #include <exception> #include <Gaudi/Accumulators.h> -#include "CxxUtils/checker_macros.h" namespace Trk { class ITrackingGeometrySvc; @@ -34,7 +37,7 @@ namespace Trk { class NavigatorException : public std::exception { const char* what() const throw() - { return "Problem with TrackingGeometry loading"; } + { return "Problem with TrackingGeometry loading"; } }; class IGeometryBuilder; @@ -42,24 +45,22 @@ namespace Trk { class Surface; class Track; class TrackingVolume; - class TrackingGeometry; - - typedef std::pair<const NavigationCell*,const NavigationCell*> NavigationPair; + typedef std::pair<const NavigationCell*,const NavigationCell*> NavigationPair; - /** + /** @class Navigator - + Main AlgTool for Navigation in the TrkExtrapolation realm : - It retrieves the TrackingGeometry from the DetectorStore - as the reference Geometry. + It retrieves the TrackingGeometry from the DetectorStore + as the reference Geometry. - There's an experimental possibility to use a straightLineApproximation for the + There's an experimental possibility to use a straightLineApproximation for the Navigation. This is unstable due to wrong cylinder intersections. 
@author Andreas.Salzburger@cern.ch */ - class ATLAS_NOT_THREAD_SAFE Navigator : public AthAlgTool, + class Navigator : public AthAlgTool, virtual public INavigator { public: /** Constructor */ @@ -148,37 +149,43 @@ namespace Trk { double& path) const override final; private: - /* + /* * Methods to be overriden by the NavigatorValidation */ virtual void validationInitialize() {} virtual void validationFill(const Trk::TrackParameters* trackPar) const{ (void)trackPar; - } - - void updateTrackingGeometry() const; - - - bool m_validationMode; //!<This becomes a dummy option for now - /* - **************************************************************** - * According to Goetz Gaycken this needs special attention marking as - * @TODO replace by conditions handle. - */ - mutable const TrackingGeometry* m_trackingGeometry; //!< the tracking geometry owned by the navigator - ServiceHandle<Trk::ITrackingGeometrySvc> m_trackingGeometrySvc; //!< ToolHandle to the TrackingGeometrySvc - std::string m_trackingGeometryName; //!< Name of the TrackingGeometry as given in Detector Store - /******************************************************************/ - double m_insideVolumeTolerance; //!< Tolerance for inside() method of Volumes - double m_isOnSurfaceTolerance; //!< Tolerance for isOnSurface() method of BoundarySurfaces - bool m_useStraightLineApproximation; //!< use the straight line approximation for the next boundary sf - bool m_searchWithDistance; //!< search with new distanceToSurface() method + } + + SG::ReadCondHandleKey<TrackingGeometry> m_trackingGeometryReadKey{ + this, + "TrackingGeometryKey", + "", + "Key of output of TrackingGeometry for ID" + }; + + /// ToolHandle to the TrackingGeometrySvc + ServiceHandle<Trk::ITrackingGeometrySvc> m_trackingGeometrySvc; + /// Name of the TrackingGeometry as given in Detector Store + std::string m_trackingGeometryName; + /******************************************************************/ + /// Tolerance for inside() method of Volumes + double m_insideVolumeTolerance; + /// Tolerance for isOnSurface() method of BoundarySurfaces + double m_isOnSurfaceTolerance; + bool m_useConditions; + Trk::MagneticFieldProperties m_fieldProperties; + /// use the straight line approximation for the next boundary sf + bool m_useStraightLineApproximation; + /// search with new distanceToSurface() method + bool m_searchWithDistance; //------------ Magnetic field properties - bool m_fastField; - Trk::MagneticFieldProperties m_fieldProperties; + bool m_fastField; + bool m_validationMode; //!< This becomes a dummy option for now // ------ PERFORMANCE STATISTICS -------------------------------- // - /* All performance stat counters are atomic (the simplest solution perhaps not the most performant one)*/ + /* All performance stat counters are atomic (the simplest solution perhaps + * not the most performant one)*/ mutable Gaudi::Accumulators::Counter<int> m_forwardCalls; //!< counter for forward nextBounday calls mutable Gaudi::Accumulators::Counter<int> m_forwardFirstBoundSwitch; //!< counter for failed first forward nextBounday calls mutable Gaudi::Accumulators::Counter<int> m_forwardSecondBoundSwitch; //!< counter for failed second forward nextBounday calls @@ -188,7 +195,7 @@ namespace Trk { mutable Gaudi::Accumulators::Counter<int> m_backwardSecondBoundSwitch; //!< counter for failed second backward nextBounday calls mutable Gaudi::Accumulators::Counter<int> m_backwardThirdBoundSwitch; //!< counter for failed third backward nextBounday calls mutable 
Gaudi::Accumulators::Counter<int> m_outsideVolumeCase; //!< counter for navigation-break in outside volume cases (ovc) - mutable Gaudi::Accumulators::Counter<int> m_sucessfulBackPropagation; //!< counter for sucessful recovery of navigation-break in ovc + mutable Gaudi::Accumulators::Counter<int> m_sucessfulBackPropagation; //!< counter for sucessful recovery of navigation-break in ovc }; } // end of namespace diff --git a/Tracking/TrkExtrapolation/TrkExTools/src/Navigator.cxx b/Tracking/TrkExtrapolation/TrkExTools/src/Navigator.cxx index 01d804dd48954776e4ec1af5d5d3ebf8bac91bcf..e2042bf2915fc9df8e1be6933f832e3455bd4ad2 100755 --- a/Tracking/TrkExtrapolation/TrkExTools/src/Navigator.cxx +++ b/Tracking/TrkExtrapolation/TrkExTools/src/Navigator.cxx @@ -42,8 +42,6 @@ const Trk::MagneticFieldProperties s_zeroMagneticField(Trk::NoField); // constructor Trk::Navigator::Navigator(const std::string &t, const std::string &n, const IInterface *p) : AthAlgTool(t, n, p), - m_validationMode(false), - m_trackingGeometry(nullptr), m_trackingGeometrySvc("AtlasTrackingGeometrySvc", n), m_trackingGeometryName("AtlasTrackingGeometry"), m_insideVolumeTolerance(1. * Gaudi::Units::mm), @@ -51,6 +49,7 @@ Trk::Navigator::Navigator(const std::string &t, const std::string &n, const IInt m_useStraightLineApproximation(false), m_searchWithDistance(true), m_fastField(false), + m_validationMode(false), m_forwardCalls{}, m_forwardFirstBoundSwitch{}, m_forwardSecondBoundSwitch{}, @@ -83,41 +82,44 @@ Trk::Navigator::~Navigator() { // initialize StatusCode Trk::Navigator::initialize() { - // Initialize StatusCode - //StatusCode s = StatusCode::SUCCESS; + + + //We can use conditions when the key is not empty + m_useConditions=!m_trackingGeometryReadKey.key().empty(); // get the TrackingGeometrySvc - if (m_trackingGeometrySvc.retrieve().isSuccess()) { - ATH_MSG_INFO("Successfully retrieved " << m_trackingGeometrySvc); - m_trackingGeometryName = m_trackingGeometrySvc->trackingGeometryName(); - } else { - ATH_MSG_WARNING("Couldn't retrieve " << m_trackingGeometrySvc << ". "); - ATH_MSG_WARNING(" -> Trying to retrieve default '" << m_trackingGeometryName << "' from DetectorStore."); + if (!m_useConditions) { + if (m_trackingGeometrySvc.retrieve().isSuccess()) { + ATH_MSG_INFO("Successfully retrieved " << m_trackingGeometrySvc); + m_trackingGeometryName = m_trackingGeometrySvc->trackingGeometryName(); + } else { + ATH_MSG_WARNING("Couldn't retrieve " << m_trackingGeometrySvc << ". "); + ATH_MSG_WARNING(" -> Trying to retrieve default '" + << m_trackingGeometryName << "' from DetectorStore."); + } } - validationInitialize(); + ATH_CHECK(m_trackingGeometryReadKey.initialize(m_useConditions)); - m_fieldProperties = m_fastField ? Trk::MagneticFieldProperties(Trk::FastField) : - Trk::MagneticFieldProperties(Trk::FullField); + m_fieldProperties = m_fastField + ? 
Trk::MagneticFieldProperties(Trk::FastField) + : Trk::MagneticFieldProperties(Trk::FullField); + //This is no-op for the Navigator only relevant for + //derivated Validation for now + validationInitialize(); ATH_MSG_DEBUG("initialize() successful"); return StatusCode::SUCCESS; } const Trk::TrackingVolume * Trk::Navigator::volume(const Amg::Vector3D &gp) const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } - return(m_trackingGeometry->lowestTrackingVolume(gp)); + return(trackingGeometry()->lowestTrackingVolume(gp)); } const Trk::TrackingVolume * Trk::Navigator::highestVolume() const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } - return(m_trackingGeometry->highestTrackingVolume()); + return(trackingGeometry()->highestTrackingVolume()); } const Trk::BoundarySurface<Trk::TrackingVolume>* @@ -141,9 +143,6 @@ Trk::Navigator::nextBoundarySurface(const EventContext& ctx, Trk::PropDirection dir, const Trk::TrackingVolume& vol) const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } // get the surface accessor Trk::ObjectAccessor surfAcc = vol.boundarySurfaceAccessor( parms.position(), dir * parms.momentum().normalized()); @@ -210,9 +209,6 @@ Trk::Navigator::nextTrackingVolume(const EventContext& ctx, Trk::PropDirection dir, const Trk::TrackingVolume& vol) const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } // --------------------------------------------------- if (dir == Trk::alongMomentum) { @@ -271,11 +267,11 @@ Trk::Navigator::nextTrackingVolume(const EventContext& ctx, << surface_id << " of Volume: '" << vol.volumeName() << "' NOT FOUND."); continue; - } + } ATH_MSG_VERBOSE(" [N] " << tryBoundary << ". try - BoundarySurface " << surface_id << " of Volume: '" << vol.volumeName() << "'."); - + const Trk::Surface& currentSurface = currentBoundary->surfaceRepresentation(); @@ -355,9 +351,6 @@ Trk::Navigator::nextDenseTrackingVolume( ATH_MSG_DEBUG(" [N] nextDenseTrackingVolume() to volume '" << vol.volumeName() << "', starting from " << parms.position()); - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } Trk::NavigationCell solution(nullptr, nullptr); const Trk::TrackParameters *currPar = &parms; @@ -370,7 +363,7 @@ Trk::Navigator::nextDenseTrackingVolume( if (atVolumeBoundary(currPar, &vol, dir, nextVolume, tol) && nextVolume != (&vol)) { if (!nextVolume) { const Amg::Vector3D& gp = currPar->position(); - const Trk::TrackingVolume *currStatic = m_trackingGeometry->lowestStaticTrackingVolume(gp); + const Trk::TrackingVolume *currStatic = trackingGeometry()->lowestStaticTrackingVolume(gp); if (&vol != currStatic) { nextVolume = currStatic; } @@ -414,7 +407,7 @@ Trk::Navigator::nextDenseTrackingVolume( } else if (atVolumeBoundary(nextPar, &vol, dir, nextVolume, tol)) { if (!nextVolume) { // detached volume boundary or world boundary : resolve - const Trk::TrackingVolume *currStatic = m_trackingGeometry->lowestStaticTrackingVolume(gp); + const Trk::TrackingVolume *currStatic = trackingGeometry()->lowestStaticTrackingVolume(gp); if (&vol != currStatic) { nextVolume = currStatic; } @@ -482,14 +475,10 @@ Trk::Navigator::closestParameters(const Trk::Track& trk, const Trk::Surface& sf, const Trk::IPropagator* propptr) const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); - } // -- corresponds to Extrapolator::m_searchLevel = 2/3 - search with Propagation if (propptr && !m_searchWithDistance) { const Trk::TrackParameters *closestTrackParameters = nullptr; - // const Trk::TrackingVolume* highestVolume = 
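The Navigator migration in this file replaces the cached, mutable TrackingGeometry pointer with an on-demand lookup: initialize() decides once whether a conditions key is configured (m_useConditions is set from the read handle key), and the trackingGeometry() accessor rewritten further below retrieves either from the DetectorStore (legacy path) or via a conditions read handle, throwing NavigatorException on failure. The sketch below illustrates that select-once, retrieve-per-call idea; GeometrySource, LegacyStore and ConditionsStore are hypothetical stand-ins, not ATLAS classes.

// Hypothetical stand-ins for illustration only; not the ATLAS StoreGate/Trk classes.
#include <stdexcept>
#include <string>
#include <utility>

struct TrackingGeometry {};

// stand-in for the legacy DetectorStore retrieval by name
struct LegacyStore {
  const TrackingGeometry* retrieve(const std::string& /*name*/) const { return &m_geo; }
  TrackingGeometry m_geo;
};

// stand-in for a conditions-store lookup keyed by a configured string
struct ConditionsStore {
  const TrackingGeometry* retrieve(const std::string& key) const {
    return key.empty() ? nullptr : &m_geo;
  }
  TrackingGeometry m_geo;
};

class GeometrySource {
public:
  GeometrySource(std::string condKey, const LegacyStore& legacy, const ConditionsStore& conds)
    : m_condKey(std::move(condKey)),
      m_legacy(&legacy),
      m_conds(&conds),
      // decide once, at configuration time, which retrieval path will be used
      m_useConditions(!m_condKey.empty()) {}

  // retrieve per call instead of caching a mutable pointer in the tool
  const TrackingGeometry* get(const std::string& legacyName) const {
    const TrackingGeometry* geo =
      m_useConditions ? m_conds->retrieve(m_condKey) : m_legacy->retrieve(legacyName);
    if (!geo) throw std::runtime_error("Problem with TrackingGeometry loading");
    return geo;
  }

private:
  std::string m_condKey;
  const LegacyStore* m_legacy;
  const ConditionsStore* m_conds;
  bool m_useConditions;
};

int main() {
  LegacyStore detStore;
  ConditionsStore condStore;
  GeometrySource legacyPath("", detStore, condStore);                          // empty key: DetectorStore path
  GeometrySource conditionsPath("AtlasTrackingGeometry", detStore, condStore); // key set: conditions path
  return (legacyPath.get("AtlasTrackingGeometry") && conditionsPath.get("")) ? 0 : 1;
}

Removing the cached mutable pointer is what allows the tool to drop its thread-safety marking: each call resolves the geometry from a store that manages validity, instead of lazily filling shared state.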
(m_trackingGeometry->highestTrackingVolume()); double distanceToSurface = 10e10; @@ -606,7 +595,6 @@ Trk::Navigator::closestParameters(const Trk::Track& trk, // finalize StatusCode Trk::Navigator::finalize() { - m_trackingGeometry = nullptr; if (msgLvl(MSG::DEBUG)) { ATH_MSG_DEBUG("[N] " << name() << " Perfomance Statistics : --------------------------------"); @@ -627,21 +615,25 @@ Trk::Navigator::finalize() { return StatusCode::SUCCESS; } -const Trk::TrackingGeometry * +const Trk::TrackingGeometry* Trk::Navigator::trackingGeometry() const { - if (!m_trackingGeometry) { - updateTrackingGeometry(); + if (!m_useConditions) { + const TrackingGeometry* trackingGeometry = nullptr; + if (detStore() + ->retrieve(trackingGeometry, m_trackingGeometryName) + .isFailure()) { + ATH_MSG_FATAL("Could not retrieve TrackingGeometry from DetectorStore."); + throw Trk::NavigatorException(); + } + return trackingGeometry; + } else { + SG::ReadCondHandle<TrackingGeometry> handle(m_trackingGeometryReadKey, + Gaudi::Hive::currentContext()); + if (!handle.isValid()) { + ATH_MSG_FATAL("Could not retrieve TrackingGeometry from DetectorStore."); + throw Trk::NavigatorException(); + } + return handle.cptr(); } - return m_trackingGeometry; } -void -Trk::Navigator::updateTrackingGeometry() const { - // -------------------- public TrackingGeometry (from DetectorStore) ---------------------------- - - if (detStore()->retrieve(m_trackingGeometry, m_trackingGeometryName).isFailure()) { - ATH_MSG_FATAL("Could not retrieve TrackingGeometry '" << m_trackingGeometryName << "' from DetectorStore."); - ATH_MSG_FATAL(" - probably the chosen layout is not supported / no cool tag exists. "); - throw Trk::NavigatorException(); - } -} diff --git a/Trigger/TrigAlgorithms/TrigT2CaloCommon/CMakeLists.txt b/Trigger/TrigAlgorithms/TrigT2CaloCommon/CMakeLists.txt index ef96d698a84f0e848c053d5aea466699845be281..06c97ab7da03320343029f7a7e6d9e102d3a9947 100644 --- a/Trigger/TrigAlgorithms/TrigT2CaloCommon/CMakeLists.txt +++ b/Trigger/TrigAlgorithms/TrigT2CaloCommon/CMakeLists.txt @@ -38,6 +38,5 @@ endfunction( _add_test ) _add_test( TestService test/test_dataaccess.sh ) _add_test( TestServiceNewJO test/test_dataaccessNewJO.sh ) -_add_test( caloCf test/test_t2calo_cf_build.sh ) _add_test( caloOnly test/test_t2calo_only_build.sh ) _add_test( caloRinger test/test_t2calo_ringer_only_build.sh ) diff --git a/Trigger/TrigAlgorithms/TrigT2CaloCommon/python/CaloDef.py b/Trigger/TrigAlgorithms/TrigT2CaloCommon/python/CaloDef.py index c9e9100b8470306e655f4e5a5b72cae5cf03c4c2..a969a417250c25541c673840bfa7552c0225fdaf 100644 --- a/Trigger/TrigAlgorithms/TrigT2CaloCommon/python/CaloDef.py +++ b/Trigger/TrigAlgorithms/TrigT2CaloCommon/python/CaloDef.py @@ -10,6 +10,10 @@ def setMinimalCaloSetup() : from TrigT2CaloCommon.TrigT2CaloCommonConfig import TrigCaloDataAccessSvc svcMgr+=TrigCaloDataAccessSvc() svcMgr.TrigCaloDataAccessSvc.OutputLevel=ERROR + if not hasattr(svcMgr,'RegSelSvcDefault'): + from RegionSelector.RegSelSvcDefault import RegSelSvcDefault + svcMgr += RegSelSvcDefault() + ######################## diff --git a/Trigger/TrigAlgorithms/TrigT2CaloCommon/test/test_t2calo_cf_build.sh b/Trigger/TrigAlgorithms/TrigT2CaloCommon/test/test_t2calo_cf_build.sh deleted file mode 100755 index 235141b868488c828298931b855afd7c20ad179a..0000000000000000000000000000000000000000 --- a/Trigger/TrigAlgorithms/TrigT2CaloCommon/test/test_t2calo_cf_build.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# art-description: athenaMT trigger test using IDCalo job options 
with doID=False -# art-type: build -# art-include: master/Athena -# Skipping art-output which has no effect for build tests. -# If you create a grid version, check art-output in existing grid tests. - -export EVENTS=20 -export THREADS=1 -export SLOTS=1 -export JOBOPTION="TrigUpgradeTest/IDCalo.py" -export EXTRA="doID=False" - -# Skip dumping chain counts because this test doesn't produce the histogram including them -export SKIP_CHAIN_DUMP=1 - -source exec_TrigUpgradeTest_art_athenaMT.sh -source exec_TrigUpgradeTest_art_post.sh diff --git a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/AnalysisConfigMT_Ntuple.cxx b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/AnalysisConfigMT_Ntuple.cxx index 183061cf891c625d86067ee49aa894f95912d41a..016b3d536c6b464e13640fa9106d513f42d2f996 100644 --- a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/AnalysisConfigMT_Ntuple.cxx +++ b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/AnalysisConfigMT_Ntuple.cxx @@ -38,7 +38,7 @@ void remove_duplicates(std::vector<T>& vec) { void AnalysisConfigMT_Ntuple::loop() { - m_provider->msg(MSG::INFO) << "[91;1m" << "AnalysisConfig_Ntuple::loop() for " << m_analysisInstanceName + m_provider->msg(MSG::DEBUG) << "[91;1m" << "AnalysisConfig_Ntuple::loop() for " << m_analysisInstanceName << " compiled " << __DATE__ << " " << __TIME__ << "\t: " << date() << "[m" << endmsg; // get (offline) beam position @@ -56,6 +56,7 @@ void AnalysisConfigMT_Ntuple::loop() { ybeam = vertex[1]; zbeam = vertex[2]; + /// leave this code commented here - useful for debugging // m_provider->msg(MSG::INFO) << " using beam position\tx=" << xbeam << "\ty=" << ybeam << "\tz=" << zbeam <<endmsg; beamline.push_back(xbeam); beamline.push_back(ybeam); @@ -102,11 +103,15 @@ void AnalysisConfigMT_Ntuple::loop() { std::vector<std::string> configuredChains = (*m_tdt)->getListOfTriggers("L2_.*, EF_.*, HLT_.*"); - m_provider->msg(MSG::VERBOSE) << "[91;1m" << configuredChains.size() << " Configured Chains" << "[m" << endmsg; + if (m_provider->msg().level() <= MSG::VERBOSE) { + m_provider->msg(MSG::VERBOSE) << "[91;1m" << configuredChains.size() << " Configured Chains" << "[m" << endmsg; + } + for ( unsigned i=0 ; i<configuredChains.size() ; i++ ) { - // m_provider->msg(MSG::VERBOSE) << "[91;1m" << "Chain " << configuredChains[i] << " (ACN)[m" << endmsg; + if (m_provider->msg().level() <= MSG::VERBOSE) { + m_provider->msg(MSG::VERBOSE) << "[91;1m" << "Chain " << configuredChains[i] << " (ACN)[m" << endmsg; + } configuredHLTChains.insert( configuredChains[i] ); - } tida_first = false; @@ -207,9 +212,9 @@ void AnalysisConfigMT_Ntuple::loop() { mu_val = pEventInfo->averageInteractionsPerCrossing(); } - m_provider->msg(MSG::INFO) << "run " << run_number - << "\tevent " << event_number - << "\tlb " << lumi_block << endmsg; + m_provider->msg(MSG::DEBUG) << "run " << run_number + << "\tevent " << event_number + << "\tlb " << lumi_block << endmsg; m_event->run_number(run_number); m_event->event_number(event_number); @@ -243,7 +248,7 @@ void AnalysisConfigMT_Ntuple::loop() { int passed_chains = 0; - m_provider->msg(MSG::INFO) << "Checking " << m_chainNames.size() << " chains" << endmsg; + m_provider->msg(MSG::DEBUG) << "Checking " << m_chainNames.size() << " chains" << endmsg; if ( m_chainNames.empty() ) { m_provider->msg(MSG::WARNING) << "No chains to check" << endmsg; @@ -273,7 +278,7 @@ void AnalysisConfigMT_Ntuple::loop() { bool passPhysics = (*m_tdt)->isPassed(chainName); - m_provider->msg(MSG::INFO) << "Chain " << chainName << "\troi " << roistring + 
m_provider->msg(MSG::DEBUG) << "Chain " << chainName << "\troi " << roistring << "\tpres " << (*m_tdt)->getPrescale(chainName) << ( passPhysics ? "[91;1m" : "" ) << "\tpass physics " << passPhysics << ( passPhysics ? "[m" : "" ) << "\t: ( pass " << (*m_tdt)->isPassed(chainName, decisiontype_ ) << "\tdec type " << decisiontype_ << " ) " << endmsg; @@ -290,12 +295,12 @@ void AnalysisConfigMT_Ntuple::loop() { /// bomb out if no chains passed and not told to keep all events and found no /// offline objects if ( !analyse && !m_keepAllEvents && !foundOffline ) { - m_provider->msg(MSG::INFO) << "No chains passed unprescaled - not processing this event: " << run_number << " " << event_number << " " << lumi_block << endmsg; + m_provider->msg(MSG::DEBUG) << "No chains passed unprescaled - not processing this event: " << run_number << " " << event_number << " " << lumi_block << endmsg; return; } - m_provider->msg(MSG::INFO) << "Chains passed " << passed_chains << endmsg; + m_provider->msg(MSG::DEBUG) << "Chains passed " << passed_chains << endmsg; /// for Monte Carlo get the truth particles if requested to do so @@ -304,11 +309,11 @@ void AnalysisConfigMT_Ntuple::loop() { selectorTruth.clear(); - m_provider->msg(MSG::INFO) << "MC Truth flag " << m_mcTruth << endmsg; + m_provider->msg(MSG::DEBUG) << "MC Truth flag " << m_mcTruth << endmsg; const TrigInDetTrackTruthMap* truthMap = 0; bool foundTruth = false; if ( m_mcTruth && m_TruthPdgId!=15) { - m_provider->msg(MSG::INFO) << "getting Truth" << endmsg; + m_provider->msg(MSG::DEBUG) << "getting Truth" << endmsg; if ( m_provider->evtStore()->retrieve(truthMap, "TrigInDetTrackTruthMap").isFailure()) { m_hasTruthMap = false; } @@ -353,7 +358,7 @@ void AnalysisConfigMT_Ntuple::loop() { if ( m_mcTruth && !foundTruth ) { - m_provider->msg(MSG::INFO) << "getting Truth" << endmsg; + m_provider->msg(MSG::DEBUG) << "getting Truth" << endmsg; /// selectTracks<TruthParticleContainer>( &selectorTruth, "INav4MomTruthEvent" ); @@ -369,22 +374,22 @@ void AnalysisConfigMT_Ntuple::loop() { for ( int ik=0 ; ik<4 ; ik++ ) { - m_provider->msg(MSG::INFO) << "Try McEventCollection: " << collectionNames[ik] << endmsg; + m_provider->msg(MSG::DEBUG) << "Try McEventCollection: " << collectionNames[ik] << endmsg; if (!m_provider->evtStore()->contains<McEventCollection>(collectionNames[ik]) ) { - m_provider->msg(MSG::INFO) << "No McEventCollection: " << collectionNames[ik] << endmsg; + m_provider->msg(MSG::DEBUG) << "No McEventCollection: " << collectionNames[ik] << endmsg; continue; } - m_provider->msg(MSG::INFO) << "evtStore()->retrieve( mcevent, " << collectionNames[ik] << " )" << endmsg; + m_provider->msg(MSG::DEBUG) << "evtStore()->retrieve( mcevent, " << collectionNames[ik] << " )" << endmsg; if ( m_provider->evtStore()->retrieve( mcevent, collectionNames[ik] ).isFailure() ) { - m_provider->msg(MSG::INFO) << "Failed to get McEventCollection: " << collectionNames[ik] << endmsg; + m_provider->msg(MSG::DEBUG) << "Failed to get McEventCollection: " << collectionNames[ik] << endmsg; } else { // found this collectionName collectionName = collectionNames[ik]; - m_provider->msg(MSG::INFO) << "Found McEventCollection: " << collectionName << endmsg; + m_provider->msg(MSG::DEBUG) << "Found McEventCollection: " << collectionName << endmsg; foundcollection = true; break; } @@ -409,7 +414,7 @@ void AnalysisConfigMT_Ntuple::loop() { /// it's not at all tidy, and should be rewritten, /// but probably never will be - m_provider->msg(MSG::INFO) << "Found McEventCollection: " << collectionName 
<< "\tNevents " << mcevent->size() << endmsg; + m_provider->msg(MSG::DEBUG) << "Found McEventCollection: " << collectionName << "\tNevents " << mcevent->size() << endmsg; /// count the number of interactions of each sort /// this is actually *very stupid*, there are a *lot* @@ -472,9 +477,9 @@ void AnalysisConfigMT_Ntuple::loop() { } } - m_provider->msg(MSG::INFO) << "Found " << ip << " TruthParticles (GenParticles) in " << ie_ip << " GenEvents out of " << ie << endmsg; + m_provider->msg(MSG::DEBUG) << "Found " << ip << " TruthParticles (GenParticles) in " << ie_ip << " GenEvents out of " << ie << endmsg; - m_provider->msg(MSG::INFO) << "selected " << selectorTruth.size() << " TruthParticles (GenParticles)" << endmsg; + m_provider->msg(MSG::DEBUG) << "selected " << selectorTruth.size() << " TruthParticles (GenParticles)" << endmsg; //////////////////////////////////////////////////////////////////////////////////////// @@ -494,7 +499,7 @@ void AnalysisConfigMT_Ntuple::loop() { /// get offline tracks - m_provider->msg(MSG::INFO) << " Offline tracks " << endmsg; + m_provider->msg(MSG::DEBUG) << " Offline tracks " << endmsg; selectorRef.clear(); @@ -525,7 +530,7 @@ void AnalysisConfigMT_Ntuple::loop() { if ( xaodVtxCollection!=0 ) { - m_provider->msg(MSG::INFO) << "xAOD Primary vertex container " << xaodVtxCollection->size() << " entries" << endmsg; + m_provider->msg(MSG::DEBUG) << "xAOD Primary vertex container " << xaodVtxCollection->size() << " entries" << endmsg; xAOD::VertexContainer::const_iterator vtxitr = xaodVtxCollection->begin(); @@ -681,11 +686,11 @@ void AnalysisConfigMT_Ntuple::loop() { std::vector<TIDA::Vertex> tidavertices; - m_provider->msg(MSG::INFO) << "\tFetch xAOD::VertexContainer with key " << vtx_name << endmsg; + m_provider->msg(MSG::DEBUG) << "\tFetch xAOD::VertexContainer with key " << vtx_name << endmsg; if ( vtx_name!="" ) { - m_provider->msg(MSG::INFO) << "\tFetch xAOD::VertexContainer with key " << vtx_name << endmsg; + m_provider->msg(MSG::DEBUG) << "\tFetch xAOD::VertexContainer with key " << vtx_name << endmsg; /// MT Vertex access @@ -697,8 +702,8 @@ void AnalysisConfigMT_Ntuple::loop() { if ( xaodVtxCollection!=0 ) { - m_provider->msg(MSG::INFO) << "\txAOD::VertexContainer found with size " << xaodVtxCollection->size() - << "\t" << vtx_name << endmsg; + m_provider->msg(MSG::DEBUG) << "\txAOD::VertexContainer found with size " << xaodVtxCollection->size() + << "\t" << vtx_name << endmsg; xAOD::VertexContainer::const_iterator vtxitr = xaodVtxCollection->begin(); @@ -755,7 +760,7 @@ void AnalysisConfigMT_Ntuple::loop() { int Ntest = selectorTest.tracks().size(); - m_provider->msg(MSG::INFO) << "collection " << collectionname << "\ttest tracks.size() " << Ntest << endmsg; + m_provider->msg(MSG::DEBUG) << "collection " << collectionname << "\ttest tracks.size() " << Ntest << endmsg; for ( int ii=Ntest ; ii-- ; ) m_provider->msg(MSG::DEBUG) << " test track " << ii << " " << *selectorTest.tracks()[ii] << endmsg; } } @@ -821,7 +826,7 @@ void AnalysisConfigMT_Ntuple::loop() { /// get muons for ( size_t imuon=0 ; imuon<m_muonType.size() ; imuon++ ) { - m_provider->msg(MSG::INFO) << "fetching offline muons " << endmsg; + m_provider->msg(MSG::DEBUG) << "fetching offline muons " << endmsg; int muonType = -1; for ( int it=0 ; it<5 ; it++ ) if ( m_muonType[imuon] == MuonRef[it] ) muonType=it; @@ -833,7 +838,7 @@ void AnalysisConfigMT_Ntuple::loop() { Nmu += Nmu_; - m_provider->msg(MSG::INFO) << "found " << Nmu << " offline muons " << endmsg; + m_provider->msg(MSG::DEBUG) 
<< "found " << Nmu << " offline muons " << endmsg; std::string mchain = "Muons"; if ( m_muonType[imuon]!="" ) mchain += "_" + m_muonType[imuon]; @@ -853,7 +858,7 @@ void AnalysisConfigMT_Ntuple::loop() { } m_provider->msg(MSG::DEBUG) << "ref muon tracks.size() " << selectorRef.tracks().size() << endmsg; - for ( int ii=selectorRef.tracks().size() ; ii-- ; ) m_provider->msg(MSG::INFO) << " ref muon track " << ii << " " << *selectorRef.tracks()[ii] << endmsg; + for ( int ii=selectorRef.tracks().size() ; ii-- ; ) m_provider->msg(MSG::DEBUG) << " ref muon track " << ii << " " << *selectorRef.tracks()[ii] << endmsg; } @@ -862,20 +867,20 @@ void AnalysisConfigMT_Ntuple::loop() { /// get muons if ( m_doMuonsSP ) { - m_provider->msg(MSG::INFO) << "fetching offline muons " << endmsg; + m_provider->msg(MSG::DEBUG) << "fetching offline muons " << endmsg; int muonType = 0; Nmu += processMuons( selectorRef, muonType ); - m_provider->msg(MSG::INFO) << "found " << Nmu << " offline muons " << endmsg; + m_provider->msg(MSG::DEBUG) << "found " << Nmu << " offline muons " << endmsg; m_event->addChain("MuonsSP"); m_event->back().addRoi(TIDARoiDescriptor(true)); m_event->back().back().addTracks(selectorRef.tracks()); m_provider->msg(MSG::DEBUG) << "ref muon tracks.size() " << selectorRef.tracks().size() << endmsg; - for ( int ii=selectorRef.tracks().size() ; ii-- ; ) m_provider->msg(MSG::INFO) << " ref muon track " << ii << " " << *selectorRef.tracks()[ii] << endmsg; + for ( int ii=selectorRef.tracks().size() ; ii-- ; ) m_provider->msg(MSG::DEBUG) << " ref muon track " << ii << " " << *selectorRef.tracks()[ii] << endmsg; } @@ -934,7 +939,7 @@ void AnalysisConfigMT_Ntuple::loop() { /// useful debug information 0 leave here // std::cout << "SUTT Ntaus: " << Ntau << std::endl; - if ( Nmu==0 && Noff==0 && Nel==0 && Ntau==0 ) m_provider->msg(MSG::INFO) << "No offline objects found " << endmsg; + if ( Nmu==0 && Noff==0 && Nel==0 && Ntau==0 ) m_provider->msg(MSG::DEBUG) << "No offline objects found " << endmsg; else foundOffline = true; @@ -998,7 +1003,7 @@ void AnalysisConfigMT_Ntuple::loop() { roist = comb->get<TrigRoiDescriptor>( roi_name_tmp, decisiontype_, roi_tename ); if ( roist.size()>0 ) { - for ( unsigned ir=0 ; ir<roist.size() ; ir++ ) m_provider->msg(MSG::INFO) << "\t\tRetrieved roi " << roi_name << "\t" << *roist[ir].cptr() << endmsg; + for ( unsigned ir=0 ; ir<roist.size() ; ir++ ) m_provider->msg(MSG::DEBUG) << "\t\tRetrieved roi " << roi_name << "\t" << *roist[ir].cptr() << endmsg; } else { m_provider->msg(MSG::WARNING) << "\t\tRequested roi " << roi_name << " not found" << endmsg; @@ -1049,6 +1054,9 @@ void AnalysisConfigMT_Ntuple::loop() { const ElementLink<TrigRoiDescriptorCollection> roi_link = roi_info.link; + /// check this is not a spurious TDT match + if ( roi_key!="" && roi_link.dataID()!=roi_key ) continue; + const TrigRoiDescriptor* const* roiptr = roi_link.cptr(); if ( roiptr == 0 ) { @@ -1064,13 +1072,8 @@ void AnalysisConfigMT_Ntuple::loop() { /// get the tracks - ////////////////////////////////// - /// useful cocumentation line ... 
- /// std::string keys[5] = { "TrigJetRec", "TrigSplitJet", "TrigSuperRoi", "forID", "" }; - - // m_provider->msg(MSG::INFO) << "using chain roi " << *roid << endmsg; - m_provider->msg(MSG::INFO) << "TIDARoi " << *roi_tmp << "\tcollectionName: " << collectionName << endmsg; + m_provider->msg(MSG::VERBOSE) << "TIDARoi " << *roi_tmp << "\tcollectionName: " << collectionName << endmsg; /// this should *never* be the case, and we should only run this /// bit of code once the first time round the loop anyhow @@ -1080,7 +1083,11 @@ void AnalysisConfigMT_Ntuple::loop() { if ( chainName.find("HLT_")!=std::string::npos ) { if ( selectTracks<xAOD::TrackParticleContainer>( &selectorTest, roi_link, collectionName ) ); - else m_provider->msg(MSG::WARNING) << "\tNo track collection " << collectionName << " found" << endmsg; + else { + if (m_provider->msg().level() <= MSG::DEBUG) { + m_provider->msg(MSG::WARNING) << "\tNo track collection " << collectionName << " found" << endmsg; + } + } } /// fetch vertices if available ... @@ -1089,7 +1096,7 @@ void AnalysisConfigMT_Ntuple::loop() { if ( vtx_name!="" ) { - m_provider->msg(MSG::INFO) << "\tFetch xAOD::VertexContainer for chain " << chainName << " with key " << vtx_name << endmsg; + m_provider->msg(MSG::DEBUG) << "\tFetch xAOD::VertexContainer for chain " << chainName << " with key " << vtx_name << endmsg; /// MT Vertex access @@ -1101,7 +1108,7 @@ void AnalysisConfigMT_Ntuple::loop() { } else { - m_provider->msg(MSG::INFO) << "\txAOD::VertexContainer found with size " << (vtx_itrpair.second - vtx_itrpair.first) + m_provider->msg(MSG::DEBUG) << "\txAOD::VertexContainer found with size " << (vtx_itrpair.second - vtx_itrpair.first) << "\t" << vtx_name << endmsg; xAOD::VertexContainer::const_iterator vtxitr = vtx_itrpair.first; @@ -1177,9 +1184,6 @@ void AnalysisConfigMT_Ntuple::loop() { if ( beamline_online.size()>3 ) chain.back().addUserData(beamline_online); } - - // m_provider->msg(MSG::INFO) << " done" << endmsg; - if ( roi_tmp ) delete roi_tmp; roi_tmp = 0; } @@ -1279,7 +1283,6 @@ void AnalysisConfigMT_Ntuple::loop() { #endif - // if ( m_printInfo ) m_provider->msg(MSG::INFO) << "FILL TREE\n" << (*m_event) << endmsg; if ( mTree ) mTree->Fill(); } diff --git a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/Analysis_Tier0.cxx b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/Analysis_Tier0.cxx index 1914de2f9f60ce764323c12c0f948e8eed08c2c4..30fb216dc24d3408352de4919a6e33ce84459a9f 100644 --- a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/Analysis_Tier0.cxx +++ b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/Analysis_Tier0.cxx @@ -51,6 +51,8 @@ void Analysis_Tier0::initialise() { addHistogram(h_chain); /// variable width bins for track occupancy + +#if 0 double vnbins[81] = { -0.5, @@ -75,10 +77,27 @@ void Analysis_Tier0::initialise() { 185.5, 192.5, 200.5 }; +#endif - - - h_ntrk = new TH1F( "reftrk_N", "Reference tracks", 80, vnbins ); + double vnbins[101] = { + -0.5, + 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 17.5, 18.5, 19.5, 21.5, + 23.5, 24.5, 26.5, 28.5, 30.5, 32.5, 35.5, 37.5, 40.5, 43.5, 46.5, 50.5, 53.5, 57.5, 61.5, 66.5, 71.5, 76.5, 81.5, 87.5, + 93.5, 100.5, 107.5, 114.5, 123.5, 131.5, 141.5, 150.5, 161.5, 172.5, 185.5, 198.5, 211.5, 226.5, 242.5, 259.5, 277.5, 297.5, 317.5, 340.5, + 363.5, 389.5, 416.5, 445.5, 476.5, 509.5, + 544.5, 582.5, 623.5, 666.5, 713.5, 762.5, 815.5, 872.5, 933.5, 998.5, 1067.5, + 1141.5, 1221.5, 1305.5, 1396.5, 1493.5, 1597.5, + 1708.5, 1827.5, 1953.5, 
2089.5, + 2234.5, 2389.5, 2555.5, + 2733.5, 2923.5, 3125.5, + 3342.5, 3574.5, + 3823.5, 4088.5, + 4372.5, 4675.5, + 5000.5 + }; + + + h_ntrk = new TH1F( "reftrk_N", "Reference tracks", 100, vnbins ); addHistogram(h_ntrk); @@ -130,7 +149,7 @@ void Analysis_Tier0::initialise() { /// test track distributions - h_ntrk_rec = new TH1F( "testtrk_N", "Test tracks", 80, vnbins ); + h_ntrk_rec = new TH1F( "testtrk_N", "Test tracks", 100, vnbins ); addHistogram(h_ntrk_rec); @@ -179,6 +198,7 @@ void Analysis_Tier0::initialise() { h_z0eff = new TProfile( "Eff_z0", "z0 efficiency", 50, -225., 225. ); h_nVtxeff = new TProfile( "Eff_nVtx", "nVtx efficiency", 71, -0.5, 70.5 ); + h_lbeff = new TProfile( "Eff_lb", "efficiency vs lumiblock", 301, -0.5, 3009.5 ); addHistogram( h_lbeff ); diff --git a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/TrigTestMonToolAC.cxx b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/TrigTestMonToolAC.cxx index 562d9a1c142505681fc695be6e754b8446f41506..bf7c19102dba107e880ba53197a750e0795c31ac 100644 --- a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/TrigTestMonToolAC.cxx +++ b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/TrigTestMonToolAC.cxx @@ -344,10 +344,9 @@ StatusCode TrigTestMonToolAC::book(bool newEventsBlock, bool newLumiBlock, bool StatusCode TrigTestMonToolAC::fill() { - msg(MSG::INFO) << " ----- enter fill() (athena) ----- " << endmsg; + msg(MSG::DEBUG) << " ----- enter fill() (athena) ----- " << endmsg; - - msg(MSG::INFO) << "chains: " << m_chainNames.size() << endmsg; + msg(MSG::DEBUG) << "chains: " << m_chainNames.size() << endmsg; for ( unsigned i=0 ; i<m_chainNames.size() ; i++ ) { ChainString s = m_chainNames[i]; @@ -357,7 +356,7 @@ StatusCode TrigTestMonToolAC::fill() { for ( unsigned i=0 ; i<m_sequences.size() ; i++ ) m_sequences[i]->execute(); - msg(MSG::INFO) << " ----- exit fill() ----- " << endmsg; + msg(MSG::DEBUG) << " ----- exit fill() ----- " << endmsg; return StatusCode::SUCCESS; } diff --git a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/VtxAnalysis.cxx b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/VtxAnalysis.cxx index 5a353f747909c4e1af58c68f68ffedd56bff7e82..f7e598d41ef8f4f586bf22b62ae81efe47fb563d 100644 --- a/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/VtxAnalysis.cxx +++ b/Trigger/TrigAnalysis/TrigInDetAnalysisExample/src/VtxAnalysis.cxx @@ -26,7 +26,7 @@ void VtxAnalysis::initialise() { mdir = new TIDDirectory(name()); mdir->push(); - +#if 0 double vnbins[81] = { -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, @@ -50,13 +50,31 @@ void VtxAnalysis::initialise() { 185.5, 192.5, 200.5 }; +#endif + + double vnbins[101] = { + -0.5, + 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 17.5, 18.5, 19.5, 21.5, + 23.5, 24.5, 26.5, 28.5, 30.5, 32.5, 35.5, 37.5, 40.5, 43.5, 46.5, 50.5, 53.5, 57.5, 61.5, 66.5, 71.5, 76.5, 81.5, 87.5, + 93.5, 100.5, 107.5, 114.5, 123.5, 131.5, 141.5, 150.5, 161.5, 172.5, 185.5, 198.5, 211.5, 226.5, 242.5, 259.5, 277.5, 297.5, 317.5, 340.5, + 363.5, 389.5, 416.5, 445.5, 476.5, 509.5, + 544.5, 582.5, 623.5, 666.5, 713.5, 762.5, 815.5, 872.5, 933.5, 998.5, 1067.5, + 1141.5, 1221.5, 1305.5, 1396.5, 1493.5, 1597.5, + 1708.5, 1827.5, 1953.5, 2089.5, + 2234.5, 2389.5, 2555.5, + 2733.5, 2923.5, 3125.5, + 3342.5, 3574.5, + 3823.5, 4088.5, + 4372.5, 4675.5, + 5000.5 + }; hnvtx = new TH1F( "vx_nvtx", ";number of vertices", 100, -0.5, 100.5 ); hzed = new TH1F( "vx_zed", ";vtx z [mm]", 100, -250, 250 ); - hntrax = new TH1F( "vx_ntrax", ";number of tracks", 
100, 0.5, 200.5 ); + hntrax = new TH1F( "vx_ntrax", ";number of tracks", 100, vnbins ); addHistogram( hnvtx ); addHistogram( hzed ); @@ -65,7 +83,7 @@ void VtxAnalysis::initialise() { hnvtx_rec = new TH1F( "vx_nvtx_rec", ";number of vertices", 100, -0.5, 100.5 ); hzed_rec = new TH1F( "vx_zed_rec", ";vtx z [mm]", 100, -250, 250 ); - hntrax_rec = new TH1F( "vx_ntrax_rec", ";number of tracks", 100, 0.5, 200.5 ); + hntrax_rec = new TH1F( "vx_ntrax_rec", ";number of tracks", 100, vnbins ); addHistogram( hnvtx_rec ); addHistogram( hzed_rec ); @@ -76,7 +94,7 @@ void VtxAnalysis::initialise() { addHistogram( hzed_res ); rdz_vs_zed = new TProfile( "vx_rdz_vs_zed", "rdz_vs_zed; vtx z [mm];z residual [mm]", 100, -250, 250 ); - rdz_vs_ntrax = new TProfile( "vx_rdz_vs_ntrax", "rdz_vs_ntrax;number of tracks;z residual [mm]", 80, vnbins ); + rdz_vs_ntrax = new TProfile( "vx_rdz_vs_ntrax", "rdz_vs_ntrax;number of tracks;z residual [mm]", 100, vnbins ); rdz_vs_nvtx = new TProfile( "vx_rdz_vs_nvtx", "rdz_vs_nvtx;number of vertices;z residual [mm]", 51, -0.125, 50.125 ); addHistogram( rdz_vs_zed ); @@ -86,7 +104,7 @@ void VtxAnalysis::initialise() { // rdz_vs_mu = new TProfile( "rdz_vs_mu", 30, 0, 30, 400, -20, 20 ); eff_zed = new TProfile( "vx_zed_eff", "zed_eff;efficiency;offline vtx z [mm]", 50, -250, 250 ); - eff_ntrax = new TProfile( "vx_ntrax_eff", "ntrax_eff;number of tracks;efficiency", 80, vnbins ); + eff_ntrax = new TProfile( "vx_ntrax_eff", "ntrax_eff;number of tracks;efficiency", 100, vnbins ); eff_nvtx = new TProfile( "vx_nvtx_eff", "nvtx_eff;number of vertices;efficiency", 100, -0.5, 100.5 ); eff_mu = new TProfile( "vx_mu_eff", "mu_eff;<#mu>;efficiency", 61, -0.5, 60.5 ); eff_lb = new TProfile( "vx_lb_eff", "lb_eff;lumi block;efficiency", 151, -0.5, 3019.5 ); diff --git a/Trigger/TrigAnalysis/TrigInDetAnalysisUser/Analysis/src/ConfVtxAnalysis.cxx b/Trigger/TrigAnalysis/TrigInDetAnalysisUser/Analysis/src/ConfVtxAnalysis.cxx index d9b674b7f2ce33a7f22416159bed37ee181be79c..ba88603d1e7bee289f65f6b2553d0175f0c98800 100644 --- a/Trigger/TrigAnalysis/TrigInDetAnalysisUser/Analysis/src/ConfVtxAnalysis.cxx +++ b/Trigger/TrigAnalysis/TrigInDetAnalysisUser/Analysis/src/ConfVtxAnalysis.cxx @@ -49,7 +49,7 @@ void ConfVtxAnalysis::initialise() { 61.5, 65.5, 69.5, 74.5, 80.5 }; -#endif + double vnbins[81] = { -0.5, @@ -75,6 +75,25 @@ void ConfVtxAnalysis::initialise() { 192.5, 200.5 }; +#endif + + double vnbins[101] = { + -0.5, + 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5, 15.5, 17.5, 18.5, 19.5, 21.5, + 23.5, 24.5, 26.5, 28.5, 30.5, 32.5, 35.5, 37.5, 40.5, 43.5, 46.5, 50.5, 53.5, 57.5, 61.5, 66.5, 71.5, 76.5, 81.5, 87.5, + 93.5, 100.5, 107.5, 114.5, 123.5, 131.5, 141.5, 150.5, 161.5, 172.5, 185.5, 198.5, 211.5, 226.5, 242.5, 259.5, 277.5, 297.5, 317.5, 340.5, + 363.5, 389.5, 416.5, 445.5, 476.5, 509.5, + 544.5, 582.5, 623.5, 666.5, 713.5, 762.5, 815.5, 872.5, 933.5, 998.5, 1067.5, + 1141.5, 1221.5, 1305.5, 1396.5, 1493.5, 1597.5, + 1708.5, 1827.5, 1953.5, 2089.5, + 2234.5, 2389.5, 2555.5, + 2733.5, 2923.5, 3125.5, + 3342.5, 3574.5, + 3823.5, 4088.5, + 4372.5, 4675.5, + 5000.5 + }; + diff --git a/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/DumpAll.cxx b/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/DumpAll.cxx index a6b8e6a31baa509b137178eeef6007371ef923dd..201fcfb83e46eefefd18221c93dcd683a8fcfa64 100644 --- a/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/DumpAll.cxx +++ b/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/DumpAll.cxx @@ -3,7 +3,7 @@ */ -/** +/** * NAME : DumpAll.cxx 
* PACKAGE : Trigger/L1CaloUpgrade/DumpAll * @@ -38,7 +38,7 @@ DumpAll::DumpAll( const std::string& name, ISvcLocator* pSvcLocator ) : AthAlgor DumpAll::~DumpAll(){} StatusCode DumpAll::initialize(){ - + MsgStream msg(msgSvc(), name()); msg << MSG::DEBUG << "initializing DumpAll" << endmsg; m_counter = 0; @@ -127,7 +127,7 @@ StatusCode DumpAll::finalize(){ } StatusCode DumpAll::execute(){ - + MsgStream msg(msgSvc(), name()); msg << MSG::DEBUG << "execute DumpAll" << endmsg; std::cout << "DumpAll" << std::endl; @@ -313,15 +313,15 @@ StatusCode DumpAll::execute(){ m_offel_f1.push_back(el->auxdata<float>("f1") ); m_offel_f3.push_back(el->auxdata<float>("f3") ); m_offel_had.push_back( 0.0 ); // not yet there - m_offel_istight.push_back( (el->passSelection( xAOD::EgammaParameters::LHTight ) ? 1 : 0 ) ); - m_offel_ismedium.push_back( (el->passSelection( xAOD::EgammaParameters::LHMedium ) ? 1 : 0 ) ); - m_offel_isloose.push_back( (el->passSelection( xAOD::EgammaParameters::LHLoose ) ? 1 : 0 ) ); + m_offel_istight.push_back( (el->passSelection( "LHTight" ) ? 1 : 0 ) ); + m_offel_ismedium.push_back( (el->passSelection( "LHMedium" ) ? 1 : 0 ) ); + m_offel_isloose.push_back( (el->passSelection( "LHLoose" ) ? 1 : 0 ) ); } // end of electron m_offelectron->Fill(); m_counter++; - + return StatusCode::SUCCESS; } diff --git a/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/EFexAnalysis.cxx b/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/EFexAnalysis.cxx index a7475956760a94f6de5808a3ba2c000009b5a998..f2b1b146c364f039f510e350692148531c0ef69c 100644 --- a/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/EFexAnalysis.cxx +++ b/Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/src/EFexAnalysis.cxx @@ -3,7 +3,7 @@ */ -/** +/** * NAME : EFexAnalysis.cxx * PACKAGE : Trigger/TrigL1Upgrade/TrigL1CaloUpgrade/EFexAnalysis * @@ -35,7 +35,7 @@ EFexAnalysis::EFexAnalysis( const std::string& name, ISvcLocator* pSvcLocator ) EFexAnalysis::~EFexAnalysis(){} StatusCode EFexAnalysis::initialize(){ - + MsgStream msg(msgSvc(), name()); msg << MSG::DEBUG << "initializing TrigT1CaloEFex" << endmsg; if ( m_enableMon ){ @@ -122,7 +122,7 @@ StatusCode EFexAnalysis::finalize(){ } StatusCode EFexAnalysis::execute(){ - + MsgStream msg(msgSvc(), name()); msg << MSG::DEBUG << "execute TrigT1CaloEFex" << endmsg; const xAOD::TrigEMClusterContainer* scluster(nullptr); @@ -176,7 +176,7 @@ StatusCode EFexAnalysis::execute(){ m_f3->Fill( (cl->energy( CaloSampling::EMB3 ) + cl->energy( CaloSampling::EME3 ) ) / cl->energy() ); } } - + } if ( m_doTruth ) { const xAOD::TruthParticleContainer* truth; @@ -208,7 +208,7 @@ StatusCode EFexAnalysis::execute(){ if ( std::abs( tt->eta() )<2.5 ) m_eff_truth_ptA_n->Fill( tt->pt()/1e3 ); m_eff_truth_eta_n->Fill( tt->eta() ); } - + } } // end of if m_doTruth @@ -222,7 +222,7 @@ StatusCode EFexAnalysis::execute(){ for( auto el : *electrons ){ if ( el->pt() < 1e3 ) continue; //if ( !el->passSelection( xAOD::EgammaParameters::LHMedium ) ) continue; - if ( !el->passSelection( xAOD::EgammaParameters::LHLoose ) ) continue; + if ( !el->passSelection( "LHLoose" ) ) continue; m_eff_off_pt_d->Fill( el->pt()/1e3 ); if ( el->pt() > 16000 ) m_eff_off_eta_d->Fill( el->eta() ); for( auto cl : *scluster ){ @@ -235,7 +235,7 @@ StatusCode EFexAnalysis::execute(){ m_res_off->Fill( resolution ); m_res_off_eta->Fill( el->eta(), resolution ); m_res_off_pt->Fill( el->pt()/1e3, resolution ); - + if ( (el->caloCluster()->et() > 9e3) && (std::abs(el->eta())<2.47) ) m_res_off_nvtx->Fill( nvtxs, resolution ); @@ -262,7 +262,7 @@ StatusCode 
EFexAnalysis::execute(){ if ( cl->e277() > 0.0 ) { SE_reta = cl->e237()/cl->e277(); } float resol_reta = -100.0; - if ( off_reta > 0 ) resol_reta = + if ( off_reta > 0 ) resol_reta = 100.0*(off_reta-SE_reta)/off_reta; m_res_rEta_off->Fill( resol_reta ); m_res_rEta_off_eta->Fill( el->eta(), resol_reta ); @@ -275,7 +275,7 @@ StatusCode EFexAnalysis::execute(){ if ( cl->energy() > 0.0 ) { SE_f1 = (cl->energy(CaloSampling::EMB1)+cl->energy(CaloSampling::EME1))/cl->energy(); } float resol_f1 = -100.0; - if ( fabsf(off_f1) > 0.01 ) resol_f1 = + if ( fabsf(off_f1) > 0.01 ) resol_f1 = 100.0*(off_f1-SE_f1)/off_f1; m_res_f1_off->Fill( resol_f1 ); m_res_f1_off_eta->Fill( el->eta(), resol_f1 ); @@ -289,7 +289,7 @@ StatusCode EFexAnalysis::execute(){ if ( cl->energy() > 0.0 ) { SE_f3 = (cl->energy(CaloSampling::EMB3)+cl->energy(CaloSampling::EME3))/cl->energy(); } float resol_f3 = -100.0; - if ( fabsf(off_f3) > 0.001 ) resol_f3 = + if ( fabsf(off_f3) > 0.001 ) resol_f3 = 100.0*(off_f3-SE_f3)/off_f3; m_res_f3_off->Fill( resol_f3 ); m_res_f3_off_eta->Fill( el->eta(), resol_f3 ); diff --git a/Trigger/TrigTools/TrigInDetConfig/python/InDetPT.py b/Trigger/TrigTools/TrigInDetConfig/python/InDetPT.py index 58f15c70652cd61db93087a951256da97a20a09b..5fe6d96a5e009d5e099c4920dd4f195a0b7fe9e8 100644 --- a/Trigger/TrigTools/TrigInDetConfig/python/InDetPT.py +++ b/Trigger/TrigTools/TrigInDetConfig/python/InDetPT.py @@ -6,8 +6,6 @@ from __future__ import print_function from AthenaCommon.Include import include include.block("InDetTrigRecExample/EFInDetConfig.py") -include("InDetTrigRecExample/InDetTrigRec_jobOptions.py") # this is needed to get InDetTrigFlags -from InDetTrigRecExample.InDetTrigFlags import InDetTrigFlags from AthenaCommon.Logging import logging log = logging.getLogger("InDetPT") @@ -106,7 +104,7 @@ def makeInDetPrecisionTracking( whichSignature, trigTrackSummaryTool = Trk__TrackSummaryTool(name = "%sTrackSummaryToolSharedHitsWithTRT%s"%(algNamePrefix, signature), InDetSummaryHelperTool = InDetTrigTrackSummaryHelperToolSharedHits, - doSharedHits = InDetTrigFlags.doSharedHits(), + doSharedHits = True, doHolesInDet = True ) if doTRTextension: diff --git a/Trigger/TrigValidation/TrigUpgradeTest/share/IDCalo.py b/Trigger/TrigValidation/TrigUpgradeTest/share/IDCalo.py deleted file mode 100644 index 55540059ec5ab585a325a341882af25f263534bd..0000000000000000000000000000000000000000 --- a/Trigger/TrigValidation/TrigUpgradeTest/share/IDCalo.py +++ /dev/null @@ -1,143 +0,0 @@ -# -# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration -# - -doWriteRDOTrigger = False -doWriteBS = False -include("TriggerJobOpts/runHLT_standalone.py") - -from AthenaCommon.AlgSequence import AlgSequence -topSequence = AlgSequence() - -# ---------------------------------------------------------------- -# Setup Views -# ---------------------------------------------------------------- -from AthenaCommon.AlgSequence import AthSequencer -viewSeq = AthSequencer("AthViewSeq", Sequential=True, ModeOR=False, StopOverride=False) -topSequence += viewSeq - -from L1Decoder.L1DecoderConfig import mapThresholdToL1RoICollection, mapThresholdToL1DecisionCollection -roiCollectionName = mapThresholdToL1RoICollection("EM") - -# View maker alg -viewNodeName = "allViewAlgorithms" -from ViewAlgs.ViewAlgsConf import EventViewCreatorAlgorithm -from DecisionHandling.DecisionHandlingConf import ViewCreatorInitialROITool - -inputMakerAlg = EventViewCreatorAlgorithm("viewMaker") -inputMakerAlg.ViewFallThrough = True -inputMakerAlg.RoIsLink = 
roiCollectionName -inputMakerAlg.InViewRoIs = "EMViewRoIs" -inputMakerAlg.Views = "testView" -inputMakerAlg.RoITool = ViewCreatorInitialROITool() -inputMakerAlg.InputMakerInputDecisions = [ mapThresholdToL1DecisionCollection("EM") ] -inputMakerAlg.ViewNodeName = viewNodeName -inputMakerAlg.InputMakerOutputDecisions = 'DUMMYOUTDEC' -viewSeq += inputMakerAlg - -# Set of view algs -allViewAlgorithms = AthSequencer(viewNodeName, Sequential=False, ModeOR=False, StopOverride=False) - - -if TriggerFlags.doID: - - from TrigInDetConfig.InDetSetup import makeInDetAlgs - - viewAlgs = makeInDetAlgs("FS", rois= roiCollectionName) - - for viewAlg in viewAlgs: - allViewAlgorithms += viewAlg - - - #Adding vertexing - from TrigInDetConfig.TrigInDetPriVtxConfig import makeVertices - # TODO need to change the name of the output vertex collection to something recordable - # what is this actually testing ? why do we have FS tracks with egamma and aprimary vertex ??? - vtxAlgs = makeVertices( "bjet", "HLT_IDTrack_FS_FTF", "HLT_IDVertex_FS" ) - allViewAlgorithms += vtxAlgs - - - from TrigInDetConfig.InDetPT import makeInDetPrecisionTracking - #Adding precision tracking - PTTracks, PTTrackParticles, PTAlgs = makeInDetPrecisionTracking( "bjet", inputFTFtracks="TrigFastTrackFinder_Tracks_Bjet" ) - - allViewAlgorithms += PTAlgs - - #Testing BeamSpotAlg in Run3 configuration - prefixName = "InDetTrigMT" - from TrigVertexFitter.TrigVertexFitterConf import TrigPrimaryVertexFitter - primaryVertexFitter = TrigPrimaryVertexFitter( name = prefixName + "VertexFitter", - zVariance=3.0, - CreateTrackLists=True ) - - #Can it be added to the service when we need to make it private? - ToolSvc += primaryVertexFitter - - from TrigT2BeamSpot.T2VertexBeamSpotMonitoring import T2VertexBeamSpotAlgMonitoring, T2VertexBeamSpotToolMonitoring - alg = T2VertexBeamSpotAlgMonitoring() - toolMon = T2VertexBeamSpotToolMonitoring() - - from TrigT2BeamSpot.TrigT2BeamSpotConf import PESA__T2VertexBeamSpotTool - InDetTrigMTBeamSpotTool = PESA__T2VertexBeamSpotTool( name = "TestBeamSpotTool", - OutputLevel = INFO, - MonTool = toolMon, - nSplitVertices = 1, # Turn on (>1) or off vertex splitting - ReclusterSplit = False, # Recluster split track collections before vertex fitting - WeightClusterZ = True, # Use the track Z0 weighted cluster Z position as seed - - TotalNTrackMin = 4, # Minimum number of tracks required in an event - TrackMinPt = 0.5, # Minimum track pT to be considered for vertexing - TrackSeedPt = 0.7, # Minimum track pT to be considered for seeding a vertex fit - TrackClusterDZ = 0.35, # Maximum distance between tracks considered as a cluster - TrackMaxZ0 = 200.0, # Maximum track Z0 to be considered for vertexing - TrackMaxD0 = 10.0, # Maximum track d0 to be considered for vertexing - TrackMaxZ0err = 5.0, # Maximum track Z0 error to be considered for vertexing - TrackMaxD0err = 5.0, # Maximum track d0 error to be considered for vertexing - TrackMinNDF = 2.0, # Minimum track NDF to be considered for vertexing - TrackMinQual = 0.0, # Minimum track chi^2/NDF to be considered for vertexing - TrackMaxQual = 10.0, # Maximum track chi^2/NDF to be considered for vertexing - TrackMinChi2Prob = -10.0, # Minimum track cumulative chi2 probability, from CLHEP/GenericFunctions/CumulativeChiSquare.hh - TrackMinSiHits = 7, # Minimum # track silicon (PIX + SCT) hits to be considered for vertexing - TrackMinPIXHits = 0, # Minimum # track silicon (PIX + SCT) hits to be considered for vertexing - TrackMinSCTHits = 0, # Minimum # track silicon (PIX + SCT) 
hits to be considered for vertexing - TrackMinTRTHits = -10, # Minimum # track TRT hits to be considered for vertexing - - VertexMinNTrk = 2, # Minimum # tracks in a cluster to be considered for vertexing - VertexMaxNTrk = 100, # Maximum # tracks in a cluster to be considered for vertexing (saves on time!) - VertexMaxXerr = 1., # Maximum resulting X error on vertex fit for "good" vertices - VertexMaxYerr = 1., # Maximum resulting Y error on vertex fit for "good" vertices - VertexMaxZerr = 10., # Maximum resulting Z error on vertex fit for "good" vertices - VertexMinQual = 0.0, # Minimum resulting chi^2/NDF on vertex fit for "good" vertices - VertexMaxQual = 100.0, # Maximum resulting chi^2/NDF on vertex fit for "good" vertices - VertexMinChi2Prob = -10.0, # Minimum cumulative chi2 probability, from CLHEP/GenericFunctions/CumulativeChiSquare.hh - VertexBCIDMinNTrk = 10, # Minimum # tracks in a vertex to be used for per-BCID monitoring - PrimaryVertexFitter = primaryVertexFitter ) - - ToolSvc += InDetTrigMTBeamSpotTool - - - -#Testing base default class - from TrigT2BeamSpot.TrigT2BeamSpotConf import PESA__T2VertexBeamSpot - InDetTrigMTBeamSpotAlg = PESA__T2VertexBeamSpot( name = "TestBeamSpotAlg", - OutputLevel =INFO, - MonTool = alg, - vertexCollName = "TrigBeamSpotVertex", # Output vertex collection Name - TrackCollections = [ PTTracks[-1] ], #For now using PT tracks as a test but FTF should be enough - BeamSpotTool = InDetTrigMTBeamSpotTool ) - - - - allViewAlgorithms += InDetTrigMTBeamSpotAlg - - from TrigT2MinBias.TrigT2MinBiasConf import MbtsFexMT - alg=MbtsFexMT() - allViewAlgorithms += alg - - - if TriggerFlags.doCalo: - from TrigT2CaloEgamma.TrigT2CaloEgammaConfig import T2CaloEgamma_ReFastAlgo - algo=T2CaloEgamma_ReFastAlgo("testFastAlgo") - algo.RoIs="EMViewRoIs" - allViewAlgorithms += algo - viewSeq += allViewAlgorithms diff --git a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_calo_cf_build.sh b/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_calo_cf_build.sh deleted file mode 100755 index bd31c3e5778f151568a7e60bc0ffa90a70724197..0000000000000000000000000000000000000000 --- a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_calo_cf_build.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# art-description: athenaMT trigger test using IDCalo job options -# art-type: build -# art-include: master/Athena -# Skipping art-output which has no effect for build tests. -# If you create a grid version, check art-output in existing grid tests. - -export EVENTS=20 -export THREADS=1 -export SLOTS=1 -export JOBOPTION="TrigUpgradeTest/IDCalo.py" - -# Skip dumping chain counts because this test doesn't produce the histogram including them -export SKIP_CHAIN_DUMP=1 - -source exec_TrigUpgradeTest_art_athenaMT.sh -source exec_TrigUpgradeTest_art_post.sh diff --git a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_build.sh b/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_build.sh deleted file mode 100755 index 1f5a29c977e585fbf04d5b0abd5d690866a37970..0000000000000000000000000000000000000000 --- a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_build.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# art-description: athenaMT trigger test using IDCalo job options with doCalo=False -# art-type: build -# art-include: master/Athena -# Skipping art-output which has no effect for build tests. -# If you create a grid version, check art-output in existing grid tests. 
- -export EVENTS=20 -export THREADS=1 -export SLOTS=1 -export JOBOPTION="TrigUpgradeTest/IDCalo.py" -export EXTRA="doCalo=False" - -# Skip dumping chain counts because this test doesn't produce the histogram including them -export SKIP_CHAIN_DUMP=1 - -source exec_TrigUpgradeTest_art_athenaMT.sh -source exec_TrigUpgradeTest_art_post.sh diff --git a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_mc_build.sh b/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_mc_build.sh deleted file mode 100755 index 989f8c1638a0e0b7c5fe1d6f9f2a85ac8e3322d6..0000000000000000000000000000000000000000 --- a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_id_cf_mc_build.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# art-description: athenaMT trigger test on MC using IDCalo job options with doCalo=False -# art-type: build -# art-include: master/Athena -# Skipping art-output which has no effect for build tests. -# If you create a grid version, check art-output in existing grid tests. - -export EVENTS=20 -export THREADS=1 -export SLOTS=1 -export INPUT="ttbar" -export JOBOPTION="TrigUpgradeTest/IDCalo.py" -export EXTRA="doCalo=False" - -# Skip dumping chain counts because this test doesn't produce the histogram including them -export SKIP_CHAIN_DUMP=1 - -source exec_TrigUpgradeTest_art_athenaMT.sh -source exec_TrigUpgradeTest_art_post.sh diff --git a/Trigger/TrigValidation/TrigValTools/CMakeLists.txt b/Trigger/TrigValidation/TrigValTools/CMakeLists.txt index 81fe27447d39a72dfa3298da4c4c6374f6116ce5..7eb8cae993f06f84f8fac26c2ebe03ace7245b71 100644 --- a/Trigger/TrigValidation/TrigValTools/CMakeLists.txt +++ b/Trigger/TrigValidation/TrigValTools/CMakeLists.txt @@ -34,3 +34,7 @@ atlas_add_test( rootcomp SCRIPT test/test_rootcomp.sh ${CMAKE_CURRENT_SOURCE_DIR}/test/test_rootcomp.C PROPERTIES TIMEOUT 450 POST_EXEC_SCRIPT nopost.sh ) + +atlas_add_test( chainDump + SCRIPT test/test_chainDump.sh ${CMAKE_CURRENT_SOURCE_DIR}/test/test_chainDump.C + LOG_IGNORE_PATTERN "Processing.*test_chainDump" ) diff --git a/Trigger/TrigValidation/TrigValTools/bin/chainDump.py b/Trigger/TrigValidation/TrigValTools/bin/chainDump.py index dfa4f637d7d812cbe1c6a96853ef73f53d985657..af98ec9f7f78394620d58cb82993f730b12c283d 100755 --- a/Trigger/TrigValidation/TrigValTools/bin/chainDump.py +++ b/Trigger/TrigValidation/TrigValTools/bin/chainDump.py @@ -45,11 +45,13 @@ def get_parser(): help='Save outputs also to a json file with the given name or %(const)s if no name is given') parser.add_argument('--fracTolerance', metavar='FRAC', + type=float, default=0.001, help='Tolerance as a fraction, default = %(default)s. ' 'Flagged diffs must exceed all tolerances') parser.add_argument('--intTolerance', metavar='NUM', + type=int, default=2, help='Tolerance as a number of counts, default = %(default)s. 
' 'Flagged diffs must exceed all tolerances') @@ -258,13 +260,14 @@ def print_counts(json_dict): dump_lines = [] for item_name, item_counts in counts.items(): v = item_counts['count'] - line = ' {name:{nw}s} {val:>{w}d}'.format(name=item_name, val=v, nw=name_width, w=column_width) + line = ' {name:{nw}s} {val:>{w}s}'.format(name=item_name, val=str(v), nw=name_width, w=column_width) if not no_ref: ref_v = item_counts['ref_count'] diff = item_counts['ref_diff'] - line += ' {val:>{w}d}'.format(val=ref_v, w=column_width) + line += ' {val:>{w}s}'.format(val=str(ref_v), w=column_width) if diff: line += ' <<<<<<<<<<' + dump_lines.append(line) logging.info('Writing %s counts from histogram %s:\n%s', text_name, hist_name, '\n'.join(dump_lines)) @@ -343,7 +346,7 @@ def main(): if len(in_hists) == 0: logging.error('No count histograms could be loaded.') return 1 - logging.info('Loaded count histograms: %s', in_hists.keys()) + logging.info('Loaded count histograms: %s', list(in_hists.keys())) in_total_hists = load_histograms(in_file, args.totalHists) if len(in_total_hists) == 0: @@ -359,7 +362,7 @@ def main(): ref_total = None if args.referenceFile: ref_hists = load_histograms(ref_file, args.countHists) - logging.info('Loaded reference count histograms: %s', ref_hists.keys()) + logging.info('Loaded reference count histograms: %s', list(ref_hists.keys())) missing_refs = [k for k in in_hists.keys() if k not in ref_hists.keys()] if len(missing_refs) > 0: logging.error('Count histogram(s) %s missing in the reference', missing_refs) @@ -368,9 +371,9 @@ def main(): if len(ref_total_hists) == 0: logging.error('No total-events reference histogram could be loaded') return 1 - ref_total = ref_total_hists.values()[0].GetEntries() + ref_total = list(ref_total_hists.values())[0].GetEntries() logging.info('Loaded total-events reference histogram %s, number of events: %d', - ref_total_hists.keys()[0], ref_total) + list(ref_total_hists.keys())[0], ref_total) ################################################## # Extract counts from histograms diff --git a/Trigger/TrigValidation/TrigValTools/share/chainDump.ref b/Trigger/TrigValidation/TrigValTools/share/chainDump.ref new file mode 100644 index 0000000000000000000000000000000000000000..341afb013671ecf3d2ff37e8b9f9ec97d78742ff --- /dev/null +++ b/Trigger/TrigValidation/TrigValTools/share/chainDump.ref @@ -0,0 +1,36 @@ + +Info in <test_chainDump>: Creating test file testChainDump1.root +Info in <test_chainDump>: Creating test file testChainDump2.root +INFO Loaded count histograms: ['chains', 'decisions'] +INFO Loaded total-events histogram total, number of events: 100 +INFO Loaded reference count histograms: ['chains', 'decisions'] +INFO Loaded total-events reference histogram total, number of events: 100 +INFO Comparing counts to reference +INFO HLTChain has 1 item(s) out of tolerance: + HLT_testChain1 26 5 +INFO HLTDecision has 7 item(s) out of tolerance: + HLT_testChain1_Step0 96 18 + HLT_testChain1_Step1 66 10 + HLT_testChain1_Step2 57 7 + HLT_testChain2_Step1 101 53 + HLT_testChain3_Step0 76 179 + HLT_testChain3_Step1 92 163 + HLT_testChain3_Step2 96 112 +INFO Writing total event count to file TotalEventsProcessed.txt +INFO Writing counts from histogram chains to file HLTChain.txt +INFO Writing counts from histogram decisions to file HLTDecision.txt +INFO Writing results to chainDump.json +100 +HLT_testChain1 26 5 <<<<<<<<<< +HLT_testChain2 30 40 +HLT_testChain3 62 52 +HLT_testChain1_Step0 96 18 <<<<<<<<<< +HLT_testChain1_Step1 66 10 <<<<<<<<<< +HLT_testChain1_Step2 
57 7 <<<<<<<<<< +HLT_testChain2_Step0 82 86 +HLT_testChain2_Step1 101 53 <<<<<<<<<< +HLT_testChain2_Step2 71 72 +HLT_testChain3_Step0 76 179 <<<<<<<<<< +HLT_testChain3_Step1 92 163 <<<<<<<<<< +HLT_testChain3_Step2 96 112 <<<<<<<<<< +{"TotalEventsProcessed": {"hist_name": "total", "count": 100, "ref_count": 100}, "HLTChain": {"hist_name": "chains", "counts": {"HLT_testChain1": {"count": 26, "ref_count": 5, "ref_diff": true}, "HLT_testChain2": {"count": 30, "ref_count": 40, "ref_diff": false}, "HLT_testChain3": {"count": 62, "ref_count": 52, "ref_diff": false}}}, "HLTDecision": {"hist_name": "decisions", "counts": {"HLT_testChain1_Step0": {"count": 96, "ref_count": 18, "ref_diff": true}, "HLT_testChain1_Step1": {"count": 66, "ref_count": 10, "ref_diff": true}, "HLT_testChain1_Step2": {"count": 57, "ref_count": 7, "ref_diff": true}, "HLT_testChain2_Step0": {"count": 82, "ref_count": 86, "ref_diff": false}, "HLT_testChain2_Step1": {"count": 101, "ref_count": 53, "ref_diff": true}, "HLT_testChain2_Step2": {"count": 71, "ref_count": 72, "ref_diff": false}, "HLT_testChain3_Step0": {"count": 76, "ref_count": 179, "ref_diff": true}, "HLT_testChain3_Step1": {"count": 92, "ref_count": 163, "ref_diff": true}, "HLT_testChain3_Step2": {"count": 96, "ref_count": 112, "ref_diff": true}}}} diff --git a/Trigger/TrigValidation/TrigValTools/test/test_chainDump.C b/Trigger/TrigValidation/TrigValTools/test/test_chainDump.C new file mode 100644 index 0000000000000000000000000000000000000000..560451e28e3282f5efaf2e4795dccfd5ed45679b --- /dev/null +++ b/Trigger/TrigValidation/TrigValTools/test/test_chainDump.C @@ -0,0 +1,58 @@ +/* + Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration +*/ + +void fillBin(TH2& h, const char* xlabel, const char* ylabel, size_t n) { + h.SetBinContent(h.GetXaxis()->FindBin(xlabel), h.GetYaxis()->FindBin(ylabel), n); +} + +void makeTestFile(const char* fileName) { + TFile f(fileName, "RECREATE"); + size_t nTotal = 100; + + TH1D total("total", "", 10, 0, 10); + total.FillRandom("gaus", nTotal); + + TH2D chains("chains", "chains;chain;step", 3, 0, 3, 6, 0, 6); + TH2D decisions("decisions", "decisions;chain;step", 3, 0, 3, 6, 0, 6); + for (TH2D* h : {&chains, &decisions}) { + h->GetXaxis()->SetBinLabel(1, "HLT_testChain1"); + h->GetXaxis()->SetBinLabel(2, "HLT_testChain2"); + h->GetXaxis()->SetBinLabel(3, "HLT_testChain3"); + h->GetYaxis()->SetBinLabel(1, "L1"); + h->GetYaxis()->SetBinLabel(2, "AfterPS"); + h->GetYaxis()->SetBinLabel(3, "Step 0"); + h->GetYaxis()->SetBinLabel(4, "Step 1"); + h->GetYaxis()->SetBinLabel(5, "Step 2"); + h->GetYaxis()->SetBinLabel(6, "Output"); + } + for (size_t iChain=1; iChain<4; ++iChain) { + TString chainName = TString::Format("HLT_testChain%lu", iChain); + size_t nInput = gRandom->Uniform(nTotal); + size_t nOutput = gRandom->Uniform(nInput/2, nInput); + fillBin(chains, chainName.Data(), "L1", nInput); + fillBin(chains, chainName.Data(), "AfterPS", nInput); + fillBin(chains, chainName.Data(), "Output", nOutput); + for (size_t iStep=0; iStep<3; ++iStep) { + TString stepName = TString::Format("Step %lu", iStep); + size_t nStepMin = (8-iStep)*nInput/10; + size_t nStepMax = (9-iStep)*nInput/10; + size_t nStep = gRandom->Uniform(nStepMin, nStepMax); + size_t nDecisions = gRandom->Uniform(nStepMax, 2*nInput); + fillBin(chains, chainName.Data(), stepName.Data(), nStep); + fillBin(decisions, chainName.Data(), stepName.Data(), nDecisions); + } + } + + f.Write(); +} + +void test_chainDump() { + delete gRandom; + gRandom = new TRandom3(1234); // 
arbitrary fixed seed + Info("test_chainDump", "Creating test file testChainDump1.root"); + makeTestFile("testChainDump1.root"); + gRandom->SetSeed(12345); // different arbitrary fixed seed + Info("test_chainDump", "Creating test file testChainDump2.root"); + makeTestFile("testChainDump2.root"); +} diff --git a/Trigger/TrigValidation/TrigValTools/test/test_chainDump.sh b/Trigger/TrigValidation/TrigValTools/test/test_chainDump.sh new file mode 100755 index 0000000000000000000000000000000000000000..c3e8aad0d581d339062bba5d73429cfb75ce92ba --- /dev/null +++ b/Trigger/TrigValidation/TrigValTools/test/test_chainDump.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration +# +# Unit test for chainDump.py + +if [ -z "$1" ]; then + echo "Usage: $0 root.C" + exit 1 +fi + +# Helpers: +assert_pass() { + eval $@ || exit 1 +} + +assert_fail() { + eval $@ && exit 1 +} + +# Clean up files +rm -f testChainDump1.root testChainDump2.root TotalEventsProcessed.txt HLTChain.txt HLTDecision.txt chainDump.json + +# Create histogram files: +assert_pass root -l -b -n -q $1 + +# Run chainDump +assert_fail chainDump.py --json --fracTolerance=0.1 --intTolerance=10 \ +--totalHists total --countHists chains decisions \ +--histDict chains:HLTChain decisions:HLTDecision \ +-r testChainDump1.root -f testChainDump2.root + +# Print output files +assert_pass cat TotalEventsProcessed.txt +assert_pass cat HLTChain.txt +assert_pass cat HLTDecision.txt +assert_pass cat chainDump.json + +# If we get here all tests succeeded: +exit 0 diff --git a/Trigger/TriggerCommon/TriggerJobOpts/python/HLTTriggerGetter.py b/Trigger/TriggerCommon/TriggerJobOpts/python/HLTTriggerGetter.py index 7b875313fab3d499b0022cc44cb38e0e422826a7..f4cfb67c00b7a58b1fc49ea1deb959353540f061 100644 --- a/Trigger/TriggerCommon/TriggerJobOpts/python/HLTTriggerGetter.py +++ b/Trigger/TriggerCommon/TriggerJobOpts/python/HLTTriggerGetter.py @@ -4,7 +4,6 @@ from TriggerJobOpts.TriggerFlags import TriggerFlags from AthenaCommon.Logging import logging # loads logger from PerfMonComps.PerfMonFlags import jobproperties from AthenaCommon.Include import include -from RegionSelector.RegSelSvcDefault import RegSelSvcDefault from RecExConfig.Configured import Configured @@ -145,6 +144,7 @@ class HLTSimulationGetter(Configured): log.info("Loading RegionSelector") from AthenaCommon.AppMgr import ServiceMgr + from RegionSelector.RegSelSvcDefault import RegSelSvcDefault ServiceMgr += RegSelSvcDefault() # Configure the Data Preparation for Calo diff --git a/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py b/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py index 932be7ba4eb49903fe0ef2675986a15a3baf86a4..45e27aa217e3d48b5b51e36f92f2308fd0af166e 100644 --- a/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py +++ b/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py @@ -371,9 +371,6 @@ from AthenaCommon.DetFlags import DetFlags DetFlags.BField_setOn() include ("RecExCond/AllDet_detDescr.py") -from RegionSelector.RegSelSvcDefault import RegSelSvcDefault -svcMgr += RegSelSvcDefault() - if TriggerFlags.doID(): from InDetRecExample.InDetJobProperties import InDetFlags InDetFlags.doPrintConfigurables = log.getEffectiveLevel() <= logging.DEBUG
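Note on the last two hunks: runHLT_standalone.py no longer instantiates RegSelSvcDefault unconditionally, and HLTTriggerGetter.py now imports it only inside the code path that actually configures the RegionSelector. A rough illustration of that deferred-import pattern follows (a sketch only, not the actual HLTTriggerGetter code; the function name is hypothetical, while ServiceMgr and RegSelSvcDefault are the names used in the hunks above):

    # Sketch: defer the RegionSelector import until the service is really needed,
    # so job options that never reach this path do not pull it in at import time.
    def configure_region_selector():
        from AthenaCommon.AppMgr import ServiceMgr
        from RegionSelector.RegSelSvcDefault import RegSelSvcDefault
        ServiceMgr += RegSelSvcDefault()  # same registration kept in HLTTriggerGetter.py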