From adce47a4aef2190a6d48792f68c71f9059f614d0 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Thu, 30 Apr 2020 20:35:18 +0200 Subject: [PATCH 001/111] First commit of PrMatchNN to use the PrTrack classes. non functional --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 15 +++++++++------ Pr/PrAlgorithms/src/PrMatchNN.h | 8 +++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index bc3302bce97..92042cf3665 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,7 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", LHCb::TrackLocation::Velo}, KeyValue{"SeedInput", LHCb::TrackLocation::Seed}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,18 +48,18 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN::Track>& velos, - const std::vector<PrMatchNN::Track>& seeds ) const { +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); - if ( velos.empty() ) { + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; return matches; } - if ( seeds.empty() ) { + if ( seeds.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; return matches; @@ -76,7 +76,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - + + /* for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -176,6 +177,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } // end loop match cands } // end loop velo tracks + */ + m_tracksCount += matches.size(); return matches; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index cb7581998fa..50071ecad47 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -14,6 +14,9 @@ // Include files // from Gaudi #include "Event/Track_v2.h" +#include "Event/PrVeloTracks.h" +#include "Event/PrSeedTracks.h" + #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -36,8 +39,7 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const std::vector<LHCb::Event::v2::Track>&, const std::vector<LHCb::Event::v2::Track>& )> { +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { using Track = 
LHCb::Event::v2::Track; public: @@ -48,7 +50,7 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const std::vector<Track>&, const std::vector<Track>& ) const override; + std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * -- GitLab From fee30cbd20874db5ee7b4d4a4c07be1c57f7a453 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 5 May 2020 09:40:57 +0200 Subject: [PATCH 002/111] add hits indices --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 30 +++++++++++++++++-- .../src/SciFiTrackForwarding.cpp | 13 ++++++++ 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 027e180e71e..fbbd41e18be 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2207,20 +2207,32 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); LHCb::Pr::Forward::Tracks result( velo_ancestors, upstream_ancestors ); - for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids ) ) { + for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids) ) { int const currentsize = result.size(); if ( !cand.valid() ) continue; int uttrack = cand.track(); + if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { result.store_trackVP<I>( currentsize, input_tracks.template trackVP<I>( uttrack ) ); result.store_trackUT<I>( currentsize, uttrack ); + + const int veloidx = input_tracks.template trackVP<I>( uttrack ).cast(); + const int velohits= (*velo_ancestors).template nHits<I>(veloidx).cast(); + for ( int idx{0}; idx < velohits; ++idx ) { result.store_velohit_indices<I>( currentsize, idx, (*velo_ancestors).template hit<I>(veloidx,idx) ); } + ///TO Do: currently for UT hits, LHCbIDs are stored + const int uthits= input_tracks.template nHits<I>(uttrack).cast(); + for ( int idx{0}; idx < uthits; ++idx ) { result.store_uthit_indices<I>( currentsize, idx, input_tracks.template hit<I>(uttrack,idx) ); } + } else { result.store_trackVP<I>( currentsize, uttrack ); result.store_trackUT<I>( currentsize, -1 ); + + const int velohits = input_tracks.template nHits<I>(uttrack).cast(); + for ( int idx{0}; idx < velohits; ++idx ) { result.store_velohit_indices<I>( currentsize, idx, input_tracks.template hit<I>(uttrack,idx) ); } // only used to disable unused warning in the velo track input case - uttrack = input_tracks.size(); + //uttrack = input_tracks.size(); } const double qOverP = cand.getQoP(); @@ -2238,6 +2250,9 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_statePos<F>( currentsize, pos ); result.store_stateDir<F>( currentsize, dir ); + ///TO DO: change the LHCbID to be indices + std::vector<LHCb::LHCbID> utid; + utid.reserve(4); if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { if ( m_addUTHitsTool.isEnabled() ) { double chi2{0}; @@ -2250,13 +2265,24 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& } else { for ( auto const hit : uthits ) id.emplace_back( hit.HitPtr->chanID() ); std::sort( id.begin(), id.end() ); + //TO do: change the LHCbIDs to indices + for ( auto const hit : uthits ) utid.emplace_back( hit.HitPtr->chanID() ); + std::sort( utid.begin(), utid.end() ); } } 
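          // -- the UT LHCbIDs collected and sorted into `utid` above are written out as
          // -- "uthit_indices" in the loop that follows; per the TO-DO notes this is a
          // -- stop-gap until genuine UT hit indices are stored instead of LHCbIDs.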
+ for ( size_t idx{0}; idx < utid.size(); ++idx ) { result.store_uthit_indices<I>( currentsize, idx, utid[idx].lhcbID() ); } } //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } result.store_nHits<I>( currentsize, id.size() ); + + //== hits indices, max_fthits=15, not sure if we need this. + //assert(id.size()<=15 && "Container cannot store more than 15 hits per track") + auto const& ihits = cand.ihits(); + for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_fthit_indices<I>( currentsize, idx, ihits[idx] ); } + // TO Do: not sure which size to be stored + //result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Forward::Tracks::max_tracks ) ) { // FIXME: find a better way to define diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 1de3be01c79..cb082fd110e 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -667,10 +667,23 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac float const qop = 1.f / bestcandidate.PQ; Output.compressstore_stateQoP<sF>( i, mask, qop ); + // store Velo hit indices + LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); + const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); + const int velohits = (*velo_ancestors).nHits<sI>( velotrack ).cast(); + for ( auto idx{0}; idx < velohits; ++idx ) { + Output.compressstore_velohit_indices<sI>( i, idx, mask, (*velo_ancestors).hit<sI>( velotrack, idx ) ); + } + // TO Do: change the LHCbIDs to hit indices + const int uthits= tracks.nHits<sI>( uttrack + tr ).cast(); + for ( int idx{0}; idx < uthits; ++idx ) { Output.compressstore_uthit_indices<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr,idx) ); } + int n_hits = 0; for ( auto idx{bestcandidate.ids.begin()}; idx != bestcandidate.ids.end(); ++idx, ++n_hits ) { Output.compressstore_hit<sI>( i, n_hits, mask, hithandler.IDs[*idx] ); + /// FT hit indices + Output.compressstore_fthit_indices<sI>( i, n_hits, mask, *idx ); } Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits ); -- GitLab From f8194e577a5bbd12519f3201deefa33ba2c89ae9 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 5 May 2020 07:41:49 +0000 Subject: [PATCH 003/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8243361 --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 45 +++++++++++-------- .../src/SciFiTrackForwarding.cpp | 14 +++--- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index fbbd41e18be..ca42e230ff2 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2207,32 +2207,37 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); LHCb::Pr::Forward::Tracks result( velo_ancestors, upstream_ancestors ); - for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids) ) { + for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids ) ) { int const currentsize = result.size(); if ( !cand.valid() ) continue; int uttrack = cand.track(); - if constexpr ( std::is_same_v<T, 
LHCb::Pr::Upstream::Tracks> ) { result.store_trackVP<I>( currentsize, input_tracks.template trackVP<I>( uttrack ) ); result.store_trackUT<I>( currentsize, uttrack ); - const int veloidx = input_tracks.template trackVP<I>( uttrack ).cast(); - const int velohits= (*velo_ancestors).template nHits<I>(veloidx).cast(); - for ( int idx{0}; idx < velohits; ++idx ) { result.store_velohit_indices<I>( currentsize, idx, (*velo_ancestors).template hit<I>(veloidx,idx) ); } - ///TO Do: currently for UT hits, LHCbIDs are stored - const int uthits= input_tracks.template nHits<I>(uttrack).cast(); - for ( int idx{0}; idx < uthits; ++idx ) { result.store_uthit_indices<I>( currentsize, idx, input_tracks.template hit<I>(uttrack,idx) ); } + const int veloidx = input_tracks.template trackVP<I>( uttrack ).cast(); + const int velohits = ( *velo_ancestors ).template nHits<I>( veloidx ).cast(); + for ( int idx{0}; idx < velohits; ++idx ) { + result.store_velohit_indices<I>( currentsize, idx, ( *velo_ancestors ).template hit<I>( veloidx, idx ) ); + } + /// TO Do: currently for UT hits, LHCbIDs are stored + const int uthits = input_tracks.template nHits<I>( uttrack ).cast(); + for ( int idx{0}; idx < uthits; ++idx ) { + result.store_uthit_indices<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); + } } else { result.store_trackVP<I>( currentsize, uttrack ); result.store_trackUT<I>( currentsize, -1 ); - - const int velohits = input_tracks.template nHits<I>(uttrack).cast(); - for ( int idx{0}; idx < velohits; ++idx ) { result.store_velohit_indices<I>( currentsize, idx, input_tracks.template hit<I>(uttrack,idx) ); } + + const int velohits = input_tracks.template nHits<I>( uttrack ).cast(); + for ( int idx{0}; idx < velohits; ++idx ) { + result.store_velohit_indices<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); + } // only used to disable unused warning in the velo track input case - //uttrack = input_tracks.size(); + // uttrack = input_tracks.size(); } const double qOverP = cand.getQoP(); @@ -2250,9 +2255,9 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_statePos<F>( currentsize, pos ); result.store_stateDir<F>( currentsize, dir ); - ///TO DO: change the LHCbID to be indices + /// TO DO: change the LHCbID to be indices std::vector<LHCb::LHCbID> utid; - utid.reserve(4); + utid.reserve( 4 ); if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { if ( m_addUTHitsTool.isEnabled() ) { double chi2{0}; @@ -2265,24 +2270,26 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& } else { for ( auto const hit : uthits ) id.emplace_back( hit.HitPtr->chanID() ); std::sort( id.begin(), id.end() ); - //TO do: change the LHCbIDs to indices + // TO do: change the LHCbIDs to indices for ( auto const hit : uthits ) utid.emplace_back( hit.HitPtr->chanID() ); std::sort( utid.begin(), utid.end() ); } } - for ( size_t idx{0}; idx < utid.size(); ++idx ) { result.store_uthit_indices<I>( currentsize, idx, utid[idx].lhcbID() ); } + for ( size_t idx{0}; idx < utid.size(); ++idx ) { + result.store_uthit_indices<I>( currentsize, idx, utid[idx].lhcbID() ); + } } //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } result.store_nHits<I>( currentsize, id.size() ); - + //== hits indices, max_fthits=15, not sure if we need this. 
- //assert(id.size()<=15 && "Container cannot store more than 15 hits per track") + // assert(id.size()<=15 && "Container cannot store more than 15 hits per track") auto const& ihits = cand.ihits(); for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_fthit_indices<I>( currentsize, idx, ihits[idx] ); } // TO Do: not sure which size to be stored - //result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); + // result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Forward::Tracks::max_tracks ) ) { // FIXME: find a better way to define diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index cb082fd110e..729c4e60fbc 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -668,15 +668,17 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac Output.compressstore_stateQoP<sF>( i, mask, qop ); // store Velo hit indices - LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); - const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); - const int velohits = (*velo_ancestors).nHits<sI>( velotrack ).cast(); + LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); + const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); + const int velohits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); for ( auto idx{0}; idx < velohits; ++idx ) { - Output.compressstore_velohit_indices<sI>( i, idx, mask, (*velo_ancestors).hit<sI>( velotrack, idx ) ); + Output.compressstore_velohit_indices<sI>( i, idx, mask, ( *velo_ancestors ).hit<sI>( velotrack, idx ) ); } // TO Do: change the LHCbIDs to hit indices - const int uthits= tracks.nHits<sI>( uttrack + tr ).cast(); - for ( int idx{0}; idx < uthits; ++idx ) { Output.compressstore_uthit_indices<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr,idx) ); } + const int uthits = tracks.nHits<sI>( uttrack + tr ).cast(); + for ( int idx{0}; idx < uthits; ++idx ) { + Output.compressstore_uthit_indices<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr, idx ) ); + } int n_hits = 0; -- GitLab From e0f69675e0875a4fb54292dac4854180db80c7ca Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 5 May 2020 10:21:18 +0200 Subject: [PATCH 004/111] add residual hits and tracks algorithms --- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 140 ++++++++++++++++++ Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 126 +++++++++++++++++ Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 141 +++++++++++++++++++ 3 files changed, 407 insertions(+) create mode 100644 Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp create mode 100644 Pr/PrAlgorithms/src/PrResidualUTHits.cpp create mode 100644 Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp new file mode 100644 index 00000000000..9e30285908c --- /dev/null +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -0,0 +1,140 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". 
* +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +// Include files +#include "Event/ODIN.h" +#include "Event/Track.h" +#include "Event/Track_v2.h" +#include "Gaudi/Accumulators.h" +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/IRegistry.h" +#include "PrKernel/PrFTInfo.h" +#include "PrKernel/PrFTZoneHandler.h" +#include "PrKernel/PrHit.h" +#include "PrKernel/PrSciFiHits.h" +#include <Vc/Vc> +#include <vector> + +#include "boost/container/small_vector.hpp" +#include "boost/container/static_vector.hpp" +#include <memory> + +//----------------------------------------------------------------------------- +// class : PrResidualSciFiHits +// Store residual SciFiHits after other Algorithms, e.g. PrMatchNN used +// +// 2020-04-02 : Peilian Li +// +//----------------------------------------------------------------------------- + +namespace { + using namespace SciFiHits; +} + +class PrResidualSciFiHits : public Gaudi::Functional::Transformer<PrSciFiHits( + const std::vector<LHCb::Event::v2::Track>&, const PrSciFiHits& )> { + using Tracks = std::vector<LHCb::Event::v2::Track>; + +public: + PrResidualSciFiHits( const std::string& name, ISvcLocator* pSvcLocator ); + + PrSciFiHits operator()( const Tracks&, const PrSciFiHits& ) const override; +}; + +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID( PrResidualSciFiHits, "PrResidualSciFiHits" ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +PrResidualSciFiHits::PrResidualSciFiHits( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, + {KeyValue{"TracksLocation", ""}, KeyValue{"SciFiHitsLocation", PrFTInfo::SciFiHitsLocation}}, + KeyValue{"SciFiHitsOutput", PrFTInfo::SciFiHitsLocation} ) {} + +//============================================================================= +// Main execution +//============================================================================= +PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFiHits& fthits ) const { + PrSciFiHits tmp{}; + auto& hitvec = tmp._x; + auto& z0vec = tmp._z0; + auto& yMinvec = tmp._yMins; + auto& yMaxvec = tmp._yMaxs; + auto& planeCodevec = tmp._planeCodes; + auto& IDvec = tmp._IDs; + auto& werrvec = tmp._w; + auto& dzDyvec = tmp._dzDy; + auto& dxDyvec = tmp._dxDy; + auto& zoneIndexes = tmp.zoneIndexes; + + if ( tracks.empty() ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; + return fthits; + } + + std::vector<long unsigned int> usedFTHits{}; + usedFTHits.reserve( fthits._IDs.size() ); + for ( auto& track : tracks ) { + auto ids = track.lhcbIDs(); + for ( auto& id : ids ) { + if ( !( id.isFT() ) ) continue; + usedFTHits.emplace_back( id.lhcbID() ); + } + } + constexpr auto xu = PrFTInfo::xZonesUpper; + constexpr auto hitzonesID = + std::array<int, 24>{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23}; + zoneIndexes[0] = hitvec.size(); + int j = 1; + for ( long unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop the SciFiHits container + bool used = false; + // for (auto & id : 
usedFTHits ) { + // if ( id == fthits._IDs[i] ) { + for ( long unsigned int id = 0; id != usedFTHits.size(); id++ ) { // loop all the SciFiHits used by tracks + if ( usedFTHits[id] == fthits._IDs[i] ) { + used = true; + usedFTHits.erase( usedFTHits.begin() + id ); + break; + } + } + if ( used ) continue; + hitvec.emplace_back( fthits._x[i] ); + z0vec.emplace_back( fthits._z0[i] ); + yMinvec.emplace_back( fthits._yMins[i] ); + yMaxvec.emplace_back( fthits._yMaxs[i] ); + planeCodevec.emplace_back( fthits._planeCodes[i] ); + IDvec.emplace_back( fthits._IDs[i] ); + werrvec.emplace_back( fthits._w[i] ); + dzDyvec.emplace_back( fthits._dzDy[i] ); + dxDyvec.emplace_back( fthits._dxDy[i] ); + if ( i != fthits._IDs.size() - 1 && fthits._IDs[i] == 0 && fthits._IDs[i + 1] == 0 ) { + zoneIndexes[hitzonesID[j]] = hitvec.size(); + j++; + } + } + zoneIndexes[PrFTInfo::NFTZones] = zoneIndexes[xu[0]]; + zoneIndexes[PrFTInfo::NFTZones + 1] = hitvec.size(); + + const Vc::float_v one_f_large = Vc::float_v::One() * 1.e9f; + const Vc::float_v zero_f = Vc::float_v::Zero(); + one_f_large.store( &*hitvec.end() ); + one_f_large.store( &*z0vec.end() ); + one_f_large.store( &*yMinvec.end() ); + one_f_large.store( &*yMaxvec.end() ); + one_f_large.store( &*werrvec.end() ); + ( Vc::int_v::One() * std::numeric_limits<int>::max() ).store( &*planeCodevec.end() ); + ( Vc::uint_v::Zero() ).store( &*IDvec.end() ); + zero_f.store( &*dzDyvec.end() ); + zero_f.store( &*dxDyvec.end() ); + + return tmp; +} diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp new file mode 100644 index 00000000000..b54edb2f959 --- /dev/null +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -0,0 +1,126 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +// Include files +#include "Event/ODIN.h" +#include "Event/Track.h" +#include "Event/Track_v2.h" +#include "Gaudi/Accumulators.h" +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/IRegistry.h" +#include "PrKernel/PrHit.h" +#include "PrKernel/PrUTHitHandler.h" +#include "PrKernel/UTHit.h" +#include "PrKernel/UTHitHandler.h" +#include "PrKernel/UTHitInfo.h" +#include "UTDAQ/UTInfo.h" +#include "UTDet/DeUTDetector.h" +#include <Vc/Vc> +#include <vector> + +#include "boost/container/small_vector.hpp" +#include "boost/container/static_vector.hpp" +#include <memory> + +//----------------------------------------------------------------------------- +// class : PrResidualUTHits +// Store residual UTHits after other Algorithms, e.g. 
PrMatchNN/PrForward used +// +// 2020-04-21 : Peilian Li +// +//----------------------------------------------------------------------------- + +class PrResidualUTHits : public Gaudi::Functional::Transformer<UT::HitHandler( + const std::vector<LHCb::Event::v2::Track>&, const UT::HitHandler& )> { + + using Tracks = std::vector<LHCb::Event::v2::Track>; + +public: + StatusCode initialize() override; + + PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); + + UT::HitHandler operator()( const Tracks&, const UT::HitHandler& ) const override; + +private: + DeUTDetector* m_utDet = nullptr; +}; + +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID( PrResidualUTHits, "PrResidualUTHits" ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +PrResidualUTHits::PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, {KeyValue{"TracksLocation", ""}, KeyValue{"UTHitsLocation", ""}}, + // KeyValue{"GeomCache", "AlgorithmSpecific-" + name + "-UTGeomCache"}}, + KeyValue{"UTHitsOutput", ""} ) {} + +// initializes +//============================================================================= +StatusCode PrResidualUTHits::initialize() { + StatusCode sc = GaudiAlgorithm::initialize(); + if ( sc.isFailure() ) return sc; + m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); + debug() << "Number of UT layers " << m_utDet->layers().size() << endmsg; + return StatusCode::SUCCESS; +} +// Main execution +//============================================================================= +// UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::HitHandler& uthithandler, const +// UTGeomCache& cache ) const { +UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::HitHandler& uthithandler ) const { + UT::HitHandler tmp{}; + + if ( tracks.empty() ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; + return uthithandler; + } + + std::vector<long unsigned int> usedUTHits{}; + usedUTHits.reserve( uthithandler.nbHits() ); + + // info() <<"total UT Hits " << uthithandler.nbHits() <<endmsg; + for ( auto& track : tracks ) { + auto& ids = track.lhcbIDs(); + for ( auto& id : ids ) { + if ( !( id.isUT() ) ) continue; + usedUTHits.emplace_back( id.utID().channelID() ); + } + } + // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; + + for ( int iStation = 1; iStation < 3; ++iStation ) { + for ( int iLayer = 1; iLayer < 3; ++iLayer ) { + for ( int iRegion = 1; iRegion < 4; ++iRegion ) { + for ( int iSector = 1; iSector < 99; ++iSector ) { + for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { + bool used = false; + for ( auto& id : usedUTHits ) { + if ( uthit.chanID().channelID() == id ) { + used = true; + break; + } + } + if ( used ) continue; + const unsigned int fullChanIdx = UT::HitHandler::HitsInUT::idx( iStation, iLayer, iRegion, iSector ); + const auto* aSector = m_utDet->getSector( uthit.chanID() ); + tmp.AddHit( aSector, fullChanIdx, uthit.strip(), uthit.fracStrip(), uthit.chanID(), uthit.size(), + uthit.highThreshold() ); + } + } + } + } + } + // info() <<"residual UT Hits" << tmp.nbHits() <<endmsg; + return tmp; +} diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp new file 
mode 100644 index 00000000000..b76541f59fb --- /dev/null +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -0,0 +1,141 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +// Include files +#include "Event/Track.h" +#include "Event/Track_v2.h" +#include "Gaudi/Accumulators.h" +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/IRegistry.h" +#include <algorithm> +#include <array> +#include <vector> + +#include "Kernel/STLExtensions.h" +#include "Kernel/VPChannelID.h" +#include "PrKernel/PrPixelModule.h" +#include "PrKernel/VeloPixelInfo.h" +#include "VPDet/DeVP.h" + +#include "Event/PrVeloHits.h" +#include "Event/PrVeloTracks.h" + +#include "Event/ODIN.h" +#include "LHCbMath/SIMDWrapper.h" +#include <Vc/Vc> + +#include "Kernel/AllocatorUtils.h" +#include "boost/container/small_vector.hpp" +#include "boost/container/static_vector.hpp" +#include <memory> + +//----------------------------------------------------------------------------- +// class : PrResidualVeloTracks +// Store residual VeloTracks after other Algorithms, e.g. PrMatchNN used +// +// 2020-04-02 : Peilian Li +// +//----------------------------------------------------------------------------- + +// namespace { +// using namespace LHCb::Pr::Velo; +//} + +typedef LHCb::Event::v1::Tracks V1Tracks; +typedef LHCb::Pr::Velo::Tracks VeloTracks; +class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( + const V1Tracks&, const VeloTracks&, const LHCb::Pr::Velo::Hits& )> { + +public: + PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ); + + LHCb::Pr::Velo::Tracks operator()( const V1Tracks&, const VeloTracks&, const LHCb::Pr::Velo::Hits& ) const override; +}; + +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID( PrResidualVeloTracks, "PrResidualVeloTracks" ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +PrResidualVeloTracks::PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, + {KeyValue{"TracksLocation", ""}, KeyValue{"VeloTrackLocation", "Rec/Track/Velo"}, + KeyValue{"PrVeloHitsLocation", ""}}, + KeyValue{"VeloTrackOutput", ""} ) {} + +//============================================================================= +// Main execution +//============================================================================= +LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const V1Tracks& tracks, const VeloTracks& velotracks, + const LHCb::Pr::Velo::Hits& veloHits ) const { + + // using simd = SIMDWrapper::avx256::types; + using simd = SIMDWrapper::scalar::types; + using I = SIMDWrapper::scalar::types::int_v; + auto tmp = LHCb::make_obj_propagating_allocator<LHCb::Pr::Velo::Tracks>( tracks, Zipping::generateZipIdentifier() ); + + /* + if ( tracks.empty() ) { + if ( UNLIKELY( msgLevel( 
MSG::DEBUG ) ) ) + debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; + return tmp; + } + */ + + if ( velotracks.empty() ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Velo Track container '" << inputLocation<1>() << "' is empty" << endmsg; + return tmp; + } + + // info()<<"velo tracks " <<velotracks.size() << " match tracks: " << tracks.size() <<endmsg; + for ( int t = 0; t < velotracks.size(); t += simd::size ) { + + // auto loop_mask = simd :: loop_mask (t , velotracks.size()); + + const int nhits = velotracks.nHits<I>( t ).cast(); + auto const hits = velotracks.lhcbIDs( t, veloHits ); + + /// residual velo track index + bool usedtrack = false; + if ( !tracks.empty() ) { + for ( auto& track : tracks ) { + auto ids = track->lhcbIDs(); + int nVelohits = 0; + for ( auto& id : ids ) + if ( id.isVP() ) { nVelohits++; } + bool used = true; + if ( nVelohits != nhits ) + used = false; + else { + for ( int i = 0; i < nhits; i++ ) { + if ( !( track->isOnTrack( hits[i] ) ) ) { + used = false; + break; + } + } + } + if ( used ) { + usedtrack = true; + break; + } + } + } + if ( usedtrack ) continue; + // if(all(usedtrack)) continue; + // auto mask = (!usedtrack) && loop_mask; + auto mask = ( !usedtrack ); + + tmp.copy_back<simd>( velotracks, t, mask ); + } + // info()<<"===========end "<< tmp.size()<< endmsg; + return tmp; +} -- GitLab From 487df6e6442cadabcad49908e9c2d54ccb5997c2 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 6 May 2020 10:11:01 +0200 Subject: [PATCH 005/111] change input to be SOA structures --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 4 +- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 79 +++++++++----------- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 53 +++---------- 3 files changed, 50 insertions(+), 86 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index ca42e230ff2..3c81ed59936 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2282,12 +2282,14 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } - result.store_nHits<I>( currentsize, id.size() ); + //result.store_nHits<I>( currentsize, id.size() ); //== hits indices, max_fthits=15, not sure if we need this. 
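      // -- note: nHits is now taken from the candidate's SciFi hit indices (cand.ihits())
      // -- rather than from the LHCbID list filled above; which total should eventually be
      // -- stored is left open, see the TO-DO a few lines below.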
// assert(id.size()<=15 && "Container cannot store more than 15 hits per track") auto const& ihits = cand.ihits(); + result.store_nHits<I>( currentsize, ihits.size() ); for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_fthit_indices<I>( currentsize, idx, ihits[idx] ); } + // TO Do: not sure which size to be stored // result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index 9e30285908c..c3df5f5d6a7 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -18,9 +18,11 @@ #include "PrKernel/PrFTInfo.h" #include "PrKernel/PrFTZoneHandler.h" #include "PrKernel/PrHit.h" +#include "Event/PrForwardTracks.h" #include "PrKernel/PrSciFiHits.h" #include <Vc/Vc> #include <vector> +#include <array> #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" @@ -28,7 +30,8 @@ //----------------------------------------------------------------------------- // class : PrResidualSciFiHits -// Store residual SciFiHits after other Algorithms, e.g. PrMatchNN used +// Store residual SciFiHits after other Algorithms, e.g. PrMatchNN or PrForwardTracking +// the input tracks and SciFiHits are in SOA structure // // 2020-04-02 : Peilian Li // @@ -39,8 +42,8 @@ namespace { } class PrResidualSciFiHits : public Gaudi::Functional::Transformer<PrSciFiHits( - const std::vector<LHCb::Event::v2::Track>&, const PrSciFiHits& )> { - using Tracks = std::vector<LHCb::Event::v2::Track>; + const LHCb::Pr::Forward::Tracks&, const PrSciFiHits& )> { + using Tracks = LHCb::Pr::Forward::Tracks; public: PrResidualSciFiHits( const std::string& name, ISvcLocator* pSvcLocator ); @@ -63,6 +66,9 @@ PrResidualSciFiHits::PrResidualSciFiHits( const std::string& name, ISvcLocator* // Main execution //============================================================================= PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFiHits& fthits ) const { + using simd = SIMDWrapper::scalar::types; + using I = SIMDWrapper::scalar::types::int_v; + PrSciFiHits tmp{}; auto& hitvec = tmp._x; auto& z0vec = tmp._z0; @@ -75,38 +81,36 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi auto& dxDyvec = tmp._dxDy; auto& zoneIndexes = tmp.zoneIndexes; - if ( tracks.empty() ) { + if ( tracks.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; return fthits; } - std::vector<long unsigned int> usedFTHits{}; - usedFTHits.reserve( fthits._IDs.size() ); - for ( auto& track : tracks ) { - auto ids = track.lhcbIDs(); - for ( auto& id : ids ) { - if ( !( id.isFT() ) ) continue; - usedFTHits.emplace_back( id.lhcbID() ); + std::array<bool, 25000> used{false}; + + /// mark used SciFi Hits + for ( int t = 0; t < tracks.size(); t += simd::size ) { + const int nfthits = tracks.nHits<I>( t ).cast(); + for ( int id = 0; id != nfthits; id++ ) { + auto idx = tracks.fthit_indices<I>(t, id).cast() ; + if( idx != 0 ) used[idx] = true; } } - constexpr auto xu = PrFTInfo::xZonesUpper; - constexpr auto hitzonesID = - std::array<int, 24>{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23}; - zoneIndexes[0] = hitvec.size(); - int j = 1; - for ( long unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop the SciFiHits container - bool used = false; - // for (auto & id : usedFTHits ) { - 
// if ( id == fthits._IDs[i] ) { - for ( long unsigned int id = 0; id != usedFTHits.size(); id++ ) { // loop all the SciFiHits used by tracks - if ( usedFTHits[id] == fthits._IDs[i] ) { - used = true; - usedFTHits.erase( usedFTHits.begin() + id ); - break; - } - } - if ( used ) continue; + constexpr auto xu = PrFTInfo::xZonesUpper; + constexpr auto uvu = PrFTInfo::uvZonesUpper; + + constexpr auto xd = PrFTInfo::xZonesLower; + constexpr auto uvd = PrFTInfo::uvZonesLower; + constexpr auto hitzones = std::array<int, PrFTInfo::NFTZones>{ + xd[0], uvd[0], uvd[1], xd[1], xd[2], uvd[2], uvd[3], xd[3], xd[4], uvd[4], uvd[5], xd[5], + xu[0], uvu[0], uvu[1], xu[1], xu[2], uvu[2], uvu[3], xu[3], xu[4], uvu[4], uvu[5], xu[5]}; + + zoneIndexes[hitzones[0]] = hitvec.size(); + int j = 1; + for ( long unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop whole SciFiHits container + + if( used[i] ) continue; hitvec.emplace_back( fthits._x[i] ); z0vec.emplace_back( fthits._z0[i] ); yMinvec.emplace_back( fthits._yMins[i] ); @@ -116,25 +120,14 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi werrvec.emplace_back( fthits._w[i] ); dzDyvec.emplace_back( fthits._dzDy[i] ); dxDyvec.emplace_back( fthits._dxDy[i] ); - if ( i != fthits._IDs.size() - 1 && fthits._IDs[i] == 0 && fthits._IDs[i + 1] == 0 ) { - zoneIndexes[hitzonesID[j]] = hitvec.size(); + if ( j < 24 && fthits._IDs[i] == 0 && fthits._IDs[i + 1] == 0 ) { + zoneIndexes[hitzones[j]] = hitvec.size(); j++; } } zoneIndexes[PrFTInfo::NFTZones] = zoneIndexes[xu[0]]; zoneIndexes[PrFTInfo::NFTZones + 1] = hitvec.size(); - - const Vc::float_v one_f_large = Vc::float_v::One() * 1.e9f; - const Vc::float_v zero_f = Vc::float_v::Zero(); - one_f_large.store( &*hitvec.end() ); - one_f_large.store( &*z0vec.end() ); - one_f_large.store( &*yMinvec.end() ); - one_f_large.store( &*yMaxvec.end() ); - one_f_large.store( &*werrvec.end() ); - ( Vc::int_v::One() * std::numeric_limits<int>::max() ).store( &*planeCodevec.end() ); - ( Vc::uint_v::Zero() ).store( &*IDvec.end() ); - zero_f.store( &*dzDyvec.end() ); - zero_f.store( &*dxDyvec.end() ); - + return tmp; + } diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index b76541f59fb..66d3597fd1f 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -26,6 +26,7 @@ #include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" +#include "Event/PrForwardTracks.h" #include "Event/ODIN.h" #include "LHCbMath/SIMDWrapper.h" @@ -44,19 +45,15 @@ // //----------------------------------------------------------------------------- -// namespace { -// using namespace LHCb::Pr::Velo; -//} - -typedef LHCb::Event::v1::Tracks V1Tracks; -typedef LHCb::Pr::Velo::Tracks VeloTracks; +typedef LHCb::Pr::Forward::Tracks LongTracks; +typedef LHCb::Pr::Velo::Tracks VeloTracks; class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( - const V1Tracks&, const VeloTracks&, const LHCb::Pr::Velo::Hits& )> { + const LongTracks&, const VeloTracks& )> { public: PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ); - LHCb::Pr::Velo::Tracks operator()( const V1Tracks&, const VeloTracks&, const LHCb::Pr::Velo::Hits& ) const override; + LHCb::Pr::Velo::Tracks operator()( const LongTracks&, const VeloTracks& ) const override; }; // Declaration of the Algorithm Factory @@ -67,15 +64,13 @@ DECLARE_COMPONENT_WITH_ID( PrResidualVeloTracks, "PrResidualVeloTracks" ) 
//============================================================================= PrResidualVeloTracks::PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"TracksLocation", ""}, KeyValue{"VeloTrackLocation", "Rec/Track/Velo"}, - KeyValue{"PrVeloHitsLocation", ""}}, + {KeyValue{"TracksLocation", ""}, KeyValue{"VeloTrackLocation", "Rec/Track/Velo"}}, KeyValue{"VeloTrackOutput", ""} ) {} //============================================================================= // Main execution //============================================================================= -LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const V1Tracks& tracks, const VeloTracks& velotracks, - const LHCb::Pr::Velo::Hits& veloHits ) const { +LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& tracks, const VeloTracks& velotracks) const { // using simd = SIMDWrapper::avx256::types; using simd = SIMDWrapper::scalar::types; @@ -96,46 +91,20 @@ LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const V1Tracks& tracks, return tmp; } - // info()<<"velo tracks " <<velotracks.size() << " match tracks: " << tracks.size() <<endmsg; for ( int t = 0; t < velotracks.size(); t += simd::size ) { - // auto loop_mask = simd :: loop_mask (t , velotracks.size()); - const int nhits = velotracks.nHits<I>( t ).cast(); - auto const hits = velotracks.lhcbIDs( t, veloHits ); - - /// residual velo track index bool usedtrack = false; - if ( !tracks.empty() ) { - for ( auto& track : tracks ) { - auto ids = track->lhcbIDs(); - int nVelohits = 0; - for ( auto& id : ids ) - if ( id.isVP() ) { nVelohits++; } - bool used = true; - if ( nVelohits != nhits ) - used = false; - else { - for ( int i = 0; i < nhits; i++ ) { - if ( !( track->isOnTrack( hits[i] ) ) ) { - used = false; - break; - } - } - } - if ( used ) { - usedtrack = true; - break; - } - } + for ( int itrack = 0; itrack < tracks.size(); itrack += simd::size ) { + const auto veloidx = tracks.trackVP<I>( itrack ); + if (t == veloidx) { usedtrack = true; break; } } if ( usedtrack ) continue; - // if(all(usedtrack)) continue; + // auto mask = (!usedtrack) && loop_mask; auto mask = ( !usedtrack ); tmp.copy_back<simd>( velotracks, t, mask ); } - // info()<<"===========end "<< tmp.size()<< endmsg; return tmp; } -- GitLab From 59c0df0094231528bb1a00ab84a81dea337f6c17 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Wed, 6 May 2020 08:12:07 +0000 Subject: [PATCH 006/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8259680 --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 4 ++-- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 25 ++++++++++---------- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 21 +++++++++------- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 3c81ed59936..2603451903c 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2282,14 +2282,14 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } - //result.store_nHits<I>( currentsize, id.size() ); + // result.store_nHits<I>( currentsize, id.size() ); //== hits indices, max_fthits=15, not sure if we need this. 
// assert(id.size()<=15 && "Container cannot store more than 15 hits per track") auto const& ihits = cand.ihits(); result.store_nHits<I>( currentsize, ihits.size() ); for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_fthit_indices<I>( currentsize, idx, ihits[idx] ); } - + // TO Do: not sure which size to be stored // result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index c3df5f5d6a7..b41fc5300eb 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -10,6 +10,7 @@ \*****************************************************************************/ // Include files #include "Event/ODIN.h" +#include "Event/PrForwardTracks.h" #include "Event/Track.h" #include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" @@ -18,11 +19,10 @@ #include "PrKernel/PrFTInfo.h" #include "PrKernel/PrFTZoneHandler.h" #include "PrKernel/PrHit.h" -#include "Event/PrForwardTracks.h" #include "PrKernel/PrSciFiHits.h" #include <Vc/Vc> -#include <vector> #include <array> +#include <vector> #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" @@ -30,7 +30,7 @@ //----------------------------------------------------------------------------- // class : PrResidualSciFiHits -// Store residual SciFiHits after other Algorithms, e.g. PrMatchNN or PrForwardTracking +// Store residual SciFiHits after other Algorithms, e.g. PrMatchNN or PrForwardTracking // the input tracks and SciFiHits are in SOA structure // // 2020-04-02 : Peilian Li @@ -41,8 +41,8 @@ namespace { using namespace SciFiHits; } -class PrResidualSciFiHits : public Gaudi::Functional::Transformer<PrSciFiHits( - const LHCb::Pr::Forward::Tracks&, const PrSciFiHits& )> { +class PrResidualSciFiHits + : public Gaudi::Functional::Transformer<PrSciFiHits( const LHCb::Pr::Forward::Tracks&, const PrSciFiHits& )> { using Tracks = LHCb::Pr::Forward::Tracks; public: @@ -93,24 +93,24 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi for ( int t = 0; t < tracks.size(); t += simd::size ) { const int nfthits = tracks.nHits<I>( t ).cast(); for ( int id = 0; id != nfthits; id++ ) { - auto idx = tracks.fthit_indices<I>(t, id).cast() ; - if( idx != 0 ) used[idx] = true; + auto idx = tracks.fthit_indices<I>( t, id ).cast(); + if ( idx != 0 ) used[idx] = true; } } constexpr auto xu = PrFTInfo::xZonesUpper; constexpr auto uvu = PrFTInfo::uvZonesUpper; - constexpr auto xd = PrFTInfo::xZonesLower; - constexpr auto uvd = PrFTInfo::uvZonesLower; + constexpr auto xd = PrFTInfo::xZonesLower; + constexpr auto uvd = PrFTInfo::uvZonesLower; constexpr auto hitzones = std::array<int, PrFTInfo::NFTZones>{ xd[0], uvd[0], uvd[1], xd[1], xd[2], uvd[2], uvd[3], xd[3], xd[4], uvd[4], uvd[5], xd[5], xu[0], uvu[0], uvu[1], xu[1], xu[2], uvu[2], uvu[3], xu[3], xu[4], uvu[4], uvu[5], xu[5]}; zoneIndexes[hitzones[0]] = hitvec.size(); - int j = 1; + int j = 1; for ( long unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop whole SciFiHits container - if( used[i] ) continue; + if ( used[i] ) continue; hitvec.emplace_back( fthits._x[i] ); z0vec.emplace_back( fthits._z0[i] ); yMinvec.emplace_back( fthits._yMins[i] ); @@ -127,7 +127,6 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi } zoneIndexes[PrFTInfo::NFTZones] = zoneIndexes[xu[0]]; zoneIndexes[PrFTInfo::NFTZones + 1] = hitvec.size(); - - return tmp; + 
return tmp; } diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index 66d3597fd1f..23da3e92cad 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -24,9 +24,9 @@ #include "PrKernel/VeloPixelInfo.h" #include "VPDet/DeVP.h" +#include "Event/PrForwardTracks.h" #include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" -#include "Event/PrForwardTracks.h" #include "Event/ODIN.h" #include "LHCbMath/SIMDWrapper.h" @@ -46,9 +46,9 @@ //----------------------------------------------------------------------------- typedef LHCb::Pr::Forward::Tracks LongTracks; -typedef LHCb::Pr::Velo::Tracks VeloTracks; -class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( - const LongTracks&, const VeloTracks& )> { +typedef LHCb::Pr::Velo::Tracks VeloTracks; +class PrResidualVeloTracks + : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( const LongTracks&, const VeloTracks& )> { public: PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ); @@ -63,14 +63,14 @@ DECLARE_COMPONENT_WITH_ID( PrResidualVeloTracks, "PrResidualVeloTracks" ) // Standard constructor, initializes variables //============================================================================= PrResidualVeloTracks::PrResidualVeloTracks( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer( name, pSvcLocator, - {KeyValue{"TracksLocation", ""}, KeyValue{"VeloTrackLocation", "Rec/Track/Velo"}}, + : Transformer( name, pSvcLocator, {KeyValue{"TracksLocation", ""}, KeyValue{"VeloTrackLocation", "Rec/Track/Velo"}}, KeyValue{"VeloTrackOutput", ""} ) {} //============================================================================= // Main execution //============================================================================= -LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& tracks, const VeloTracks& velotracks) const { +LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& tracks, + const VeloTracks& velotracks ) const { // using simd = SIMDWrapper::avx256::types; using simd = SIMDWrapper::scalar::types; @@ -96,8 +96,11 @@ LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& track bool usedtrack = false; for ( int itrack = 0; itrack < tracks.size(); itrack += simd::size ) { - const auto veloidx = tracks.trackVP<I>( itrack ); - if (t == veloidx) { usedtrack = true; break; } + const auto veloidx = tracks.trackVP<I>( itrack ); + if ( t == veloidx ) { + usedtrack = true; + break; + } } if ( usedtrack ) continue; -- GitLab From c74794458088aff177fc395313d5f2878e97c6d6 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 6 May 2020 14:48:20 +0200 Subject: [PATCH 007/111] change array to dynamic_bitset --- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index b41fc5300eb..f130b8bdce6 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -26,6 +26,7 @@ #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" +#include "boost/dynamic_bitset.hpp" #include <memory> //----------------------------------------------------------------------------- @@ -87,7 +88,8 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi 
return fthits; } - std::array<bool, 25000> used{false}; + const auto nhits = fthits._IDs.size(); + boost::dynamic_bitset<> used{nhits, false}; /// mark used SciFi Hits for ( int t = 0; t < tracks.size(); t += simd::size ) { -- GitLab From d620e490d6f2a5d72c10e97bf690712547031875 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Wed, 6 May 2020 12:49:05 +0000 Subject: [PATCH 008/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8264329 --- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index f130b8bdce6..60258df24ea 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -88,7 +88,7 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi return fthits; } - const auto nhits = fthits._IDs.size(); + const auto nhits = fthits._IDs.size(); boost::dynamic_bitset<> used{nhits, false}; /// mark used SciFi Hits -- GitLab From 8cf381b8206a9f2fb35ed6a17e8a95413c0f0627 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 11 Mar 2020 14:23:51 +0100 Subject: [PATCH 009/111] add latest PrVeloUT --- Pr/PrVeloUT/src/PrVeloUT.cpp | 999 +++++++++++++++++++++++------------ Pr/PrVeloUT/src/PrVeloUT.h | 160 +++++- 2 files changed, 823 insertions(+), 336 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 09a4fc68365..f81b938c341 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -31,114 +31,183 @@ DECLARE_COMPONENT_WITH_ID( LHCb::Pr::VeloUT, "PrVeloUT" ) namespace LHCb::Pr { namespace { - using simd = SIMDWrapper::avx2::types; + simd::mask_v CholeskyDecomposition3( const std::array<simd::float_v, 6>& mat, std::array<simd::float_v, 3>& rhs ) { + // -- copied from Root::Math::CholeskyDecomp + // -- first decompose + std::array<simd::float_v, 6> dst; + simd::mask_v mask = mat[0] < simd::float_v{0.0f}; + dst[0] = max( 1e-6f, dst[0] ); // that's only needed if you care about FPE + // dst[0] = rsqrt( mat[0] ); // this seems to give precision issues + dst[0] = 1.0f / sqrt( mat[0] ); + dst[1] = mat[1] * dst[0]; + dst[2] = mat[2] - dst[1] * dst[1]; + mask = mask || ( dst[2] < simd::float_v{0.0f} ); + dst[2] = max( 1e-6f, dst[2] ); // that's only needed if you care about FPE + // dst[2] = rsqrt( dst[2] ); // this seems to give precision issues + dst[2] = 1.0f / sqrt( dst[2] ); + dst[3] = mat[3] * dst[0]; + dst[4] = ( mat[4] - dst[1] * dst[3] ) * dst[2]; + dst[5] = mat[5] - ( dst[3] * dst[3] + dst[4] * dst[4] ); + mask = mask || ( dst[5] < simd::float_v{0.0f} ); + dst[5] = max( 1e-6f, dst[5] ); // that's only needed if you care about FPE + // dst[5] = rsqrt( dst[5] ); // this seems to give precision issues + dst[5] = 1.0f / sqrt( dst[5] ); + + // -- then solve + // -- solve Ly = rhs + const simd::float_v y0 = rhs[0] * dst[0]; + const simd::float_v y1 = ( rhs[1] - dst[1] * y0 ) * dst[2]; + const simd::float_v y2 = ( rhs[2] - ( dst[3] * y0 + dst[4] * y1 ) ) * dst[5]; + // solve L^Tx = y, and put x into rhs + rhs[2] = (y2)*dst[5]; + rhs[1] = ( y1 - ( dst[4] * rhs[2] ) ) * dst[2]; + rhs[0] = ( y0 - ( dst[3] * rhs[2] + dst[1] * rhs[1] ) ) * dst[0]; + + return mask; + } // -- parameters that describe the z position of the kink point as a function of ty in a 4th order polynomial (even // terms only) constexpr auto magFieldParams = std::array{2010.0f, 
-2240.0f, -71330.f}; // perform a fit using trackhelper's best hits with y correction, improve qop estimate - float fastfitter( const TrackHelper& helper, const LHCb::Pr::UT::Mut::Hits& hits, - std::array<float, 4>& improvedParams, const float zMidUT, const float qpxz2p ) { + simd::float_v fastfitterSIMD( std::array<simd::float_v, 4>& improvedParams, const ProtoTracks& protoTracks, + const float zMidUT, const simd::float_v qpxz2p, const int t, + simd::mask_v& goodFitMask ) { - const float ty = helper.state.ty; - const float zKink = magFieldParams[0] - ty * ty * magFieldParams[1] - ty * ty * ty * ty * magFieldParams[2]; - const float xMidField = helper.state.x + helper.state.tx * ( zKink - helper.state.z ); + const simd::float_v x = protoTracks.xState<simd::float_v>( t ); + const simd::float_v y = protoTracks.yState<simd::float_v>( t ); + const simd::float_v z = protoTracks.zState<simd::float_v>( t ); + const simd::float_v tx = protoTracks.txState<simd::float_v>( t ); + const simd::float_v ty = protoTracks.tyState<simd::float_v>( t ); + const simd::float_v zKink = + magFieldParams[0] - ty * ty * magFieldParams[1] - ty * ty * ty * ty * magFieldParams[2]; + const simd::float_v xMidField = x + tx * ( zKink - z ); - const float zDiff = 0.001f * ( zKink - zMidUT ); + const simd::float_v zDiff = 0.001f * ( zKink - zMidUT ); // -- This is to avoid division by zero... - const float pHelper = std::max( float( std::abs( helper.bestParams[0] * qpxz2p ) ), float( 1e-9 ) ); - const float invP = pHelper * vdt::fast_isqrtf( 1.0f + ty * ty ); + const simd::float_v pHelper = max( abs( protoTracks.qp<simd::float_v>( t ) * qpxz2p ), 1e-9f ); + const simd::float_v invP = pHelper * rsqrt( 1.0f + ty * ty ); // these resolution are semi-empirical, could be tuned and might not be correct for low momentum. 
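      // -- error1 models the multiple scattering between Velo and UT, error2 the finite Velo
      // -- resolution; their quadratic sum gives the error whose inverse weights the
      // -- extrapolated Velo point in the chi2 fit set up below.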
- const float error1 = + const simd::float_v error1 = 0.14f + 10000.0f * invP; // this is the resolution due to multiple scattering between Velo and UT - const float error2 = 0.12f + 3000.0f * invP; // this is the resolution due to the finite Velo resolution - const float error = error1 * error1 + error2 * error2; - const float weight = 1.0f / error; - - float mat[6] = {weight, weight * zDiff, weight * zDiff * zDiff, 0.0f, 0.0f, 0.0f}; - float rhs[3] = {weight * xMidField, weight * xMidField * zDiff, 0.0f}; - - for ( int index : helper.bestIndices ) { - - // -- only the last one can be a nullptr - if ( index == -1 ) break; - - const float ui = hits.xs[index]; - const float dz = 0.001f * ( hits.zs[index] - zMidUT ); - const float w = hits.weights[index]; - const float t = hits.sins[index]; + const simd::float_v error2 = 0.12f + 3000.0f * invP; // this is the resolution due to the finite Velo resolution + const simd::float_v error = error1 * error1 + error2 * error2; + const simd::float_v weight = 1.0f / error; + + std::array<simd::float_v, 6> mat = {weight, weight * zDiff, weight * zDiff * zDiff, 0.0f, 0.0f, 0.0f}; + std::array<simd::float_v, 3> rhs = {weight * xMidField, weight * xMidField * zDiff, 0.0f}; + + for ( int i = 0; i < 4; ++i ) { + + // -- there are 3-hit candidates, but we'll + // -- just treat them like 4-hit candidates + // -- with 0 weight for the last hit + const simd::float_v ui = protoTracks.x<simd::float_v>( t, i ); + const simd::float_v dz = 0.001f * ( protoTracks.z<simd::float_v>( t, i ) - zMidUT ); + const simd::float_v w = protoTracks.weight<simd::float_v>( t, i ); + const simd::float_v ta = protoTracks.sin<simd::float_v>( t, i ); mat[0] += w; mat[1] += w * dz; mat[2] += w * dz * dz; - mat[3] += w * t; - mat[4] += w * dz * t; - mat[5] += w * t * t; + mat[3] += w * ta; + mat[4] += w * dz * ta; + mat[5] += w * ta * ta; rhs[0] += w * ui; rhs[1] += w * ui * dz; - rhs[2] += w * ui * t; + rhs[2] += w * ui * ta; } - ROOT::Math::CholeskyDecomp<float, 3> decomp( mat ); - if ( UNLIKELY( !decomp ) ) { - return helper.bestParams[0]; - } else { - decomp.Solve( rhs ); - } + goodFitMask = !CholeskyDecomposition3( mat, rhs ); - const float xSlopeUTFit = 0.001f * rhs[1]; - const float xUTFit = rhs[0]; - const float offsetY = rhs[2]; + const simd::float_v xUTFit = rhs[0]; + const simd::float_v xSlopeUTFit = 0.001f * rhs[1]; + const simd::float_v offsetY = rhs[2]; - const float distX = ( xMidField - xUTFit - xSlopeUTFit * ( zKink - zMidUT ) ); + const simd::float_v distX = ( xMidField - xUTFit - xSlopeUTFit * ( zKink - zMidUT ) ); // -- This takes into account that the distance between a point and track is smaller than the distance on the // x-axis - const float distCorrectionX2 = 1.0f / ( 1 + xSlopeUTFit * xSlopeUTFit ); - float chi2 = weight * ( distX * distX * distCorrectionX2 + offsetY * offsetY / ( 1.0f + ty * ty ) ); + const simd::float_v distCorrectionX2 = 1.0f / ( 1 + xSlopeUTFit * xSlopeUTFit ); + simd::float_v chi2 = weight * ( distX * distX * distCorrectionX2 + offsetY * offsetY / ( 1.0f + ty * ty ) ); + + for ( int i = 0; i < 4; ++i ) { - for ( int index : helper.bestIndices ) { - if ( index == -1 ) break; + const simd::float_v dz = protoTracks.z<simd::float_v>( t, i ) - zMidUT; + const simd::float_v w = protoTracks.weight<simd::float_v>( t, i ); + const simd::float_v dist = ( protoTracks.x<simd::float_v>( t, i ) - xUTFit - xSlopeUTFit * dz - + offsetY * protoTracks.sin<simd::float_v>( t, i ) ); - const float w = hits.weights[index]; - const float dz = hits.zs[index] - 
zMidUT; - const float dist = ( hits.xs[index] - xUTFit - xSlopeUTFit * dz - offsetY * hits.sins[index] ); chi2 += w * dist * dist * distCorrectionX2; } // new VELO slope x - const float xb = 0.5f * ( ( xUTFit + xSlopeUTFit * ( zKink - zMidUT ) ) + xMidField ); // the 0.5 is empirical - const float xSlopeVeloFit = ( xb - helper.state.x ) / ( zKink - helper.state.z ); + const simd::float_v xb = + 0.5f * ( ( xUTFit + xSlopeUTFit * ( zKink - zMidUT ) ) + xMidField ); // the 0.5 is empirical + const simd::float_v xSlopeVeloFit = ( xb - x ) / ( zKink - z ); - improvedParams = {xUTFit, xSlopeUTFit, helper.state.y + helper.state.ty * ( zMidUT - helper.state.z ) + offsetY, - chi2}; + improvedParams = {xUTFit, xSlopeUTFit, y + ty * ( zMidUT - z ) + offsetY, chi2}; // calculate q/p - const float sinInX = xSlopeVeloFit * vdt::fast_isqrtf( 1.0f + xSlopeVeloFit * xSlopeVeloFit + ty * ty ); - const float sinOutX = xSlopeUTFit * vdt::fast_isqrtf( 1.0f + xSlopeUTFit * xSlopeUTFit + ty * ty ); + const simd::float_v sinInX = xSlopeVeloFit * rsqrt( 1.0f + xSlopeVeloFit * xSlopeVeloFit + ty * ty ); + const simd::float_v sinOutX = xSlopeUTFit * rsqrt( 1.0f + xSlopeUTFit * xSlopeUTFit + ty * ty ); return ( sinInX - sinOutX ); } // -- Evaluate the linear discriminant // -- Coefficients derived with LD method for p, pT and chi2 with TMVA - template <std::size_t nHits> - float evaluateLinearDiscriminant( const std::array<float, 3> inputValues ) { + template <int nHits> + simd::float_v evaluateLinearDiscriminantSIMD( const std::array<simd::float_v, 3>& inputValues ) { constexpr auto coeffs = ( nHits == 3 ? std::array{0.162880166064f, -0.107081172665f, 0.134153123662f, -0.137764853657f} : std::array{0.235010729187f, -0.0938323617311f, 0.110823681145f, -0.170467109599f} ); assert( coeffs.size() == inputValues.size() + 1 ); - return std::inner_product( std::next( coeffs.begin() ), coeffs.end(), inputValues.begin(), coeffs.front(), - std::plus{}, []( float c, float iv ) { return c * vdt::fast_logf( iv ); } ); + + return simd::float_v{coeffs[0]} + coeffs[1] * log<simd::float_v>( inputValues[0] ) + + coeffs[2] * log<simd::float_v>( inputValues[1] ) + coeffs[3] * log<simd::float_v>( inputValues[2] ); + } + + /* + simd::float_v calcXTol( const simd::float_v minMom, const simd::float_v ty ) { + return ( 38000.0f / minMom + 0.25f ) * ( 1.0f + ty * ty * 0.8f ); + } + */ + + // -- bubble sort is slow, but we never have more than 9 elements (horizontally) + // -- and can act on 8 elements at once vertically (with AVX) + void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& helper, + const int start ) { + for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { + for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { + swap( helper[start + j] > helper[start + j + 1], helper[start + j], helper[start + j + 1] ); + } + } + } + + // -- not sure that is the smartest solution + // -- but I could not come up with anything better + // -- inspired by: https://lemire.me/blog/2017/04/10/removing-duplicates-from-lists-quickly/ + simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& out, int start, + size_t len ) { + simd::int_v pos = start + 1; + simd::int_v oldv = out[start]; + for ( size_t j = start + 1; j < start + len; ++j ) { + simd::int_v newv = out[j]; + simd::mask_v blendMask = ( newv == oldv ); + for ( size_t k = j + 1; k < start + len; ++k ) { out[k - 1] = select( blendMask, out[k], out[k - 1] ); } + oldv = newv; + pos = pos + select( blendMask, 
simd::int_v{0}, simd::int_v{1} ); + } + return pos; } // -- These things are all hardcopied from the PrTableForFunction // -- and PrUTMagnetTool // -- If the granularity or whatever changes, this will give wrong results - - int masterIndex( const int index1, const int index2, const int index3 ) { + simd::int_v masterIndexSIMD( const simd::int_v index1, const simd::int_v index2, const simd::int_v index3 ) { return ( index3 * 11 + index2 ) * 31 + index1; } @@ -232,6 +301,9 @@ namespace LHCb::Pr { /// Initialization StatusCode VeloUT::initialize() { + + // std::cout << "initialize" << std::endl; + return Transformer::initialize().andThen( [&] { return m_PrUTMagnetTool.retrieve(); } ).andThen( [&] { // m_zMidUT is a position of normalization plane which should to be close to z middle of UT ( +- 5 cm ). // Cached once in VeloUTTool at initialization. No need to update with small UT movement. @@ -253,35 +325,105 @@ namespace LHCb::Pr { //============================================================================= Upstream::Tracks VeloUT::operator()( const EventContext& evtCtx, const Velo::Tracks& inputTracks, const LHCb::Pr::UT::HitHandler& hh, const UTDAQ::GeomCache& geometry ) const { + if ( m_doTiming ) m_timerTool->start( m_veloUTTime ); Upstream::Tracks outputTracks{&inputTracks, LHCb::getMemResource( evtCtx )}; m_seedsCounter += inputTracks.size(); - const auto& fudgeFactors = m_PrUTMagnetTool->DxLayTable(); - const auto& bdlTable = m_PrUTMagnetTool->BdlTable(); + // const auto& fudgeFactors = m_PrUTMagnetTool->DxLayTable(); + const auto& bdlTable = m_PrUTMagnetTool->BdlTable(); + + MiniStatesArray filteredStates = getStates( inputTracks, outputTracks ); - LHCb::Pr::UT::Mut::Hits hitsInLayers; + std::array<ExtrapolatedStates, 4> extrapStatesArray = extrapStates( filteredStates, geometry ); - // for now only scalar, but with some adaptation it can be vectorized - using dType = SIMDWrapper::scalar::types; + auto compBoundsArray = findAllSectors( extrapStatesArray, filteredStates, geometry ); - for ( int t = 0; t != inputTracks.size(); t++ ) { - MiniState trState; - if ( !getState<dType>( inputTracks, t, trState, outputTracks ) ) continue; + std::array<LHCb::Pr::UT::Mut::Hits, batchSize> hitsInLayers; - for ( auto& it : hitsInLayers.layerIndices ) it = -1; - if ( !getHits( hitsInLayers, hh, fudgeFactors, geometry, trState ) ) continue; + ProtoTracks pTracks; - TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); + // -- We cannot put all found hits in an array, as otherwise the stack overflows + // -- so we just do the whole thing in batches + for ( std::size_t t = 0; t < filteredStates.size; t += batchSize ) { - if ( !formClusters<true>( hitsInLayers, helper ) ) { - // std::reverse( hitsInLayers.begin(), hitsInLayers.end() ); // need some thinking - formClusters<false>( hitsInLayers, helper ); - // std::reverse( hitsInLayers.begin(), hitsInLayers.end() ); + for ( std::size_t m = 0; m < batchSize; ++m ) { + for ( auto& it : hitsInLayers[m].layerIndices ) it = -1; } - if ( helper.bestIndices[0] != -1 ) - prepareOutputTrack<dType>( inputTracks, t, helper, hitsInLayers, outputTracks, bdlTable ); + pTracks.size = 0; + + for ( std::size_t t2 = 0; t2 < batchSize && t2 + t < filteredStates.size; t2++ ) { + + std::size_t tEff = t + t2; + hitsInLayers[t2].size = 0; + + if ( !getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[t2], tEff ) ) continue; + + // -- this is a temporary solution to gradually adapt the algo + scalar::float_v x = 
filteredStates.x<scalar::float_v>( tEff ); + scalar::float_v y = filteredStates.y<scalar::float_v>( tEff ); + scalar::float_v z = filteredStates.z<scalar::float_v>( tEff ); + scalar::float_v tx = filteredStates.tx<scalar::float_v>( tEff ); + scalar::float_v ty = filteredStates.ty<scalar::float_v>( tEff ); + + MiniState trState; + trState.x = x.cast(); + trState.y = y.cast(); + trState.z = z.cast(); + trState.tx = tx.cast(); + trState.ty = ty.cast(); + + TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); + + if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } + if ( helper.bestIndices[0] == -1 ) continue; + + scalar::float_v covx = filteredStates.covx<scalar::float_v>( tEff ); + scalar::float_v covy = filteredStates.covy<scalar::float_v>( tEff ); + scalar::float_v covz = filteredStates.covz<scalar::float_v>( tEff ); + scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + + int trackIndex = pTracks.size; + // -- manual compressstore to keep everything in sync and fill the registers in the last function + pTracks.store_xState<scalar::float_v>( trackIndex, x ); + pTracks.store_yState<scalar::float_v>( trackIndex, y ); + pTracks.store_zState<scalar::float_v>( trackIndex, z ); + pTracks.store_txState<scalar::float_v>( trackIndex, tx ); + pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); + pTracks.store_covx<scalar::float_v>( trackIndex, covx ); + pTracks.store_covy<scalar::float_v>( trackIndex, covy ); + pTracks.store_covz<scalar::float_v>( trackIndex, covz ); + pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); + pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); + + // -- another temporary thing: Put the clusters in an array + // -- order is: + pTracks.store_xTT<scalar::float_v>( trackIndex, helper.bestParams[2] ); + pTracks.store_xSlopeTT<scalar::float_v>( trackIndex, helper.bestParams[3] ); + pTracks.store_qp<scalar::float_v>( trackIndex, helper.bestParams[0] ); + pTracks.store_chi2TT<scalar::float_v>( trackIndex, helper.bestParams[1] ); + + int nHits = 0; + // -- this runs over all 4 layers, even if no hit was found + // -- but it fills a weight of 0 + for ( auto hitIndex : helper.bestIndices ) { + pTracks.store_x<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].xs[hitIndex] ); + pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitIndex] ); + pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitIndex] ); + + scalar::float_v weight = ( hitIndex == -1 ) ? 
0.0f : hitsInLayers[t2].weights[hitIndex]; + pTracks.store_weight<scalar::float_v>( trackIndex, nHits, weight ); + + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitIndex] ) ); + pTracks.store_id<scalar::int_v>( trackIndex, nHits, id.lhcbID() ); // not sure if correct + nHits++; + } + + pTracks.size++; + } + + prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, bdlTable ); } m_tracksCounter += outputTracks.size(); @@ -291,117 +433,244 @@ namespace LHCb::Pr { //============================================================================= // Get the state, do some cuts //============================================================================= - template <typename dType> - bool VeloUT::getState( const Velo::Tracks& inputTracks, int at, MiniState& trState, - Upstream::Tracks& outputTracks ) const { - using I = typename dType::int_v; - using F = typename dType::float_v; - - const int EndVelo = 1; - auto pos = inputTracks.statePos<F>( at, EndVelo ); - auto dir = inputTracks.stateDir<F>( at, EndVelo ); - auto covX = inputTracks.stateCovX<F>( at, EndVelo ); - - // -- reject tracks outside of acceptance or pointing to the beam pipe - trState.tx = dir.x.cast(); - trState.ty = dir.y.cast(); - trState.x = pos.x.cast(); - trState.y = pos.y.cast(); - trState.z = pos.z.cast(); - - const float xMidUT = trState.x + trState.tx * ( m_zMidUT - trState.z ); - const float yMidUT = trState.y + trState.ty * ( m_zMidUT - trState.z ); - - if ( xMidUT * xMidUT + yMidUT * yMidUT < m_centralHoleSize * m_centralHoleSize ) return false; - if ( ( std::abs( trState.tx ) > m_maxXSlope ) || ( std::abs( trState.ty ) > m_maxYSlope ) ) return false; - - if ( m_passTracks && std::abs( xMidUT ) < m_passHoleSize && std::abs( yMidUT ) < m_passHoleSize ) { - int i = outputTracks.size(); - int mask = true; // dummy mask to be replace if we want to vectorize - - outputTracks.compressstore_trackVP<I>( i, mask, at ); // ancestor - outputTracks.compressstore_statePos<F>( i, mask, pos ); - outputTracks.compressstore_stateDir<F>( i, mask, dir ); - outputTracks.compressstore_stateCov<F>( i, mask, covX ); - outputTracks.compressstore_stateQoP<F>( i, mask, 0.f ); // no momentum - outputTracks.compressstore_nHits<I>( i, mask, 0 ); // no hits - - outputTracks.size() += dType::popcount( mask ); - - return false; - } + MiniStatesArray VeloUT::getStates( const Velo::Tracks& inputTracks, Upstream::Tracks& outputTracks ) const { + + const int EndVelo = 1; + const simd::mask_v passTracks = m_passTracks.value() ? 
simd::mask_true() : simd::mask_false(); + + MiniStatesArray filteredStates; + + for ( int t = 0; t < inputTracks.size(); t += simd::size ) { + + simd::mask_v loopMask = simd::loop_mask( t, inputTracks.size() ); - return true; + auto pos = inputTracks.statePos<simd::float_v>( t, EndVelo ); + auto dir = inputTracks.stateDir<simd::float_v>( t, EndVelo ); + auto covX = inputTracks.stateCovX<simd::float_v>( t, EndVelo ); + + simd::float_v xMidUT = pos.x + dir.x * ( m_zMidUT - pos.z ); + simd::float_v yMidUT = pos.y + dir.y * ( m_zMidUT - pos.z ); + + simd::mask_v centralHoleMask = xMidUT * xMidUT + yMidUT * yMidUT < simd::float_v{m_centralHoleSize.value()} * + simd::float_v{m_centralHoleSize.value()}; + simd::mask_v slopesMask = ( ( abs( dir.x ) > m_maxXSlope.value() ) || ( abs( dir.y ) > m_maxYSlope.value() ) ); + simd::mask_v passHoleMask = abs( xMidUT ) < m_passHoleSize.value() && abs( yMidUT ) < m_passHoleSize.value(); + simd::mask_v mask = centralHoleMask || slopesMask; + simd::mask_v csMask = loopMask && !mask && ( !passTracks || !passHoleMask ); + + int index = filteredStates.size; + filteredStates.compressstore_x<simd::float_v>( index, csMask, pos.x ); + filteredStates.compressstore_y<simd::float_v>( index, csMask, pos.y ); + filteredStates.compressstore_z<simd::float_v>( index, csMask, pos.z ); + filteredStates.compressstore_tx<simd::float_v>( index, csMask, dir.x ); + filteredStates.compressstore_ty<simd::float_v>( index, csMask, dir.y ); + filteredStates.compressstore_covx<simd::float_v>( index, csMask, covX.x ); + filteredStates.compressstore_covy<simd::float_v>( index, csMask, covX.y ); + filteredStates.compressstore_covz<simd::float_v>( index, csMask, covX.z ); + filteredStates.compressstore_index<simd::int_v>( index, csMask, simd::indices( t ) ); + filteredStates.size += simd::popcount( csMask ); + + if ( m_passTracks ) { + + auto outMask = loopMask && passHoleMask; // not sure if correct... + + int i = outputTracks.size(); + outputTracks.compressstore_trackVP<simd::int_v>( i, outMask, simd::indices( t ) ); // ancestor + outputTracks.compressstore_statePos<simd::float_v>( i, outMask, pos ); + outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); + outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); + outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum + outputTracks.compressstore_nHits<simd::int_v>( i, outMask, 0 ); // no hits + + outputTracks.size() += simd::popcount( outMask ); + } + } + return filteredStates; } + //============================================================================= + // Extrapolate the states + //============================================================================= + std::array<ExtrapolatedStates, UTInfo::TotalLayers> VeloUT::extrapStates( const MiniStatesArray& filteredStates, + const UTDAQ::GeomCache& geom ) const { + + std::array<ExtrapolatedStates, UTInfo::TotalLayers> eStatesArray; + + // -- Used for the calculation of the size of the search windows + constexpr const std::array<float, UTInfo::TotalLayers> normFact{0.95f, 1.0f, 1.36f, 1.41f}; + + for ( std::size_t t = 0; t < filteredStates.size; t += simd::size ) { + + simd::float_v x = filteredStates.x<simd::float_v>( t ); + simd::float_v y = filteredStates.y<simd::float_v>( t ); + simd::float_v z = filteredStates.z<simd::float_v>( t ); + simd::float_v tx = filteredStates.tx<simd::float_v>( t ); + simd::float_v ty = filteredStates.ty<simd::float_v>( t ); + // -- this 500 seems a little odd... 
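// -- A scalar sketch of the search-window logic that follows, with made-up values for the
// -- MinPT, MinMomentum and DistToMomentum properties: the pT threshold is converted into a
// -- momentum threshold through the opening angle theta ~ sqrt(tx^2 + ty^2), and the clamp
// -- at 500 caps 1/theta (i.e. treats anything more forward than ~2 mrad the same way) so
// -- that the threshold, and hence the window size, stays finite for very forward tracks.
#include <algorithm>
#include <cmath>

float xTolSketch( float tx, float ty ) {
  const float minPT          = 300.f;   // hypothetical property value [MeV]
  const float minMomentum    = 1500.f;  // hypothetical property value [MeV]
  const float distToMomentum = 4.0e-6f; // hypothetical property value
  const float invTheta = std::min( 500.0f, 1.0f / std::sqrt( tx * tx + ty * ty ) );
  const float minMom   = std::max( minPT * invTheta, minMomentum );
  return std::fabs( 1.0f / ( distToMomentum * minMom ) ); // xTol, before the per-layer normFact
}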
+ const simd::float_v invTheta = min( 500.0f, 1.0f * rsqrt( tx * tx + ty * ty ) ); + const simd::float_v minMom = max( m_minPT.value() * invTheta, m_minMomentum.value() ); + + for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + + const simd::float_v xTol = abs( 1.0f / ( m_distToMomentum * minMom ) ) * normFact[layerIndex]; + const simd::float_v yTol = m_yTol.value() + m_yTolSlope.value() * xTol; + + const simd::float_v zGeo{geom.layers[layerIndex].z}; + const simd::float_v dxDy{geom.layers[layerIndex].dxDy}; + + const simd::float_v yAtZ = y + ty * ( zGeo - z ); + const simd::float_v xLayer = x + tx * ( zGeo - z ); + const simd::float_v yLayer = yAtZ + yTol * dxDy; + + eStatesArray[layerIndex].store_xLayer<simd::float_v>( t, xLayer ); + eStatesArray[layerIndex].store_yLayer<simd::float_v>( t, yLayer ); + eStatesArray[layerIndex].store_xTol<simd::float_v>( t, xTol ); + eStatesArray[layerIndex].store_tx<simd::float_v>( t, tx ); + } + } + + eStatesArray[0].size = filteredStates.size; + eStatesArray[1].size = filteredStates.size; + eStatesArray[2].size = filteredStates.size; + eStatesArray[3].size = filteredStates.size; + + return eStatesArray; + } //============================================================================= - // Find the hits + // -- find the sectors //============================================================================= - template <typename FudgeTable> - bool VeloUT::getHits( LHCb::Pr::UT::Mut::Hits& hitsInLayers, const LHCb::Pr::UT::HitHandler& hh, - const FudgeTable& fudgeFactors, const UTDAQ::GeomCache& geom, MiniState& trState ) const { - - using simd = SIMDWrapper::avx2::types; - - hitsInLayers.size = 0; - // -- This is hardcoded, so faster - // -- If you ever change the Table in the magnet tool, this will be wrong - const float absSlopeY = std::abs( trState.ty ); - const int index = (int)( absSlopeY * 100 + 0.5f ); - span<const float, 4> normFact{&fudgeFactors.table()[4 * index], 4}; - - // -- this 500 seems a little odd... 
- const float invTheta = - std::min( 500.0f, 1.0f * vdt::fast_isqrtf( trState.tx * trState.tx + trState.ty * trState.ty ) ); - const float minMom = std::max( m_minPT.value() * invTheta, m_minMomentum.value() ); - const float xTol = std::abs( 1.0f / ( m_distToMomentum * minMom ) ); - const float yTol = m_yTol + m_yTolSlope * xTol; - - const simd::float_v yTolV{m_yTol.value()}; - const simd::float_v yTolSlopeV{m_yTolSlope.value()}; - - int nLayers = 0; - boost::container::small_vector<std::pair<int, int>, 9> sectors; - - std::size_t nSize = 0; - for ( int iStation = 0; iStation < 2; ++iStation ) { - - if ( iStation == 1 && nLayers == 0 ) { return false; } - - for ( int iLayer = 0; iLayer < 2; ++iLayer ) { - if ( iStation == 1 && iLayer == 1 && nLayers < 2 ) return false; - - const unsigned int layerIndex = 2 * iStation + iLayer; - const float z = geom.layers[layerIndex].z; - const float yAtZ = trState.y + trState.ty * ( z - trState.z ); - const float xLayer = trState.x + trState.tx * ( z - trState.z ); - const float yLayer = yAtZ + yTol * geom.layers[layerIndex].dxDy; - const float normFactNum = normFact[layerIndex]; - const float invNormFact = 1.0f / normFactNum; - - const simd::float_v invNormFactV{invNormFact}; - const simd::float_v xTolInvNormFactV{xTol * invNormFact}; - - UTDAQ::findSectors( layerIndex, xLayer, yLayer, - xTol * invNormFact - std::abs( trState.tx ) * m_intraLayerDist.value(), - m_yTol + m_yTolSlope * std::abs( xTol * invNormFact ), geom.layers[layerIndex], sectors ); - - std::pair pp{-1, -1}; - for ( auto& p : sectors ) { - // sectors can be duplicated in the list, but they are ordered - if ( p == pp ) continue; - pp = p; - const int fullChanIdx = ( layerIndex * 3 + ( p.first - 1 ) ) * 98 + ( p.second - 1 ); - findHits( hh, fullChanIdx, trState, xTolInvNormFactV, invNormFactV, hitsInLayers, yTolV, yTolSlopeV ); + std::array<Boundaries, UTInfo::TotalLayers> + VeloUT::findAllSectors( const std::array<ExtrapolatedStates, UTInfo::TotalLayers>& eStatesArray, + MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const { + + std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; + int contSize = filteredStates.size; + filteredStates.size = 0; + + std::array<simd::int_v, UTInfo::TotalLayers> posArray; + std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors + std::array<int, UTInfo::TotalLayers> maxColsRows; + + // -- This now works with up to 9 sectors + for ( int t = 0; t < contSize; t += simd::size ) { + auto loopMask = simd::loop_mask( t, contSize ); + + simd::int_v nLayers{0}; + + for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + + const simd::int_v regionBoundary1 = ( 2 * geom.layers[layerIndex].nColsPerSide + 3 ); + const simd::int_v regionBoundary2 = ( 2 * geom.layers[layerIndex].nColsPerSide - 5 ); + + simd::int_v subcolmin{0}; + simd::int_v subcolmax{0}; + simd::int_v subrowmin{0}; + simd::int_v subrowmax{0}; + + simd::float_v xLayer = eStatesArray[layerIndex].xLayer<simd::float_v>( t ); + simd::float_v yLayer = eStatesArray[layerIndex].yLayer<simd::float_v>( t ); + simd::float_v xTol = eStatesArray[layerIndex].xTol<simd::float_v>( t ); + simd::float_v tx = eStatesArray[layerIndex].tx<simd::float_v>( t ); + + simd::mask_v mask = UTDAQ::findSectors( layerIndex, xLayer, yLayer, xTol - abs( tx ) * m_intraLayerDist.value(), + m_yTol.value() + m_yTolSlope.value() * abs( xTol ), + geom.layers[layerIndex], subcolmin, subcolmax, subrowmin, subrowmax ); + + const simd::mask_v 
gathermask = loopMask && mask; + + // -- Determine the maximum number of rows and columns we have to take into account + // -- maximum 3 + const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); + const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 3 ); + + maxColsRows[layerIndex] = maxCols * maxRows; + + int counter = 0; + for ( int sc = 0; sc < maxCols; sc++ ) { + + simd::int_v realSC = min( subcolmax, subcolmin + sc ); + // -- Gives the region (actually region - 1): left 0, center 1, right 2 + simd::int_v region = select( realSC > regionBoundary1, simd::int_v{1}, simd::int_v{0} ) + + select( realSC > regionBoundary2, simd::int_v{1}, simd::int_v{0} ); + + for ( int sr = 0; sr < maxRows; sr++ ) { + + simd::int_v realSR = min( subrowmax, subrowmin + sr ); + simd::int_v sectorIndex = realSR + 28 * realSC; + + // -- only gather when we are not outside the acceptance + // -- if we are outside, fill 1 which is the lowest possible sector number + // -- We need to fill a valid number, as one can have 3 layers with a correct sector + // -- and one without a correct sector, in which case the track will not be masked off. + // -- However, these cases should happen very rarely + simd::int_v sect = ( layerIndex < 2 ) + ? geom.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) + : geom.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); + + // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 + // -- The regions are already calculated with a -1 + helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * 98 - 1; + counter++; + } } - sectors.clear(); - nLayers += int( nSize != hitsInLayers.size ); - hitsInLayers.layerIndices[layerIndex] = nSize; - nSize = hitsInLayers.size; + + // -- This is sorting + bubbleSortSIMD( maxCols * maxRows, helperArray, maxSectors * layerIndex ); + // -- This is uniquifying + posArray[layerIndex] = makeUniqueSIMD( helperArray, maxSectors * layerIndex, maxCols * maxRows ); + // -- count the number of layers which are 'valid' + nLayers += select( mask, simd::int_v{1}, simd::int_v{0} ); } + + // -- We need at least three layers + const simd::mask_v compressMask = ( nLayers > 2 ) && loopMask; + + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + int index = compBoundsArray[iLayer].size; + for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { + compBoundsArray[iLayer].compressstore_sect<simd::int_v>( index, iSector, compressMask, + helperArray[maxSectors * iLayer + iSector] ); + } + simd::float_v xTol = eStatesArray[iLayer].xTol<simd::float_v>( t ); + compBoundsArray[iLayer].compressstore_xTol<simd::float_v>( index, compressMask, xTol ); + compBoundsArray[iLayer].compressstore_nPos<simd::int_v>( index, compressMask, + posArray[iLayer] - maxSectors * iLayer ); + compBoundsArray[iLayer].size += simd::popcount( compressMask ); + } + + // -- Now need to compress the filtered states, such that they are + // -- in sync with the sectors + simd::float_v x = filteredStates.x<simd::float_v>( t ); + simd::float_v y = filteredStates.y<simd::float_v>( t ); + simd::float_v z = filteredStates.z<simd::float_v>( t ); + simd::float_v tx = filteredStates.tx<simd::float_v>( t ); + simd::float_v ty = filteredStates.ty<simd::float_v>( t ); + simd::float_v covx = filteredStates.covx<simd::float_v>( t ); + simd::float_v covy = filteredStates.covy<simd::float_v>( t ); + simd::float_v covz = filteredStates.covz<simd::float_v>( 
t ); + simd::int_v trackIndex = filteredStates.index<simd::int_v>( t ); + + auto index = filteredStates.size; + filteredStates.compressstore_x<simd::float_v>( index, compressMask, x ); + filteredStates.compressstore_y<simd::float_v>( index, compressMask, y ); + filteredStates.compressstore_z<simd::float_v>( index, compressMask, z ); + filteredStates.compressstore_tx<simd::float_v>( index, compressMask, tx ); + filteredStates.compressstore_ty<simd::float_v>( index, compressMask, ty ); + filteredStates.compressstore_covx<simd::float_v>( index, compressMask, covx ); + filteredStates.compressstore_covy<simd::float_v>( index, compressMask, covy ); + filteredStates.compressstore_covz<simd::float_v>( index, compressMask, covz ); + filteredStates.compressstore_index<simd::int_v>( index, compressMask, trackIndex ); + filteredStates.size += simd::popcount( compressMask ); } - return nLayers > 2; + + return compBoundsArray; } + //============================================================================= + // Find the hits + //============================================================================= + bool VeloUT::getHitsScalar( const LHCb::Pr::UT::HitHandler& hh, const MiniStatesArray& filteredStates, + const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { // ============================================================================== // -- Method that finds the hits in a given layer within a certain range @@ -411,41 +680,79 @@ namespace LHCb::Pr { LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v yTol, const simd::float_v yTolSlope ) const { - const LHCb::Pr::UT::Hits& myHits = hh.hits(); - std::pair<int, int> indices = hh.indices( fullChanIndex ); - if ( indices.first == indices.second ) return; - int firstIndex = indices.first; + std::size_t nSize = 0; + std::size_t nLayers = 0; + + // -- the protos could be precomputed + const simd::float_v yProto{yState - tyState * zState}; + const simd::float_v xOnTrackProto{xState - txState * zState}; + const simd::float_v ty{tyState}; + const simd::float_v tx{txState}; + + for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { - using simd = SIMDWrapper::avx2::types; - using scalar = SIMDWrapper::scalar::types; + if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; - simd::float_v xOnTrackProto{myState.x + - myState.tx * ( myHits.zAtYEq0<scalar::float_v>( firstIndex ).cast() - myState.z )}; - simd::float_v yProto{myState.y - myState.ty * myState.z}; - simd::float_v tyV{myState.ty}; - simd::float_v tolProto{yTolSlope * invNormFact}; + const float xTolS = compBoundsArray[layerIndex].xTol<scalar::float_v>( t ).cast(); + const int nPos = compBoundsArray[layerIndex].nPos<scalar::int_v>( t ).cast(); + const simd::float_v yTol = m_yTol.value() + m_yTolSlope.value() * xTolS; - for ( int i = indices.first; i < indices.second; i += simd::size ) { + const simd::float_v tolProto{m_yTol.value()}; + const simd::float_v xTol{xTolS}; - auto loop_mask = simd::loop_mask( i, indices.second ); + std::array<int, maxSectors + 1> sectors{0}; - simd::float_v yy = yProto + tyV * myHits.zAtYEq0<simd::float_v>( i ); - simd::float_v xx = myHits.xAtYEq0<simd::float_v>( i ) + yy * myHits.dxDy<simd::float_v>( i ); - simd::float_v absdx = abs( xx - xOnTrackProto ); + for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(); } + + for ( int j = 0; j < nPos; j++ ) { + // -- let's 
try to make it branchless + const std::pair<int, int>& temp = hh.indices( sectors[j] ); + const std::pair<int, int>& temp2 = hh.indices( sectors[j + 1] ); + const int firstIndex = temp.first; + const int shift = ( temp2.first == temp.second ); + const int lastIndex = ( shift == 1 ) ? temp2.second : temp.second; + j += shift; + + findHits( hh, yProto, ty, tx, xOnTrackProto, tolProto, xTol, hitsInLayers, yTol, firstIndex, lastIndex ); + } + + nLayers += int( nSize != hitsInLayers.size ); + hitsInLayers.layerIndices[layerIndex] = nSize; + nSize = hitsInLayers.size; + } + // -- only use these hits, if we have at least 3 layers + return nLayers > 2; + } + // ============================================================================== + // -- Method that finds the hits in a given layer within a certain range + // ============================================================================== + void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, + const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, + const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, + const int firstIndex, const int lastIndex ) const { + + const LHCb::Pr::UT::Hits& myHits = hh.hits(); + + for ( int i = firstIndex; i < lastIndex; i += simd::size ) { + // -- Calculate distance between straight line extrapolation from Velo and hit position + const simd::float_v yy = yProto + ty * myHits.zAtYEq0<simd::float_v>( i ); + const simd::float_v xx = myHits.xAtYEq0<simd::float_v>( i ) + yy * myHits.dxDy<simd::float_v>( i ); + const simd::float_v xOnTrack = xOnTrackProto + tx * myHits.zAtYEq0<simd::float_v>( i ); + const simd::float_v absdx = abs( xx - xOnTrack ); if ( none( absdx < xTolNormFact ) ) continue; + auto loopMask = simd::loop_mask( i, lastIndex ); - // template this - simd::float_v yMin = min( myHits.yBegin<simd::float_v>( i ), myHits.yEnd<simd::float_v>( i ) ); - simd::float_v yMax = max( myHits.yBegin<simd::float_v>( i ), myHits.yEnd<simd::float_v>( i ) ); + // is there anything like minmax? + const simd::float_v yMin = min( myHits.yBegin<simd::float_v>( i ), myHits.yEnd<simd::float_v>( i ) ); + const simd::float_v yMax = max( myHits.yBegin<simd::float_v>( i ), myHits.yEnd<simd::float_v>( i ) ); - simd::float_v tol = yTol + absdx * tolProto; - auto mask = ( yMin - tol < yy && yy < yMax + tol ) && loop_mask && ( absdx < xTolNormFact ); + const simd::float_v tol = yTol + absdx * tolProto; + auto mask = ( yMin - tol < yy && yy < yMax + tol ) && ( absdx < xTolNormFact ) && loopMask; if ( none( mask ) ) continue; auto index = mutHits.size; - auto nPlus = simd::popcount( mask ); if ( ( index + simd::size ) >= LHCb::Pr::UT::Mut::Hits::max_hits ) { error() << "Reached maximum number of hits. 
This is a temporary limitation and needs to be fixed" << endmsg; @@ -459,7 +766,7 @@ namespace LHCb::Pr { myHits.cos<simd::float_v>( i ) * -1.0f * myHits.dxDy<simd::float_v>( i ) ); mutHits.compressstore_weight( index, mask, myHits.weight<simd::float_v>( i ) ); mutHits.compressstore_channelID( index, mask, myHits.channelID<simd::int_v>( i ) ); - mutHits.size += nPlus; + mutHits.size += simd::popcount( mask ); } } //========================================================================= @@ -553,157 +860,201 @@ namespace LHCb::Pr { } return fourLayerSolution; } - //========================================================================= // Create the Velo-UT tracks //========================================================================= - template <typename dType, typename BdlTable> - void VeloUT::prepareOutputTrack( const Velo::Tracks& inputTracks, int ancestor, const TrackHelper& helper, - const LHCb::Pr::UT::Mut::Hits& hitsInLayers, Upstream::Tracks& outputTracks, - const BdlTable& bdlTable ) const { - - using I = typename dType::int_v; - using F = typename dType::float_v; - - //== Handle states. copy Velo one, add TT. - const float zOrigin = ( std::fabs( helper.state.ty ) > 0.001f ) ? helper.state.z - helper.state.y / helper.state.ty - : helper.state.z - helper.state.x / helper.state.tx; - - // const float bdl1 = m_PrUTMagnetTool->bdlIntegral(helper.state.ty,zOrigin,helper.state.z); - - // -- These are calculations, copied and simplified from PrTableForFunction - // -- FIXME: these rely on the internal details of PrTableForFunction!!! - // and should at least be put back in there, and used from here - // to make sure everything _stays_ consistent... - const auto var = std::array{helper.state.ty, zOrigin, helper.state.z}; - - const int index1 = std::clamp( int( ( var[0] + 0.3f ) / 0.6f * 30 ), 0, 30 ); - const int index2 = std::clamp( int( ( var[1] + 250 ) / 500 * 10 ), 0, 10 ); - const int index3 = std::clamp( int( var[2] / 800 * 10 ), 0, 10 ); - - float bdl = bdlTable.table()[masterIndex( index1, index2, index3 )]; - - const auto bdls = std::array{bdlTable.table()[masterIndex( index1 + 1, index2, index3 )], - bdlTable.table()[masterIndex( index1, index2 + 1, index3 )], - bdlTable.table()[masterIndex( index1, index2, index3 + 1 )]}; - - const auto boundaries = std::array{-0.3f + float( index1 ) * deltaBdl[0], -250.0f + float( index2 ) * deltaBdl[1], - 0.0f + float( index3 ) * deltaBdl[2]}; + template <typename BdlTable> + void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, + const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, + Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const { + + for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { + + //== Handle states. copy Velo one, add TT. 
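// -- A scalar sketch of the zOrigin computed just below: as in the scalar code being
// -- replaced, the Velo state is extrapolated back to where the track crosses y = 0
// -- (or x = 0 when the y slope is negligible), and this z value is what feeds the Bdl
// -- table lookup. The struct and function names are illustrative only.
#include <cmath>

struct SketchState { float x, y, z, tx, ty; };

float zOriginSketch( const SketchState& s ) {
  // y(zOrigin) = y + ty * ( zOrigin - z ) = 0  =>  zOrigin = z - y / ty
  return ( std::fabs( s.ty ) > 0.001f ) ? s.z - s.y / s.ty : s.z - s.x / s.tx;
}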
+ const simd::float_v zOrigin = + select( protoTracks.tyState<simd::float_v>( t ) > 0.001f, + protoTracks.zState<simd::float_v>( t ) - + protoTracks.yState<simd::float_v>( t ) / protoTracks.tyState<simd::float_v>( t ), + protoTracks.zState<simd::float_v>( t ) - + protoTracks.xState<simd::float_v>( t ) / protoTracks.txState<simd::float_v>( t ) ); + + auto loopMask = simd::loop_mask( t, protoTracks.size ); + // -- this is to filter tracks where the fit had a too large chi2 + simd::mask_v fourHitTrack = protoTracks.weight<simd::float_v>( t, 3 ) > 0.0001f; + + // const float bdl1 = m_PrUTMagnetTool->bdlIntegral(helper.state.ty,zOrigin,helper.state.z); + + // -- These are calculations, copied and simplified from PrTableForFunction + // -- FIXME: these rely on the internal details of PrTableForFunction!!! + // and should at least be put back in there, and used from here + // to make sure everything _stays_ consistent... + auto var = std::array{protoTracks.tyState<simd::float_v>( t ), zOrigin, protoTracks.zState<simd::float_v>( t )}; + + simd::int_v index1 = min( max( simd::int_v{( var[0] + 0.3f ) / 0.6f * 30}, 0 ), 30 ); + simd::int_v index2 = min( max( simd::int_v{( var[1] + 250 ) / 500 * 10}, 0 ), 10 ); + simd::int_v index3 = min( max( simd::int_v{var[2] / 800 * 10}, 0 ), 10 ); + + simd::float_v bdl = gather( bdlTable.table().data(), masterIndexSIMD( index1, index2, index3 ) ); + + // -- TODO: check if we can go outside this table... + const std::array<simd::float_v, 3> bdls = + std::array{gather( bdlTable.table().data(), masterIndexSIMD( index1 + 1, index2, index3 ) ), + gather( bdlTable.table().data(), masterIndexSIMD( index1, index2 + 1, index3 ) ), + gather( bdlTable.table().data(), masterIndexSIMD( index1, index2, index3 + 1 ) )}; + + const std::array<simd::float_v, 3> boundaries = {-0.3f + simd::float_v{index1} * deltaBdl[0], + -250.0f + simd::float_v{index2} * deltaBdl[1], + 0.0f + simd::float_v{index3} * deltaBdl[2]}; + + // -- This is an interpolation, to get a bit more precision + simd::float_v addBdlVal{0.0f}; + for ( int i = 0; i < 3; ++i ) { + + // -- this should make sure that values outside the range add nothing to the sum + var[i] = select( minValsBdl[i] > var[i], boundaries[i], var[i] ); + var[i] = select( maxValsBdl[i] < var[i], boundaries[i], var[i] ); + + const simd::float_v dTab_dVar = ( bdls[i] - bdl ) / deltaBdl[i]; + const simd::float_v dVar = ( var[i] - boundaries[i] ); + addBdlVal += dTab_dVar * dVar; + } + bdl += addBdlVal; + // ---- + + // -- order is: x, tx, y, chi2 + std::array<simd::float_v, 4> finalParams = { + protoTracks.xTT<simd::float_v>( t ), protoTracks.xSlopeTT<simd::float_v>( t ), + protoTracks.yState<simd::float_v>( t ) + + protoTracks.tyState<simd::float_v>( t ) * ( m_zMidUT - protoTracks.zState<simd::float_v>( t ) ), + protoTracks.chi2TT<simd::float_v>( t )}; + + const simd::float_v qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; + simd::mask_v fitMask = simd::mask_true(); + simd::float_v qp = m_finalFit + ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) + : protoTracks.qp<simd::float_v>( t ) * + rsqrt( 1.0f + protoTracks.tyState<simd::float_v>( t ) * + protoTracks.tyState<simd::float_v>( t ) ); // is this correct? + + qp = select( fitMask, qp, protoTracks.qp<simd::float_v>( t ) ); + const simd::float_v qop = select( abs( bdl ) < 1.e-8f, simd::float_v{1000.0f}, qp * qpxz2p ); + + // -- Don't make tracks that have grossly too low momentum + // -- Beware of the momentum resolution! 
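// -- A scalar sketch of the momentum estimate and the cut applied below, with hypothetical
// -- values for the final p / pT thresholds and assuming the usual Gaudi convention
// -- GeV = 1000 (MeV as base unit): the difference of x slopes before and after the magnet
// -- kink, scaled by the field integral, gives q/p (the pT-kick estimate), from which p and
// -- pT follow.
#include <cmath>

bool passesFinalMomentumCutSketch( float sinInX, float sinOutX, float bdl, float tx, float ty ) {
  const float GeV              = 1000.f;  // Gaudi::Units::GeV with MeV = 1
  const float minMomentumFinal = 2500.f;  // hypothetical threshold [MeV]
  const float minPTFinal       = 425.f;   // hypothetical threshold [MeV]
  const float qpxz2p = -1.0f / bdl * 3.3356f / GeV;
  const float qop    = ( std::fabs( bdl ) < 1.e-8f ) ? 1000.0f : ( sinInX - sinOutX ) * qpxz2p;
  const float p      = std::fabs( 1.0f / qop );
  const float pt     = p * std::sqrt( tx * tx + ty * ty );
  return p > minMomentumFinal && pt > minPTFinal;
}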
+ const simd::float_v p = abs( 1.0f / qop ); + const simd::float_v pt = + p * sqrt( protoTracks.txState<simd::float_v>( t ) * protoTracks.txState<simd::float_v>( t ) + + protoTracks.tyState<simd::float_v>( t ) * protoTracks.tyState<simd::float_v>( t ) ); + const simd::mask_v pPTMask = ( p > m_minMomentumFinal.value() && pt > m_minPTFinal.value() ); + + const simd::float_v xUT = finalParams[0]; + const simd::float_v txUT = finalParams[1]; + const simd::float_v yUT = finalParams[2]; + + // -- apply some fiducial cuts + // -- they are optimised for high pT tracks (> 500 MeV) + simd::mask_v fiducialMask = simd::mask_false(); + + if ( m_fiducialCuts ) { + const float magSign = m_magFieldSvc->signedRelativeCurrent(); + + fiducialMask = ( magSign * qop < 0.0f && xUT > -48.0f && xUT < 0.0f && abs( yUT ) < 33.0f ); + fiducialMask = fiducialMask || ( magSign * qop > 0.0f && xUT < 48.0f && xUT > 0.0f && abs( yUT ) < 33.0f ); + + fiducialMask = fiducialMask || ( magSign * qop < 0.0f && txUT > 0.09f + 0.0003f * pt ); + fiducialMask = fiducialMask || ( magSign * qop > 0.0f && txUT < -0.09f - 0.0003f * pt ); + } - // -- This is an interpolation, to get a bit more precision - float addBdlVal = 0.0; - for ( int i = 0; i < 3; ++i ) { + // -- evaluate the linear discriminant and reject ghosts + // -- the values only make sense if the final fit is performed + simd::mask_v mvaMask = simd::mask_true(); - if ( var[i] < minValsBdl[i] || var[i] > maxValsBdl[i] ) continue; + if ( m_finalFit ) { - const float dTab_dVar = ( bdls[i] - bdl ) / deltaBdl[i]; - const float dVar = ( var[i] - boundaries[i] ); - addBdlVal += dTab_dVar * dVar; - } - bdl += addBdlVal; - // ---- + const simd::float_v fourHitDisc = evaluateLinearDiscriminantSIMD<4>( {p, pt, finalParams[3]} ); + const simd::float_v threeHitDisc = evaluateLinearDiscriminantSIMD<3>( {p, pt, finalParams[3]} ); - // -- order is: x, tx, y, chi2 - std::array<float, 4> finalParams = {helper.bestParams[2], helper.bestParams[3], - helper.state.y + helper.state.ty * ( m_zMidUT - helper.state.z ), - helper.bestParams[1]}; + simd::mask_v fourHitMask = fourHitDisc > m_LD4Hits.value(); + simd::mask_v threeHitMask = threeHitDisc > m_LD3Hits.value(); - const float qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; - const float qp = m_finalFit ? fastfitter( helper, hitsInLayers, finalParams, m_zMidUT, qpxz2p ) - : helper.bestParams[0] * vdt::fast_isqrtf( 1.0f + helper.state.ty * helper.state.ty ); - const float qop = ( std::abs( bdl ) < 1.e-8f ) ? 0.0f : qp * qpxz2p; + // -- only have 3 or 4 hit tracks + mvaMask = ( fourHitTrack && fourHitMask ) || ( !fourHitTrack && threeHitMask ); + } - // -- Don't make tracks that have grossly too low momentum - // -- Beware of the momentum resolution! 
- const float p = std::abs( 1.0f / qop ); - const float pt = p * std::sqrt( helper.state.tx * helper.state.tx + helper.state.ty * helper.state.ty ); + simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; - if ( p < m_minMomentumFinal || pt < m_minPTFinal ) return; + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); + auto pos = protoTracks.pos<simd::float_v>( t ); + auto dir = protoTracks.dir<simd::float_v>( t ); + auto covX = protoTracks.cov<simd::float_v>( t ); - const float xUT = finalParams[0]; - const float txUT = finalParams[1]; - const float yUT = finalParams[2]; + int trackIndex = outputTracks.size(); + outputTracks.compressstore_trackVP<simd::int_v>( trackIndex, validTrackMask, ancestor ); + outputTracks.compressstore_statePos<simd::float_v>( trackIndex, validTrackMask, pos ); + outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); + outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); + outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); - // -- apply some fiducial cuts - // -- they are optimised for high pT tracks (> 500 MeV) - if ( m_fiducialCuts ) { - const float magSign = m_magFieldSvc->signedRelativeCurrent(); + // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); + // a simple helper class that facilitates changing from simd to scalar for the slope + TxStorage txArray; + txArray.store_txUT<simd::float_v>( 0, txUT ); - if ( magSign * qop < 0.0f && xUT > -48.0f && xUT < 0.0f && std::abs( yUT ) < 33.0f ) return; - if ( magSign * qop > 0.0f && xUT < 48.0f && xUT > 0.0f && std::abs( yUT ) < 33.0f ) return; + simd::int_v nHits{0}; - if ( magSign * qop < 0.0f && txUT > 0.09f + 0.0003f * pt ) return; - if ( magSign * qop > 0.0f && txUT < -0.09f - 0.0003f * pt ) return; - } + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); + simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); - // -- evaluate the linear discriminant and reject ghosts - // -- the values only make sense if the final fit is performed - if ( m_finalFit ) { - const auto nHits = std::count_if( helper.bestIndices.begin(), helper.bestIndices.end(), - []( auto index ) { return index != -1; } ); - if ( nHits == 3 ) { - if ( evaluateLinearDiscriminant<3>( {p, pt, finalParams[3]} ) < m_LD3Hits ) return; - } else { - if ( evaluateLinearDiscriminant<4>( {p, pt, finalParams[3]} ) < m_LD4Hits ) return; + // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); + outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); + nHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); + outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, nHits ); } - } - - // Make tracks : - int i = outputTracks.size(); - int mask = true; // dummy mask - - // Refined state ? - // auto pos = Vec3<F>( helper.state.x, helper.state.y, helper.state.z ); - // auto dir = Vec3<F>( helper.state.tx, helper.state.ty, 1.f ); - - // Or EndVelo state ? 
- auto pos = inputTracks.statePos<F>( ancestor, 1 ); - auto dir = inputTracks.stateDir<F>( ancestor, 1 ); - auto covX = inputTracks.stateCovX<F>( ancestor, 1 ); - outputTracks.compressstore_trackVP<I>( i, mask, ancestor ); - outputTracks.compressstore_statePos<F>( i, mask, pos ); - outputTracks.compressstore_stateDir<F>( i, mask, dir ); - outputTracks.compressstore_stateCov<F>( i, mask, covX ); - outputTracks.compressstore_stateQoP<F>( i, mask, qop ); + // -- from here on, go over each track individually to find and add the overlap hits + // -- this is not particularly elegant... - int n_hits = 0; - for ( int index : helper.bestIndices ) { - if ( index == -1 ) break; // only the last one can be a nullptr. + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { - LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers.channelIDs[index] ) ); - const int stationLayer = 2 * ( id.utID().station() - 1 ) + ( id.utID().layer() - 1 ); + int trackIndex2 = 0; + for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { + if ( !testbit( validTrackMask, t2 ) ) continue; - outputTracks.compressstore_hit<I>( i, n_hits, mask, id.lhcbID() ); // not sure if correct - n_hits++; + const std::size_t tscalar = t + t2; - const float xhit = hitsInLayers.xs[index]; - const float zhit = hitsInLayers.zs[index]; + const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); + const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); + const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); - const int begin = hitsInLayers.layerIndices[stationLayer]; - const int end = ( stationLayer == 3 ) ? hitsInLayers.size : hitsInLayers.layerIndices[stationLayer + 1]; + int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); - for ( int index2 = begin; index2 < end; ++index2 ) { - const float zohit = hitsInLayers.zs[index2]; - if ( zohit == zhit ) continue; + const int begin = hitsInLayers[hitContIndex].layerIndices[iLayer]; + const int end = + ( iLayer == 3 ) ? 
hitsInLayers[hitContIndex].size : hitsInLayers[hitContIndex].layerIndices[iLayer + 1]; - const float xohit = hitsInLayers.xs[index2]; - const float xextrap = xhit + txUT * ( zohit - zhit ); - if ( xohit - xextrap < -m_overlapTol ) continue; - if ( xohit - xextrap > m_overlapTol ) break; + for ( int index2 = begin; index2 < end; ++index2 ) { + const float zohit = hitsInLayers[hitContIndex].zs[index2]; + if ( zohit == zhit ) continue; - if ( n_hits > 30 ) continue; - LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers.channelIDs[index2] ) ); + const float xohit = hitsInLayers[hitContIndex].xs[index2]; + const float xextrap = xhit + txUTS * ( zohit - zhit ); + if ( xohit - xextrap < -m_overlapTol ) continue; + if ( xohit - xextrap > m_overlapTol ) break; - outputTracks.compressstore_hit<I>( i, n_hits, mask, oid.lhcbID() ); - n_hits++; + LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - // only one overlap hit - // break; + int nHits = outputTracks.nHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + if ( nHits > 30 ) continue; + outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); + outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); + // only one overlap hit + // break; + } + trackIndex2++; + } } + outputTracks.size() += simd::popcount( validTrackMask ); } - - outputTracks.compressstore_nHits<I>( i, mask, n_hits ); - outputTracks.size() += dType::popcount( mask ); } } // namespace LHCb::Pr diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index ba81118c669..48acbb5a6a3 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -62,10 +62,142 @@ */ namespace LHCb::Pr { + + constexpr static int batchSize = align_size( 48 ); + constexpr static int maxSectors = 9; // if needed, algo can be templated with this + + using simd = SIMDWrapper::avx2::types; + using scalar = SIMDWrapper::scalar::types; + struct MiniState final { float x, y, z, tx, ty; }; + struct MiniStatesArray final { + + constexpr static int max_tracks = align_size( 1024 ); + std::array<float, max_tracks> xs; + std::array<float, max_tracks> ys; + std::array<float, max_tracks> zs; + std::array<float, max_tracks> txs; + std::array<float, max_tracks> tys; + std::array<int, max_tracks> indexs; + + std::array<float, max_tracks> covxs; + std::array<float, max_tracks> covys; + std::array<float, max_tracks> covzs; + + std::size_t size{0}; + + SOA_ACCESSOR( x, xs.data() ) + SOA_ACCESSOR( y, ys.data() ) + SOA_ACCESSOR( z, zs.data() ) + SOA_ACCESSOR( tx, txs.data() ) + SOA_ACCESSOR( ty, tys.data() ) + SOA_ACCESSOR( covx, covxs.data() ) + SOA_ACCESSOR( covy, covys.data() ) + SOA_ACCESSOR( covz, covzs.data() ) + SOA_ACCESSOR( index, indexs.data() ) + VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) + VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) + VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) + }; + + struct ExtrapolatedStates final { + + constexpr static int max_tracks = align_size( 1024 ); + + std::array<float, max_tracks> xLayers; + std::array<float, max_tracks> yLayers; + std::array<float, max_tracks> xTols; + std::array<float, max_tracks> txs; + + std::size_t size{0}; + SOA_ACCESSOR( xLayer, xLayers.data() ) + SOA_ACCESSOR( yLayer, yLayers.data() ) + SOA_ACCESSOR( xTol, xTols.data() ) + SOA_ACCESSOR( tx, txs.data() ) + }; + + struct Boundaries final { + + constexpr static int max_tracks = align_size( 1024 ); + + std::array<int, 9 * 
max_tracks> sects; + std::array<float, max_tracks> xTols; + std::array<int, max_tracks> nPoss; + + std::size_t size{0}; + SOA_ACCESSOR_VAR( sect, &( sects[pos * max_tracks] ), int pos ) + SOA_ACCESSOR( xTol, xTols.data() ) + SOA_ACCESSOR( nPos, nPoss.data() ) + }; + + struct ProtoTracks final { + + // -- this is for the hits + // -- this does _not_ include overlap hits, so only 4 per track + std::array<float, 4 * batchSize> xs; + std::array<float, 4 * batchSize> zs; + std::array<float, 4 * batchSize> weightss{}; // this needs to be zero-initialized + std::array<float, 4 * batchSize> sins; + std::array<int, 4 * batchSize> ids; + + // -- this is the output of the fit + std::array<float, batchSize> qps; + std::array<float, batchSize> chi2TTs; + std::array<float, batchSize> xTTs; + std::array<float, batchSize> xSlopeTTs; + std::array<float, batchSize> ys; + + // -- and this the original state (in the Velo) + std::array<float, batchSize> xStates; + std::array<float, batchSize> yStates; + std::array<float, batchSize> zStates; + std::array<float, batchSize> txStates; + std::array<float, batchSize> tyStates; + std::array<int, batchSize> indexs; + + std::array<float, batchSize> covxs; + std::array<float, batchSize> covys; + std::array<float, batchSize> covzs; + + // -- and this and index to find the hit containers + std::array<int, batchSize> hitContIndexs; + + std::size_t size{0}; + SOA_ACCESSOR_VAR( x, &( xs[pos * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( z, &( zs[pos * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( weight, &( weightss[pos * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( sin, &( sins[pos * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( id, &( ids[pos * batchSize] ), int pos ) + + SOA_ACCESSOR( qp, qps.data() ) + SOA_ACCESSOR( chi2TT, chi2TTs.data() ) + SOA_ACCESSOR( xTT, xTTs.data() ) + SOA_ACCESSOR( xSlopeTT, xSlopeTTs.data() ) + SOA_ACCESSOR( y, ys.data() ) + + SOA_ACCESSOR( xState, xStates.data() ) + SOA_ACCESSOR( yState, yStates.data() ) + SOA_ACCESSOR( zState, zStates.data() ) + SOA_ACCESSOR( txState, txStates.data() ) + SOA_ACCESSOR( tyState, tyStates.data() ) + SOA_ACCESSOR( covx, covxs.data() ) + SOA_ACCESSOR( covy, covys.data() ) + SOA_ACCESSOR( covz, covzs.data() ) + SOA_ACCESSOR( index, indexs.data() ) + SOA_ACCESSOR( hitContIndex, hitContIndexs.data() ) + VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) + VEC3_SOA_ACCESSOR( pos, xStates.data(), yStates.data(), zStates.data() ) + VEC3_XY_SOA_ACCESSOR( dir, txStates.data(), tyStates.data(), 1.0f ) + }; + + struct TxStorage final { + std::array<float, simd::size> txUTs; + SOA_ACCESSOR( txUT, txUTs.data() ) + }; + struct TrackHelper final { TrackHelper( const MiniState& miniState, const float zKink, const float sigmaVeloSlope, const float maxPseudoChi2 ) : state( miniState ), bestParams{{0.0f, maxPseudoChi2, 0.0f, 0.0f}} { @@ -111,13 +243,11 @@ namespace LHCb::Pr { Gaudi::Property<float> m_maxYSlope{this, "MaxYSlope", 0.300}; Gaudi::Property<float> m_centralHoleSize{this, "centralHoleSize", 33. * Gaudi::Units::mm}; Gaudi::Property<float> m_intraLayerDist{this, "IntraLayerDist", 15.0 * Gaudi::Units::mm}; - Gaudi::Property<float> m_overlapTol{this, "OverlapTol", 0.7 * Gaudi::Units::mm}; + Gaudi::Property<float> m_overlapTol{this, "OverlapTol", 0.5 * Gaudi::Units::mm}; Gaudi::Property<float> m_passHoleSize{this, "PassHoleSize", 40. 
* Gaudi::Units::mm}; Gaudi::Property<float> m_LD3Hits{this, "LD3HitsMin", -0.5}; Gaudi::Property<float> m_LD4Hits{this, "LD4HitsMin", -0.5}; - // Gaudi::Property<int> m_minHighThres {this, "MinHighThreshold", 1}; // commented, as the threshold bit might / - // will be removed Gaudi::Property<bool> m_printVariables{this, "PrintVariables", false}; Gaudi::Property<bool> m_passTracks{this, "PassTracks", false}; Gaudi::Property<bool> m_doTiming{this, "TimingMeasurement", false}; @@ -129,12 +259,18 @@ namespace LHCb::Pr { StatusCode recomputeGeometry(); - template <typename dType> - bool getState( const Velo::Tracks& inputTracks, int at, MiniState& trState, Upstream::Tracks& outputTracks ) const; + MiniStatesArray getStates( const Velo::Tracks& inputTracks, Upstream::Tracks& outputTracks ) const; + + std::array<ExtrapolatedStates, UTInfo::TotalLayers> extrapStates( const MiniStatesArray& filteredStates, + const UTDAQ::GeomCache& geom ) const; + + std::array<Boundaries, UTInfo::TotalLayers> + findAllSectors( const std::array<ExtrapolatedStates, UTInfo::TotalLayers>& eStatesArray, + MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const; - template <typename FudgeTable> - bool getHits( LHCb::Pr::UT::Mut::Hits& hitsInLayers, const LHCb::Pr::UT::HitHandler& hh, - const FudgeTable& fudgeFactors, const UTDAQ::GeomCache& geom, MiniState& trState ) const; + bool getHitsScalar( const LHCb::Pr::UT::HitHandler& hh, const MiniStatesArray& filteredStates, + const std::array<Boundaries, 4>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, + const std::size_t t ) const; using simd = SIMDWrapper::avx2::types; void inline findHits( const LHCb::Pr::UT::HitHandler& hh, const int fullChanIndex, const MiniState& myState, @@ -145,10 +281,10 @@ namespace LHCb::Pr { template <bool forward> bool formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, TrackHelper& helper ) const; - template <typename dType, typename BdlTable> - void prepareOutputTrack( const Velo::Tracks& inputTracks, int ancestor, const TrackHelper& helper, - const LHCb::Pr::UT::Mut::Hits& hitsInLayers, Upstream::Tracks& outputTracks, - const BdlTable& bdlTable ) const; + template <typename BdlTable> + void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, + const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, + Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From c5ffb253cb2aa7be6695ab10a5c398d4706ce814 Mon Sep 17 00:00:00 2001 From: Christoph Hasse <hasse.christoph@outlook.de> Date: Thu, 2 Apr 2020 13:58:28 +0200 Subject: [PATCH 010/111] add the inline suggested by arthur --- Pr/PrVeloUT/src/PrVeloUT.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index f81b938c341..c8a5995362f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -726,7 +726,7 @@ namespace LHCb::Pr { // ============================================================================== // -- Method that finds the hits in a given layer within a certain range // ============================================================================== - void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, + inline void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, const 
simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, const int firstIndex, const int lastIndex ) const { -- GitLab From 5e0ec7632d4777564ac3e3bc4fd6120d3fd8425a Mon Sep 17 00:00:00 2001 From: Louis Henry <louis.henry@cern.ch> Date: Fri, 10 Apr 2020 00:14:23 +0200 Subject: [PATCH 011/111] Corrected compilation error --- Pr/PrVeloUT/src/PrVeloUT.cpp | 14 +++----------- Pr/PrVeloUT/src/PrVeloUT.h | 11 +++++------ 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index c8a5995362f..8ef8c2d6a98 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -672,14 +672,6 @@ namespace LHCb::Pr { const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { - // ============================================================================== - // -- Method that finds the hits in a given layer within a certain range - // ============================================================================== - void inline VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const int fullChanIndex, const MiniState& myState, - const simd::float_v xTolNormFact, const simd::float_v invNormFact, - LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v yTol, - const simd::float_v yTolSlope ) const { - std::size_t nSize = 0; std::size_t nLayers = 0; @@ -727,9 +719,9 @@ namespace LHCb::Pr { // -- Method that finds the hits in a given layer within a certain range // ============================================================================== inline void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, - const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, - const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, - const int firstIndex, const int lastIndex ) const { + const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, + const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, + const int firstIndex, const int lastIndex ) const { const LHCb::Pr::UT::Hits& myHits = hh.hits(); diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 48acbb5a6a3..bec720f4e25 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -272,12 +272,11 @@ namespace LHCb::Pr { const std::array<Boundaries, 4>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const; - using simd = SIMDWrapper::avx2::types; - void inline findHits( const LHCb::Pr::UT::HitHandler& hh, const int fullChanIndex, const MiniState& myState, - const simd::float_v xTolNormFact, const simd::float_v invNormFact, - LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v yTol, - const simd::float_v yTolSlope ) const; - + inline void findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, + const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, + const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, + const simd::float_v& yTol, const int firstIndex, const int lastIndex ) const; + template <bool forward> bool formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, TrackHelper& helper ) const; -- GitLab From 6deb4c82782ae2544184cdf93dac65213dfd75cc Mon Sep 17 00:00:00 2001 From: Louis Henry 
<louis.henry@cern.ch> Date: Fri, 10 Apr 2020 00:18:18 +0200 Subject: [PATCH 012/111] added UT/UTDAQ from LHCb (LHCb/master) --- .git-lb-checkout | 3 + UT/UTDAQ/CMakeLists.txt | 43 ++ UT/UTDAQ/UTDAQ/UTBoardToBankMap.h | 42 ++ UT/UTDAQ/UTDAQ/UTClustersOnBoard.h | 93 ++++ UT/UTDAQ/UTDAQ/UTDAQFunctor.h | 57 ++ UT/UTDAQ/UTDAQ/UTDAQHelper.h | 80 +++ UT/UTDAQ/UTDAQ/UTInfo.h | 29 + UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp | 24 + UT/UTDAQ/src/Lib/UTDAQHelper.cpp | 190 +++++++ .../src/component/RawBankToUTClusterAlg.cpp | 368 +++++++++++++ .../component/RawBankToUTLiteClusterAlg.cpp | 271 +++++++++ .../src/component/UTClustersToRawBankAlg.cpp | 290 ++++++++++ UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp | 316 +++++++++++ UT/UTDAQ/src/component/UTDecodingBaseAlg.h | 133 +++++ .../src/component/UTDigitsToUTTELL1Data.cpp | 105 ++++ UT/UTDAQ/src/component/UTErrorDecoding.cpp | 69 +++ UT/UTDAQ/src/component/UTFullDecoding.cpp | 335 ++++++++++++ UT/UTDAQ/src/component/UTLayerSelector.cpp | 63 +++ UT/UTDAQ/src/component/UTLayerSelector.h | 46 ++ UT/UTDAQ/src/component/UTPedestalDecoding.cpp | 156 ++++++ UT/UTDAQ/src/component/UTRawBankMonitor.cpp | 135 +++++ UT/UTDAQ/src/component/UTReadoutTool.cpp | 515 ++++++++++++++++++ 22 files changed, 3363 insertions(+) create mode 100644 .git-lb-checkout create mode 100644 UT/UTDAQ/CMakeLists.txt create mode 100644 UT/UTDAQ/UTDAQ/UTBoardToBankMap.h create mode 100644 UT/UTDAQ/UTDAQ/UTClustersOnBoard.h create mode 100644 UT/UTDAQ/UTDAQ/UTDAQFunctor.h create mode 100644 UT/UTDAQ/UTDAQ/UTDAQHelper.h create mode 100644 UT/UTDAQ/UTDAQ/UTInfo.h create mode 100644 UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp create mode 100644 UT/UTDAQ/src/Lib/UTDAQHelper.cpp create mode 100644 UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp create mode 100644 UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp create mode 100644 UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp create mode 100644 UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp create mode 100644 UT/UTDAQ/src/component/UTDecodingBaseAlg.h create mode 100644 UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp create mode 100644 UT/UTDAQ/src/component/UTErrorDecoding.cpp create mode 100644 UT/UTDAQ/src/component/UTFullDecoding.cpp create mode 100644 UT/UTDAQ/src/component/UTLayerSelector.cpp create mode 100644 UT/UTDAQ/src/component/UTLayerSelector.h create mode 100644 UT/UTDAQ/src/component/UTPedestalDecoding.cpp create mode 100644 UT/UTDAQ/src/component/UTRawBankMonitor.cpp create mode 100644 UT/UTDAQ/src/component/UTReadoutTool.cpp diff --git a/.git-lb-checkout b/.git-lb-checkout new file mode 100644 index 00000000000..2d1309ae369 --- /dev/null +++ b/.git-lb-checkout @@ -0,0 +1,3 @@ +[lb-checkout "LHCb.UT/UTDAQ"] + base = 473cc402d1a933e1a80c9e67ba12c862e1ac1f4d + imported = 29025f31a42a3f710edc5a5cd53a9a39106a4523 diff --git a/UT/UTDAQ/CMakeLists.txt b/UT/UTDAQ/CMakeLists.txt new file mode 100644 index 00000000000..488423fc7bd --- /dev/null +++ b/UT/UTDAQ/CMakeLists.txt @@ -0,0 +1,43 @@ +############################################################################### +# (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration # +# # +# This software is distributed under the terms of the GNU General Public # +# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". # +# # +# In applying this licence, CERN does not waive the privileges and immunities # +# granted to it by virtue of its status as an Intergovernmental Organization # +# or submit itself to any jurisdiction. 
# +############################################################################### +################################################################################ +# Package: UTDAQ +################################################################################ +gaudi_subdir(UTDAQ) + +gaudi_depends_on_subdirs(DAQ/DAQUtils + Det/DetDesc + Det/UTDet + Event/DAQEvent + Event/DigiEvent + Event/RecEvent + GaudiAlg + GaudiKernel + Kernel/LHCbKernel + UT/UTKernel + UT/UTTELL1Event + Si/SiDAQ) + + +find_package(Boost) +find_package(ROOT) +# hide warnings from some external projects +include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS}) + +gaudi_add_library(UTDAQLib + src/Lib/*.cpp + PUBLIC_HEADERS UTDAQ + INCLUDE_DIRS Boost Event/RawEvent DAQ/DAQUtils Event/DigiEvent Si/SiDAQ UT/UTKernel + LINK_LIBRARIES DetDescLib UTDetLib DAQEventLib RecEvent GaudiAlgLib GaudiKernel LHCbKernel UTKernelLib UTTELL1Event) + +gaudi_add_module(UTDAQ + src/component/*.cpp + LINK_LIBRARIES UTDAQLib) diff --git a/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h b/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h new file mode 100644 index 00000000000..fa1898bde60 --- /dev/null +++ b/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h @@ -0,0 +1,42 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef _UTBoardToBankMap_H +#define _UTBoardToBankMap_H 1 + +/** @class UTBoardToBankMap UTBoardToBankMap.h + * + * Helper class for mapping boards to banks + * basically hides a a map - used in 2 places.... + * + * @author A Beiter (based on code by M Needham) + * @date 2018-09-04 + */ + +#include <map> + +#include "Kernel/UTTell1ID.h" + +class UTBoardToBankMap final { + +public: + void addEntry( UTTell1ID aBoard, unsigned int aBank ); + UTTell1ID findBoard( const unsigned int aBank ) const; + + // bank to board + unsigned int findBank( const UTTell1ID aBoard ) const { return m_bankMapping.at( aBoard ); } + + void clear() { m_bankMapping.clear(); } + +private: + std::map<UTTell1ID, unsigned int> m_bankMapping; +}; + +#endif // _UTBoardToBankMap_H diff --git a/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h b/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h new file mode 100644 index 00000000000..aea93f74d57 --- /dev/null +++ b/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h @@ -0,0 +1,93 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ +#ifndef _UTClustersOnBoard_H +#define _UTClustersOnBoard_H + +/** @class UTClustersOnBoard UTClustersOnBoard.h + * + * Helper class for keeping track of clusters... + * + * @author A Beiter (based on code by M Needham) + * @date 2018-09-04 + */ + +#include <algorithm> +#include <array> +#include <utility> +#include <vector> + +#include "Event/UTCluster.h" +#include "Kernel/UTDAQDefinitions.h" + +class UTClustersOnBoard final { + +public: + UTClustersOnBoard( unsigned int nMax ); + + typedef std::pair<LHCb::UTCluster*, unsigned int> boardPair; + typedef std::vector<boardPair> ClusterVector; + + void addCluster( LHCb::UTCluster* aCluster ); + + ClusterVector clusters() const; + + bool inOverflow() const; + + bool inOverflow( const unsigned int ppx ) const; + + void clear(); + +private: + unsigned int m_maxClustersPerPPx; + mutable ClusterVector m_clusCont; + std::array<unsigned int, 4> m_ppxCount; +}; + +inline UTClustersOnBoard::UTClustersOnBoard( unsigned int nMax ) : m_maxClustersPerPPx( nMax ) { + // constructer + m_clusCont.reserve( 200 ); + clear(); +} + +inline UTClustersOnBoard::ClusterVector UTClustersOnBoard::clusters() const { + std::sort( m_clusCont.begin(), m_clusCont.end(), + []( const boardPair& obj1, const boardPair& obj2 ) { return obj1.second < obj2.second; } ); + return m_clusCont; +} + +inline void UTClustersOnBoard::addCluster( LHCb::UTCluster* aCluster ) { + + const unsigned int daqChan = aCluster->tell1Channel(); + const unsigned int ppx = daqChan / UTDAQ::nStripPerPPx; + if ( !inOverflow( ppx ) ) { + m_clusCont.emplace_back( aCluster, daqChan ); + ++m_ppxCount[ppx]; + } else { + // data went into the void + } +} + +inline bool UTClustersOnBoard::inOverflow( const unsigned int ppx ) const { + return m_ppxCount[ppx] >= m_maxClustersPerPPx; +} + +inline bool UTClustersOnBoard::inOverflow() const { + auto iter = std::find_if( m_ppxCount.begin(), m_ppxCount.end(), + [&]( unsigned int ppx ) { return ppx >= m_maxClustersPerPPx; } ); + return iter != m_ppxCount.end(); +} + +inline void UTClustersOnBoard::clear() { + m_clusCont.clear(); + m_ppxCount.fill( 0 ); +} + +#endif // ClustersOnBoard diff --git a/UT/UTDAQ/UTDAQ/UTDAQFunctor.h b/UT/UTDAQ/UTDAQ/UTDAQFunctor.h new file mode 100644 index 00000000000..a9b222c063b --- /dev/null +++ b/UT/UTDAQ/UTDAQ/UTDAQFunctor.h @@ -0,0 +1,57 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef _UTDataFunctor_H_ +#define _UTDataFunctor_H_ + +#include "Kernel/UTTell1ID.h" +#include <numeric> + +namespace UTDAQFunctor { + + template <class TYPE> + struct Less_by_Tell1Board { + + /** compare the channel of one object with the + * channel of another object + * @param obj1 first object + * @param obj2 second object + * @return result of the comparision + */ + bool operator()( TYPE obj1, TYPE obj2 ) const { + return ( !obj1 ) ? true : ( !obj2 ) ? 
false : obj1->boardID() < obj2->boardID(); + } + }; + + template <class TYPE> + class Tell1Board_eq { + UTTell1ID aBoard; + + public: + explicit Tell1Board_eq( const UTTell1ID& testBoard ) : aBoard( testBoard ) {} + inline bool operator()( TYPE obj ) const { return obj->boardID() == aBoard; } + }; + + template <class TYPE> + struct compByTell1Board_LB { + bool operator()( const TYPE& obj, const UTTell1ID& testID ) const { + return ( ( !obj ) ? false : testID > obj->boardID() ); + } + }; + + template <class TYPE> + struct compByTell1Board_UB { + bool operator()( const UTTell1ID& testID, const TYPE& obj ) const { + return ( ( !obj ) ? false : testID > obj->boardID() ); + } + }; + +} // namespace UTDAQFunctor +#endif // UTDAQFunctor diff --git a/UT/UTDAQ/UTDAQ/UTDAQHelper.h b/UT/UTDAQ/UTDAQ/UTDAQHelper.h new file mode 100644 index 00000000000..4871106fc5b --- /dev/null +++ b/UT/UTDAQ/UTDAQ/UTDAQHelper.h @@ -0,0 +1,80 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef UTDAQHELPER_H +#define UTDAQHELPER_H 1 + +#include "Kernel/STLExtensions.h" +#include "UTDet/DeUTDetector.h" +#include "UTDet/DeUTSector.h" +#include "UTInfo.h" +#include <array> +#include <boost/container/small_vector.hpp> +#include <optional> + +namespace LHCb { + + // forward declaration + class RawBank; + + namespace UTDAQ { + + /** + * counts number of UT clusters in the given raw banks + * if count exceeds max, it gives up and returns no value + */ + std::optional<unsigned int> nbUTClusters( LHCb::span<const RawBank*> banks, unsigned int maxNbClusters ); + + struct LayerInfo final { + float z; + unsigned int nColsPerSide; + unsigned int nRowsPerSide; + float invHalfSectorYSize; + float invHalfSectorXSize; + float dxDy; + }; + using SectorsInRegionZ = std::array<float, UTInfo::Sectors>; + using SectorsInLayerZ = std::array<SectorsInRegionZ, UTInfo::Regions>; + using SectorsInStationZ = std::array<SectorsInLayerZ, UTInfo::Layers>; + + // -- For the moment, this is assigned here and overwritten in "computeGeometry" in case a geometry + // -- version with a "wrong" sector ordering is used + extern std::array<int, 64> mapQuarterSectorToSectorCentralRegion; + + constexpr static const auto mapSectorToSector = std::array{ + 1, 2, 3, 4, 5, 0, 0, 0, 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 0, 0, 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 0, 0, 0, 76, 77, 78, 79, 80}; + + /** + * fills container of (region, sector) pairs with all sectors concerned by + * a hit at given layer and coordinates and with given x tolerance + */ + void findSectors( unsigned int layer, float x, float y, float xTol, float yTol, const LayerInfo& info, + boost::container::small_vector_base<std::pair<int, int>>& sectors ); + + struct GeomCache { + std::array<LayerInfo, UTInfo::TotalLayers> layers; + std::array<SectorsInStationZ, UTInfo::Stations> sectorsZ; + }; + GeomCache computeGeometry( const DeUTDetector& utDet ); + + [[deprecated( "Please 
use computeGeometry(const DeUTDetector&) instead" )]] inline void + computeGeometry( const DeUTDetector& utDet, std::array<LayerInfo, UTInfo::TotalLayers>& layers, + std::array<SectorsInStationZ, UTInfo::Stations>& sectorsZ ) { + auto cache = computeGeometry( utDet ); + layers = cache.layers; + sectorsZ = cache.sectorsZ; + } + + } // namespace UTDAQ + +} // namespace LHCb + +#endif // UTDAQHELPER_H diff --git a/UT/UTDAQ/UTDAQ/UTInfo.h b/UT/UTDAQ/UTDAQ/UTInfo.h new file mode 100644 index 00000000000..5983a83a1d4 --- /dev/null +++ b/UT/UTDAQ/UTDAQ/UTInfo.h @@ -0,0 +1,29 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef UTDAQ_UTINFO_H +#define UTDAQ_UTINFO_H 1 + +// Include files + +/** Define some numbers for the UT which are detector specific + * + * + * @author Michel De Cian + * @date 2019-05-31 + */ + +namespace UTInfo { + + enum Numbers { Sectors = 98, Regions = 3, Layers = 2, Stations = 2, TotalLayers = 4 }; + +} + +#endif // UTDAQ_UTINFO_H diff --git a/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp b/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp new file mode 100644 index 00000000000..60e2565976b --- /dev/null +++ b/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp @@ -0,0 +1,24 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "UTDAQ/UTBoardToBankMap.h" +#include <algorithm> + +void UTBoardToBankMap::addEntry( UTTell1ID aBoard, unsigned int aBank ) { + // add entry to map + m_bankMapping[aBoard] = aBank; +} + +UTTell1ID UTBoardToBankMap::findBoard( const unsigned int aBank ) const { + // board to bank + auto i = std::find_if( m_bankMapping.begin(), m_bankMapping.end(), + [&]( const std::pair<const UTTell1ID, unsigned int>& p ) { return p.second == aBank; } ); + return i != m_bankMapping.end() ? i->first : UTTell1ID( UTTell1ID::nullBoard ); +} diff --git a/UT/UTDAQ/src/Lib/UTDAQHelper.cpp b/UT/UTDAQ/src/Lib/UTDAQHelper.cpp new file mode 100644 index 00000000000..2f8229bc9bf --- /dev/null +++ b/UT/UTDAQ/src/Lib/UTDAQHelper.cpp @@ -0,0 +1,190 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". 
* +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "UTDAQ/UTDAQHelper.h" + +#include "DetDesc/SolidBox.h" +#include "Event/RawBank.h" +#include "SiDAQ/SiHeaderWord.h" + +#include <cmath> +#include <limits> + +namespace LHCb::UTDAQ { + + std::array<int, 64> mapQuarterSectorToSectorCentralRegion{}; + + std::optional<unsigned int> nbUTClusters( span<const RawBank*> banks, unsigned int maxNbClusters ) { + size_t nbClusters = 0; + for ( auto& bank : banks ) { + nbClusters += SiHeaderWord( bank->data()[0] ).nClusters(); + // cut as soon as we have too many + if ( nbClusters > maxNbClusters ) { return {}; } + } + return nbClusters; + } + + void findSectors( unsigned int layer, float x, float y, float xTol, float yTol, const LayerInfo& info, + boost::container::small_vector_base<std::pair<int, int>>& sectors ) { + auto localX = x - info.dxDy * y; + // deal with sector overlaps and geometry imprecision + xTol += 1; // mm + auto localXmin = localX - xTol; + auto localXmax = localX + xTol; + int subcolmin = std::nearbyintf( localXmin * info.invHalfSectorXSize - 0.5 ) + 2 * info.nColsPerSide; + int subcolmax = std::nearbyintf( localXmax * info.invHalfSectorXSize - 0.5 ) + 2 * info.nColsPerSide; + if ( subcolmax < 0 || subcolmin >= (int)( 4 * info.nColsPerSide ) ) { + // out of acceptance, return empty result + return; + } + // on the acceptance limit + if ( subcolmax >= (int)( 4 * info.nColsPerSide ) ) subcolmax = (int)( 4 * info.nColsPerSide ) - 1; + if ( subcolmin < 0 ) subcolmin = 0; + // deal with sector shifts in tilted layers and overlaps in regular ones + yTol += ( layer == 1 || layer == 2 ) ? 8 : 1; // mm + auto localYmin = y - yTol; + auto localYmax = y + yTol; + int subrowmin = std::nearbyintf( localYmin * info.invHalfSectorYSize - 0.5 ) + 2 * info.nRowsPerSide; + int subrowmax = std::nearbyintf( localYmax * info.invHalfSectorYSize - 0.5 ) + 2 * info.nRowsPerSide; + if ( subrowmax < 0 || subrowmin >= (int)( 4 * info.nRowsPerSide ) ) { + // out of acceptance, return empty result + return; + } + // on the acceptance limit + if ( subrowmax >= (int)( 4 * info.nRowsPerSide ) ) subrowmax = (int)( 4 * info.nRowsPerSide ) - 1; + if ( subrowmin < 0 ) subrowmin = 0; + for ( int subcol = subcolmin; subcol <= subcolmax; subcol++ ) { + int region = + subcol < (int)( 2 * info.nColsPerSide - 4 ) ? 1 : subcol >= (int)( 2 * info.nColsPerSide + 4 ) ? 
3 : 2; + if ( region == 1 ) { + for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { + sectors.emplace_back( 1, ( subcol / 2 ) * info.nRowsPerSide * 2 + subrow / 2 + 1 ); + } + } else if ( region == 2 ) { + int subcolInReg = subcol - 2 * info.nColsPerSide + 4; + for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { + if ( subrow < (int)( 2 * info.nRowsPerSide - 4 ) || subrow >= (int)( 2 * info.nRowsPerSide + 4 ) ) { + // no in central Region + sectors.emplace_back( 2, mapSectorToSector[( subcolInReg / 2 ) * 14 + ( subrow / 2 )] ); + } else { + // central region + sectors.emplace_back( + 2, mapQuarterSectorToSectorCentralRegion[subcolInReg * 8 + subrow - 2 * info.nRowsPerSide + 4] ); + } + } + } else { + for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { + sectors.emplace_back( 3, ( subcol / 2 - info.nColsPerSide - 2 ) * info.nRowsPerSide * 2 + subrow / 2 + 1 ); + } + } + } + } + + GeomCache computeGeometry( const DeUTDetector& utDet ) { + GeomCache cache; + for ( int iStation = 0; iStation < UTInfo::Stations; ++iStation ) { + for ( int iLayer = 0; iLayer < UTInfo::Layers; ++iLayer ) { + // get layer + unsigned int layerIndex = 2 * iStation + iLayer; + const DeUTLayer* layer = utDet.layers()[layerIndex]; + // get the z position of the layer and store it + cache.layers[layerIndex].z = layer->sectors()[0]->sensors()[0]->plane().ProjectOntoPlane( {0, 0, 0} ).z(); + // get the layer size and sector sizes. Go through all sectors, do not rely on ordering + float YFirstRow = std::numeric_limits<float>::max(); + float YLastRow = std::numeric_limits<float>::lowest(); + float smallestXLastCol = std::numeric_limits<float>::max(); + float smallestXFirstcol = std::numeric_limits<float>::max(); + float biggestXFirstCol = std::numeric_limits<float>::lowest(); + unsigned int biggestColumn = 0; + unsigned int smallestColumn = 999; + unsigned int topMostRow = 0; + unsigned int bottomMostRow = 999; + // First pass + // deal with col/row numbers, we need a UTSector for that + // Note that rows/cols cannot be relied on the middle columns/rows + // as they are not anymore "rows/cols" but "number of sectors + // in the column/row". So we use only first column and row + for ( const auto& sector : layer->sectors() ) { + const DeUTSector& utSector = dynamic_cast<const DeUTSector&>( *sector ); + auto column = utSector.column(); + auto row = utSector.row(); + smallestColumn = std::min( smallestColumn, column ); + if ( utSector.column() == smallestColumn ) { + topMostRow = std::max( topMostRow, row ); + bottomMostRow = std::min( bottomMostRow, row ); + } + if ( utSector.row() == bottomMostRow ) { biggestColumn = std::max( biggestColumn, column ); } + } + // Second pass + // find x and y values in the corners to deduce the geometry of the layer + for ( const auto& sector : layer->sectors() ) { + // deal with x,y coordinates. 
Remember the corner coordinates + const DeUTSector& utSector = dynamic_cast<const DeUTSector&>( *sector ); + auto column = utSector.column(); + auto row = utSector.row(); + auto center = sector->geometry()->toGlobal( Gaudi::XYZPoint{0, 0, 0} ); + if ( column == smallestColumn ) { + if ( row == bottomMostRow ) { + smallestXFirstcol = center.x(); + YFirstRow = center.y(); + } else if ( row == topMostRow ) { + biggestXFirstCol = center.x(); + YLastRow = center.y(); + } + } + if ( column == biggestColumn && row == bottomMostRow ) { smallestXLastCol = center.x(); } + cache.sectorsZ[iStation][iLayer][sector->elementID().detRegion() - 1][sector->elementID().sector() - 1] = + center.z(); + } + // gather all information into the corresponding LayerInfo object + auto ncols = biggestColumn - smallestColumn + 1; + auto nrows = topMostRow - bottomMostRow + 1; + cache.layers[layerIndex].nColsPerSide = ncols / 2; + cache.layers[layerIndex].nRowsPerSide = nrows / 2; + cache.layers[layerIndex].invHalfSectorYSize = 2 * ( nrows - 1 ) / ( YLastRow - YFirstRow ); + cache.layers[layerIndex].invHalfSectorXSize = 2 * ( ncols - 1 ) / ( smallestXLastCol - smallestXFirstcol ); + cache.layers[layerIndex].dxDy = ( biggestXFirstCol - smallestXFirstcol ) / ( YLastRow - YFirstRow ); + } + } + // Fill the mapQuarterSectorToSectorCentralRegion array according to current geometry using layer 0 of station 0 + auto& info = cache.layers[0]; + const DeUTLayer* layer = utDet.layers()[0]; + for ( const auto& utSector : layer->sectors() ) { + // check for middle region + if ( utSector->elementID().detRegion() == 2 ) { + // get the physical box representing the sector + auto solid = utSector->geometry()->lvolume()->solid(); + const auto& box = dynamic_cast<const SolidBox&>( *solid ); + // compute rows spanned by the sector + // check corners but take 5mm margin to avoid rounding issues + auto corner0 = utSector->toGlobal( Gaudi::XYZPoint( -box.xHalfLength() + 5, -box.yHalfLength() + 5, 0 ) ); + auto corner1 = utSector->toGlobal( Gaudi::XYZPoint( box.xHalfLength() - 5, box.yHalfLength() - 5, 0 ) ); + int subrow0 = std::nearbyintf( corner0.Y() * info.invHalfSectorYSize - 0.5 ); + int subrow1 = std::nearbyintf( corner1.Y() * info.invHalfSectorYSize - 0.5 ); + int subrowmin = std::min( subrow0, subrow1 ); + int subrowmax = std::max( subrow0, subrow1 ); + // check for central part of middle region + if ( subrowmax >= -4 && subrowmin < 4 ) { + int subcol0 = std::nearbyintf( corner0.X() * info.invHalfSectorXSize - 0.5 ); + int subcol1 = std::nearbyintf( corner1.X() * info.invHalfSectorXSize - 0.5 ); + int subcolmin = std::min( subcol0, subcol1 ); + int subcolmax = std::max( subcol0, subcol1 ); + for ( auto subrow = subrowmin; subrow <= subrowmax; subrow++ ) { + for ( auto subcol = subcolmin; subcol <= subcolmax; subcol++ ) { + auto index = ( subcol + 4 ) * 8 + subrow + 4; + mapQuarterSectorToSectorCentralRegion[index] = utSector->id(); + } + } + } + } + } + return cache; + } +} // namespace LHCb::UTDAQ diff --git a/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp b/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp new file mode 100644 index 00000000000..50e519be18a --- /dev/null +++ b/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp @@ -0,0 +1,368 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in 
the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTCluster.h" +#include "Event/UTLiteCluster.h" +#include "Event/UTSummary.h" +#include "GaudiAlg/Transformer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/STLExtensions.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/UTClusterWord.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTDataFunctor.h" +#include "Kernel/UTDecoder.h" +#include "Kernel/UTFun.h" +#include "Kernel/UTRawBankMap.h" +#include "Kernel/UTStripRepresentation.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" +#include "LHCbMath/LHCbMath.h" +#include "SiDAQ/SiADCWord.h" +#include "UTDecodingBaseAlg.h" +#include <algorithm> +#include <boost/container/small_vector.hpp> +#include <string> +#include <vector> + +/** @class RawBankToUTClusterAlg RawBankToUTClusterAlg.h + * + * Algorithm to create UTClusters from RawEvent object + * + * @author A Beiter (based on code by M. Needham) + * @date 2018-09-04 + */ + +typedef Gaudi::Functional::MultiTransformer<std::tuple<LHCb::UTClusters, LHCb::UTSummary>( const LHCb::ODIN&, + const LHCb::RawEvent& ), + Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> + RawBankToUTClusterAlgBaseClass; + +class RawBankToUTClusterAlg : public RawBankToUTClusterAlgBaseClass { + +public: + /// Standard constructor + RawBankToUTClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ); + + /// initialize + StatusCode initialize() override; + /// finalize + StatusCode finalize() override; + /// Algorithm execution + std::tuple<LHCb::UTClusters, LHCb::UTSummary> operator()( const LHCb::ODIN&, const LHCb::RawEvent& ) const override; + +private: + LHCb::UTSummary decodeBanks( const LHCb::RawEvent& rawEvt, LHCb::UTClusters& clusCont ) const; + + void createCluster( const UTClusterWord& aWord, const UTTell1Board* aBoard, LHCb::span<const SiADCWord> adcValues, + const UTDAQ::version& bankVersion, LHCb::UTClusters& clusCont ) const; + + double mean( LHCb::span<const SiADCWord> adcValues ) const; + + LHCb::UTLiteCluster word2LiteCluster( const UTClusterWord aWord, const LHCb::UTChannelID chan, + const unsigned int fracStrip ) const; + + LHCb::UTSummary createSummaryBlock( const LHCb::RawEvent& rawEvt, const unsigned int& nclus, const unsigned int& pcn, + const bool pcnsync, const unsigned int bytes, + const std::vector<unsigned int>& bankList, + const std::vector<unsigned int>& missing, + const LHCb::UTSummary::RecoveredInfo& recoveredBanks ) const; + + double stripFraction( const double interStripPos ) const; + + Gaudi::Property<std::string> m_pedestalBankString{this, "PedestalBank", "UTPedestal"}; + LHCb::RawBank::BankType m_pedestalType; + Gaudi::Property<std::string> m_fullBankString{this, "FullBank", "UTFull"}; + LHCb::RawBank::BankType m_fullType; +}; + +LHCb::UTLiteCluster RawBankToUTClusterAlg::word2LiteCluster( const UTClusterWord aWord, const LHCb::UTChannelID chan, + const unsigned int fracStrip ) const { + return LHCb::UTLiteCluster( fracStrip, aWord.pseudoSizeBits(), aWord.hasHighThreshold(), chan, + ( detType() == "UT" ) ); +} + +using namespace LHCb; + +//----------------------------------------------------------------------------- +// Implementation file for class 
: RawBufferToUTClusterAlg +// +// 2004-01-07 : Matthew Needham +// 2016-10-07 : Sebastien Ponce +//----------------------------------------------------------------------------- + +DECLARE_COMPONENT( RawBankToUTClusterAlg ) + +RawBankToUTClusterAlg::RawBankToUTClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ) + : MultiTransformer( + name, pSvcLocator, + {KeyValue{"OdinLocation", LHCb::ODINLocation::Default}, + KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( LHCb::RawEventLocation::Tracker, + LHCb::RawEventLocation::Other, + LHCb::RawEventLocation::Default )}}, + {KeyValue( "clusterLocation", UTClusterLocation::UTClusters ), + KeyValue( "summaryLocation", UTSummaryLocation::UTSummary )} ) { + // Standard constructor, initializes variables + setProperty( "BankType", "UT" ).ignore(); +} + +StatusCode RawBankToUTClusterAlg::initialize() { + // Initialization + StatusCode sc = MultiTransformer::initialize(); + if ( sc.isFailure() ) return Error( "Failed to initialize", sc ); + // pedestal bank + m_pedestalType = UTRawBankMap::stringToType( m_pedestalBankString ); + if ( m_bankType == LHCb::RawBank::Velo ) { + fatal() << "Wrong detector type: only UT !" << endmsg; + return StatusCode::FAILURE; + } + // full bank + m_fullType = UTRawBankMap::stringToType( m_fullBankString ); + if ( m_fullType == LHCb::RawBank::Velo ) { + fatal() << "Wrong detector type: only UT!" << endmsg; + return StatusCode::FAILURE; + } + // Spill + computeSpillOffset( inputLocation<1>() ); + // return + return StatusCode::SUCCESS; +} + +std::tuple<LHCb::UTClusters, LHCb::UTSummary> RawBankToUTClusterAlg::operator()( const LHCb::ODIN& odin, + const LHCb::RawEvent& rawEvt ) const { + // make a new digits container + UTClusters clusCont; + if ( !validSpill( odin ) ) { + warning() << "Not a valid spill" << endmsg; + } else { + clusCont.reserve( 2000 ); + // decode banks + LHCb::UTSummary summary = decodeBanks( rawEvt, clusCont ); + // sort + std::sort( clusCont.begin(), clusCont.end(), UTDataFunctor::Less_by_Channel<const UTCluster*>() ); + return std::make_tuple( std::move( clusCont ), std::move( summary ) ); + } + return std::make_tuple( std::move( clusCont ), LHCb::UTSummary() ); +} + +LHCb::UTSummary RawBankToUTClusterAlg::decodeBanks( const RawEvent& rawEvt, LHCb::UTClusters& clusCont ) const { + std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> errorBanks = nullptr; + bool errorBanksFailed = false; + + // create Clusters from this type + bool pcnSync = true; + std::vector<unsigned int> bankList; + UTSummary::RecoveredInfo recoveredBanks; + + const auto& tBanks = rawEvt.banks( bankType() ); + + std::vector<unsigned int> missing = missingInAction( tBanks ); + if ( !missing.empty() ) { + counter( "lost Banks" ) += missing.size(); + if ( tBanks.empty() ) { + ++counter( "no banks found" ); + return createSummaryBlock( rawEvt, 0, UTDAQ::inValidPcn, false, 0, bankList, missing, recoveredBanks ); + } + } + + // vote on the pcns + const unsigned int pcn = pcnVote( tBanks ); + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "PCN was voted to be " << pcn << endmsg; + if ( pcn == UTDAQ::inValidPcn && !m_skipErrors ) { + counter( "skipped Banks" ) += tBanks.size(); + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "PCN vote failed with " << tBanks.size() << endmsg; + warning() << "PCN vote failed" << endmsg; + return UTSummary(); + } + + // loop over the banks of this type.. 
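+  // For each bank: look up the Tell1 board from the source ID, check the magic pattern,
+  // compare the bank PCN with the event-wide vote, validate the data integrity, and only
+  // then decode clusters (recovering via the error banks when recover mode is enabled).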
+ for ( const auto& bank : tBanks ) { + + ++counter( "# banks found" ); + // get the board and data + UTTell1ID tell1ID = UTTell1ID( (unsigned int)bank->sourceID(), detType() == "UT" ); + const UTTell1Board* aBoard = readoutTool()->findByBoardID( tell1ID ); + + if ( !aBoard && !m_skipErrors ) { + // not a valid UT + Warning( "Invalid source ID --> skip bank" + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ) + .ignore(); + ++counter( "skipped Banks" ); + continue; + } + + ++counter( "# valid banks" ); + + if ( bank->magic() != RawBank::MagicPattern ) { + Warning( "wrong magic pattern " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); + ++counter( "skipped banks" ); + continue; + } + + // make a decoder + UTDecoder decoder( bank->data() ); + // get verion of the bank + const UTDAQ::version bankVersion = + forceVersion() ? UTDAQ::version( m_forcedVersion.value() ) : UTDAQ::version( bank->version() ); + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "decoding bank version " << bankVersion << endmsg; + + bool recover = false; + if ( decoder.hasError() == true && !m_skipErrors ) { + + if ( !recoverMode() ) { + bankList.push_back( bank->sourceID() ); + Warning( "bank has errors, skip sourceID " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ) + .ignore(); + ++counter( "skipped Banks" ); + continue; + } else { + // flag that need to recover.... + recover = true; + ++counter( "recovered banks" + std::to_string( bank->sourceID() ) ); + } + } + + UTTELL1BoardErrorBank* errorBank = nullptr; + if ( recover ) { + if ( !errorBanks.get() && !errorBanksFailed ) { + try { + errorBanks = decodeErrors( rawEvt ); + } catch ( GaudiException& e ) { + errorBanksFailed = true; + warning() << e.what() << endmsg; + } + } + if ( errorBanks.get() ) { errorBank = errorBanks->object( bank->sourceID() ); } + // check what fraction we can recover + if ( errorBank != 0 ) recoveredBanks[bank->sourceID()] += errorBank->fractionOK( pcn ); + } + + if ( errorBank == 0 ) { + const unsigned bankpcn = decoder.header().pcn(); + if ( pcn != bankpcn && !m_skipErrors ) { + bankList.push_back( bank->sourceID() ); + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Expected " << pcn << " found " << bankpcn << endmsg; + Warning( "PCNs out of sync, sourceID " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); + ++counter( "skipped Banks" ); + continue; + } + } + + // check the integrity of the bank --> always skip if not ok + if ( !m_skipErrors && !checkDataIntegrity( decoder, aBoard, bank->size(), bankVersion ) ) { + bankList.push_back( bank->sourceID() ); + continue; + } + + // iterator over the data.... 
+ for ( const auto& iterDecoder : decoder.posAdcRange() ) { + if ( !recover ) { + createCluster( iterDecoder.first, aBoard, iterDecoder.second, bankVersion, clusCont ); + } else { + // check that this cluster is ok to be recovered + if ( errorBank != 0 && canBeRecovered( errorBank, iterDecoder.first, pcn ) ) { + createCluster( iterDecoder.first, aBoard, iterDecoder.second, bankVersion, clusCont ); + } + } + } // iterDecoder + } // bank + + const unsigned int bsize = byteSize( tBanks ); + return createSummaryBlock( rawEvt, clusCont.size(), pcn, pcnSync, bsize, bankList, missing, recoveredBanks ); +} + +void RawBankToUTClusterAlg::createCluster( const UTClusterWord& aWord, const UTTell1Board* aBoard, + LHCb::span<const SiADCWord> adcValues, const UTDAQ::version& bankVersion, + UTClusters& clusCont ) const { + // stream the neighbour sum + auto iterADC = adcValues.begin(); + char neighbour = *iterADC; + ++iterADC; + + unsigned int fracStrip = aWord.fracStripBits(); + + // estimate the offset + double stripNum = mean( adcValues ); + double interStripPos = stripNum - floor( stripNum ); + + // If fracStrip equals zero and the interStripPos equals one, the stripNum + // must be incremented. Note that since the rounding can be different from + // rounding on the Tell1, the interStripPos can be 0.75. Trust me, this is + // correct.-- JvT + if ( fracStrip == 0u && interStripPos > 0.5 ) stripNum += 1; + unsigned int offset = (unsigned int)stripNum; + + UTCluster::ADCVector adcs; + adcs.reserve( adcValues.size() ); + for ( unsigned int i = 1; i < adcValues.size(); ++i ) { + adcs.emplace_back( i - 1 - offset, (int)adcValues[i].adc() ); + } // iDigit + + UTTell1Board::chanPair nearestChan = + aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ); + + aBoard->ADCToOffline( aWord.channelID(), adcs, bankVersion, offset, fracStrip ); + + // make cluster +set things + auto newCluster = std::make_unique<UTCluster>( this->word2LiteCluster( aWord, nearestChan.first, nearestChan.second ), + adcs, neighbour, aBoard->boardID().id(), aWord.channelID(), spill() ); + + if ( !clusCont.object( nearestChan.first ) ) { + clusCont.insert( newCluster.release(), nearestChan.first ); + } else { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Cluster already exists not inserted: " << aBoard->boardID() << " " << aWord.channelID() << endmsg; + Warning( "Failed to insert cluster --> exists in container", StatusCode::SUCCESS, 100 ).ignore(); + } +} + +LHCb::UTSummary RawBankToUTClusterAlg::createSummaryBlock( + const RawEvent& rawEvt, const unsigned int& nclus, const unsigned int& pcn, const bool pcnsync, + const unsigned int bytes, const std::vector<unsigned int>& bankList, const std::vector<unsigned int>& missing, + const LHCb::UTSummary::RecoveredInfo& recoveredBanks ) const { + unsigned totalBytes = bytes; + // get the error banks + const auto& errorBanks = rawEvt.banks( LHCb::RawBank::BankType( m_errorType ) ); + totalBytes += byteSize( errorBanks ); + // get the pedestal banks + const auto& pBanks = rawEvt.banks( LHCb::RawBank::BankType( m_pedestalType ) ); + totalBytes += byteSize( pBanks ); + // get the full banks + const auto& fullBanks = rawEvt.banks( LHCb::RawBank::BankType( m_fullType ) ); + totalBytes += byteSize( fullBanks ); + return UTSummary( nclus, pcn, pcnsync, totalBytes, fullBanks.size(), pBanks.size(), errorBanks.size(), bankList, + missing, recoveredBanks ); +} + +double RawBankToUTClusterAlg::mean( LHCb::span<const SiADCWord> adcValues ) const { + 
double sum = 0; + double totCharge = 0; + // note the first is the neighbour sum.. + for ( unsigned int i = 1; i < adcValues.size(); ++i ) { + sum += adcValues[i].adc() * ( i - 1 ); + totCharge += adcValues[i].adc(); + } // i + return ( sum / totCharge ); +} + +StatusCode RawBankToUTClusterAlg::finalize() { + + const double failed = counter( "skipped Banks" ).flag(); + const double processed = counter( "# valid banks" ).flag(); + + double eff = 0.0; + if ( !LHCb::Math::Equal_To<double>()( processed, 0.0 ) ) { eff = 1.0 - ( failed / processed ); } + info() << "Successfully processed " << 100 * eff << " %" << endmsg; + + return MultiTransformer::finalize(); +} diff --git a/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp b/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp new file mode 100644 index 00000000000..c82945e63b9 --- /dev/null +++ b/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp @@ -0,0 +1,271 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTLiteCluster.h" +#include "GaudiAlg/Transformer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTClusterWord.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTDataFunctor.h" +#include "Kernel/UTDecoder.h" +#include "Kernel/UTStripRepresentation.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" +#include "LHCbMath/LHCbMath.h" +#include "UTDecodingBaseAlg.h" +#include <algorithm> +#include <string> +#include <utility> +#include <vector> + +/** @class RawBankToUTLiteClusterAlg RawBankToUTLiteClusterAlg.h + * + * Algorithm to create UTClusters from RawEvent object + * + * @author A. Beiter based on code by: + * @author M. Needham + * @author S. 
Ponce + */ + +typedef Gaudi::Functional::Transformer<LHCb::UTLiteCluster::UTLiteClusters( const LHCb::ODIN&, const LHCb::RawEvent& ), + Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> + RawBankToUTLiteClusterAlgBaseClass; + +class RawBankToUTLiteClusterAlg final : public RawBankToUTLiteClusterAlgBaseClass { + +public: + /// Standard constructor + RawBankToUTLiteClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ); + + StatusCode initialize() override; ///< Algorithm initialization + StatusCode finalize() override; ///< Algorithm finalization + LHCb::UTLiteCluster::UTLiteClusters operator()( const LHCb::ODIN&, const LHCb::RawEvent& ) const override; + +private: + // create Clusters from this type + StatusCode decodeBanks( const LHCb::RawEvent& rawEvt, LHCb::UTLiteCluster::UTLiteClusters& fCont ) const; + + // add a single cluster to the output container + void createCluster( const UTTell1Board* aBoard, const UTDAQ::version& bankVersion, const UTClusterWord& aWord, + LHCb::UTLiteCluster::UTLiteClusters& fCont, const bool isUT ) const; +}; + +void RawBankToUTLiteClusterAlg::createCluster( const UTTell1Board* aBoard, const UTDAQ::version& bankVersion, + const UTClusterWord& aWord, LHCb::UTLiteCluster::UTLiteClusters& fCont, + const bool isUT ) const { + + const unsigned int fracStrip = aWord.fracStripBits(); + const UTTell1Board::chanPair chan = + aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ); + fCont.emplace_back( chan.second, aWord.pseudoSizeBits(), aWord.hasHighThreshold(), chan.first, isUT ); +} + +using namespace LHCb; + +namespace { + struct Less_by_Channel { + + /** compare the channel of one object with the + * channel of another object + * @param obj1 first object + * @param obj2 second object + * @return result of the comparision + */ + // + inline bool operator()( LHCb::UTLiteCluster obj1, LHCb::UTLiteCluster obj2 ) const { + return obj1.channelID() < obj2.channelID(); + } + }; + struct Equal_Channel { + + /** compare the channel of one object with the + * channel of another object + * @param obj1 first object + * @param obj2 second object + * @return result of the comparision + */ + // + inline bool operator()( LHCb::UTLiteCluster obj1, LHCb::UTLiteCluster obj2 ) const { + return obj1.channelID() == obj2.channelID(); + } + }; + +} // namespace + +//----------------------------------------------------------------------------- +// Implementation file for class : RawBufferToUTLiteClusterAlg +// +// 2004-01-07 : Matthew Needham +// 2016-10-07 : Sebastien Ponce +//----------------------------------------------------------------------------- + +DECLARE_COMPONENT( RawBankToUTLiteClusterAlg ) + +RawBankToUTLiteClusterAlg::RawBankToUTLiteClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, + {KeyValue{"OdinLocation", LHCb::ODINLocation::Default}, + KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( + LHCb::RawEventLocation::Tracker, LHCb::RawEventLocation::Other, + LHCb::RawEventLocation::Default )}}, + KeyValue( "clusterLocation", UTLiteClusterLocation::UTClusters ) ) { + setProperty( "BankType", detType() ).ignore(); +} + +StatusCode RawBankToUTLiteClusterAlg::initialize() { + return Transformer::initialize().andThen( [&] { computeSpillOffset( inputLocation<LHCb::RawEvent>() ); } ); +} + +LHCb::UTLiteCluster::UTLiteClusters RawBankToUTLiteClusterAlg::operator()( const LHCb::ODIN& odin, + const LHCb::RawEvent& rawEvt ) const { + 
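+  // Decode the raw banks into lite clusters; for an invalid spill nothing is decoded and
+  // an empty container is returned.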
UTLiteCluster::UTLiteClusters fCont; + if ( !validSpill( odin ) ) { + warning() << "Not a valid spill" << endmsg; + } else { + fCont.reserve( 5000 ); + // decode banks + decodeBanks( rawEvt, fCont ).orThrow( "Problems in decoding event", "RawBankToUTLiteClusterAlg" ).ignore(); + } + return fCont; +} + +StatusCode RawBankToUTLiteClusterAlg::decodeBanks( const RawEvent& rawEvt, + UTLiteCluster::UTLiteClusters& fCont ) const { + std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> errorBanks = nullptr; + bool errorBanksFailed = false; + + const LHCb::span<const RawBank*> tBanks = rawEvt.banks( bankType() ); + std::vector<unsigned int> missing = missingInAction( tBanks ); + if ( !missing.empty() ) { + counter( "lost Banks" ) += missing.size(); + if ( tBanks.empty() ) { + ++counter( "no banks found" ); + return StatusCode::SUCCESS; + } + } + + const unsigned int pcn = pcnVote( tBanks ); + if ( msgLevel( MSG::DEBUG ) ) debug() << "PCN was voted to be " << pcn << endmsg; + if ( pcn == UTDAQ::inValidPcn ) { + counter( "skipped Banks" ) += tBanks.size(); + return Warning( "PCN vote failed", StatusCode::SUCCESS, 2 ); + } + + const bool isUT = ( detType() == "UT" ); + + // loop over the banks of this type.. + + for ( auto iterBank = tBanks.begin(); iterBank != tBanks.end(); ++iterBank ) { + + ++counter( "# valid banks" ); + + // get the board and data + UTTell1Board* aBoard = readoutTool()->findByBoardID( UTTell1ID( ( *iterBank )->sourceID() ) ); + if ( !aBoard && !m_skipErrors ) { + Warning( "Invalid source ID --> skip bank" + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ) + .ignore(); + ++counter( "skipped Banks" ); + continue; + } + + ++counter( "# valid source ID" ); + + if ( ( *iterBank )->magic() != RawBank::MagicPattern ) { + Warning( "wrong magic pattern " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); + counter( "skipped Banks" ) += tBanks.size(); + continue; + } + + // make a SmartBank of shorts... + UTDecoder decoder( ( *iterBank )->data() ); + + bool recover = false; + if ( decoder.hasError() && !m_skipErrors ) { + + if ( !recoverMode() ) { + Warning( "bank has errors, skip sourceID " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, + 2 ) + .ignore(); + ++counter( "skipped Banks" ); + continue; + } else { + // flag that need to recover.... + recover = true; + ++counter( "recovered banks" + std::to_string( ( *iterBank )->sourceID() ) ); + } + } + + // ok this is a bit ugly..... + UTTELL1BoardErrorBank* errorBank = nullptr; + if ( recover ) { + if ( !errorBanks.get() && !errorBanksFailed ) { + try { + errorBanks = decodeErrors( rawEvt ); + } catch ( GaudiException& e ) { + errorBanksFailed = true; + warning() << e.what() << endmsg; + } + } + if ( errorBanks.get() ) { errorBank = errorBanks->object( ( *iterBank )->sourceID() ); } + } + + if ( errorBank ) { + const unsigned bankpcn = decoder.header().pcn(); + if ( pcn != bankpcn && !m_skipErrors ) { + debug() << "Expected " << pcn << " found " << bankpcn << endmsg; + if ( msgLevel( MSG::DEBUG ) ) + Warning( "PCNs out of sync sourceID " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ) + .ignore(); + ++counter( "skipped Banks" ); + continue; + } + } // errorbank == 0 + + const UTDAQ::version bankVersion = + UTDAQ::version( forceVersion() ? 
m_forcedVersion.value() : ( *iterBank )->version() ); + + // check the integrity of the bank --> always skip if not ok + if ( !m_skipErrors && !checkDataIntegrity( decoder, aBoard, ( *iterBank )->size(), bankVersion ) ) continue; + + // read in the first half of the bank + for ( auto iterDecoder : decoder.posRange() ) { + + if ( !recover ) { + createCluster( aBoard, bankVersion, iterDecoder, fCont, isUT ); + } else { + if ( errorBank && canBeRecovered( errorBank, iterDecoder, pcn ) ) { + createCluster( aBoard, bankVersion, iterDecoder, fCont, isUT ); + } // errorbank + } // recover == false + + } // decoder + + } // iterBank + + // sort and remove any duplicates + std::stable_sort( fCont.begin(), fCont.end(), Less_by_Channel() ); + auto iter = std::unique( fCont.begin(), fCont.end(), Equal_Channel() ); + if ( iter != fCont.end() ) { + fCont.resize( iter - fCont.begin() ); + return Warning( "Removed duplicate clusters in the decoding", StatusCode::SUCCESS, 100 ); + } + return StatusCode::SUCCESS; +} + +StatusCode RawBankToUTLiteClusterAlg::finalize() { + + const double failed = counter( "skipped Banks" ).flag(); + const double processed = counter( "# valid banks" ).flag(); + double eff = ( !LHCb::Math::Equal_To<double>()( processed, 0.0 ) ? 1.0 - ( failed / processed ) : 0.0 ); + info() << "Successfully processed " << 100 * eff << " %" << endmsg; + + return Transformer::finalize(); +} diff --git a/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp b/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp new file mode 100644 index 00000000000..ba0b7370b46 --- /dev/null +++ b/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp @@ -0,0 +1,290 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ +#include "Event/BankWriter.h" +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTCluster.h" +#include "Event/UTSummary.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/UTClusterWord.h" +#include "Kernel/UTCommonBase.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTRawBankMap.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" +#include "SiDAQ/SiADCWord.h" +#include "SiDAQ/SiHeaderWord.h" +#include "UTDAQ/UTBoardToBankMap.h" +#include "UTDAQ/UTClustersOnBoard.h" +#include "UTDAQ/UTDAQFunctor.h" +#include <algorithm> +#include <map> +#include <string> +#include <vector> + +/** @class UTClustersToRawBankAlg UTClustersToRawBankAlg.h + * + * Algorithm to fill the Raw buffer with UT information from UTClusters + * + * @author A Beiter (based on code by M Needham) + * @date 2018-09-04 + */ + +template <class IReadoutTool = IUTReadoutTool> +class UTClustersToRawBankAlgT : public UT::CommonBase<GaudiAlgorithm, IReadoutTool> { + +public: + /// Standard constructor + UTClustersToRawBankAlgT( const std::string& name, ISvcLocator* pSvcLocator ); + + StatusCode initialize() override; ///< Algorithm initialization + StatusCode execute() override; ///< Algorithm execution + StatusCode finalize() override; ///< Algorithm finalization + +private: + /// convert string to enum + StatusCode configureBankType(); + + /// initialize event + void initEvent(); + + /// fill the banks + StatusCode groupByBoard( const LHCb::UTClusters* clusCont ); + + unsigned int bankSize( UTClustersOnBoard::ClusterVector& clusCont ) const; + + unsigned int getPCN() const; + + // create a new bank + void writeBank( const UTClustersOnBoard::ClusterVector& clusCont, LHCb::BankWriter& bWriter, + const UTTell1ID aBoardID ); + + Gaudi::Property<int> m_maxClustersPerPPx{this, "maxClusters", 512}; + + DataObjectReadHandle<LHCb::RawEvent> m_raw{this, "rawLocation", LHCb::RawEventLocation::Default}; + DataObjectReadHandle<LHCb::UTClusters> m_clusters{this, "clusterLocation", LHCb::UTClusterLocation::UTClusters}; + DataObjectReadHandle<LHCb::UTSummary> m_summary{this, "summaryLocation", LHCb::UTSummaryLocation::UTSummary}; + + LHCb::RawBank::BankType m_bankType; + + UTBoardToBankMap m_bankMapping; + + std::map<UTTell1ID, UTClustersOnBoard*> m_clusMap; + std::vector<UTClustersOnBoard> m_clusVectors; + + unsigned int m_overflow = 0; + unsigned int m_maxClusterSize = 4; + unsigned int m_pcn = 128; +}; + +// Declaration of the backward compatible UTClustersToRawBankAlg class (not templated for the original UT case) +using UTClustersToRawBankAlg = UTClustersToRawBankAlgT<>; + +using namespace LHCb; + +//----------------------------------------------------------------------------- +// Implementation file for class : UTClusterToRawBankAlg +// +// 2004-08-01 : M. Needham +//----------------------------------------------------------------------------- + +DECLARE_COMPONENT_WITH_ID( UTClustersToRawBankAlgT<IUTReadoutTool>, "UTClustersToRawBankAlg" ) + +template <class IReadoutTool> +UTClustersToRawBankAlgT<IReadoutTool>::UTClustersToRawBankAlgT( const std::string& name, ISvcLocator* pSvcLocator ) + : UT::CommonBase<GaudiAlgorithm, IReadoutTool>( name, pSvcLocator ) { + this->setForcedInit(); +} + +// Finalisation. 
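+// Clear the per-board cluster containers and the board-to-bank mapping.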
+template <class IReadoutTool> +StatusCode UTClustersToRawBankAlgT<IReadoutTool>::finalize() { + m_clusVectors.clear(); + m_bankMapping.clear(); + return StatusCode::SUCCESS; +} + +// Initialisation. +template <class IReadoutTool> +StatusCode UTClustersToRawBankAlgT<IReadoutTool>::initialize() { + + // initialize + StatusCode sc = UT::CommonBase<GaudiAlgorithm, IReadoutTool>::initialize(); + if ( sc.isFailure() ) { return this->Error( "Failed to initialize", sc ); } + + // banktype + if ( configureBankType().isFailure() ) { + this->fatal() << "unknown bank type" << endmsg; + return StatusCode::FAILURE; + } + + // init the map + unsigned int nBoard = this->readoutTool()->nBoard(); + m_clusVectors.reserve( nBoard ); + for ( unsigned int iVal = 0; iVal < nBoard; ++iVal ) { + + UTTell1Board* aBoard = this->readoutTool()->findByOrder( iVal ); + m_bankMapping.addEntry( aBoard->boardID(), iVal ); + + m_clusVectors.emplace_back( m_maxClustersPerPPx ); + m_clusMap[aBoard->boardID()] = &m_clusVectors.back(); + } // iVal + + return StatusCode::SUCCESS; +} + +template <class IReadoutTool> +StatusCode UTClustersToRawBankAlgT<IReadoutTool>::configureBankType() { + + // configure the type of bank to write (UT) + m_bankType = UTRawBankMap::stringToType( this->detType() ); + return m_bankType != RawBank::Velo ? StatusCode::SUCCESS : StatusCode::FAILURE; +} + +template <class IReadoutTool> +StatusCode UTClustersToRawBankAlgT<IReadoutTool>::execute() { + + // Retrieve the RawBank + RawEvent* tEvent = m_raw.get(); + + // initialize this event + initEvent(); + + // get the data.... + const UTClusters* clusCont = m_clusters.get(); + + // group the data by banks.. + StatusCode sc = groupByBoard( clusCont ); + if ( sc.isFailure() ) { return this->Error( "Problems linking offline to DAQ channel", sc ); } + + // convert to a bank and add to buffer + const unsigned int nBoard = this->readoutTool()->nBoard(); + for ( unsigned int iBoard = 0u; iBoard < nBoard; ++iBoard ) { + // get the data .... + const UTTell1ID aBoardID = m_bankMapping.findBoard( iBoard ); + UTClustersOnBoard::ClusterVector boardClusCont = m_clusVectors[iBoard].clusters(); + + if ( m_clusVectors[iBoard].inOverflow() ) ++m_overflow; + + // make the a bankwriter.... 
+ BankWriter bWriter( bankSize( boardClusCont ) ); + + writeBank( boardClusCont, bWriter, aBoardID ); + + RawBank* tBank = tEvent->createBank( UTDAQ::rawInt( aBoardID.id() ), m_bankType, UTDAQ::v4, bWriter.byteSize(), + &( bWriter.dataBank()[0] ) ); + + tEvent->adoptBank( tBank, true ); + + } // iBoard + + // flag overflow + if ( m_overflow > 0 ) { return this->Warning( "RAWBank overflow some banks truncated", StatusCode::SUCCESS ); } + + return StatusCode::SUCCESS; +} + +template <class IReadoutTool> +void UTClustersToRawBankAlgT<IReadoutTool>::initEvent() { + + // intialize temp bank structure each event + std::for_each( m_clusVectors.begin(), m_clusVectors.end(), []( UTClustersOnBoard& i ) { i.clear(); } ); + m_overflow = 0; + + // locate and set the pcn from the summary block if it exists + // in the case there is no summary block write 128 + const LHCb::UTSummary* sum = m_summary.getIfExists(); + if ( sum ) m_pcn = sum->pcn(); +} + +template <class IReadoutTool> +StatusCode UTClustersToRawBankAlgT<IReadoutTool>::groupByBoard( const UTClusters* clusCont ) { + + // divide up the clusters by readout board + for ( const auto& clus : *clusCont ) { + + // find the online channel and board + auto iterMap = m_clusMap.find( UTTell1ID( clus->sourceID(), this->detType() == "UT" ) ); + if ( iterMap != m_clusMap.end() ) { + UTClustersOnBoard* tVec = iterMap->second; + tVec->addCluster( clus ); + } else { + return this->Warning( "Failed to find board in map ", StatusCode::SUCCESS ); + } + } // clusCont + return StatusCode::SUCCESS; +} + +template <class IReadoutTool> +unsigned int UTClustersToRawBankAlgT<IReadoutTool>::bankSize( UTClustersOnBoard::ClusterVector& clusCont ) const { + // bank size in 32 bit words + // 1 short (header) + // + n short (clusters) + // + n char (neighbour sum) + // + n adc * n cluster (char) + unsigned int nClus = clusCont.size(); + unsigned int nADC = + std::accumulate( clusCont.begin(), clusCont.end(), 0u, + []( unsigned n, const UTClustersOnBoard::boardPair& p ) { return n + p.first->size(); } ); + + unsigned int nByte = sizeof( short ) + nClus * sizeof( short ) + nClus * sizeof( char ) + nADC * sizeof( char ); + + return (unsigned int)ceil( nByte / (double)sizeof( int ) ); +} + +template <class IReadoutTool> +void UTClustersToRawBankAlgT<IReadoutTool>::writeBank( const UTClustersOnBoard::ClusterVector& clusCont, + LHCb::BankWriter& bWriter, const UTTell1ID aBoardID ) { + auto nClus = clusCont.size(); + // make a bank header + SiHeaderWord aHeader = SiHeaderWord( nClus, getPCN() ); + bWriter << aHeader.value(); + + // pick up the data and write first half of the bank into temp container... + for ( const auto& clus : clusCont ) { + UTCluster* aClus = clus.first; + UTChannelID aChan = aClus->channelID(); + + double isf = this->readoutTool()->interStripToDAQ( aChan, aBoardID, aClus->interStripFraction() ); + bWriter << UTClusterWord( clus.second, isf, aClus->size(), aClus->highThreshold() ); + } // clusCont + + if ( nClus & 1 ) { // add padding if odd number of clusters + short padding = 0; + bWriter << padding; + } + + // now the second half neighbour sum and ADC + for ( const auto& clus : clusCont ) { + UTCluster* aCluster = clus.first; + // implicit double->char conversion! + char neighbourSum = std::min( std::max( aCluster->neighbourSum(), -16. ), 15. 
); + bWriter << neighbourSum; + UTCluster::ADCVector adcs = aCluster->stripValues(); + + // flip ADC values for rotated staves + UTChannelID channelID = aCluster->channelID(); + this->readoutTool()->ADCOfflineToDAQ( channelID, aBoardID, adcs ); + + unsigned int nToWrite = std::min( aCluster->size(), m_maxClusterSize ); + for ( unsigned int i = 0; i < nToWrite; ++i ) { + bool last = ( i == nToWrite - 1 ); + bWriter << SiADCWord( adcs[i].second, last ); + } // iter + + } // clusCont +} + +template <class IReadoutTool> +unsigned int UTClustersToRawBankAlgT<IReadoutTool>::getPCN() const { + return m_pcn; +} diff --git a/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp b/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp new file mode 100644 index 00000000000..14bd78ec9ec --- /dev/null +++ b/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp @@ -0,0 +1,316 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include <algorithm> + +// local +#include "UTDecodingBaseAlg.h" + +// Event +#include "Event/ByteStream.h" +#include "Event/ODIN.h" +#include "Event/RawEvent.h" +#include "Event/UTCluster.h" + +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTDataFunctor.h" +#include "Kernel/UTDecoder.h" +#include "Kernel/UTLexicalCaster.h" +#include "Kernel/UTRawBankMap.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" + +#include "SiDAQ/SiADCWord.h" +#include "SiDAQ/SiHeaderWord.h" +#include "boost/lexical_cast.hpp" + +#include "UTDet/DeUTDetector.h" + +using namespace LHCb; + +//----------------------------------------------------------------------------- +// Implementation file for class : RawBufferToUTClusterAlg +// +// 2004-01-07 : Matthew Needham +// 2016-10-07 : Sebastien Ponce +//----------------------------------------------------------------------------- + +UTDecodingBaseAlg::UTDecodingBaseAlg( const std::string& name, ISvcLocator* pSvcLocator ) + : UT::AlgBase( name, pSvcLocator ) { + setForcedInit(); +} + +StatusCode UTDecodingBaseAlg::initialize() { + + // Initialization + StatusCode sc = UT::AlgBase::initialize(); + if ( sc.isFailure() ) return sc; + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> initialize " << endmsg; + + // bank type + if ( !m_bankTypeString.empty() ) { + m_bankType = UTRawBankMap::stringToType( m_bankTypeString ); + if ( m_bankType == LHCb::RawBank::Velo ) { + fatal() << "Wrong detector type: only UT !" << endmsg; + return StatusCode::FAILURE; + } + } + + // bank type + m_errorType = UTRawBankMap::stringToType( m_errorBankString ); + if ( m_errorType == LHCb::RawBank::Velo ) { + fatal() << "Wrong detector type: only UT error banks!" 
<< endmsg; + return StatusCode::FAILURE; + } + + return StatusCode::SUCCESS; +} + +unsigned int UTDecodingBaseAlg::pcnVote( LHCb::span<const RawBank*> banks ) const { + + // make a majority vote to get the correct PCN in the event + std::map<unsigned int, unsigned int> pcns; + for ( const auto& bank : banks ) { + UTDecoder decoder( bank->data() ); + // only the good are allowed to vote [the US system..] + if ( !decoder.header().hasError() ) ++pcns[decoder.header().pcn()]; + } // banks + + auto max = + std::max_element( pcns.begin(), pcns.end(), + []( const std::pair<unsigned int, unsigned int>& lhs, + const std::pair<unsigned int, unsigned int>& rhs ) { return lhs.second < rhs.second; } ); + return max == pcns.end() ? UTDAQ::inValidPcn : max->first; +} + +bool UTDecodingBaseAlg::checkDataIntegrity( UTDecoder& decoder, const UTTell1Board* aBoard, const unsigned int bankSize, + const UTDAQ::version& bankVersion ) const { + // check the consistancy of the data + + bool ok = true; + auto iterDecoder = decoder.posAdcBegin(); + for ( ; iterDecoder != decoder.posAdcEnd(); ++iterDecoder ) { + + const UTClusterWord aWord = iterDecoder->first; + + // make some consistancy checks + if ( ( iterDecoder->second.size() - 1u < aWord.pseudoSize() ) ) { + unsigned int fracStrip = aWord.fracStripBits(); + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "adc values do not match ! " << iterDecoder->second.size() - 1u << " " << aWord.pseudoSize() + << " offline chan " + << aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ) + << " source ID " << aBoard->boardID() << " chan " << aWord.channelID() << endmsg; + Warning( "ADC values do not match", StatusCode::SUCCESS, 2 ).ignore(); + ok = false; + break; + } + + // decode the channel + if ( !aBoard->validChannel( aWord.channelID() ) ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "invalid TELL1 channel number board: " << aBoard->boardID() << " chan " << aWord.channelID() + << endmsg; + Warning( "Invalid tell1 channel", StatusCode::SUCCESS, 2 ).ignore(); + ok = false; + break; + } + + } // loop clusters + + // final check that we read the total number of bytes in the bank + if ( ok && (unsigned int)iterDecoder.bytesRead() != bankSize ) { + ok = false; + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Inconsistant byte count " << aBoard->boardID() << " Read: " << iterDecoder.bytesRead() + << " Expected: " << bankSize << endmsg; + Warning( "Inconsistant byte count", StatusCode::SUCCESS ).ignore(); + } + + if ( !ok ) ++counter( "skipped Banks" ); + + return ok; +} + +std::vector<unsigned int> UTDecodingBaseAlg::missingInAction( LHCb::span<const RawBank*> banks ) const { + + std::vector<unsigned int> missing; + if ( banks.size() != readoutTool()->nBoard() ) { + for ( unsigned int iBoard = 0u; iBoard < readoutTool()->nBoard(); ++iBoard ) { + int testID = readoutTool()->findByOrder( iBoard )->boardID().id(); + auto iterBank = + std::find_if( banks.begin(), banks.end(), [&]( const RawBank* b ) { return b->sourceID() == testID; } ); + if ( iterBank == banks.end() ) { + missing.push_back( (unsigned int)testID ); + std::string lostBank = "lost bank " + boost::lexical_cast<std::string>( testID ); + Warning( lostBank, StatusCode::SUCCESS, 0 ).ignore(); + } + } // iBoard + } + return missing; +} + +std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> UTDecodingBaseAlg::decodeErrors( const LHCb::RawEvent& raw ) const { + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> Execute " << endmsg; + + // make 
an empty output vector + auto outputErrors = std::make_unique<UTTELL1BoardErrorBanks>(); + + // Pick up UTError bank + const auto& itf = raw.banks( LHCb::RawBank::BankType( m_errorType ) ); + + if ( itf.empty() ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "event has no error banks " << endmsg; + } else { + ++counter( "events with error banks" ); + counter( "total # error banks" ) += itf.size(); + } + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Starting to decode " << itf.size() << detType() << "Error bank(s)" << endmsg; + + for ( const auto& bank : itf ) { + + std::string errorBank = "sourceID " + boost::lexical_cast<std::string>( bank->sourceID() ); + ++counter( errorBank ); + + if ( bank->magic() != RawBank::MagicPattern ) { + std::string pattern = "wrong magic pattern " + boost::lexical_cast<std::string>( bank->sourceID() ); + Warning( pattern, StatusCode::SUCCESS, 2 ).ignore(); + continue; + } + + const unsigned int* p = bank->data(); + unsigned int w = 0; + const unsigned int bankEnd = bank->size() / sizeof( unsigned int ); + + // bank has to be at least 28 words + if ( bankEnd < UTDAQ::minErrorBankWords ) { + warning() << "Error bank length is " << bankEnd << " and should be at least " << UTDAQ::minErrorBankWords + << endmsg; + Warning( "Error bank too short --> not decoded for TELL1 " + UT::toString( bank->sourceID() ), + StatusCode::SUCCESS, 2 ) + .ignore(); + continue; + } + + // and less than 56 words + if ( bankEnd > UTDAQ::maxErrorBankWords ) { + warning() << "Error bank length is " << bankEnd << " and should be at most " << UTDAQ::maxErrorBankWords + << endmsg; + Warning( "Error bank too long --> not decoded for TELL1 " + UT::toString( bank->sourceID() ), StatusCode::SUCCESS, + 2 ) + .ignore(); + continue; + } + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Decoding bank number of type " << detType() << "Error (TELL1 ID: " << bank->sourceID() + << ", Size: " << bank->size() << " bytes)" << endmsg; + + // make an empty tell1 data object + UTTELL1BoardErrorBank* myData = new UTTELL1BoardErrorBank(); + outputErrors->insert( myData, bank->sourceID() ); + + for ( unsigned int ipp = 0; ipp < UTDAQ::npp && w != bankEnd; ++ipp ) { + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "####### Parsing now data from PP " << ipp << " #####################" << endmsg; + + // we must find 5 words + if ( bankEnd - w < 5 ) { + Warning( "Ran out of words to read", StatusCode::SUCCESS, 2 ).ignore(); + break; + } + + UTTELL1Error* errorInfo = new UTTELL1Error( p[w], p[w + 1], p[w + 2], p[w + 3], p[w + 4] ); + myData->addToErrorInfo( errorInfo ); + w += 5; // read 5 first words + + const unsigned int nOptional = errorInfo->nOptionalWords(); + + // we must find the optional words + 2 more control words + if ( bankEnd - w < nOptional + 2 ) { + Warning( "Ran out of words to read", StatusCode::SUCCESS, 2 ).ignore(); + break; + } + + const unsigned int* eInfo = nullptr; + + if ( errorInfo->hasErrorInfo() ) { + // errorInfo->setOptionalErrorWords(p[w], p[w+1], p[w+2], p[w+3], p[w+4]); + eInfo = &p[w]; + w += 5; + } // has error information + + errorInfo->setWord10( p[w] ); + ++w; + errorInfo->setWord11( p[w] ); + ++w; + + // then some more optional stuff + if ( errorInfo->hasNZS() ) { + errorInfo->setWord12( p[w] ); + ++w; + } // nsz info... 
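// --- Editorial note (illustration only, not part of the patch) --------------
// Word budget per PP-FPGA in an error bank, as read by this loop:
//   5 mandatory words
// + 5 optional error-description words   (only if hasErrorInfo())
// + 2 control words                      (word 10 and word 11)
// + 1 optional NZS word                  (word 12, only if hasNZS())
// + 1 optional pedestal word             (word 13, only if hasPed())
// i.e. 7 to 14 words per PP. With four PP-FPGAs per TELL1 this is consistent
// with the bank-length limits enforced above: 4 * 7 = 28 words minimum and
// 4 * 14 = 56 words maximum.
// -----------------------------------------------------------------------------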
+ + // then some more optional stuff + if ( errorInfo->hasPed() ) { + errorInfo->setWord13( p[w] ); + ++w; + } + + if ( errorInfo->hasErrorInfo() ) { + errorInfo->setOptionalErrorWords( eInfo[0], eInfo[1], eInfo[2], eInfo[3], eInfo[4] ); + } // has error information + + } // loop ip [ppx's] + + if ( w != bankEnd ) { error() << "read " << w << " words, expected: " << bankEnd << endmsg; } + + } // end of loop over banks of a certain type + + return outputErrors; +} + +std::string UTDecodingBaseAlg::toSpill( std::string_view location ) const { + + std::string theSpill; + for ( const auto* name : {"Prev", "Next"} ) { + auto iPos = location.find( name ); + if ( iPos != std::string::npos ) { + auto startSpill = location.substr( iPos ); + auto iPos2 = startSpill.find( "/" ); + theSpill = startSpill.substr( 0, iPos2 ); + break; + } + } // is + return theSpill; +} + +void UTDecodingBaseAlg::computeSpillOffset( std::string_view location ) { + // convert spill to offset in time + auto spill = toSpill( location ); + m_spillOffset = ( spill.size() > 4u ? LHCb::UTCluster::SpillToType( spill ) : LHCb::UTCluster::Spill::Central ); +} + +bool UTDecodingBaseAlg::validSpill( const LHCb::ODIN& odin ) const { + if ( !m_checkValidSpill ) return true; + + // check spill is actually read out using the ODIN + const unsigned int numberOfSpills = odin.timeAlignmentEventWindow(); + return (unsigned int)abs( m_spillOffset ) <= numberOfSpills; +} + +unsigned int UTDecodingBaseAlg::byteSize( LHCb::span<const RawBank*> banks ) const { + return std::accumulate( banks.begin(), banks.end(), 0u, + []( unsigned int s, const RawBank* b ) { return s + b->totalSize(); } ); +} diff --git a/UT/UTDAQ/src/component/UTDecodingBaseAlg.h b/UT/UTDAQ/src/component/UTDecodingBaseAlg.h new file mode 100644 index 00000000000..68e37c2bc7c --- /dev/null +++ b/UT/UTDAQ/src/component/UTDecodingBaseAlg.h @@ -0,0 +1,133 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef UTDECODINGBASEALG_H +#define UTDECODINGBASEALG_H 1 + +#include "Event/RawBank.h" +#include "GaudiKernel/DataObjectHandle.h" +#include "Kernel/STLExtensions.h" +#include "Kernel/UTAlgBase.h" +#include "Kernel/UTDAQDefinitions.h" + +#include "Event/ODIN.h" +#include "Event/UTCluster.h" + +#include <string> +#include <vector> + +/** @class UTDecodingBaseAlg UTDecodingBaseAlg.h + * + * Algorithm to create UTClusters from RawEvent object + * + * @author A. Beiter based on code by: + * @author M. Needham + * @author S. 
Ponce + */ + +#include "Event/RawEvent.h" + +#include "Event/UTTELL1BoardErrorBank.h" +#include "Kernel/UTClusterWord.h" +#include "Kernel/UTDecoder.h" + +#include <string> + +class UTTell1Board; + +class UTDecodingBaseAlg : public UT::AlgBase { + +public: + /// Standard constructor + UTDecodingBaseAlg( const std::string& name, ISvcLocator* pSvcLocator ); + + StatusCode initialize() override; ///< Algorithm initialization + +protected: + LHCb::RawBank::BankType bankType() const; + + bool forceVersion() const; + + unsigned int pcnVote( LHCb::span<const LHCb::RawBank*> banks ) const; + + bool checkDataIntegrity( UTDecoder& decoder, const UTTell1Board* aBoard, const unsigned int bankSize, + const UTDAQ::version& bankVersion ) const; + + /** list of boards missing in action */ + std::vector<unsigned int> missingInAction( LHCb::span<const LHCb::RawBank*> banks ) const; + + /// Decodes error banks + std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> decodeErrors( const LHCb::RawEvent& raw ) const; + + /** recover mode **/ + bool recoverMode() const; + + /** can be recovered recover **/ + bool canBeRecovered( const LHCb::UTTELL1BoardErrorBank* bank, const UTClusterWord& word, + const unsigned int pcn ) const; + + /// compute the spill offset + void computeSpillOffset( std::string_view location ); + + /** check the spill is read out **/ + bool validSpill( const LHCb::ODIN& odin ) const; + + /** return spill offset */ + LHCb::UTCluster::Spill spill() const; + + unsigned int byteSize( LHCb::span<const LHCb::RawBank*> banks ) const; + + Gaudi::Property<bool> m_skipErrors{this, "skipBanksWithErrors", false}; + Gaudi::Property<std::string> m_bankTypeString{this, "BankType", {}}; + + Gaudi::Property<int> m_forcedVersion{this, "forcedVersion", UTDAQ::inValidVersion}; + Gaudi::Property<bool> m_checkValidSpill{this, "checkValidity", false}; + LHCb::RawBank::BankType m_errorType; + LHCb::RawBank::BankType m_bankType; + +private: + std::string toSpill( std::string_view location ) const; + LHCb::UTCluster::Spill m_spillOffset; + + Gaudi::Property<std::string> m_errorBankString{this, "ErrorBank", "UTError"}; + + Gaudi::Property<bool> m_recoverMode{this, "recoverMode", true}; +}; + +inline LHCb::RawBank::BankType UTDecodingBaseAlg::bankType() const { return m_bankType; } + +inline bool UTDecodingBaseAlg::forceVersion() const { return m_forcedVersion >= 0; } + +inline bool UTDecodingBaseAlg::recoverMode() const { return m_recoverMode; } + +#include "Event/UTTELL1Error.h" +#include "Kernel/LHCbConstants.h" +#include "Kernel/UTClusterWord.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTPPRepresentation.h" +#include "Kernel/UTStripRepresentation.h" + +inline bool UTDecodingBaseAlg::canBeRecovered( const LHCb::UTTELL1BoardErrorBank* bank, const UTClusterWord& word, + const unsigned int pcn ) const { + + UTDAQ::UTPPRepresentation ppRep = UTDAQ::UTPPRepresentation( UTDAQ::UTStripRepresentation( word.channelID() ) ); + unsigned int pp, beetle, port, strip; + ppRep.decompose( pp, beetle, port, strip ); // split up the word + const LHCb::UTTELL1Error* errorInfo = bank->ppErrorInfo( pp ); + bool ok = false; + if ( errorInfo != 0 ) { + if ( errorInfo->linkInfo( beetle, port, pcn ) == LHCb::UTTELL1Error::FailureMode::kNone ) { ok = true; } + } + return ok; +} + +inline LHCb::UTCluster::Spill UTDecodingBaseAlg::spill() const { return m_spillOffset; } + +#endif // UTDECODINGBASEALG_H diff --git a/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp b/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp new file mode 100644 
index 00000000000..5b753a38c8d --- /dev/null +++ b/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp @@ -0,0 +1,105 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTDigit.h" +#include "Event/UTTELL1Data.h" +#include "GaudiAlg/Transformer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/LHCbConstants.h" +#include "Kernel/UTAlgBase.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTTell1Board.h" +#include "UTDet/DeUTDetector.h" +#include <algorithm> +#include <string> +#include <vector> + +/** @class RawBankToUTProcFull RawBankToUTProcFull.h + * + * Algorithm to create UTTELL1Data (type ProcFull) from RawEvent object + * + * @author A. Beiter (based on code by M. Needham) + * @date 2018-09-04 + */ + +class UTDigitsToUTTELL1Data + : public Gaudi::Functional::Transformer<LHCb::UTTELL1Datas( const LHCb::UTDigits& ), + Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { + +public: + /// Standard constructor + UTDigitsToUTTELL1Data( const std::string& name, ISvcLocator* pSvcLocator ); + + LHCb::UTTELL1Datas operator()( const LHCb::UTDigits& ) const override; ///< Algorithm execution + +private: + StatusCode createTell1Data( const LHCb::UTDigits* digits, LHCb::UTTELL1Datas* outCont ) const; + + std::string m_inputLocation; + std::string m_outputLocation; +}; + +using namespace LHCb; + +//----------------------------------------------------------------------------- +// Implementation file for class : RawBufferToUTClusterAlg +// +// 2004-01-07 : Matthew Needham +//----------------------------------------------------------------------------- + +DECLARE_COMPONENT( UTDigitsToUTTELL1Data ) + +UTDigitsToUTTELL1Data::UTDigitsToUTTELL1Data( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer{name, + pSvcLocator, + + {"inputLocation", UTDigitLocation::UTDigits}, + + {"outputLocation", UTTELL1DataLocation::UTSubPeds}} {} + +LHCb::UTTELL1Datas UTDigitsToUTTELL1Data::operator()( const LHCb::UTDigits& digitCont ) const { + + // make a new digits container + UTTELL1Datas outCont; + createTell1Data( &digitCont, &outCont ).orThrow( "Problems creating Tell1 data", "UTDigitsToUTTELL1Data" ).ignore(); + return outCont; +} + +StatusCode UTDigitsToUTTELL1Data::createTell1Data( const UTDigits* digits, UTTELL1Datas* outCont ) const { + + if ( digits->size() != tracker()->nStrip() ) { + return Warning( "Digit cont size does not equal number of detector strips", StatusCode::SUCCESS, 1 ); + } + + // make correct number of output containers + for ( unsigned int i = 0; i < readoutTool()->nBoard(); ++i ) { + UTTell1Board* board = readoutTool()->findByOrder( i ); + UTTELL1Data::Data dataVec; + dataVec.resize( UTDAQ::noptlinks ); + for ( auto& dv : dataVec ) dv.resize( LHCbConstants::nStripsInBeetle ); + UTTELL1Data* tell1Data = new UTTELL1Data( dataVec ); + int key = (int)board->boardID().id(); + outCont->insert( tell1Data, key ); + } // nBoard + + // then its just 
one big loop + for ( const auto& digit : *digits ) { + UTDAQ::chanPair aPair = readoutTool()->offlineChanToDAQ( digit->channelID(), 0.0 ); + UTTELL1Data* adcBank = outCont->object( aPair.first.id() ); + UTTELL1Data::Data& dataVec = adcBank->data(); + const unsigned int beetle = aPair.second / LHCbConstants::nStripsInBeetle; + const unsigned int strip = aPair.second % LHCbConstants::nStripsInBeetle; + dataVec[beetle][strip] = int( digit->depositedCharge() ); + } + + return StatusCode::SUCCESS; +} diff --git a/UT/UTDAQ/src/component/UTErrorDecoding.cpp b/UT/UTDAQ/src/component/UTErrorDecoding.cpp new file mode 100644 index 00000000000..1e5e226f844 --- /dev/null +++ b/UT/UTDAQ/src/component/UTErrorDecoding.cpp @@ -0,0 +1,69 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/UTTELL1BoardErrorBank.h" +#include "GaudiAlg/Consumer.h" +#include "UTDecodingBaseAlg.h" + +/** @class UTErrorDecoding UTErrorDecoding.h public/UTErrorDecoding.h + * + * + * @author A Beiter (based on code by Mathias Knecht, M Needham, S Ponce) + * @date 2018-09-04 + */ + +class UTErrorDecoding : public Gaudi::Functional::Consumer<void( const LHCb::RawEvent& ), + Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> { +public: + /// Standard constructor + UTErrorDecoding( const std::string& name, ISvcLocator* pSvcLocator ); + + /// Algorithm execution + void operator()( const LHCb::RawEvent& ) const override; + +private: + Gaudi::Property<bool> m_PrintErrorInfo{this, "PrintErrorInfo", false}; +}; + +using namespace LHCb; + +//----------------------------------------------------------------------------- +// Implementation file for class : UTErrorDecoding +// +// 2007-11-29: Mathias Knecht +// Update 2008 M Needham +// Update 2016 S Ponce +//----------------------------------------------------------------------------- + +// ---------------------------------------------------------------------------- +// Declaration of the Algorithm Factory +DECLARE_COMPONENT( UTErrorDecoding ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +UTErrorDecoding::UTErrorDecoding( const std::string& name, ISvcLocator* pSvcLocator ) + : Consumer( name, pSvcLocator, + KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( + LHCb::RawEventLocation::Tracker, LHCb::RawEventLocation::Other, + LHCb::RawEventLocation::Default )} ) {} + +//============================================================================= +// Main execution +//============================================================================= +void UTErrorDecoding::operator()( const LHCb::RawEvent& raw ) const { + // in fact all the work is delegated to the base class + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> Execute " << endmsg; + auto errorBanks = decodeErrors( raw ); + // print out the error banks + if ( 
m_PrintErrorInfo ) { + for ( const auto& b : *errorBanks ) info() << b << endmsg; + } +} diff --git a/UT/UTDAQ/src/component/UTFullDecoding.cpp b/UT/UTDAQ/src/component/UTFullDecoding.cpp new file mode 100644 index 00000000000..eb4cbfd3af6 --- /dev/null +++ b/UT/UTDAQ/src/component/UTFullDecoding.cpp @@ -0,0 +1,335 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTTELL1Data.h" +#include "GaudiAlg/Transformer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTAlgBase.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTRawBankMap.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" +#include "boost/lexical_cast.hpp" +#include <bitset> + +/** @class UTFullDecoding UTFullDecoding.h + * + * Algorithm to decode the NZS UT data in the RawEvent buffer into UUTELL1Data + * objects. Job options: + * - \b PrintErrorInfo: Flag to print out errors from event info. + * - \b InputLocation: Location of RawEvent + * - \b OutputLocation: Location of NZS output data, e.g. UTFull + * - \b EventInfoLocation: Location of event info data + * \sa{https://edms.cern.ch/document/692431/3}. + * + * @author Andy Beiter (based on code by Mathias Knecht, Jeroen van Tilburg) + * @date 2018-09-04 + */ +class UTFullDecoding + : public Gaudi::Functional::MultiTransformer<std::tuple<LHCb::UTTELL1Datas, LHCb::UTTELL1EventInfos>( + const LHCb::RawEvent& ), + Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { + +public: + /// Standard constructor + UTFullDecoding( const std::string& name, ISvcLocator* pSvcLocator ); + StatusCode initialize() override; ///< Algorithm initialization + std::tuple<LHCb::UTTELL1Datas, LHCb::UTTELL1EventInfos> + operator()( const LHCb::RawEvent& ) const override; ///< Algorithm execution + +private: + LHCb::RawBank::BankType m_bankType; + + // job options + Gaudi::Property<bool> m_printErrorInfo{this, "PrintErrorInfo", true}; ///< Flag to print out errors from event info +}; + +using namespace LHCb; +using namespace UTDAQ; + +//----------------------------------------------------------------------------- +// Implementation file for class : UTFullDecoding +// +// 2007-09-11: Mathias Knecht, Jeroen van Tilburg +//----------------------------------------------------------------------------- + +// ---------------------------------------------------------------------------- +// Declaration of the Algorithm Factory +DECLARE_COMPONENT( UTFullDecoding ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +UTFullDecoding::UTFullDecoding( const std::string& name, ISvcLocator* pSvcLocator ) + : MultiTransformer{name, + pSvcLocator, + {"InputLocation", RawEventLocation::Default}, + {KeyValue{"OutputLocation", UTTELL1DataLocation::UTFull}, + KeyValue{"EventInfoLocation", 
UTTELL1EventInfoLocation::UTEventInfo}}} { + + setForcedInit(); +} + +//============================================================================= +// Initialization +//============================================================================= +StatusCode UTFullDecoding::initialize() { + return UT::AlgBase::initialize().andThen( [&] { + // initialize bank type + m_bankType = UTRawBankMap::stringToType( detType() + "Full" ); + } ); +} + +//============================================================================= +// Main execution +//============================================================================= +std::tuple<UTTELL1Datas, UTTELL1EventInfos> UTFullDecoding::operator()( const LHCb::RawEvent& raw ) const { + // Get the raw data + std::tuple<UTTELL1Datas, UTTELL1EventInfos> output; + // make container of TELL1 boards + auto& [outputData, eventInfos] = output; + + // Initialize some counters + unsigned int L0EvtID = 0; + + // Pick up UTFull bank + const auto& itf = raw.banks( RawBank::BankType( m_bankType ) ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "Starting to decode " << itf.size() << detType() << "Full bank(s)" << endmsg; + } + + int nBank = 0; + for ( const RawBank* p : itf ) { + + // Create an empty tell1 data object + UTTELL1Data::Data tell1Data; + tell1Data.resize( noptlinks ); + for ( auto& i : tell1Data ) i.resize( nports * nstrips, 0 ); + + // Create an empty tell1 header object + UTTELL1Data::Data tell1Header; + tell1Header.resize( noptlinks ); + + for ( auto j = tell1Header.begin(); j != tell1Header.end(); ++j ) { j->resize( nports * nheaders, 0 ); } + + // Create an empty eventInfo object + UTTELL1Data::Info eventInfo; + for ( unsigned int i = 0; i < npp; ++i ) { + UTTELL1EventInfo* evtInfo = new UTTELL1EventInfo(); + eventInfo.push_back( evtInfo ); + eventInfos.insert( evtInfo ); + } + std::vector<unsigned int> sentPP; + + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "Decoding bank number [" << nBank++ << "] of type " << detType() << "Full (TELL1 ID: " << p->sourceID() + << ", Size: " << p->size() << " bytes)" << endmsg; + } + + // Check if the board is valid + UTTell1ID tell1ID = UTTell1ID( (unsigned int)p->sourceID(), detType() == "UT" ); + const UTTell1Board* aBoard = readoutTool()->findByBoardID( tell1ID ); + if ( !aBoard ) { // not a valid b + std::string invalidSource = + "Invalid source ID --> skip bank " + boost::lexical_cast<std::string>( p->sourceID() ); + Warning( invalidSource, StatusCode::SUCCESS, 2 ).ignore(); + ++counter( "Skipped banks" ); + continue; + } + + if ( (unsigned int)p->size() % nwordsFull != 0 ) { + error() << "Wrong bank size for this type!! You should have multiple of " << nwordsFull << " bytes" << endmsg; + } + + // Counters + unsigned int cntWD = 0; // Word counter, resets for each PP. Range 0 to 223. + unsigned int cntPP = 0; // PP-FPGA counter, goes from 0 to 3. 
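// --- Editorial note (illustration only, not part of the patch) --------------
// Each PP-FPGA contributes 224 words: 216 words of Beetle headers and ADC data
// followed by 8 event-info words (handled as cases 216-223 below). Every 32-bit
// word is split into four 8-bit samples using the UTDAQ masks; assuming
// mask1..mask4 are the usual byte masks (0x000000FF, 0x0000FF00, 0x00FF0000,
// 0xFF000000), the unpacking below is equivalent to this standalone sketch:
//
//   #include <array>
//   #include <cstdint>
//   std::array<std::uint32_t, 4> unpackBytes( std::uint32_t w ) {
//     return { w & 0xFFu, ( w >> 8 ) & 0xFFu, ( w >> 16 ) & 0xFFu, ( w >> 24 ) & 0xFFu };
//   }
//   // e.g. unpackBytes( 0x04030201 ) yields {1, 2, 3, 4}.
// -----------------------------------------------------------------------------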
+ + // Now loop over all WORDS in a bank + + for ( const unsigned int* w = p->begin<unsigned int>(); w != p->end<unsigned int>(); ++w ) { + + if ( cntWD % 224 == 0 ) { // Each 224 words we have a new PP-FPGA + cntWD = 0; + cntPP = ( *( w + 219 ) & UTTELL1EventInfo::ChipAddrMask ) >> UTTELL1EventInfo::ChipAddrBits; + sentPP.push_back( cntPP ); + + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "####### Parsing now data from PP " << cntPP << " ##################" << endmsg; + } + } + + // Set the Tell1 sourceID for each PP + UTTELL1EventInfo* evtInfo = eventInfo[cntPP]; + evtInfo->setSourceID( p->sourceID() ); + + // Unpack the 32-bit word into 8-bit chunks + unsigned int bits1 = 0; + unsigned int bits2 = 8; + unsigned int bits3 = 16; + unsigned int bits4 = 24; + unsigned int p1 = ( *w & mask1 ) >> bits1; + unsigned int p2 = ( *w & mask2 ) >> bits2; + unsigned int p3 = ( *w & mask3 ) >> bits3; + unsigned int p4 = ( *w & mask4 ) >> bits4; + + if ( cntWD < 216 ) { // Words below 216 contain data and header + int iPort = cntWD / ( nbeetles * 36 ); + int iWord = ( cntWD % ( nbeetles * 36 ) ) / nbeetles; + int iBeetle = 2 * ( cntWD % nbeetles ) + nBeetlesPerPPx * cntPP; + + if ( iWord >= 1 && iWord < 4 ) { // Header part + int iHeader = iWord - 1; + tell1Header[iBeetle][iHeader + 3 * iPort] = p1; + tell1Header[iBeetle][iHeader + 3 * ( iPort + 2 )] = p2; + tell1Header[iBeetle + 1][iHeader + 3 * iPort] = p3; + tell1Header[iBeetle + 1][iHeader + 3 * ( iPort + 2 )] = p4; + } else if ( iWord >= 4 && iWord < 36 ) { // Data part + int iChan = iWord - 4; + tell1Data[iBeetle][iChan + 32 * iPort] = p1; + tell1Data[iBeetle][iChan + 32 * ( iPort + 2 )] = p2; + tell1Data[iBeetle + 1][iChan + 32 * iPort] = p3; + tell1Data[iBeetle + 1][iChan + 32 * ( iPort + 2 )] = p4; + } + } else { // Words 216-223 contains Event Info + switch ( cntWD ) { + case 216: { + evtInfo->setWord0( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Event Information (bits): " << std::bitset<8>( evtInfo->EventInformation() ) + << " | Bank List (bits): " << std::bitset<8>( evtInfo->BankList() ) + << " | Detector ID (dec): " << (unsigned int)evtInfo->DetectorID() + << " | Bunch Counter (dec): " << evtInfo->bCnt() << endmsg; + } + break; + } + + case 217: { + evtInfo->setWord1( *w ); + if ( msgLevel( MSG::DEBUG ) ) { debug() << "(Event Info) L0-EventID (dec): " << (int)*w << endmsg; } + if ( L0EvtID == 0 ) { + // For each bank, L0EvtID is initialized. So the first time in the + // bank, L0EvtID is checked. + L0EvtID = (unsigned int)evtInfo->L0EvID(); + } else { + // The rest of the time (for all PPs, all TELL1), there's a check + // that L0EvtID is the same for all. + if ( (unsigned int)evtInfo->L0EvID() != L0EvtID ) { + error() << "L0-Event ID not the same for all!" << endmsg; + } + } + break; + } + case 218: { + evtInfo->setWord2( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Reserved Bits (hex): " << std::hex << evtInfo->R1() + << " | Process Info (bits): " << std::bitset<8>( evtInfo->ProcessInfo() ) + << " | PCN (from Beetle 0) (dec): " << std::dec << (unsigned int)evtInfo->pcn() << endmsg; + } + break; + } + case 219: { + evtInfo->setWord3( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Chip Addr (dec): " << (unsigned int)evtInfo->ChipAddr() + << " | Sync RAM Full (bits): " << std::bitset<6>( evtInfo->SyncRAMFull() ) + << " | TLK Link Loss (bits): " << std::bitset<6>( evtInfo->tlkLnkLoss() ) << endmsg; + debug() << "(Event Info) | Sync Evt Size Err. 
(bits): " << std::bitset<6>( evtInfo->SyncEvtSizeError() ) + << " | Opt. Link Disable (bits): " << std::bitset<6>( evtInfo->OptLnkDisable() ) + << " | Opt. Link NoEvent (bits): " << std::bitset<6>( evtInfo->OptLnkNoEvt() ) << endmsg; + } + if ( m_printErrorInfo ) { + if ( evtInfo->SyncRAMFull() != 0 ) + error() << "Sync RAM Full in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP + << ". Value (One bit per link): " << std::bitset<6>( evtInfo->SyncRAMFull() ) << endmsg; + if ( evtInfo->tlkLnkLoss() != 0 ) + error() << "TLK Link loss in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP + << ". Value (One bit per link): " << std::bitset<6>( evtInfo->tlkLnkLoss() ) << endmsg; + if ( evtInfo->SyncEvtSizeError() != 0 ) + error() << "Sync Event size error in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP + << ". Value (One bit per link): " << std::bitset<6>( evtInfo->SyncEvtSizeError() ) << endmsg; + if ( evtInfo->OptLnkNoEvt() != 0 ) + error() << "Optical Link No Event in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP + << ". Value (One bit per link): " << std::bitset<6>( evtInfo->OptLnkNoEvt() ) << endmsg; + } + break; + } + case 220: { + evtInfo->setWord4( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Reserved bit (bits): " << std::bitset<1>( evtInfo->R2() ) + << " | PCN Error (bits):" << std::bitset<1>( evtInfo->pcnError() ) + << " | Optical Link no clock (bits): " << std::bitset<6>( evtInfo->OptLnkNoClock() ) << endmsg; + debug() << "(Event Info) | Header Pseudo Err. (bits): " << std::bitset<24>( evtInfo->HeaderPseudoError() ) + << endmsg; + } + + if ( m_printErrorInfo ) { + if ( evtInfo->HeaderPseudoError() != 0 ) + error() << "Header Pseudo Error in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP + << ". 
Value (One bit per port=24 bits): " << std::bitset<24>( evtInfo->HeaderPseudoError() ) + << endmsg; + } + break; + } + case 221: { + evtInfo->setWord5( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Beetle3 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle3() + << " | Beetle2 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle2() + << " | Beetle1 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle1() + << " | Beetle0 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle0() << endmsg; + } + break; + } + case 222: { + eventInfo[cntPP]->setWord6( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Reserved bits (hex): " << std::hex << evtInfo->R3() + << " | Beetle5 PCN (dec): " << std::dec << (unsigned int)evtInfo->pcnBeetle5() + << " | Beetle4 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle4() << endmsg; + } + break; + } + case 223: { + eventInfo[cntPP]->setWord7( *w ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "(Event Info) Reserved bits (hex): " << std::hex << (unsigned int)evtInfo->R4() + << " | I Headers: Beet.5 (dec): " << std::dec << (unsigned int)evtInfo->iHeaderBeetle5() + << " | Beet.4: " << (unsigned int)evtInfo->iHeaderBeetle4() + << " | Beet.3: " << (unsigned int)evtInfo->iHeaderBeetle3() + << " | Beet.2: " << (unsigned int)evtInfo->iHeaderBeetle2() + << " | Beet.1: " << (unsigned int)evtInfo->iHeaderBeetle1() + << " | Beet.0: " << (unsigned int)evtInfo->iHeaderBeetle0() << std::dec << endmsg; + } + break; + } + default: + error() << "Not the right number of words: word number " << cntWD << ", you should have 224 words per PP" + << endmsg; + } + } + + cntWD++; + } // Loop over all words + + // make an empty tell1 data object + UTTELL1Data* myData = new UTTELL1Data( tell1Data, tell1Header, sentPP, eventInfo ); + + // put into the container, second argument is TELL1 id + outputData.insert( myData, int( p->sourceID() ) ); + + } // end of loop over banks of a certain type + + return output; +} diff --git a/UT/UTDAQ/src/component/UTLayerSelector.cpp b/UT/UTDAQ/src/component/UTLayerSelector.cpp new file mode 100644 index 00000000000..06ff22077e8 --- /dev/null +++ b/UT/UTDAQ/src/component/UTLayerSelector.cpp @@ -0,0 +1,63 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2019 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ +// Include files +// local +#include "UTLayerSelector.h" +// kernel +#include "Kernel/UTChannelID.h" + +// ==================================================================== +namespace { + static const auto s_layerMap = + std::array{std::pair{"UTaX", 9}, std::pair{"UTaU", 10}, std::pair{"UTbV", 17}, std::pair{"UTbX", 18}}; + +} // namespace +// ==================================================================== + +DECLARE_COMPONENT( UTLayerSelector ) + +// ==================================================================== + +// ==================================================================== +bool UTLayerSelector::select( const LHCb::UTChannelID& id ) const { return ( *this )( id ); } +// ==================================================================== + +// ==================================================================== +bool UTLayerSelector::operator()( const LHCb::UTChannelID& id ) const { + // Checks detector + if ( ( m_detType == "UT" && id.isUT() ) ) { + if ( msgLevel( MSG::DEBUG ) ) + debug() << "Excluded layers are in " << m_detType << ". Cluster is in other detector." << endmsg; + + return false; + } + + // Checks layer + + for ( auto it = m_ignoredLayers.begin(); it != m_ignoredLayers.end(); it++ ) { + if ( static_cast<int>( it->find( m_detType ) ) == -1 ) { // Checks if detector and layer agree + continue; + } + auto jt = std::find_if( s_layerMap.begin(), s_layerMap.end(), [&]( const std::pair<const char*, unsigned int>& p ) { + return id.uniqueLayer() == p.second && *it == p.first; + } ); + if ( jt != s_layerMap.end() ) { + if ( msgLevel( MSG::DEBUG ) ) + debug() << "Cluster is in " << m_detType << " layer " << ( *it ) << " and will be removed!" << endmsg; + return true; + } + + if ( msgLevel( MSG::DEBUG ) ) debug() << "Cluster will not be removed!" << endmsg; + } + + return false; +} +// ==================================================================== diff --git a/UT/UTDAQ/src/component/UTLayerSelector.h b/UT/UTDAQ/src/component/UTLayerSelector.h new file mode 100644 index 00000000000..ac8172f180b --- /dev/null +++ b/UT/UTDAQ/src/component/UTLayerSelector.h @@ -0,0 +1,46 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#ifndef UTLAYERSELECTOR_H +#define UTLAYERSELECTOR_H 1 + +// Include files +// from STL +#include <string> +#include <vector> + +// from GaudiAlg +#include "GaudiAlg/GaudiTool.h" + +// from Kernel +#include "Kernel/IUTChannelIDSelector.h" + +/** @class UTLayerSelector UTLayerSelector.h + * + * Algorithm to remove clusters in excluded layers + * + * @author A. Beiter (based on code by Ch. 
Elsasser) + * @date 2018-09-04 + */ + +class UTLayerSelector : public extends<GaudiTool, IUTChannelIDSelector> { + +public: + using extends::extends; + + bool select( const LHCb::UTChannelID& id ) const override; + bool operator()( const LHCb::UTChannelID& id ) const override; + +private: + Gaudi::Property<std::string> m_detType{this, "DetType", "UT"}; + Gaudi::Property<std::vector<std::string>> m_ignoredLayers{this, "IgnoredLayers"}; +}; + +#endif // UTLAYERSELECTOR_H diff --git a/UT/UTDAQ/src/component/UTPedestalDecoding.cpp b/UT/UTDAQ/src/component/UTPedestalDecoding.cpp new file mode 100644 index 00000000000..26adbe6ee50 --- /dev/null +++ b/UT/UTDAQ/src/component/UTPedestalDecoding.cpp @@ -0,0 +1,156 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "Event/RawBank.h" +#include "Kernel/UTAlgBase.h" +//#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "Event/UTTELL1Data.h" +#include "GaudiAlg/Transformer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTRawBankMap.h" +#include "boost/lexical_cast.hpp" + +/** @class UTPedestalDecoding UTPedestalDecoding.h + * + * Algorithm to decode the pedestal bank in the RawEvent buffer into + * UTTELL1Data objects. Job options: + * - \b InputLocation: Location of RawEvent + * - \b OutputLocation: Location of output pedestal data, e.g. TTPedestal + * \sa{http://edms.cern.ch/doc/695007}. 
+ * + * @author Andy Beiter (based on code by Mathias Knecht, Jeroen van Tilburg) + * @date 2018-09-04 + */ +class UTPedestalDecoding : public Gaudi::Functional::Transformer<LHCb::UTTELL1Datas( const LHCb::RawEvent& ), + Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { + +public: + /// Standard constructor + UTPedestalDecoding( const std::string& name, ISvcLocator* pSvcLocator ); + + StatusCode initialize() override; ///< Algorithm initialization + LHCb::UTTELL1Datas operator()( const LHCb::RawEvent& ) const override; ///< Algorithm execution + +private: + LHCb::RawBank::BankType m_bankType; +}; + +using namespace LHCb; +using namespace UTDAQ; + +//----------------------------------------------------------------------------- +// Implementation file for class : UTPedestalDecoding +// +// 2007-09-11: Mathias Knecht, Jeroen van Tilburg +//----------------------------------------------------------------------------- + +// ---------------------------------------------------------------------------- +// Declaration of the Algorithm Factory +DECLARE_COMPONENT( UTPedestalDecoding ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +UTPedestalDecoding::UTPedestalDecoding( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer{name, + pSvcLocator, + {"InputLocation", RawEventLocation::Default}, + {"OutputLocation", UTTELL1DataLocation::UTPedestal}} {} + +//============================================================================= +// Initialization +//============================================================================= +StatusCode UTPedestalDecoding::initialize() { + return Transformer::initialize().andThen( + [&] { m_bankType = UTRawBankMap::stringToType( detType() + "Pedestal" ); } ); +} + +//============================================================================= +// Main execution +//============================================================================= +UTTELL1Datas UTPedestalDecoding::operator()( const RawEvent& raw ) const { + + // make container of TELL1 boards + UTTELL1Datas outputPedestals; + + // Pick up pedestal bank + const auto& itf = raw.banks( RawBank::BankType( m_bankType ) ); + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "Starting to decode " << itf.size() << detType() << "Pedestal bank(s)" << endmsg; + } + + for ( const LHCb::RawBank* p : itf ) { + + if ( msgLevel( MSG::DEBUG ) ) { + debug() << "Decoding bank of type " << detType() << "Pedestal (TELL1 ID: " << p->sourceID() + << ", Size: " << p->size() << " bytes)" << endmsg; + } + + // Check if the board is valid + UTTell1ID tell1ID = UTTell1ID( (unsigned int)( p->sourceID() ), detType() == "UT" ); + const UTTell1Board* aBoard = this->readoutTool()->findByBoardID( tell1ID ); + if ( !aBoard ) { + std::string invalidSource = "Invalid source ID --> skip bank" + boost::lexical_cast<std::string>( p->sourceID() ); + Warning( invalidSource, StatusCode::SUCCESS, 2 ).ignore(); + ++counter( "skipped Banks" ); + continue; + } + + // Create an empty tell1 pedestal object + UTTELL1Data::Data pedestals; + pedestals.resize( noptlinks ); + for ( auto& i : pedestals ) i.resize( nports * nstrips, 0 ); + + if ( (unsigned int)p->size() != sizebankPedestal ) { + error() << "Wrong bank size for this type!! You should have " << sizebankPedestal << " bytes" << endmsg; + } + + // Counters + unsigned int cntWD = 0; // Word counter, resets for each PP. 
Range 0 to 191. + unsigned int cntPP = 0; // PP-FPGA counter, goes from 0 to 3. + + // Now loop over all WORDS in a bank + for ( const unsigned int* w = p->begin<unsigned int>(); w != p->end<unsigned int>(); ++w ) { + + if ( cntWD == 192 ) { // Each 192 words we have a new PP-FPGA + cntWD = 0; + ++cntPP; + } + + if ( cntWD == 0 && msgLevel( MSG::DEBUG ) ) { + debug() << "####### Parsing now data from PP " << cntPP << " ##################" << endmsg; + } + + // Unpack the 32-bit word into 8-bit chunks + unsigned int p1 = ( *w & mask1 ); + unsigned int p2 = ( ( *w & mask2 ) / 0x100 ); + unsigned int p3 = ( ( *w & mask3 ) / 0x10000 ); + unsigned int p4 = ( ( *w & mask4 ) / 0x1000000 ); + + int iPort = cntWD / ( nbeetles * nstrips ); // range 0 to 1 + int iWord = ( cntWD % ( nbeetles * nstrips ) ) / nbeetles; // range: 0 to 32 + int iBeetle = 2 * ( cntWD % nbeetles ) + nBeetlesPerPPx * cntPP; // range: 0 to 22 + + pedestals[iBeetle][iWord + nstrips * iPort] = p1; + pedestals[iBeetle][iWord + nstrips * ( iPort + 2 )] = p2; + pedestals[iBeetle + 1][iWord + nstrips * iPort] = p3; + pedestals[iBeetle + 1][iWord + nstrips * ( iPort + 2 )] = p4; + + ++cntWD; + } // Loop over all words + + // make an empty tell1 data object + // and put into the container, second argument is Tell1 id + outputPedestals.insert( new UTTELL1Data( pedestals ), int( p->sourceID() ) ); + + } // end of loop over banks of a certain type + return outputPedestals; +} diff --git a/UT/UTDAQ/src/component/UTRawBankMonitor.cpp b/UT/UTDAQ/src/component/UTRawBankMonitor.cpp new file mode 100644 index 00000000000..4cb09ed4979 --- /dev/null +++ b/UT/UTDAQ/src/component/UTRawBankMonitor.cpp @@ -0,0 +1,135 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +// C++ code for 'LHCb Tracking package(s)' +// +// Author: A. Beiter (based on code by M. Needham) +// Created: 2018-09-04 + +#include "Event/RawBank.h" +#include "Event/RawEvent.h" +#include "GaudiAlg/Consumer.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTHistoAlgBase.h" +#include "Kernel/UTRawBankMap.h" +#include "Kernel/UTTell1ID.h" +#include <string> + +/** @class UTRawBankMonitor UTRawBankMonitor.h + * UTCheckers/UTRawBankMonitor.h + * + * Class for checking UT RAW buffer + * + * @author A. 
Beiter (based on code by M.Needham) + * @date 2018-09-04 + */ + +class UTRawBankMonitor : public Gaudi::Functional::Consumer<void( LHCb::RawEvent const& ), + Gaudi::Functional::Traits::BaseClass_t<UT::HistoAlgBase>> { + +public: + /// constructor + UTRawBankMonitor( const std::string& name, ISvcLocator* pSvcLocator ) + : Consumer{name, + pSvcLocator, + {"RawEventLocations", Gaudi::Functional::concat_alternatives( LHCb::RawEventLocation::Other, + LHCb::RawEventLocation::Default )}} {}; + + /// initialize + StatusCode initialize() override; + + /// execute + void operator()( const LHCb::RawEvent& ) const override; + +private: + StatusCode configureBankType(); + + LHCb::RawBank::BankType m_bankType = LHCb::RawBank::LastType; +}; + +DECLARE_COMPONENT( UTRawBankMonitor ) + +using namespace LHCb; + +//-------------------------------------------------------------------- +// +//-------------------------------------------------------------------- + +StatusCode UTRawBankMonitor::initialize() { + + if ( histoTopDir().empty() ) setHistoTopDir( detType() + "/" ); + + return UT::HistoAlgBase::initialize().andThen( &UTRawBankMonitor::configureBankType, this ); // configure banktype +} + +void UTRawBankMonitor::operator()( const LHCb::RawEvent& rawEvt ) const { + + // execute once per event + + // init counters + unsigned int maxBoardSize = 0; + UTTell1ID hotBoard( 0, detType() == "UT" ); + unsigned int eventDataSize = 0; + + const auto& tBanks = rawEvt.banks( m_bankType ); + for ( const auto* iterBank : tBanks ) { + + // board info.... + size_t bankSize = iterBank->size() / sizeof( char ); + UTTell1ID aBoard( iterBank->sourceID(), detType() == "UT" ); + + // event counters + if ( bankSize > maxBoardSize ) { + maxBoardSize = bankSize; + hotBoard = aBoard; + } + eventDataSize += bankSize; + + // histogram per board + plot( (double)bankSize, "board data size", 0., 200., 200 ); + + // data size per board + // unsigned int id = (aBoard.region()*20) + aBoard.subID(); + // const std::map< unsigned int, unsigned int > & SourceIDToTELLmap = readoutTool()->SourceIDToTELLNumberMap(); + // unsigned int tellNumber = SourceIDToTELLmap.find(iterBank->sourceID())->second; + unsigned int tellNumber = readoutTool()->SourceIDToTELLNumber( iterBank->sourceID() ); + + // These hard coded numbers come from here: https://lbtwiki.cern.ch/bin/view/Online/Tell1PortNum + unsigned int doubleLinkedUTtell1s[] = {1, 2, 3, 4, 5, 6, 8, 9, 10, 13, 14, 15}; + unsigned int numberOfLinks = 1; + + if ( detType() == "UT" ) { + for ( unsigned int i = 0; i < 12; i++ ) { + if ( tellNumber == doubleLinkedUTtell1s[i] ) numberOfLinks = 2; + } + } + + double datasize = bankSize / (double)numberOfLinks; + plot( tellNumber, "data size", 0., 100., 100, datasize ); + plot( tellNumber, "data size unnormalised", 0., 100., 100, (double)bankSize ); + + } // iterBank + + // data size + plot( (double)eventDataSize, 1, "event data size", 0., 10000., 500 ); + + // include standard header HARDCODE !!! + unsigned int headerSize = tBanks.size() * 2u; + plot( (double)( eventDataSize + headerSize ), 2, "total data size", 0., 10000., 500 ); + + plot( (double)maxBoardSize, 3, "hot board size", 0., 200., 200 ); + unsigned int id = ( hotBoard.region() * 20 ) + hotBoard.subID(); + plot( (double)id, 4, "hot board ID", 0., 100., 100 ); +} + +StatusCode UTRawBankMonitor::configureBankType() { + m_bankType = UTRawBankMap::stringToType( detType() ); + return m_bankType != RawBank::Velo ? 
StatusCode::SUCCESS : StatusCode::FAILURE; +} diff --git a/UT/UTDAQ/src/component/UTReadoutTool.cpp b/UT/UTDAQ/src/component/UTReadoutTool.cpp new file mode 100644 index 00000000000..d9fdd736927 --- /dev/null +++ b/UT/UTDAQ/src/component/UTReadoutTool.cpp @@ -0,0 +1,515 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +#include "DetDesc/Condition.h" +#include "Event/UTCluster.h" +#include "GaudiAlg/GaudiTool.h" +#include "Kernel/IUTReadoutTool.h" +#include "Kernel/UTBoardMapping.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/UTDAQDefinitions.h" +#include "Kernel/UTTell1Board.h" +#include "Kernel/UTTell1ID.h" +#include "Kernel/UTXMLUtils.h" +#include "UTDet/DeUTDetector.h" +#include <algorithm> +#include <fstream> +#include <string> +#include <vector> + +/** + * Concret Class for things related to the Readout of the UT Tell1 Boards + */ + +class UTReadoutTool : public extends<GaudiTool, IUTReadoutTool> { + +public: + /// Constructer + UTReadoutTool( const std::string& type, const std::string& name, const IInterface* parent ); + + /// nBoard + unsigned int nBoard() const override; + + /// return vector of Tell1IDs + std::vector<UTTell1ID> boardIDs() const override; + + /// convert ITChannelID to DAQ ChannelID + UTDAQ::chanPair offlineChanToDAQ( const LHCb::UTChannelID aOfflineChan, double isf ) const override; + + /// convert offline interStripFraction to DAQ interStripFraction + double interStripToDAQ( const LHCb::UTChannelID aOfflineChan, const UTTell1ID aBoardID, + const double isf ) const override; + + bool ADCOfflineToDAQ( const LHCb::UTChannelID aOfflineChan, const UTTell1ID aBoardID, + LHCb::UTCluster::ADCVector& adcs ) const override; + + /// find the Tell1 board given a board ID + UTTell1Board* findByBoardID( const UTTell1ID aBoardID ) const override; + + /// find Tell1 board by storage order + UTTell1Board* findByOrder( const unsigned int aValue ) const override; + + /// Add the mapping of source ID to TELL1 board number + unsigned int SourceIDToTELLNumber( unsigned int sourceID ) const override; + + /** Add the mapping of source ID to board number for UT */ + const std::map<unsigned int, unsigned int>& SourceIDToTELLNumberMap() const override; + + /// list of the readout sector ids on the board + std::vector<LHCb::UTChannelID> sectorIDs( const UTTell1ID board ) const override; + + /// list of the readout sectors + std::vector<DeUTSector*> sectors( const UTTell1ID board ) const override; + + /// service box + unsigned int nServiceBox() const override; + + /// service box number + std::string serviceBox( const LHCb::UTChannelID& aChan ) const override; + + /// list of the readout sectors ids in a service box + std::vector<LHCb::UTChannelID> sectorIDsOnServiceBox( const std::string& serviceBox ) const override; + + /// list of the readout sectors in a service box + std::vector<DeUTSector*> sectorsOnServiceBox( const std::string& serviceBox ) const override; + + /// list of service boxes + const std::vector<std::string>& 
serviceBoxes() const override; + + /// Add the mapping of TELL1 board number to source ID + unsigned int TELLNumberToSourceID( unsigned int TELL ) const override; + + /// print mapping + void printMapping() const override; + + /// write out the mapping as xml + StatusCode writeMappingToXML() const override; + + StatusCode validate() const; + + /// finalize + StatusCode finalize() override; + + /// init + StatusCode initialize() override; + + /// get region + unsigned int region( const LHCb::UTChannelID aChan ) const override; + + /** Add the mapping of board number to source ID for UT */ + const std::map<unsigned int, unsigned int>& TELLNumberToSourceIDMap() const override; + +protected: + void clear(); + + std::string footer() const; + std::string header( const std::string& conString ) const; + std::string strip( const std::string& conString ) const; + + unsigned int m_hybridsPerBoard; + unsigned int m_nBoard{0}; + unsigned int m_nServiceBox; + std::vector<std::unique_ptr<UTTell1Board>> m_boards; + std::map<UTTell1ID, UTTell1Board*> m_boardsMap; + std::vector<std::string> m_serviceBoxes; + std::vector<unsigned int> m_firstBoardInRegion; + + Gaudi::Property<bool> m_printMapping{this, "printMapping", false}; + DeUTDetector* m_tracker = nullptr; + Gaudi::Property<std::string> m_conditionLocation{this, "conditionLocation", + "/dd/Conditions/ReadoutConf/UT/ReadoutMap"}; + +private: + Gaudi::Property<bool> m_writeXML{this, "writeMappingToXML", false}; + Gaudi::Property<std::string> m_footer{this, "footer", "</DDDB>"}; + Gaudi::Property<std::string> m_startTag{this, "startTag", "<condition"}; + Gaudi::Property<std::string> m_outputFileName{this, "outputFile", "ReadoutMap.xml"}; + std::ofstream m_outputFile; + Gaudi::Property<std::string> m_author{this, "author", "Joe Bloggs"}; + Gaudi::Property<std::string> m_tag{this, "tag", "None"}; + Gaudi::Property<std::string> m_desc{this, "description", "BlahBlahBlah"}; + Gaudi::Property<bool> m_removeCondb{this, "removeCondb", false}; + Gaudi::Property<unsigned int> m_precision{this, "precision", 16u}; + Gaudi::Property<unsigned int> m_depth{this, "depths", 3u}; + +private: + StatusCode createBoards(); + StatusCode createTell1Map(); + + unsigned int m_nRegionA = 512; + unsigned int m_firstStation = 512; +}; + +using namespace LHCb; + +DECLARE_COMPONENT( UTReadoutTool ) + +UTReadoutTool::UTReadoutTool( const std::string& type, const std::string& name, const IInterface* parent ) + : base_class( type, name, parent ) { + // constructor + m_boards.reserve( 100 ); // about correct +} + +void UTReadoutTool::clear() { + // clear the boards + m_boards.clear(); + m_nBoard = 0; +} + +StatusCode UTReadoutTool::initialize() { + // initialization phase... 
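// --- Editorial note (illustration only, not part of the patch) --------------
// The TELL1 board map is rebuilt from the ReadoutMap condition registered
// below: createTell1Map() and createBoards() run on every condition update and
// once here via the forced runUpdate(). Algorithms deriving from the UT base
// classes reach the tool through the readoutTool() accessor used throughout
// this patch, e.g. (hypothetical fragment):
//
//   const unsigned int nBoard = readoutTool()->nBoard();
//   UTTell1Board* first = readoutTool()->findByOrder( 0 );   // storage order
//   UTTell1Board* same  = first ? readoutTool()->findByBoardID( first->boardID() ) : nullptr;
// -----------------------------------------------------------------------------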
+ StatusCode sc = GaudiTool::initialize(); + if ( sc.isFailure() ) { return Error( "Failed to initialize", sc ); } + + // tracker + m_tracker = getDet<DeUTDetector>( DeUTDetLocation::location() ); + + registerCondition( m_conditionLocation, &UTReadoutTool::createTell1Map ); + + registerCondition( m_conditionLocation, &UTReadoutTool::createBoards ); + + sc = runUpdate(); // force update + if ( sc.isFailure() ) return Error( "Failed first UMS update for readout tool", sc ); + + if ( m_printMapping ) printMapping(); + + return StatusCode::SUCCESS; +} + +StatusCode UTReadoutTool::finalize() { + + if ( m_writeXML ) writeMappingToXML().ignore( /* AUTOMATICALLY ADDED FOR gaudi/Gaudi!763 */ ); + return base_class::finalize(); +} + +StatusCode UTReadoutTool::writeMappingToXML() const { + + // load conditions + Condition* rInfo = getDet<Condition>( m_conditionLocation ); + + std::ofstream outputFile( m_outputFileName.value() ); + if ( outputFile.fail() ) { return Warning( "Failed to open output file", StatusCode::FAILURE ); } + + // write the xml headers + outputFile << header( rInfo->toXml( "", true, m_precision ) ) << '\n'; + + // add comments + std::ostringstream comment; + UT::XMLUtils::fullComment( comment, m_author, m_tag, m_desc ); + outputFile << comment.str() << '\n'; + + std::string temp = strip( rInfo->toXml( "", false, m_precision ) ); + outputFile << temp << "\n\n"; + + // footer + outputFile << footer() << '\n'; + + return StatusCode::SUCCESS; +} + +unsigned int UTReadoutTool::nBoard() const { + // number of boards + return m_nBoard; +} + +unsigned int UTReadoutTool::nServiceBox() const { return m_serviceBoxes.size(); } + +std::string UTReadoutTool::serviceBox( const LHCb::UTChannelID& aChan ) const { + + // find the board + + static const std::string InValidBox = "Unknown"; + bool isFound = false; + unsigned int waferIndex = 999u; + unsigned int iBoard = m_firstBoardInRegion[region( aChan )]; + while ( ( iBoard != m_nBoard ) && ( isFound == false ) ) { + if ( m_boards[iBoard]->isInside( aChan, waferIndex ) ) { + isFound = true; + } else { + ++iBoard; + } + } // iBoard + return ( isFound ? m_boards[iBoard]->serviceBoxes()[waferIndex] : InValidBox ); +} + +std::vector<UTTell1ID> UTReadoutTool::boardIDs() const { + std::vector<UTTell1ID> ids; + ids.reserve( m_boards.size() ); + std::transform( m_boards.begin(), m_boards.end(), std::back_inserter( ids ), + []( const auto& b ) { return b->boardID(); } ); + return ids; +} + +UTDAQ::chanPair UTReadoutTool::offlineChanToDAQ( const UTChannelID aOfflineChan, double isf ) const { + // look up region start..... 
+ unsigned int iBoard = m_firstBoardInRegion[region( aOfflineChan )]; + unsigned int waferIndex = 999u; + + bool isFound = false; + while ( ( iBoard != m_nBoard ) && !isFound ) { + if ( m_boards[iBoard]->isInside( aOfflineChan, waferIndex ) ) { + isFound = true; + } else { + ++iBoard; + } + } // iBoard + + if ( !isFound ) { + return {UTTell1ID( UTTell1ID::nullBoard, false ), 0}; + } else { + return {m_boards[iBoard]->boardID(), m_boards[iBoard]->offlineToDAQ( aOfflineChan, waferIndex, isf )}; + } +} + +double UTReadoutTool::interStripToDAQ( const UTChannelID aOfflineChan, const UTTell1ID aBoardID, + const double isf ) const { + unsigned int waferIndex = 999u; + + auto aBoard = findByBoardID( aBoardID ); + double newisf = 0; + + if ( aBoard->isInside( aOfflineChan, waferIndex ) ) { + unsigned int orientation = aBoard->orientation()[waferIndex]; + if ( orientation == 0 && isf > 0.01 ) { + newisf = 1 - isf; + } else { + newisf = isf; + } + } else { // Can not find board! + newisf = -1; + } + + return newisf; +} + +bool UTReadoutTool::ADCOfflineToDAQ( const UTChannelID aOfflineChan, const UTTell1ID aBoardID, + UTCluster::ADCVector& adcs ) const { + unsigned int waferIndex = 999u; + auto aBoard = findByBoardID( aBoardID ); + + if ( !aBoard->isInside( aOfflineChan, waferIndex ) ) return false; // can not find board! + + if ( aBoard->orientation()[waferIndex] == 0 ) { std::reverse( std::begin( adcs ), std::end( adcs ) ); } + return true; +} + +UTTell1Board* UTReadoutTool::findByBoardID( const UTTell1ID aBoardID ) const { + // find by board id + try { + return m_boardsMap.at( aBoardID ); + } catch ( std::out_of_range& e ) { return nullptr; } +} + +UTTell1Board* UTReadoutTool::findByOrder( const unsigned int aValue ) const { + // find by order + return aValue < m_nBoard ? m_boards[aValue].get() : nullptr; +} + +void UTReadoutTool::printMapping() const { + // dump out the readout mapping + info() << "print mapping for: " << name() << " tool" << endmsg; + info() << " Number of boards " << m_nBoard << endmsg; + for ( const auto& b : m_boards ) info() << *b << endmsg; +} + +/// Add the mapping of source ID to TELL1 board number +unsigned int UTReadoutTool::SourceIDToTELLNumber( unsigned int sourceID ) const { + return ( this->SourceIDToTELLNumberMap().find( sourceID ) )->second; +} + +/// Add the mapping of TELL1 board number to source ID +unsigned int UTReadoutTool::TELLNumberToSourceID( unsigned int TELL ) const { + return ( this->TELLNumberToSourceIDMap().find( TELL ) )->second; +} + +StatusCode UTReadoutTool::validate() const { + // validate the map - every sector must go somewhere ! 
+ const auto& dSectors = m_tracker->sectors(); + return StatusCode{std::none_of( std::begin( dSectors ), std::end( dSectors ), [this]( const DeUTSector* s ) { + UTChannelID chan = s->elementID(); + auto chanPair = offlineChanToDAQ( chan, 0.0 ); + return chanPair.first == UTTell1ID( UTTell1ID::nullBoard, false ); + } )}; +} + +std::vector<LHCb::UTChannelID> UTReadoutTool::sectorIDs( const UTTell1ID board ) const { + + std::vector<LHCb::UTChannelID> sectors; + sectors.reserve( 8 ); + auto theBoard = findByBoardID( board ); + if ( theBoard ) { + sectors.insert( sectors.begin(), theBoard->sectorIDs().begin(), theBoard->sectorIDs().end() ); + } else { + Error( "Failed to find Board", StatusCode::SUCCESS, 100 ).ignore( /* AUTOMATICALLY ADDED FOR gaudi/Gaudi!763 */ ); + } + return sectors; +} + +std::vector<DeUTSector*> UTReadoutTool::sectors( const UTTell1ID board ) const { + + return m_tracker->findSectors( sectorIDs( board ) ); +} + +std::vector<DeUTSector*> UTReadoutTool::sectorsOnServiceBox( const std::string& serviceBox ) const { + + return m_tracker->findSectors( sectorIDsOnServiceBox( serviceBox ) ); +} + +std::vector<LHCb::UTChannelID> UTReadoutTool::sectorIDsOnServiceBox( const std::string& serviceBox ) const { + // loop over all boards + std::vector<LHCb::UTChannelID> sectors; + sectors.reserve( 16 ); + for ( const auto& board : m_boards ) { + const auto& sectorVec = board->sectorIDs(); + const auto& sBoxes = board->serviceBoxes(); + for ( unsigned int iS = 0u; iS < board->nSectors(); ++iS ) { + if ( sBoxes[iS] == serviceBox ) sectors.push_back( sectorVec[iS] ); + } // iS + } // iterB + return sectors; +} + +const std::vector<std::string>& UTReadoutTool::serviceBoxes() const { return m_serviceBoxes; } + +std::string UTReadoutTool::footer() const { + std::string temp = m_footer; + temp.insert( 0, "</catalog>" ); + return temp; +} + +std::string UTReadoutTool::header( const std::string& conString ) const { + // get the header + auto startpos = conString.find( m_startTag ); + auto temp = conString.substr( 0, startpos ); + temp.insert( startpos, "<catalog name=\"ReadoutSectors\">" ); + + // correct the location of the DTD + if ( m_removeCondb ) { + UT::XMLUtils::replace( temp, "conddb:", "" ); + std::string location; + for ( unsigned int i = 0; i < m_depth; ++i ) location += "../"; + auto pos = temp.find( "/DTD/" ); + temp.insert( pos, location ); + UT::XMLUtils::replace( temp, "//", "/" ); + } + + return temp; +} + +std::string UTReadoutTool::strip( const std::string& conString ) const { + auto startpos = conString.find( m_startTag ); + auto endpos = conString.find( m_footer ); + return conString.substr( startpos, endpos - startpos ); +} + +unsigned int UTReadoutTool::region( const UTChannelID aChan ) const { + // convert channel to region + return aChan.station() == 1 ? 
aChan.layer() - 1 : m_nRegionA + aChan.layer() - 1;
+}
+
+// Add the mapping of source ID to TELL1 board number
+const std::map<unsigned int, unsigned int>& UTReadoutTool::SourceIDToTELLNumberMap() const {
+  return UTBoardMapping::UTSourceIDToNumberMap();
+}
+
+// Add the mapping of TELL1 board number to source ID
+const std::map<unsigned int, unsigned int>& UTReadoutTool::TELLNumberToSourceIDMap() const {
+  return UTBoardMapping::UTNumberToSourceIDMap();
+}
+
+StatusCode UTReadoutTool::createTell1Map() {
+  auto rInfo = getDet<Condition>( m_conditionLocation );
+  const auto& layers = rInfo->param<std::vector<std::string>>( "layers" );
+
+  UTBoardMapping::ClearUTMap();
+
+  unsigned int sourceIDBase = 0;
+  for ( unsigned int iReg = 0; iReg < layers.size(); ++iReg ) {
+    std::string tell1Loc = layers[iReg] + "TELL1";
+    if ( rInfo->exists( tell1Loc ) ) {
+      // printf("Extracting TELL1 map from %s\n", tell1Loc.c_str());
+
+      const auto& tell1 = rInfo->param<std::vector<int>>( tell1Loc );
+      for ( unsigned int i = 0; i < tell1.size(); i++ ) {
+        UTBoardMapping::AddUTMapEntry( sourceIDBase + i, tell1.at( i ) );
+      }
+    }
+    sourceIDBase += 64;
+  }
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode UTReadoutTool::createBoards() {
+
+  bool isUT = true;
+  clear();
+
+  // load conditions
+  auto rInfo = getDet<Condition>( m_conditionLocation );
+
+  // vector of layer types
+  // const std::vector<std::string>& layers = rInfo->paramAsStringVect("layers");
+  const auto layers  = rInfo->param<std::vector<std::string>>( "layers" );
+  const auto nBoards = rInfo->paramAsIntVect( "nBoardsPerLayer" );
+
+  m_hybridsPerBoard = rInfo->param<int>( "hybridsPerBoard" );
+  m_nRegionA        = rInfo->param<int>( "nRegionsInUTa" );
+  const auto nStripsPerHybrid = UTDAQ::nStripsPerBoard / m_hybridsPerBoard;
+
+  for ( unsigned int iReg = 0; iReg < layers.size(); ++iReg ) {
+
+    assert( iReg < layers.size() );
+    assert( iReg < nBoards.size() );
+
+    m_firstBoardInRegion.push_back( m_boards.size() );
+    m_nBoard += nBoards[iReg];
+
+    const auto& tMap         = rInfo->param<std::vector<int>>( layers[iReg] );
+    const auto& orientation  = rInfo->param<std::vector<int>>( layers[iReg] + "HybridOrientation" );
+    const auto& serviceBoxes = rInfo->param<std::vector<std::string>>( layers[iReg] + "ServiceBox" );
+
+    unsigned int vecLoc = 0;
+    assert( !tMap.empty() );
+    if ( 0 == iReg ) { m_firstStation = UTChannelID( tMap[0] ).station(); }
+
+    for ( unsigned int iBoard = 0; iBoard < (unsigned int)nBoards[iReg]; ++iBoard ) {
+
+      // make new board
+      const UTTell1ID anID( iReg, iBoard, isUT );
+      auto aBoard = std::make_unique<UTTell1Board>( anID, nStripsPerHybrid, "UT" );
+
+      for ( unsigned iH = 0; iH < m_hybridsPerBoard; ++iH, ++vecLoc ) {
+        assert( vecLoc < tMap.size() );
+        assert( vecLoc < orientation.size() );
+        assert( vecLoc < serviceBoxes.size() );
+        if ( 0 != tMap[vecLoc] ) { // skip strange 0's in conditions vector !!
+          UTChannelID sectorID( (unsigned int)tMap[vecLoc] );
+          aBoard->addSector( sectorID, (unsigned int)orientation[vecLoc], serviceBoxes[vecLoc] );
+
+          // add to the list of service boxes if not already there
+          if ( std::find( m_serviceBoxes.begin(), m_serviceBoxes.end(), serviceBoxes[vecLoc] ) ==
+               m_serviceBoxes.end() ) {
+            m_serviceBoxes.push_back( serviceBoxes[vecLoc] );
+          }
+        }
+      } // iH
+
+      m_boards.push_back( std::move( aBoard ) );
+
+      if ( m_boardsMap.find( anID ) == m_boardsMap.end() ) { m_boardsMap[anID] = m_boards.back().get(); }
+
+    } // boards per region
+  }   // iReg
+
+  // validate the mapping --> all sectors should go somewhere !
+ const auto sc = validate(); + return ( sc.isFailure() ? Error( "Failed to validate mapping", sc ) : sc ); +} -- GitLab From 89972d20979b87d6c793fe26760c6c0575e57f5f Mon Sep 17 00:00:00 2001 From: Louis Henry <louis.henry@cern.ch> Date: Sun, 12 Apr 2020 19:23:33 +0200 Subject: [PATCH 013/111] Corrected mistake --- Pr/PrVeloUT/src/PrVeloUT.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 8ef8c2d6a98..89e2103fe8e 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -672,6 +672,14 @@ namespace LHCb::Pr { const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { + const simd::float_v yTolSlope{m_yTolSlope.value()}; + + const float xState = filteredStates.x<scalar::float_v>( t ).cast(); + const float yState = filteredStates.y<scalar::float_v>( t ).cast(); + const float zState = filteredStates.z<scalar::float_v>( t ).cast(); + const float txState = filteredStates.tx<scalar::float_v>( t ).cast(); + const float tyState = filteredStates.ty<scalar::float_v>( t ).cast(); + std::size_t nSize = 0; std::size_t nLayers = 0; -- GitLab From 233cc15157a89a36c7e14f6f1c082f7514e2730d Mon Sep 17 00:00:00 2001 From: Louis Henry <louis.henry@cern.ch> Date: Mon, 13 Apr 2020 14:58:07 +0200 Subject: [PATCH 014/111] Solving the mess: removing UT/UTDAQ and git-lb-checkout --- .git-lb-checkout | 3 - UT/UTDAQ/CMakeLists.txt | 43 -- UT/UTDAQ/UTDAQ/UTBoardToBankMap.h | 42 -- UT/UTDAQ/UTDAQ/UTClustersOnBoard.h | 93 ---- UT/UTDAQ/UTDAQ/UTDAQFunctor.h | 57 -- UT/UTDAQ/UTDAQ/UTDAQHelper.h | 80 --- UT/UTDAQ/UTDAQ/UTInfo.h | 29 - UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp | 24 - UT/UTDAQ/src/Lib/UTDAQHelper.cpp | 190 ------- .../src/component/RawBankToUTClusterAlg.cpp | 368 ------------- .../component/RawBankToUTLiteClusterAlg.cpp | 271 --------- .../src/component/UTClustersToRawBankAlg.cpp | 290 ---------- UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp | 316 ----------- UT/UTDAQ/src/component/UTDecodingBaseAlg.h | 133 ----- .../src/component/UTDigitsToUTTELL1Data.cpp | 105 ---- UT/UTDAQ/src/component/UTErrorDecoding.cpp | 69 --- UT/UTDAQ/src/component/UTFullDecoding.cpp | 335 ------------ UT/UTDAQ/src/component/UTLayerSelector.cpp | 63 --- UT/UTDAQ/src/component/UTLayerSelector.h | 46 -- UT/UTDAQ/src/component/UTPedestalDecoding.cpp | 156 ------ UT/UTDAQ/src/component/UTRawBankMonitor.cpp | 135 ----- UT/UTDAQ/src/component/UTReadoutTool.cpp | 515 ------------------ 22 files changed, 3363 deletions(-) delete mode 100644 .git-lb-checkout delete mode 100644 UT/UTDAQ/CMakeLists.txt delete mode 100644 UT/UTDAQ/UTDAQ/UTBoardToBankMap.h delete mode 100644 UT/UTDAQ/UTDAQ/UTClustersOnBoard.h delete mode 100644 UT/UTDAQ/UTDAQ/UTDAQFunctor.h delete mode 100644 UT/UTDAQ/UTDAQ/UTDAQHelper.h delete mode 100644 UT/UTDAQ/UTDAQ/UTInfo.h delete mode 100644 UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp delete mode 100644 UT/UTDAQ/src/Lib/UTDAQHelper.cpp delete mode 100644 UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp delete mode 100644 UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp delete mode 100644 UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp delete mode 100644 UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp delete mode 100644 UT/UTDAQ/src/component/UTDecodingBaseAlg.h delete mode 100644 UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp delete mode 100644 UT/UTDAQ/src/component/UTErrorDecoding.cpp delete mode 100644 UT/UTDAQ/src/component/UTFullDecoding.cpp delete 
mode 100644 UT/UTDAQ/src/component/UTLayerSelector.cpp delete mode 100644 UT/UTDAQ/src/component/UTLayerSelector.h delete mode 100644 UT/UTDAQ/src/component/UTPedestalDecoding.cpp delete mode 100644 UT/UTDAQ/src/component/UTRawBankMonitor.cpp delete mode 100644 UT/UTDAQ/src/component/UTReadoutTool.cpp diff --git a/.git-lb-checkout b/.git-lb-checkout deleted file mode 100644 index 2d1309ae369..00000000000 --- a/.git-lb-checkout +++ /dev/null @@ -1,3 +0,0 @@ -[lb-checkout "LHCb.UT/UTDAQ"] - base = 473cc402d1a933e1a80c9e67ba12c862e1ac1f4d - imported = 29025f31a42a3f710edc5a5cd53a9a39106a4523 diff --git a/UT/UTDAQ/CMakeLists.txt b/UT/UTDAQ/CMakeLists.txt deleted file mode 100644 index 488423fc7bd..00000000000 --- a/UT/UTDAQ/CMakeLists.txt +++ /dev/null @@ -1,43 +0,0 @@ -############################################################################### -# (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration # -# # -# This software is distributed under the terms of the GNU General Public # -# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". # -# # -# In applying this licence, CERN does not waive the privileges and immunities # -# granted to it by virtue of its status as an Intergovernmental Organization # -# or submit itself to any jurisdiction. # -############################################################################### -################################################################################ -# Package: UTDAQ -################################################################################ -gaudi_subdir(UTDAQ) - -gaudi_depends_on_subdirs(DAQ/DAQUtils - Det/DetDesc - Det/UTDet - Event/DAQEvent - Event/DigiEvent - Event/RecEvent - GaudiAlg - GaudiKernel - Kernel/LHCbKernel - UT/UTKernel - UT/UTTELL1Event - Si/SiDAQ) - - -find_package(Boost) -find_package(ROOT) -# hide warnings from some external projects -include_directories(SYSTEM ${Boost_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS}) - -gaudi_add_library(UTDAQLib - src/Lib/*.cpp - PUBLIC_HEADERS UTDAQ - INCLUDE_DIRS Boost Event/RawEvent DAQ/DAQUtils Event/DigiEvent Si/SiDAQ UT/UTKernel - LINK_LIBRARIES DetDescLib UTDetLib DAQEventLib RecEvent GaudiAlgLib GaudiKernel LHCbKernel UTKernelLib UTTELL1Event) - -gaudi_add_module(UTDAQ - src/component/*.cpp - LINK_LIBRARIES UTDAQLib) diff --git a/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h b/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h deleted file mode 100644 index fa1898bde60..00000000000 --- a/UT/UTDAQ/UTDAQ/UTBoardToBankMap.h +++ /dev/null @@ -1,42 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#ifndef _UTBoardToBankMap_H -#define _UTBoardToBankMap_H 1 - -/** @class UTBoardToBankMap UTBoardToBankMap.h - * - * Helper class for mapping boards to banks - * basically hides a a map - used in 2 places.... 
- * - * @author A Beiter (based on code by M Needham) - * @date 2018-09-04 - */ - -#include <map> - -#include "Kernel/UTTell1ID.h" - -class UTBoardToBankMap final { - -public: - void addEntry( UTTell1ID aBoard, unsigned int aBank ); - UTTell1ID findBoard( const unsigned int aBank ) const; - - // bank to board - unsigned int findBank( const UTTell1ID aBoard ) const { return m_bankMapping.at( aBoard ); } - - void clear() { m_bankMapping.clear(); } - -private: - std::map<UTTell1ID, unsigned int> m_bankMapping; -}; - -#endif // _UTBoardToBankMap_H diff --git a/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h b/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h deleted file mode 100644 index aea93f74d57..00000000000 --- a/UT/UTDAQ/UTDAQ/UTClustersOnBoard.h +++ /dev/null @@ -1,93 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#ifndef _UTClustersOnBoard_H -#define _UTClustersOnBoard_H - -/** @class UTClustersOnBoard UTClustersOnBoard.h - * - * Helper class for keeping track of clusters... - * - * @author A Beiter (based on code by M Needham) - * @date 2018-09-04 - */ - -#include <algorithm> -#include <array> -#include <utility> -#include <vector> - -#include "Event/UTCluster.h" -#include "Kernel/UTDAQDefinitions.h" - -class UTClustersOnBoard final { - -public: - UTClustersOnBoard( unsigned int nMax ); - - typedef std::pair<LHCb::UTCluster*, unsigned int> boardPair; - typedef std::vector<boardPair> ClusterVector; - - void addCluster( LHCb::UTCluster* aCluster ); - - ClusterVector clusters() const; - - bool inOverflow() const; - - bool inOverflow( const unsigned int ppx ) const; - - void clear(); - -private: - unsigned int m_maxClustersPerPPx; - mutable ClusterVector m_clusCont; - std::array<unsigned int, 4> m_ppxCount; -}; - -inline UTClustersOnBoard::UTClustersOnBoard( unsigned int nMax ) : m_maxClustersPerPPx( nMax ) { - // constructer - m_clusCont.reserve( 200 ); - clear(); -} - -inline UTClustersOnBoard::ClusterVector UTClustersOnBoard::clusters() const { - std::sort( m_clusCont.begin(), m_clusCont.end(), - []( const boardPair& obj1, const boardPair& obj2 ) { return obj1.second < obj2.second; } ); - return m_clusCont; -} - -inline void UTClustersOnBoard::addCluster( LHCb::UTCluster* aCluster ) { - - const unsigned int daqChan = aCluster->tell1Channel(); - const unsigned int ppx = daqChan / UTDAQ::nStripPerPPx; - if ( !inOverflow( ppx ) ) { - m_clusCont.emplace_back( aCluster, daqChan ); - ++m_ppxCount[ppx]; - } else { - // data went into the void - } -} - -inline bool UTClustersOnBoard::inOverflow( const unsigned int ppx ) const { - return m_ppxCount[ppx] >= m_maxClustersPerPPx; -} - -inline bool UTClustersOnBoard::inOverflow() const { - auto iter = std::find_if( m_ppxCount.begin(), m_ppxCount.end(), - [&]( unsigned int ppx ) { return ppx >= m_maxClustersPerPPx; } ); - return iter != m_ppxCount.end(); -} - -inline void UTClustersOnBoard::clear() { - m_clusCont.clear(); - m_ppxCount.fill( 0 ); -} - -#endif // ClustersOnBoard diff --git 
a/UT/UTDAQ/UTDAQ/UTDAQFunctor.h b/UT/UTDAQ/UTDAQ/UTDAQFunctor.h deleted file mode 100644 index a9b222c063b..00000000000 --- a/UT/UTDAQ/UTDAQ/UTDAQFunctor.h +++ /dev/null @@ -1,57 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#ifndef _UTDataFunctor_H_ -#define _UTDataFunctor_H_ - -#include "Kernel/UTTell1ID.h" -#include <numeric> - -namespace UTDAQFunctor { - - template <class TYPE> - struct Less_by_Tell1Board { - - /** compare the channel of one object with the - * channel of another object - * @param obj1 first object - * @param obj2 second object - * @return result of the comparision - */ - bool operator()( TYPE obj1, TYPE obj2 ) const { - return ( !obj1 ) ? true : ( !obj2 ) ? false : obj1->boardID() < obj2->boardID(); - } - }; - - template <class TYPE> - class Tell1Board_eq { - UTTell1ID aBoard; - - public: - explicit Tell1Board_eq( const UTTell1ID& testBoard ) : aBoard( testBoard ) {} - inline bool operator()( TYPE obj ) const { return obj->boardID() == aBoard; } - }; - - template <class TYPE> - struct compByTell1Board_LB { - bool operator()( const TYPE& obj, const UTTell1ID& testID ) const { - return ( ( !obj ) ? false : testID > obj->boardID() ); - } - }; - - template <class TYPE> - struct compByTell1Board_UB { - bool operator()( const UTTell1ID& testID, const TYPE& obj ) const { - return ( ( !obj ) ? false : testID > obj->boardID() ); - } - }; - -} // namespace UTDAQFunctor -#endif // UTDAQFunctor diff --git a/UT/UTDAQ/UTDAQ/UTDAQHelper.h b/UT/UTDAQ/UTDAQ/UTDAQHelper.h deleted file mode 100644 index 4871106fc5b..00000000000 --- a/UT/UTDAQ/UTDAQ/UTDAQHelper.h +++ /dev/null @@ -1,80 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. 
* -\*****************************************************************************/ -#ifndef UTDAQHELPER_H -#define UTDAQHELPER_H 1 - -#include "Kernel/STLExtensions.h" -#include "UTDet/DeUTDetector.h" -#include "UTDet/DeUTSector.h" -#include "UTInfo.h" -#include <array> -#include <boost/container/small_vector.hpp> -#include <optional> - -namespace LHCb { - - // forward declaration - class RawBank; - - namespace UTDAQ { - - /** - * counts number of UT clusters in the given raw banks - * if count exceeds max, it gives up and returns no value - */ - std::optional<unsigned int> nbUTClusters( LHCb::span<const RawBank*> banks, unsigned int maxNbClusters ); - - struct LayerInfo final { - float z; - unsigned int nColsPerSide; - unsigned int nRowsPerSide; - float invHalfSectorYSize; - float invHalfSectorXSize; - float dxDy; - }; - using SectorsInRegionZ = std::array<float, UTInfo::Sectors>; - using SectorsInLayerZ = std::array<SectorsInRegionZ, UTInfo::Regions>; - using SectorsInStationZ = std::array<SectorsInLayerZ, UTInfo::Layers>; - - // -- For the moment, this is assigned here and overwritten in "computeGeometry" in case a geometry - // -- version with a "wrong" sector ordering is used - extern std::array<int, 64> mapQuarterSectorToSectorCentralRegion; - - constexpr static const auto mapSectorToSector = std::array{ - 1, 2, 3, 4, 5, 0, 0, 0, 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 36, 37, 38, 39, 40, - 41, 42, 43, 44, 45, 0, 0, 0, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 0, 0, 0, 76, 77, 78, 79, 80}; - - /** - * fills container of (region, sector) pairs with all sectors concerned by - * a hit at given layer and coordinates and with given x tolerance - */ - void findSectors( unsigned int layer, float x, float y, float xTol, float yTol, const LayerInfo& info, - boost::container::small_vector_base<std::pair<int, int>>& sectors ); - - struct GeomCache { - std::array<LayerInfo, UTInfo::TotalLayers> layers; - std::array<SectorsInStationZ, UTInfo::Stations> sectorsZ; - }; - GeomCache computeGeometry( const DeUTDetector& utDet ); - - [[deprecated( "Please use computeGeometry(const DeUTDetector&) instead" )]] inline void - computeGeometry( const DeUTDetector& utDet, std::array<LayerInfo, UTInfo::TotalLayers>& layers, - std::array<SectorsInStationZ, UTInfo::Stations>& sectorsZ ) { - auto cache = computeGeometry( utDet ); - layers = cache.layers; - sectorsZ = cache.sectorsZ; - } - - } // namespace UTDAQ - -} // namespace LHCb - -#endif // UTDAQHELPER_H diff --git a/UT/UTDAQ/UTDAQ/UTInfo.h b/UT/UTDAQ/UTDAQ/UTInfo.h deleted file mode 100644 index 5983a83a1d4..00000000000 --- a/UT/UTDAQ/UTDAQ/UTInfo.h +++ /dev/null @@ -1,29 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. 
* -\*****************************************************************************/ -#ifndef UTDAQ_UTINFO_H -#define UTDAQ_UTINFO_H 1 - -// Include files - -/** Define some numbers for the UT which are detector specific - * - * - * @author Michel De Cian - * @date 2019-05-31 - */ - -namespace UTInfo { - - enum Numbers { Sectors = 98, Regions = 3, Layers = 2, Stations = 2, TotalLayers = 4 }; - -} - -#endif // UTDAQ_UTINFO_H diff --git a/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp b/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp deleted file mode 100644 index 60e2565976b..00000000000 --- a/UT/UTDAQ/src/Lib/UTBoardToBankMap.cpp +++ /dev/null @@ -1,24 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "UTDAQ/UTBoardToBankMap.h" -#include <algorithm> - -void UTBoardToBankMap::addEntry( UTTell1ID aBoard, unsigned int aBank ) { - // add entry to map - m_bankMapping[aBoard] = aBank; -} - -UTTell1ID UTBoardToBankMap::findBoard( const unsigned int aBank ) const { - // board to bank - auto i = std::find_if( m_bankMapping.begin(), m_bankMapping.end(), - [&]( const std::pair<const UTTell1ID, unsigned int>& p ) { return p.second == aBank; } ); - return i != m_bankMapping.end() ? i->first : UTTell1ID( UTTell1ID::nullBoard ); -} diff --git a/UT/UTDAQ/src/Lib/UTDAQHelper.cpp b/UT/UTDAQ/src/Lib/UTDAQHelper.cpp deleted file mode 100644 index 2f8229bc9bf..00000000000 --- a/UT/UTDAQ/src/Lib/UTDAQHelper.cpp +++ /dev/null @@ -1,190 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. 
* -\*****************************************************************************/ -#include "UTDAQ/UTDAQHelper.h" - -#include "DetDesc/SolidBox.h" -#include "Event/RawBank.h" -#include "SiDAQ/SiHeaderWord.h" - -#include <cmath> -#include <limits> - -namespace LHCb::UTDAQ { - - std::array<int, 64> mapQuarterSectorToSectorCentralRegion{}; - - std::optional<unsigned int> nbUTClusters( span<const RawBank*> banks, unsigned int maxNbClusters ) { - size_t nbClusters = 0; - for ( auto& bank : banks ) { - nbClusters += SiHeaderWord( bank->data()[0] ).nClusters(); - // cut as soon as we have too many - if ( nbClusters > maxNbClusters ) { return {}; } - } - return nbClusters; - } - - void findSectors( unsigned int layer, float x, float y, float xTol, float yTol, const LayerInfo& info, - boost::container::small_vector_base<std::pair<int, int>>& sectors ) { - auto localX = x - info.dxDy * y; - // deal with sector overlaps and geometry imprecision - xTol += 1; // mm - auto localXmin = localX - xTol; - auto localXmax = localX + xTol; - int subcolmin = std::nearbyintf( localXmin * info.invHalfSectorXSize - 0.5 ) + 2 * info.nColsPerSide; - int subcolmax = std::nearbyintf( localXmax * info.invHalfSectorXSize - 0.5 ) + 2 * info.nColsPerSide; - if ( subcolmax < 0 || subcolmin >= (int)( 4 * info.nColsPerSide ) ) { - // out of acceptance, return empty result - return; - } - // on the acceptance limit - if ( subcolmax >= (int)( 4 * info.nColsPerSide ) ) subcolmax = (int)( 4 * info.nColsPerSide ) - 1; - if ( subcolmin < 0 ) subcolmin = 0; - // deal with sector shifts in tilted layers and overlaps in regular ones - yTol += ( layer == 1 || layer == 2 ) ? 8 : 1; // mm - auto localYmin = y - yTol; - auto localYmax = y + yTol; - int subrowmin = std::nearbyintf( localYmin * info.invHalfSectorYSize - 0.5 ) + 2 * info.nRowsPerSide; - int subrowmax = std::nearbyintf( localYmax * info.invHalfSectorYSize - 0.5 ) + 2 * info.nRowsPerSide; - if ( subrowmax < 0 || subrowmin >= (int)( 4 * info.nRowsPerSide ) ) { - // out of acceptance, return empty result - return; - } - // on the acceptance limit - if ( subrowmax >= (int)( 4 * info.nRowsPerSide ) ) subrowmax = (int)( 4 * info.nRowsPerSide ) - 1; - if ( subrowmin < 0 ) subrowmin = 0; - for ( int subcol = subcolmin; subcol <= subcolmax; subcol++ ) { - int region = - subcol < (int)( 2 * info.nColsPerSide - 4 ) ? 1 : subcol >= (int)( 2 * info.nColsPerSide + 4 ) ? 
3 : 2; - if ( region == 1 ) { - for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { - sectors.emplace_back( 1, ( subcol / 2 ) * info.nRowsPerSide * 2 + subrow / 2 + 1 ); - } - } else if ( region == 2 ) { - int subcolInReg = subcol - 2 * info.nColsPerSide + 4; - for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { - if ( subrow < (int)( 2 * info.nRowsPerSide - 4 ) || subrow >= (int)( 2 * info.nRowsPerSide + 4 ) ) { - // no in central Region - sectors.emplace_back( 2, mapSectorToSector[( subcolInReg / 2 ) * 14 + ( subrow / 2 )] ); - } else { - // central region - sectors.emplace_back( - 2, mapQuarterSectorToSectorCentralRegion[subcolInReg * 8 + subrow - 2 * info.nRowsPerSide + 4] ); - } - } - } else { - for ( int subrow = subrowmin; subrow <= subrowmax; subrow++ ) { - sectors.emplace_back( 3, ( subcol / 2 - info.nColsPerSide - 2 ) * info.nRowsPerSide * 2 + subrow / 2 + 1 ); - } - } - } - } - - GeomCache computeGeometry( const DeUTDetector& utDet ) { - GeomCache cache; - for ( int iStation = 0; iStation < UTInfo::Stations; ++iStation ) { - for ( int iLayer = 0; iLayer < UTInfo::Layers; ++iLayer ) { - // get layer - unsigned int layerIndex = 2 * iStation + iLayer; - const DeUTLayer* layer = utDet.layers()[layerIndex]; - // get the z position of the layer and store it - cache.layers[layerIndex].z = layer->sectors()[0]->sensors()[0]->plane().ProjectOntoPlane( {0, 0, 0} ).z(); - // get the layer size and sector sizes. Go through all sectors, do not rely on ordering - float YFirstRow = std::numeric_limits<float>::max(); - float YLastRow = std::numeric_limits<float>::lowest(); - float smallestXLastCol = std::numeric_limits<float>::max(); - float smallestXFirstcol = std::numeric_limits<float>::max(); - float biggestXFirstCol = std::numeric_limits<float>::lowest(); - unsigned int biggestColumn = 0; - unsigned int smallestColumn = 999; - unsigned int topMostRow = 0; - unsigned int bottomMostRow = 999; - // First pass - // deal with col/row numbers, we need a UTSector for that - // Note that rows/cols cannot be relied on the middle columns/rows - // as they are not anymore "rows/cols" but "number of sectors - // in the column/row". So we use only first column and row - for ( const auto& sector : layer->sectors() ) { - const DeUTSector& utSector = dynamic_cast<const DeUTSector&>( *sector ); - auto column = utSector.column(); - auto row = utSector.row(); - smallestColumn = std::min( smallestColumn, column ); - if ( utSector.column() == smallestColumn ) { - topMostRow = std::max( topMostRow, row ); - bottomMostRow = std::min( bottomMostRow, row ); - } - if ( utSector.row() == bottomMostRow ) { biggestColumn = std::max( biggestColumn, column ); } - } - // Second pass - // find x and y values in the corners to deduce the geometry of the layer - for ( const auto& sector : layer->sectors() ) { - // deal with x,y coordinates. 
Remember the corner coordinates - const DeUTSector& utSector = dynamic_cast<const DeUTSector&>( *sector ); - auto column = utSector.column(); - auto row = utSector.row(); - auto center = sector->geometry()->toGlobal( Gaudi::XYZPoint{0, 0, 0} ); - if ( column == smallestColumn ) { - if ( row == bottomMostRow ) { - smallestXFirstcol = center.x(); - YFirstRow = center.y(); - } else if ( row == topMostRow ) { - biggestXFirstCol = center.x(); - YLastRow = center.y(); - } - } - if ( column == biggestColumn && row == bottomMostRow ) { smallestXLastCol = center.x(); } - cache.sectorsZ[iStation][iLayer][sector->elementID().detRegion() - 1][sector->elementID().sector() - 1] = - center.z(); - } - // gather all information into the corresponding LayerInfo object - auto ncols = biggestColumn - smallestColumn + 1; - auto nrows = topMostRow - bottomMostRow + 1; - cache.layers[layerIndex].nColsPerSide = ncols / 2; - cache.layers[layerIndex].nRowsPerSide = nrows / 2; - cache.layers[layerIndex].invHalfSectorYSize = 2 * ( nrows - 1 ) / ( YLastRow - YFirstRow ); - cache.layers[layerIndex].invHalfSectorXSize = 2 * ( ncols - 1 ) / ( smallestXLastCol - smallestXFirstcol ); - cache.layers[layerIndex].dxDy = ( biggestXFirstCol - smallestXFirstcol ) / ( YLastRow - YFirstRow ); - } - } - // Fill the mapQuarterSectorToSectorCentralRegion array according to current geometry using layer 0 of station 0 - auto& info = cache.layers[0]; - const DeUTLayer* layer = utDet.layers()[0]; - for ( const auto& utSector : layer->sectors() ) { - // check for middle region - if ( utSector->elementID().detRegion() == 2 ) { - // get the physical box representing the sector - auto solid = utSector->geometry()->lvolume()->solid(); - const auto& box = dynamic_cast<const SolidBox&>( *solid ); - // compute rows spanned by the sector - // check corners but take 5mm margin to avoid rounding issues - auto corner0 = utSector->toGlobal( Gaudi::XYZPoint( -box.xHalfLength() + 5, -box.yHalfLength() + 5, 0 ) ); - auto corner1 = utSector->toGlobal( Gaudi::XYZPoint( box.xHalfLength() - 5, box.yHalfLength() - 5, 0 ) ); - int subrow0 = std::nearbyintf( corner0.Y() * info.invHalfSectorYSize - 0.5 ); - int subrow1 = std::nearbyintf( corner1.Y() * info.invHalfSectorYSize - 0.5 ); - int subrowmin = std::min( subrow0, subrow1 ); - int subrowmax = std::max( subrow0, subrow1 ); - // check for central part of middle region - if ( subrowmax >= -4 && subrowmin < 4 ) { - int subcol0 = std::nearbyintf( corner0.X() * info.invHalfSectorXSize - 0.5 ); - int subcol1 = std::nearbyintf( corner1.X() * info.invHalfSectorXSize - 0.5 ); - int subcolmin = std::min( subcol0, subcol1 ); - int subcolmax = std::max( subcol0, subcol1 ); - for ( auto subrow = subrowmin; subrow <= subrowmax; subrow++ ) { - for ( auto subcol = subcolmin; subcol <= subcolmax; subcol++ ) { - auto index = ( subcol + 4 ) * 8 + subrow + 4; - mapQuarterSectorToSectorCentralRegion[index] = utSector->id(); - } - } - } - } - } - return cache; - } -} // namespace LHCb::UTDAQ diff --git a/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp b/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp deleted file mode 100644 index 50e519be18a..00000000000 --- a/UT/UTDAQ/src/component/RawBankToUTClusterAlg.cpp +++ /dev/null @@ -1,368 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied 
verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTCluster.h" -#include "Event/UTLiteCluster.h" -#include "Event/UTSummary.h" -#include "GaudiAlg/Transformer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/STLExtensions.h" -#include "Kernel/UTChannelID.h" -#include "Kernel/UTClusterWord.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTDataFunctor.h" -#include "Kernel/UTDecoder.h" -#include "Kernel/UTFun.h" -#include "Kernel/UTRawBankMap.h" -#include "Kernel/UTStripRepresentation.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" -#include "LHCbMath/LHCbMath.h" -#include "SiDAQ/SiADCWord.h" -#include "UTDecodingBaseAlg.h" -#include <algorithm> -#include <boost/container/small_vector.hpp> -#include <string> -#include <vector> - -/** @class RawBankToUTClusterAlg RawBankToUTClusterAlg.h - * - * Algorithm to create UTClusters from RawEvent object - * - * @author A Beiter (based on code by M. Needham) - * @date 2018-09-04 - */ - -typedef Gaudi::Functional::MultiTransformer<std::tuple<LHCb::UTClusters, LHCb::UTSummary>( const LHCb::ODIN&, - const LHCb::RawEvent& ), - Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> - RawBankToUTClusterAlgBaseClass; - -class RawBankToUTClusterAlg : public RawBankToUTClusterAlgBaseClass { - -public: - /// Standard constructor - RawBankToUTClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ); - - /// initialize - StatusCode initialize() override; - /// finalize - StatusCode finalize() override; - /// Algorithm execution - std::tuple<LHCb::UTClusters, LHCb::UTSummary> operator()( const LHCb::ODIN&, const LHCb::RawEvent& ) const override; - -private: - LHCb::UTSummary decodeBanks( const LHCb::RawEvent& rawEvt, LHCb::UTClusters& clusCont ) const; - - void createCluster( const UTClusterWord& aWord, const UTTell1Board* aBoard, LHCb::span<const SiADCWord> adcValues, - const UTDAQ::version& bankVersion, LHCb::UTClusters& clusCont ) const; - - double mean( LHCb::span<const SiADCWord> adcValues ) const; - - LHCb::UTLiteCluster word2LiteCluster( const UTClusterWord aWord, const LHCb::UTChannelID chan, - const unsigned int fracStrip ) const; - - LHCb::UTSummary createSummaryBlock( const LHCb::RawEvent& rawEvt, const unsigned int& nclus, const unsigned int& pcn, - const bool pcnsync, const unsigned int bytes, - const std::vector<unsigned int>& bankList, - const std::vector<unsigned int>& missing, - const LHCb::UTSummary::RecoveredInfo& recoveredBanks ) const; - - double stripFraction( const double interStripPos ) const; - - Gaudi::Property<std::string> m_pedestalBankString{this, "PedestalBank", "UTPedestal"}; - LHCb::RawBank::BankType m_pedestalType; - Gaudi::Property<std::string> m_fullBankString{this, "FullBank", "UTFull"}; - LHCb::RawBank::BankType m_fullType; -}; - -LHCb::UTLiteCluster RawBankToUTClusterAlg::word2LiteCluster( const UTClusterWord aWord, const LHCb::UTChannelID chan, - const unsigned int fracStrip ) const { - return LHCb::UTLiteCluster( fracStrip, aWord.pseudoSizeBits(), aWord.hasHighThreshold(), chan, - ( detType() == "UT" ) ); -} - -using namespace LHCb; - -//----------------------------------------------------------------------------- -// Implementation 
file for class : RawBufferToUTClusterAlg -// -// 2004-01-07 : Matthew Needham -// 2016-10-07 : Sebastien Ponce -//----------------------------------------------------------------------------- - -DECLARE_COMPONENT( RawBankToUTClusterAlg ) - -RawBankToUTClusterAlg::RawBankToUTClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ) - : MultiTransformer( - name, pSvcLocator, - {KeyValue{"OdinLocation", LHCb::ODINLocation::Default}, - KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( LHCb::RawEventLocation::Tracker, - LHCb::RawEventLocation::Other, - LHCb::RawEventLocation::Default )}}, - {KeyValue( "clusterLocation", UTClusterLocation::UTClusters ), - KeyValue( "summaryLocation", UTSummaryLocation::UTSummary )} ) { - // Standard constructor, initializes variables - setProperty( "BankType", "UT" ).ignore(); -} - -StatusCode RawBankToUTClusterAlg::initialize() { - // Initialization - StatusCode sc = MultiTransformer::initialize(); - if ( sc.isFailure() ) return Error( "Failed to initialize", sc ); - // pedestal bank - m_pedestalType = UTRawBankMap::stringToType( m_pedestalBankString ); - if ( m_bankType == LHCb::RawBank::Velo ) { - fatal() << "Wrong detector type: only UT !" << endmsg; - return StatusCode::FAILURE; - } - // full bank - m_fullType = UTRawBankMap::stringToType( m_fullBankString ); - if ( m_fullType == LHCb::RawBank::Velo ) { - fatal() << "Wrong detector type: only UT!" << endmsg; - return StatusCode::FAILURE; - } - // Spill - computeSpillOffset( inputLocation<1>() ); - // return - return StatusCode::SUCCESS; -} - -std::tuple<LHCb::UTClusters, LHCb::UTSummary> RawBankToUTClusterAlg::operator()( const LHCb::ODIN& odin, - const LHCb::RawEvent& rawEvt ) const { - // make a new digits container - UTClusters clusCont; - if ( !validSpill( odin ) ) { - warning() << "Not a valid spill" << endmsg; - } else { - clusCont.reserve( 2000 ); - // decode banks - LHCb::UTSummary summary = decodeBanks( rawEvt, clusCont ); - // sort - std::sort( clusCont.begin(), clusCont.end(), UTDataFunctor::Less_by_Channel<const UTCluster*>() ); - return std::make_tuple( std::move( clusCont ), std::move( summary ) ); - } - return std::make_tuple( std::move( clusCont ), LHCb::UTSummary() ); -} - -LHCb::UTSummary RawBankToUTClusterAlg::decodeBanks( const RawEvent& rawEvt, LHCb::UTClusters& clusCont ) const { - std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> errorBanks = nullptr; - bool errorBanksFailed = false; - - // create Clusters from this type - bool pcnSync = true; - std::vector<unsigned int> bankList; - UTSummary::RecoveredInfo recoveredBanks; - - const auto& tBanks = rawEvt.banks( bankType() ); - - std::vector<unsigned int> missing = missingInAction( tBanks ); - if ( !missing.empty() ) { - counter( "lost Banks" ) += missing.size(); - if ( tBanks.empty() ) { - ++counter( "no banks found" ); - return createSummaryBlock( rawEvt, 0, UTDAQ::inValidPcn, false, 0, bankList, missing, recoveredBanks ); - } - } - - // vote on the pcns - const unsigned int pcn = pcnVote( tBanks ); - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "PCN was voted to be " << pcn << endmsg; - if ( pcn == UTDAQ::inValidPcn && !m_skipErrors ) { - counter( "skipped Banks" ) += tBanks.size(); - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "PCN vote failed with " << tBanks.size() << endmsg; - warning() << "PCN vote failed" << endmsg; - return UTSummary(); - } - - // loop over the banks of this type.. 
- for ( const auto& bank : tBanks ) { - - ++counter( "# banks found" ); - // get the board and data - UTTell1ID tell1ID = UTTell1ID( (unsigned int)bank->sourceID(), detType() == "UT" ); - const UTTell1Board* aBoard = readoutTool()->findByBoardID( tell1ID ); - - if ( !aBoard && !m_skipErrors ) { - // not a valid UT - Warning( "Invalid source ID --> skip bank" + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ) - .ignore(); - ++counter( "skipped Banks" ); - continue; - } - - ++counter( "# valid banks" ); - - if ( bank->magic() != RawBank::MagicPattern ) { - Warning( "wrong magic pattern " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); - ++counter( "skipped banks" ); - continue; - } - - // make a decoder - UTDecoder decoder( bank->data() ); - // get verion of the bank - const UTDAQ::version bankVersion = - forceVersion() ? UTDAQ::version( m_forcedVersion.value() ) : UTDAQ::version( bank->version() ); - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "decoding bank version " << bankVersion << endmsg; - - bool recover = false; - if ( decoder.hasError() == true && !m_skipErrors ) { - - if ( !recoverMode() ) { - bankList.push_back( bank->sourceID() ); - Warning( "bank has errors, skip sourceID " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ) - .ignore(); - ++counter( "skipped Banks" ); - continue; - } else { - // flag that need to recover.... - recover = true; - ++counter( "recovered banks" + std::to_string( bank->sourceID() ) ); - } - } - - UTTELL1BoardErrorBank* errorBank = nullptr; - if ( recover ) { - if ( !errorBanks.get() && !errorBanksFailed ) { - try { - errorBanks = decodeErrors( rawEvt ); - } catch ( GaudiException& e ) { - errorBanksFailed = true; - warning() << e.what() << endmsg; - } - } - if ( errorBanks.get() ) { errorBank = errorBanks->object( bank->sourceID() ); } - // check what fraction we can recover - if ( errorBank != 0 ) recoveredBanks[bank->sourceID()] += errorBank->fractionOK( pcn ); - } - - if ( errorBank == 0 ) { - const unsigned bankpcn = decoder.header().pcn(); - if ( pcn != bankpcn && !m_skipErrors ) { - bankList.push_back( bank->sourceID() ); - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Expected " << pcn << " found " << bankpcn << endmsg; - Warning( "PCNs out of sync, sourceID " + std::to_string( bank->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); - ++counter( "skipped Banks" ); - continue; - } - } - - // check the integrity of the bank --> always skip if not ok - if ( !m_skipErrors && !checkDataIntegrity( decoder, aBoard, bank->size(), bankVersion ) ) { - bankList.push_back( bank->sourceID() ); - continue; - } - - // iterator over the data.... 
- for ( const auto& iterDecoder : decoder.posAdcRange() ) { - if ( !recover ) { - createCluster( iterDecoder.first, aBoard, iterDecoder.second, bankVersion, clusCont ); - } else { - // check that this cluster is ok to be recovered - if ( errorBank != 0 && canBeRecovered( errorBank, iterDecoder.first, pcn ) ) { - createCluster( iterDecoder.first, aBoard, iterDecoder.second, bankVersion, clusCont ); - } - } - } // iterDecoder - } // bank - - const unsigned int bsize = byteSize( tBanks ); - return createSummaryBlock( rawEvt, clusCont.size(), pcn, pcnSync, bsize, bankList, missing, recoveredBanks ); -} - -void RawBankToUTClusterAlg::createCluster( const UTClusterWord& aWord, const UTTell1Board* aBoard, - LHCb::span<const SiADCWord> adcValues, const UTDAQ::version& bankVersion, - UTClusters& clusCont ) const { - // stream the neighbour sum - auto iterADC = adcValues.begin(); - char neighbour = *iterADC; - ++iterADC; - - unsigned int fracStrip = aWord.fracStripBits(); - - // estimate the offset - double stripNum = mean( adcValues ); - double interStripPos = stripNum - floor( stripNum ); - - // If fracStrip equals zero and the interStripPos equals one, the stripNum - // must be incremented. Note that since the rounding can be different from - // rounding on the Tell1, the interStripPos can be 0.75. Trust me, this is - // correct.-- JvT - if ( fracStrip == 0u && interStripPos > 0.5 ) stripNum += 1; - unsigned int offset = (unsigned int)stripNum; - - UTCluster::ADCVector adcs; - adcs.reserve( adcValues.size() ); - for ( unsigned int i = 1; i < adcValues.size(); ++i ) { - adcs.emplace_back( i - 1 - offset, (int)adcValues[i].adc() ); - } // iDigit - - UTTell1Board::chanPair nearestChan = - aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ); - - aBoard->ADCToOffline( aWord.channelID(), adcs, bankVersion, offset, fracStrip ); - - // make cluster +set things - auto newCluster = std::make_unique<UTCluster>( this->word2LiteCluster( aWord, nearestChan.first, nearestChan.second ), - adcs, neighbour, aBoard->boardID().id(), aWord.channelID(), spill() ); - - if ( !clusCont.object( nearestChan.first ) ) { - clusCont.insert( newCluster.release(), nearestChan.first ); - } else { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Cluster already exists not inserted: " << aBoard->boardID() << " " << aWord.channelID() << endmsg; - Warning( "Failed to insert cluster --> exists in container", StatusCode::SUCCESS, 100 ).ignore(); - } -} - -LHCb::UTSummary RawBankToUTClusterAlg::createSummaryBlock( - const RawEvent& rawEvt, const unsigned int& nclus, const unsigned int& pcn, const bool pcnsync, - const unsigned int bytes, const std::vector<unsigned int>& bankList, const std::vector<unsigned int>& missing, - const LHCb::UTSummary::RecoveredInfo& recoveredBanks ) const { - unsigned totalBytes = bytes; - // get the error banks - const auto& errorBanks = rawEvt.banks( LHCb::RawBank::BankType( m_errorType ) ); - totalBytes += byteSize( errorBanks ); - // get the pedestal banks - const auto& pBanks = rawEvt.banks( LHCb::RawBank::BankType( m_pedestalType ) ); - totalBytes += byteSize( pBanks ); - // get the full banks - const auto& fullBanks = rawEvt.banks( LHCb::RawBank::BankType( m_fullType ) ); - totalBytes += byteSize( fullBanks ); - return UTSummary( nclus, pcn, pcnsync, totalBytes, fullBanks.size(), pBanks.size(), errorBanks.size(), bankList, - missing, recoveredBanks ); -} - -double RawBankToUTClusterAlg::mean( LHCb::span<const SiADCWord> adcValues ) const { - 
double sum = 0; - double totCharge = 0; - // note the first is the neighbour sum.. - for ( unsigned int i = 1; i < adcValues.size(); ++i ) { - sum += adcValues[i].adc() * ( i - 1 ); - totCharge += adcValues[i].adc(); - } // i - return ( sum / totCharge ); -} - -StatusCode RawBankToUTClusterAlg::finalize() { - - const double failed = counter( "skipped Banks" ).flag(); - const double processed = counter( "# valid banks" ).flag(); - - double eff = 0.0; - if ( !LHCb::Math::Equal_To<double>()( processed, 0.0 ) ) { eff = 1.0 - ( failed / processed ); } - info() << "Successfully processed " << 100 * eff << " %" << endmsg; - - return MultiTransformer::finalize(); -} diff --git a/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp b/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp deleted file mode 100644 index c82945e63b9..00000000000 --- a/UT/UTDAQ/src/component/RawBankToUTLiteClusterAlg.cpp +++ /dev/null @@ -1,271 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTLiteCluster.h" -#include "GaudiAlg/Transformer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTClusterWord.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTDataFunctor.h" -#include "Kernel/UTDecoder.h" -#include "Kernel/UTStripRepresentation.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" -#include "LHCbMath/LHCbMath.h" -#include "UTDecodingBaseAlg.h" -#include <algorithm> -#include <string> -#include <utility> -#include <vector> - -/** @class RawBankToUTLiteClusterAlg RawBankToUTLiteClusterAlg.h - * - * Algorithm to create UTClusters from RawEvent object - * - * @author A. Beiter based on code by: - * @author M. Needham - * @author S. 
Ponce - */ - -typedef Gaudi::Functional::Transformer<LHCb::UTLiteCluster::UTLiteClusters( const LHCb::ODIN&, const LHCb::RawEvent& ), - Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> - RawBankToUTLiteClusterAlgBaseClass; - -class RawBankToUTLiteClusterAlg final : public RawBankToUTLiteClusterAlgBaseClass { - -public: - /// Standard constructor - RawBankToUTLiteClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ); - - StatusCode initialize() override; ///< Algorithm initialization - StatusCode finalize() override; ///< Algorithm finalization - LHCb::UTLiteCluster::UTLiteClusters operator()( const LHCb::ODIN&, const LHCb::RawEvent& ) const override; - -private: - // create Clusters from this type - StatusCode decodeBanks( const LHCb::RawEvent& rawEvt, LHCb::UTLiteCluster::UTLiteClusters& fCont ) const; - - // add a single cluster to the output container - void createCluster( const UTTell1Board* aBoard, const UTDAQ::version& bankVersion, const UTClusterWord& aWord, - LHCb::UTLiteCluster::UTLiteClusters& fCont, const bool isUT ) const; -}; - -void RawBankToUTLiteClusterAlg::createCluster( const UTTell1Board* aBoard, const UTDAQ::version& bankVersion, - const UTClusterWord& aWord, LHCb::UTLiteCluster::UTLiteClusters& fCont, - const bool isUT ) const { - - const unsigned int fracStrip = aWord.fracStripBits(); - const UTTell1Board::chanPair chan = - aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ); - fCont.emplace_back( chan.second, aWord.pseudoSizeBits(), aWord.hasHighThreshold(), chan.first, isUT ); -} - -using namespace LHCb; - -namespace { - struct Less_by_Channel { - - /** compare the channel of one object with the - * channel of another object - * @param obj1 first object - * @param obj2 second object - * @return result of the comparision - */ - // - inline bool operator()( LHCb::UTLiteCluster obj1, LHCb::UTLiteCluster obj2 ) const { - return obj1.channelID() < obj2.channelID(); - } - }; - struct Equal_Channel { - - /** compare the channel of one object with the - * channel of another object - * @param obj1 first object - * @param obj2 second object - * @return result of the comparision - */ - // - inline bool operator()( LHCb::UTLiteCluster obj1, LHCb::UTLiteCluster obj2 ) const { - return obj1.channelID() == obj2.channelID(); - } - }; - -} // namespace - -//----------------------------------------------------------------------------- -// Implementation file for class : RawBufferToUTLiteClusterAlg -// -// 2004-01-07 : Matthew Needham -// 2016-10-07 : Sebastien Ponce -//----------------------------------------------------------------------------- - -DECLARE_COMPONENT( RawBankToUTLiteClusterAlg ) - -RawBankToUTLiteClusterAlg::RawBankToUTLiteClusterAlg( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer( name, pSvcLocator, - {KeyValue{"OdinLocation", LHCb::ODINLocation::Default}, - KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( - LHCb::RawEventLocation::Tracker, LHCb::RawEventLocation::Other, - LHCb::RawEventLocation::Default )}}, - KeyValue( "clusterLocation", UTLiteClusterLocation::UTClusters ) ) { - setProperty( "BankType", detType() ).ignore(); -} - -StatusCode RawBankToUTLiteClusterAlg::initialize() { - return Transformer::initialize().andThen( [&] { computeSpillOffset( inputLocation<LHCb::RawEvent>() ); } ); -} - -LHCb::UTLiteCluster::UTLiteClusters RawBankToUTLiteClusterAlg::operator()( const LHCb::ODIN& odin, - const LHCb::RawEvent& rawEvt ) const { - 
UTLiteCluster::UTLiteClusters fCont; - if ( !validSpill( odin ) ) { - warning() << "Not a valid spill" << endmsg; - } else { - fCont.reserve( 5000 ); - // decode banks - decodeBanks( rawEvt, fCont ).orThrow( "Problems in decoding event", "RawBankToUTLiteClusterAlg" ).ignore(); - } - return fCont; -} - -StatusCode RawBankToUTLiteClusterAlg::decodeBanks( const RawEvent& rawEvt, - UTLiteCluster::UTLiteClusters& fCont ) const { - std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> errorBanks = nullptr; - bool errorBanksFailed = false; - - const LHCb::span<const RawBank*> tBanks = rawEvt.banks( bankType() ); - std::vector<unsigned int> missing = missingInAction( tBanks ); - if ( !missing.empty() ) { - counter( "lost Banks" ) += missing.size(); - if ( tBanks.empty() ) { - ++counter( "no banks found" ); - return StatusCode::SUCCESS; - } - } - - const unsigned int pcn = pcnVote( tBanks ); - if ( msgLevel( MSG::DEBUG ) ) debug() << "PCN was voted to be " << pcn << endmsg; - if ( pcn == UTDAQ::inValidPcn ) { - counter( "skipped Banks" ) += tBanks.size(); - return Warning( "PCN vote failed", StatusCode::SUCCESS, 2 ); - } - - const bool isUT = ( detType() == "UT" ); - - // loop over the banks of this type.. - - for ( auto iterBank = tBanks.begin(); iterBank != tBanks.end(); ++iterBank ) { - - ++counter( "# valid banks" ); - - // get the board and data - UTTell1Board* aBoard = readoutTool()->findByBoardID( UTTell1ID( ( *iterBank )->sourceID() ) ); - if ( !aBoard && !m_skipErrors ) { - Warning( "Invalid source ID --> skip bank" + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ) - .ignore(); - ++counter( "skipped Banks" ); - continue; - } - - ++counter( "# valid source ID" ); - - if ( ( *iterBank )->magic() != RawBank::MagicPattern ) { - Warning( "wrong magic pattern " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ).ignore(); - counter( "skipped Banks" ) += tBanks.size(); - continue; - } - - // make a SmartBank of shorts... - UTDecoder decoder( ( *iterBank )->data() ); - - bool recover = false; - if ( decoder.hasError() && !m_skipErrors ) { - - if ( !recoverMode() ) { - Warning( "bank has errors, skip sourceID " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, - 2 ) - .ignore(); - ++counter( "skipped Banks" ); - continue; - } else { - // flag that need to recover.... - recover = true; - ++counter( "recovered banks" + std::to_string( ( *iterBank )->sourceID() ) ); - } - } - - // ok this is a bit ugly..... - UTTELL1BoardErrorBank* errorBank = nullptr; - if ( recover ) { - if ( !errorBanks.get() && !errorBanksFailed ) { - try { - errorBanks = decodeErrors( rawEvt ); - } catch ( GaudiException& e ) { - errorBanksFailed = true; - warning() << e.what() << endmsg; - } - } - if ( errorBanks.get() ) { errorBank = errorBanks->object( ( *iterBank )->sourceID() ); } - } - - if ( errorBank ) { - const unsigned bankpcn = decoder.header().pcn(); - if ( pcn != bankpcn && !m_skipErrors ) { - debug() << "Expected " << pcn << " found " << bankpcn << endmsg; - if ( msgLevel( MSG::DEBUG ) ) - Warning( "PCNs out of sync sourceID " + std::to_string( ( *iterBank )->sourceID() ), StatusCode::SUCCESS, 2 ) - .ignore(); - ++counter( "skipped Banks" ); - continue; - } - } // errorbank == 0 - - const UTDAQ::version bankVersion = - UTDAQ::version( forceVersion() ? 
m_forcedVersion.value() : ( *iterBank )->version() ); - - // check the integrity of the bank --> always skip if not ok - if ( !m_skipErrors && !checkDataIntegrity( decoder, aBoard, ( *iterBank )->size(), bankVersion ) ) continue; - - // read in the first half of the bank - for ( auto iterDecoder : decoder.posRange() ) { - - if ( !recover ) { - createCluster( aBoard, bankVersion, iterDecoder, fCont, isUT ); - } else { - if ( errorBank && canBeRecovered( errorBank, iterDecoder, pcn ) ) { - createCluster( aBoard, bankVersion, iterDecoder, fCont, isUT ); - } // errorbank - } // recover == false - - } // decoder - - } // iterBank - - // sort and remove any duplicates - std::stable_sort( fCont.begin(), fCont.end(), Less_by_Channel() ); - auto iter = std::unique( fCont.begin(), fCont.end(), Equal_Channel() ); - if ( iter != fCont.end() ) { - fCont.resize( iter - fCont.begin() ); - return Warning( "Removed duplicate clusters in the decoding", StatusCode::SUCCESS, 100 ); - } - return StatusCode::SUCCESS; -} - -StatusCode RawBankToUTLiteClusterAlg::finalize() { - - const double failed = counter( "skipped Banks" ).flag(); - const double processed = counter( "# valid banks" ).flag(); - double eff = ( !LHCb::Math::Equal_To<double>()( processed, 0.0 ) ? 1.0 - ( failed / processed ) : 0.0 ); - info() << "Successfully processed " << 100 * eff << " %" << endmsg; - - return Transformer::finalize(); -} diff --git a/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp b/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp deleted file mode 100644 index ba0b7370b46..00000000000 --- a/UT/UTDAQ/src/component/UTClustersToRawBankAlg.cpp +++ /dev/null @@ -1,290 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. 
* -\*****************************************************************************/ -#include "Event/BankWriter.h" -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTCluster.h" -#include "Event/UTSummary.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTChannelID.h" -#include "Kernel/UTClusterWord.h" -#include "Kernel/UTCommonBase.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTRawBankMap.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" -#include "SiDAQ/SiADCWord.h" -#include "SiDAQ/SiHeaderWord.h" -#include "UTDAQ/UTBoardToBankMap.h" -#include "UTDAQ/UTClustersOnBoard.h" -#include "UTDAQ/UTDAQFunctor.h" -#include <algorithm> -#include <map> -#include <string> -#include <vector> - -/** @class UTClustersToRawBankAlg UTClustersToRawBankAlg.h - * - * Algorithm to fill the Raw buffer with UT information from UTClusters - * - * @author A Beiter (based on code by M Needham) - * @date 2018-09-04 - */ - -template <class IReadoutTool = IUTReadoutTool> -class UTClustersToRawBankAlgT : public UT::CommonBase<GaudiAlgorithm, IReadoutTool> { - -public: - /// Standard constructor - UTClustersToRawBankAlgT( const std::string& name, ISvcLocator* pSvcLocator ); - - StatusCode initialize() override; ///< Algorithm initialization - StatusCode execute() override; ///< Algorithm execution - StatusCode finalize() override; ///< Algorithm finalization - -private: - /// convert string to enum - StatusCode configureBankType(); - - /// initialize event - void initEvent(); - - /// fill the banks - StatusCode groupByBoard( const LHCb::UTClusters* clusCont ); - - unsigned int bankSize( UTClustersOnBoard::ClusterVector& clusCont ) const; - - unsigned int getPCN() const; - - // create a new bank - void writeBank( const UTClustersOnBoard::ClusterVector& clusCont, LHCb::BankWriter& bWriter, - const UTTell1ID aBoardID ); - - Gaudi::Property<int> m_maxClustersPerPPx{this, "maxClusters", 512}; - - DataObjectReadHandle<LHCb::RawEvent> m_raw{this, "rawLocation", LHCb::RawEventLocation::Default}; - DataObjectReadHandle<LHCb::UTClusters> m_clusters{this, "clusterLocation", LHCb::UTClusterLocation::UTClusters}; - DataObjectReadHandle<LHCb::UTSummary> m_summary{this, "summaryLocation", LHCb::UTSummaryLocation::UTSummary}; - - LHCb::RawBank::BankType m_bankType; - - UTBoardToBankMap m_bankMapping; - - std::map<UTTell1ID, UTClustersOnBoard*> m_clusMap; - std::vector<UTClustersOnBoard> m_clusVectors; - - unsigned int m_overflow = 0; - unsigned int m_maxClusterSize = 4; - unsigned int m_pcn = 128; -}; - -// Declaration of the backward compatible UTClustersToRawBankAlg class (not templated for the original UT case) -using UTClustersToRawBankAlg = UTClustersToRawBankAlgT<>; - -using namespace LHCb; - -//----------------------------------------------------------------------------- -// Implementation file for class : UTClusterToRawBankAlg -// -// 2004-08-01 : M. Needham -//----------------------------------------------------------------------------- - -DECLARE_COMPONENT_WITH_ID( UTClustersToRawBankAlgT<IUTReadoutTool>, "UTClustersToRawBankAlg" ) - -template <class IReadoutTool> -UTClustersToRawBankAlgT<IReadoutTool>::UTClustersToRawBankAlgT( const std::string& name, ISvcLocator* pSvcLocator ) - : UT::CommonBase<GaudiAlgorithm, IReadoutTool>( name, pSvcLocator ) { - this->setForcedInit(); -} - -// Finalisation. 
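// Aside on the packing arithmetic used further down in this file: bankSize() counts one
// 16-bit header, one 16-bit word per cluster, one neighbour-sum byte per cluster and one
// byte per ADC value, then rounds up to whole 32-bit words. The helper below is only a
// minimal illustrative restatement of that rounding; it is not part of the algorithm and
// the name illustrativeBankWords is invented for this sketch.
namespace {
  constexpr unsigned int illustrativeBankWords( unsigned int nClus, unsigned int nADC ) {
    const unsigned int nByte = sizeof( short )            // header word
                               + nClus * sizeof( short )  // one cluster word each
                               + nClus * sizeof( char )   // neighbour sums
                               + nADC * sizeof( char );   // ADC values
    // round up to a whole number of 32-bit words (integer equivalent of the ceil in bankSize())
    return ( nByte + sizeof( unsigned int ) - 1 ) / sizeof( unsigned int );
  }
} // namespace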
-template <class IReadoutTool> -StatusCode UTClustersToRawBankAlgT<IReadoutTool>::finalize() { - m_clusVectors.clear(); - m_bankMapping.clear(); - return StatusCode::SUCCESS; -} - -// Initialisation. -template <class IReadoutTool> -StatusCode UTClustersToRawBankAlgT<IReadoutTool>::initialize() { - - // initialize - StatusCode sc = UT::CommonBase<GaudiAlgorithm, IReadoutTool>::initialize(); - if ( sc.isFailure() ) { return this->Error( "Failed to initialize", sc ); } - - // banktype - if ( configureBankType().isFailure() ) { - this->fatal() << "unknown bank type" << endmsg; - return StatusCode::FAILURE; - } - - // init the map - unsigned int nBoard = this->readoutTool()->nBoard(); - m_clusVectors.reserve( nBoard ); - for ( unsigned int iVal = 0; iVal < nBoard; ++iVal ) { - - UTTell1Board* aBoard = this->readoutTool()->findByOrder( iVal ); - m_bankMapping.addEntry( aBoard->boardID(), iVal ); - - m_clusVectors.emplace_back( m_maxClustersPerPPx ); - m_clusMap[aBoard->boardID()] = &m_clusVectors.back(); - } // iVal - - return StatusCode::SUCCESS; -} - -template <class IReadoutTool> -StatusCode UTClustersToRawBankAlgT<IReadoutTool>::configureBankType() { - - // configure the type of bank to write (UT) - m_bankType = UTRawBankMap::stringToType( this->detType() ); - return m_bankType != RawBank::Velo ? StatusCode::SUCCESS : StatusCode::FAILURE; -} - -template <class IReadoutTool> -StatusCode UTClustersToRawBankAlgT<IReadoutTool>::execute() { - - // Retrieve the RawBank - RawEvent* tEvent = m_raw.get(); - - // initialize this event - initEvent(); - - // get the data.... - const UTClusters* clusCont = m_clusters.get(); - - // group the data by banks.. - StatusCode sc = groupByBoard( clusCont ); - if ( sc.isFailure() ) { return this->Error( "Problems linking offline to DAQ channel", sc ); } - - // convert to a bank and add to buffer - const unsigned int nBoard = this->readoutTool()->nBoard(); - for ( unsigned int iBoard = 0u; iBoard < nBoard; ++iBoard ) { - // get the data .... - const UTTell1ID aBoardID = m_bankMapping.findBoard( iBoard ); - UTClustersOnBoard::ClusterVector boardClusCont = m_clusVectors[iBoard].clusters(); - - if ( m_clusVectors[iBoard].inOverflow() ) ++m_overflow; - - // make the a bankwriter.... 
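// What follows serializes this board's clusters into a raw bank (see bankSize() and
// writeBank() below for the details):
//   * a SiHeaderWord holding the cluster count and the PCN,
//   * one UTClusterWord per cluster (DAQ channel, inter-strip fraction, cluster size,
//     high-threshold flag), with a 16-bit padding word when the cluster count is odd,
//   * then, per cluster, a neighbour-sum byte clamped to [-16, 15] followed by up to
//     m_maxClusterSize SiADCWords, the last of which carries the "last" flag.
// bankSize() sizes the BankWriter in whole 32-bit words; the finished bank is adopted
// into the RawEvent with type m_bankType, version UTDAQ::v4, keyed by the TELL1 board ID.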
- BankWriter bWriter( bankSize( boardClusCont ) ); - - writeBank( boardClusCont, bWriter, aBoardID ); - - RawBank* tBank = tEvent->createBank( UTDAQ::rawInt( aBoardID.id() ), m_bankType, UTDAQ::v4, bWriter.byteSize(), - &( bWriter.dataBank()[0] ) ); - - tEvent->adoptBank( tBank, true ); - - } // iBoard - - // flag overflow - if ( m_overflow > 0 ) { return this->Warning( "RAWBank overflow some banks truncated", StatusCode::SUCCESS ); } - - return StatusCode::SUCCESS; -} - -template <class IReadoutTool> -void UTClustersToRawBankAlgT<IReadoutTool>::initEvent() { - - // intialize temp bank structure each event - std::for_each( m_clusVectors.begin(), m_clusVectors.end(), []( UTClustersOnBoard& i ) { i.clear(); } ); - m_overflow = 0; - - // locate and set the pcn from the summary block if it exists - // in the case there is no summary block write 128 - const LHCb::UTSummary* sum = m_summary.getIfExists(); - if ( sum ) m_pcn = sum->pcn(); -} - -template <class IReadoutTool> -StatusCode UTClustersToRawBankAlgT<IReadoutTool>::groupByBoard( const UTClusters* clusCont ) { - - // divide up the clusters by readout board - for ( const auto& clus : *clusCont ) { - - // find the online channel and board - auto iterMap = m_clusMap.find( UTTell1ID( clus->sourceID(), this->detType() == "UT" ) ); - if ( iterMap != m_clusMap.end() ) { - UTClustersOnBoard* tVec = iterMap->second; - tVec->addCluster( clus ); - } else { - return this->Warning( "Failed to find board in map ", StatusCode::SUCCESS ); - } - } // clusCont - return StatusCode::SUCCESS; -} - -template <class IReadoutTool> -unsigned int UTClustersToRawBankAlgT<IReadoutTool>::bankSize( UTClustersOnBoard::ClusterVector& clusCont ) const { - // bank size in 32 bit words - // 1 short (header) - // + n short (clusters) - // + n char (neighbour sum) - // + n adc * n cluster (char) - unsigned int nClus = clusCont.size(); - unsigned int nADC = - std::accumulate( clusCont.begin(), clusCont.end(), 0u, - []( unsigned n, const UTClustersOnBoard::boardPair& p ) { return n + p.first->size(); } ); - - unsigned int nByte = sizeof( short ) + nClus * sizeof( short ) + nClus * sizeof( char ) + nADC * sizeof( char ); - - return (unsigned int)ceil( nByte / (double)sizeof( int ) ); -} - -template <class IReadoutTool> -void UTClustersToRawBankAlgT<IReadoutTool>::writeBank( const UTClustersOnBoard::ClusterVector& clusCont, - LHCb::BankWriter& bWriter, const UTTell1ID aBoardID ) { - auto nClus = clusCont.size(); - // make a bank header - SiHeaderWord aHeader = SiHeaderWord( nClus, getPCN() ); - bWriter << aHeader.value(); - - // pick up the data and write first half of the bank into temp container... - for ( const auto& clus : clusCont ) { - UTCluster* aClus = clus.first; - UTChannelID aChan = aClus->channelID(); - - double isf = this->readoutTool()->interStripToDAQ( aChan, aBoardID, aClus->interStripFraction() ); - bWriter << UTClusterWord( clus.second, isf, aClus->size(), aClus->highThreshold() ); - } // clusCont - - if ( nClus & 1 ) { // add padding if odd number of clusters - short padding = 0; - bWriter << padding; - } - - // now the second half neighbour sum and ADC - for ( const auto& clus : clusCont ) { - UTCluster* aCluster = clus.first; - // implicit double->char conversion! - char neighbourSum = std::min( std::max( aCluster->neighbourSum(), -16. ), 15. 
); - bWriter << neighbourSum; - UTCluster::ADCVector adcs = aCluster->stripValues(); - - // flip ADC values for rotated staves - UTChannelID channelID = aCluster->channelID(); - this->readoutTool()->ADCOfflineToDAQ( channelID, aBoardID, adcs ); - - unsigned int nToWrite = std::min( aCluster->size(), m_maxClusterSize ); - for ( unsigned int i = 0; i < nToWrite; ++i ) { - bool last = ( i == nToWrite - 1 ); - bWriter << SiADCWord( adcs[i].second, last ); - } // iter - - } // clusCont -} - -template <class IReadoutTool> -unsigned int UTClustersToRawBankAlgT<IReadoutTool>::getPCN() const { - return m_pcn; -} diff --git a/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp b/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp deleted file mode 100644 index 14bd78ec9ec..00000000000 --- a/UT/UTDAQ/src/component/UTDecodingBaseAlg.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include <algorithm> - -// local -#include "UTDecodingBaseAlg.h" - -// Event -#include "Event/ByteStream.h" -#include "Event/ODIN.h" -#include "Event/RawEvent.h" -#include "Event/UTCluster.h" - -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTDataFunctor.h" -#include "Kernel/UTDecoder.h" -#include "Kernel/UTLexicalCaster.h" -#include "Kernel/UTRawBankMap.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" - -#include "SiDAQ/SiADCWord.h" -#include "SiDAQ/SiHeaderWord.h" -#include "boost/lexical_cast.hpp" - -#include "UTDet/DeUTDetector.h" - -using namespace LHCb; - -//----------------------------------------------------------------------------- -// Implementation file for class : RawBufferToUTClusterAlg -// -// 2004-01-07 : Matthew Needham -// 2016-10-07 : Sebastien Ponce -//----------------------------------------------------------------------------- - -UTDecodingBaseAlg::UTDecodingBaseAlg( const std::string& name, ISvcLocator* pSvcLocator ) - : UT::AlgBase( name, pSvcLocator ) { - setForcedInit(); -} - -StatusCode UTDecodingBaseAlg::initialize() { - - // Initialization - StatusCode sc = UT::AlgBase::initialize(); - if ( sc.isFailure() ) return sc; - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> initialize " << endmsg; - - // bank type - if ( !m_bankTypeString.empty() ) { - m_bankType = UTRawBankMap::stringToType( m_bankTypeString ); - if ( m_bankType == LHCb::RawBank::Velo ) { - fatal() << "Wrong detector type: only UT !" << endmsg; - return StatusCode::FAILURE; - } - } - - // bank type - m_errorType = UTRawBankMap::stringToType( m_errorBankString ); - if ( m_errorType == LHCb::RawBank::Velo ) { - fatal() << "Wrong detector type: only UT error banks!" 
<< endmsg; - return StatusCode::FAILURE; - } - - return StatusCode::SUCCESS; -} - -unsigned int UTDecodingBaseAlg::pcnVote( LHCb::span<const RawBank*> banks ) const { - - // make a majority vote to get the correct PCN in the event - std::map<unsigned int, unsigned int> pcns; - for ( const auto& bank : banks ) { - UTDecoder decoder( bank->data() ); - // only the good are allowed to vote [the US system..] - if ( !decoder.header().hasError() ) ++pcns[decoder.header().pcn()]; - } // banks - - auto max = - std::max_element( pcns.begin(), pcns.end(), - []( const std::pair<unsigned int, unsigned int>& lhs, - const std::pair<unsigned int, unsigned int>& rhs ) { return lhs.second < rhs.second; } ); - return max == pcns.end() ? UTDAQ::inValidPcn : max->first; -} - -bool UTDecodingBaseAlg::checkDataIntegrity( UTDecoder& decoder, const UTTell1Board* aBoard, const unsigned int bankSize, - const UTDAQ::version& bankVersion ) const { - // check the consistancy of the data - - bool ok = true; - auto iterDecoder = decoder.posAdcBegin(); - for ( ; iterDecoder != decoder.posAdcEnd(); ++iterDecoder ) { - - const UTClusterWord aWord = iterDecoder->first; - - // make some consistancy checks - if ( ( iterDecoder->second.size() - 1u < aWord.pseudoSize() ) ) { - unsigned int fracStrip = aWord.fracStripBits(); - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "adc values do not match ! " << iterDecoder->second.size() - 1u << " " << aWord.pseudoSize() - << " offline chan " - << aBoard->DAQToOffline( fracStrip, bankVersion, UTDAQ::UTStripRepresentation( aWord.channelID() ) ) - << " source ID " << aBoard->boardID() << " chan " << aWord.channelID() << endmsg; - Warning( "ADC values do not match", StatusCode::SUCCESS, 2 ).ignore(); - ok = false; - break; - } - - // decode the channel - if ( !aBoard->validChannel( aWord.channelID() ) ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "invalid TELL1 channel number board: " << aBoard->boardID() << " chan " << aWord.channelID() - << endmsg; - Warning( "Invalid tell1 channel", StatusCode::SUCCESS, 2 ).ignore(); - ok = false; - break; - } - - } // loop clusters - - // final check that we read the total number of bytes in the bank - if ( ok && (unsigned int)iterDecoder.bytesRead() != bankSize ) { - ok = false; - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Inconsistant byte count " << aBoard->boardID() << " Read: " << iterDecoder.bytesRead() - << " Expected: " << bankSize << endmsg; - Warning( "Inconsistant byte count", StatusCode::SUCCESS ).ignore(); - } - - if ( !ok ) ++counter( "skipped Banks" ); - - return ok; -} - -std::vector<unsigned int> UTDecodingBaseAlg::missingInAction( LHCb::span<const RawBank*> banks ) const { - - std::vector<unsigned int> missing; - if ( banks.size() != readoutTool()->nBoard() ) { - for ( unsigned int iBoard = 0u; iBoard < readoutTool()->nBoard(); ++iBoard ) { - int testID = readoutTool()->findByOrder( iBoard )->boardID().id(); - auto iterBank = - std::find_if( banks.begin(), banks.end(), [&]( const RawBank* b ) { return b->sourceID() == testID; } ); - if ( iterBank == banks.end() ) { - missing.push_back( (unsigned int)testID ); - std::string lostBank = "lost bank " + boost::lexical_cast<std::string>( testID ); - Warning( lostBank, StatusCode::SUCCESS, 0 ).ignore(); - } - } // iBoard - } - return missing; -} - -std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> UTDecodingBaseAlg::decodeErrors( const LHCb::RawEvent& raw ) const { - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> Execute " << endmsg; - - // make 
an empty output vector - auto outputErrors = std::make_unique<UTTELL1BoardErrorBanks>(); - - // Pick up UTError bank - const auto& itf = raw.banks( LHCb::RawBank::BankType( m_errorType ) ); - - if ( itf.empty() ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "event has no error banks " << endmsg; - } else { - ++counter( "events with error banks" ); - counter( "total # error banks" ) += itf.size(); - } - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Starting to decode " << itf.size() << detType() << "Error bank(s)" << endmsg; - - for ( const auto& bank : itf ) { - - std::string errorBank = "sourceID " + boost::lexical_cast<std::string>( bank->sourceID() ); - ++counter( errorBank ); - - if ( bank->magic() != RawBank::MagicPattern ) { - std::string pattern = "wrong magic pattern " + boost::lexical_cast<std::string>( bank->sourceID() ); - Warning( pattern, StatusCode::SUCCESS, 2 ).ignore(); - continue; - } - - const unsigned int* p = bank->data(); - unsigned int w = 0; - const unsigned int bankEnd = bank->size() / sizeof( unsigned int ); - - // bank has to be at least 28 words - if ( bankEnd < UTDAQ::minErrorBankWords ) { - warning() << "Error bank length is " << bankEnd << " and should be at least " << UTDAQ::minErrorBankWords - << endmsg; - Warning( "Error bank too short --> not decoded for TELL1 " + UT::toString( bank->sourceID() ), - StatusCode::SUCCESS, 2 ) - .ignore(); - continue; - } - - // and less than 56 words - if ( bankEnd > UTDAQ::maxErrorBankWords ) { - warning() << "Error bank length is " << bankEnd << " and should be at most " << UTDAQ::maxErrorBankWords - << endmsg; - Warning( "Error bank too long --> not decoded for TELL1 " + UT::toString( bank->sourceID() ), StatusCode::SUCCESS, - 2 ) - .ignore(); - continue; - } - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Decoding bank number of type " << detType() << "Error (TELL1 ID: " << bank->sourceID() - << ", Size: " << bank->size() << " bytes)" << endmsg; - - // make an empty tell1 data object - UTTELL1BoardErrorBank* myData = new UTTELL1BoardErrorBank(); - outputErrors->insert( myData, bank->sourceID() ); - - for ( unsigned int ipp = 0; ipp < UTDAQ::npp && w != bankEnd; ++ipp ) { - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "####### Parsing now data from PP " << ipp << " #####################" << endmsg; - - // we must find 5 words - if ( bankEnd - w < 5 ) { - Warning( "Ran out of words to read", StatusCode::SUCCESS, 2 ).ignore(); - break; - } - - UTTELL1Error* errorInfo = new UTTELL1Error( p[w], p[w + 1], p[w + 2], p[w + 3], p[w + 4] ); - myData->addToErrorInfo( errorInfo ); - w += 5; // read 5 first words - - const unsigned int nOptional = errorInfo->nOptionalWords(); - - // we must find the optional words + 2 more control words - if ( bankEnd - w < nOptional + 2 ) { - Warning( "Ran out of words to read", StatusCode::SUCCESS, 2 ).ignore(); - break; - } - - const unsigned int* eInfo = nullptr; - - if ( errorInfo->hasErrorInfo() ) { - // errorInfo->setOptionalErrorWords(p[w], p[w+1], p[w+2], p[w+3], p[w+4]); - eInfo = &p[w]; - w += 5; - } // has error information - - errorInfo->setWord10( p[w] ); - ++w; - errorInfo->setWord11( p[w] ); - ++w; - - // then some more optional stuff - if ( errorInfo->hasNZS() ) { - errorInfo->setWord12( p[w] ); - ++w; - } // nsz info... 
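// Summary of the per-PP word layout being parsed here: five mandatory words build the
// UTTELL1Error object; if hasErrorInfo() is set, five optional error words follow (cached
// in eInfo and unpacked further down); then come the two control words 10 and 11; the NZS
// word 12 just read is present only when hasNZS() is set, and a pedestal word 13 follows
// below only when hasPed() is set.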
- - // then some more optional stuff - if ( errorInfo->hasPed() ) { - errorInfo->setWord13( p[w] ); - ++w; - } - - if ( errorInfo->hasErrorInfo() ) { - errorInfo->setOptionalErrorWords( eInfo[0], eInfo[1], eInfo[2], eInfo[3], eInfo[4] ); - } // has error information - - } // loop ip [ppx's] - - if ( w != bankEnd ) { error() << "read " << w << " words, expected: " << bankEnd << endmsg; } - - } // end of loop over banks of a certain type - - return outputErrors; -} - -std::string UTDecodingBaseAlg::toSpill( std::string_view location ) const { - - std::string theSpill; - for ( const auto* name : {"Prev", "Next"} ) { - auto iPos = location.find( name ); - if ( iPos != std::string::npos ) { - auto startSpill = location.substr( iPos ); - auto iPos2 = startSpill.find( "/" ); - theSpill = startSpill.substr( 0, iPos2 ); - break; - } - } // is - return theSpill; -} - -void UTDecodingBaseAlg::computeSpillOffset( std::string_view location ) { - // convert spill to offset in time - auto spill = toSpill( location ); - m_spillOffset = ( spill.size() > 4u ? LHCb::UTCluster::SpillToType( spill ) : LHCb::UTCluster::Spill::Central ); -} - -bool UTDecodingBaseAlg::validSpill( const LHCb::ODIN& odin ) const { - if ( !m_checkValidSpill ) return true; - - // check spill is actually read out using the ODIN - const unsigned int numberOfSpills = odin.timeAlignmentEventWindow(); - return (unsigned int)abs( m_spillOffset ) <= numberOfSpills; -} - -unsigned int UTDecodingBaseAlg::byteSize( LHCb::span<const RawBank*> banks ) const { - return std::accumulate( banks.begin(), banks.end(), 0u, - []( unsigned int s, const RawBank* b ) { return s + b->totalSize(); } ); -} diff --git a/UT/UTDAQ/src/component/UTDecodingBaseAlg.h b/UT/UTDAQ/src/component/UTDecodingBaseAlg.h deleted file mode 100644 index 68e37c2bc7c..00000000000 --- a/UT/UTDAQ/src/component/UTDecodingBaseAlg.h +++ /dev/null @@ -1,133 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#ifndef UTDECODINGBASEALG_H -#define UTDECODINGBASEALG_H 1 - -#include "Event/RawBank.h" -#include "GaudiKernel/DataObjectHandle.h" -#include "Kernel/STLExtensions.h" -#include "Kernel/UTAlgBase.h" -#include "Kernel/UTDAQDefinitions.h" - -#include "Event/ODIN.h" -#include "Event/UTCluster.h" - -#include <string> -#include <vector> - -/** @class UTDecodingBaseAlg UTDecodingBaseAlg.h - * - * Algorithm to create UTClusters from RawEvent object - * - * @author A. Beiter based on code by: - * @author M. Needham - * @author S. 
Ponce - */ - -#include "Event/RawEvent.h" - -#include "Event/UTTELL1BoardErrorBank.h" -#include "Kernel/UTClusterWord.h" -#include "Kernel/UTDecoder.h" - -#include <string> - -class UTTell1Board; - -class UTDecodingBaseAlg : public UT::AlgBase { - -public: - /// Standard constructor - UTDecodingBaseAlg( const std::string& name, ISvcLocator* pSvcLocator ); - - StatusCode initialize() override; ///< Algorithm initialization - -protected: - LHCb::RawBank::BankType bankType() const; - - bool forceVersion() const; - - unsigned int pcnVote( LHCb::span<const LHCb::RawBank*> banks ) const; - - bool checkDataIntegrity( UTDecoder& decoder, const UTTell1Board* aBoard, const unsigned int bankSize, - const UTDAQ::version& bankVersion ) const; - - /** list of boards missing in action */ - std::vector<unsigned int> missingInAction( LHCb::span<const LHCb::RawBank*> banks ) const; - - /// Decodes error banks - std::unique_ptr<LHCb::UTTELL1BoardErrorBanks> decodeErrors( const LHCb::RawEvent& raw ) const; - - /** recover mode **/ - bool recoverMode() const; - - /** can be recovered recover **/ - bool canBeRecovered( const LHCb::UTTELL1BoardErrorBank* bank, const UTClusterWord& word, - const unsigned int pcn ) const; - - /// compute the spill offset - void computeSpillOffset( std::string_view location ); - - /** check the spill is read out **/ - bool validSpill( const LHCb::ODIN& odin ) const; - - /** return spill offset */ - LHCb::UTCluster::Spill spill() const; - - unsigned int byteSize( LHCb::span<const LHCb::RawBank*> banks ) const; - - Gaudi::Property<bool> m_skipErrors{this, "skipBanksWithErrors", false}; - Gaudi::Property<std::string> m_bankTypeString{this, "BankType", {}}; - - Gaudi::Property<int> m_forcedVersion{this, "forcedVersion", UTDAQ::inValidVersion}; - Gaudi::Property<bool> m_checkValidSpill{this, "checkValidity", false}; - LHCb::RawBank::BankType m_errorType; - LHCb::RawBank::BankType m_bankType; - -private: - std::string toSpill( std::string_view location ) const; - LHCb::UTCluster::Spill m_spillOffset; - - Gaudi::Property<std::string> m_errorBankString{this, "ErrorBank", "UTError"}; - - Gaudi::Property<bool> m_recoverMode{this, "recoverMode", true}; -}; - -inline LHCb::RawBank::BankType UTDecodingBaseAlg::bankType() const { return m_bankType; } - -inline bool UTDecodingBaseAlg::forceVersion() const { return m_forcedVersion >= 0; } - -inline bool UTDecodingBaseAlg::recoverMode() const { return m_recoverMode; } - -#include "Event/UTTELL1Error.h" -#include "Kernel/LHCbConstants.h" -#include "Kernel/UTClusterWord.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTPPRepresentation.h" -#include "Kernel/UTStripRepresentation.h" - -inline bool UTDecodingBaseAlg::canBeRecovered( const LHCb::UTTELL1BoardErrorBank* bank, const UTClusterWord& word, - const unsigned int pcn ) const { - - UTDAQ::UTPPRepresentation ppRep = UTDAQ::UTPPRepresentation( UTDAQ::UTStripRepresentation( word.channelID() ) ); - unsigned int pp, beetle, port, strip; - ppRep.decompose( pp, beetle, port, strip ); // split up the word - const LHCb::UTTELL1Error* errorInfo = bank->ppErrorInfo( pp ); - bool ok = false; - if ( errorInfo != 0 ) { - if ( errorInfo->linkInfo( beetle, port, pcn ) == LHCb::UTTELL1Error::FailureMode::kNone ) { ok = true; } - } - return ok; -} - -inline LHCb::UTCluster::Spill UTDecodingBaseAlg::spill() const { return m_spillOffset; } - -#endif // UTDECODINGBASEALG_H diff --git a/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp b/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp deleted file mode 
100644 index 5b753a38c8d..00000000000 --- a/UT/UTDAQ/src/component/UTDigitsToUTTELL1Data.cpp +++ /dev/null @@ -1,105 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTDigit.h" -#include "Event/UTTELL1Data.h" -#include "GaudiAlg/Transformer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/LHCbConstants.h" -#include "Kernel/UTAlgBase.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTTell1Board.h" -#include "UTDet/DeUTDetector.h" -#include <algorithm> -#include <string> -#include <vector> - -/** @class RawBankToUTProcFull RawBankToUTProcFull.h - * - * Algorithm to create UTTELL1Data (type ProcFull) from RawEvent object - * - * @author A. Beiter (based on code by M. Needham) - * @date 2018-09-04 - */ - -class UTDigitsToUTTELL1Data - : public Gaudi::Functional::Transformer<LHCb::UTTELL1Datas( const LHCb::UTDigits& ), - Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { - -public: - /// Standard constructor - UTDigitsToUTTELL1Data( const std::string& name, ISvcLocator* pSvcLocator ); - - LHCb::UTTELL1Datas operator()( const LHCb::UTDigits& ) const override; ///< Algorithm execution - -private: - StatusCode createTell1Data( const LHCb::UTDigits* digits, LHCb::UTTELL1Datas* outCont ) const; - - std::string m_inputLocation; - std::string m_outputLocation; -}; - -using namespace LHCb; - -//----------------------------------------------------------------------------- -// Implementation file for class : RawBufferToUTClusterAlg -// -// 2004-01-07 : Matthew Needham -//----------------------------------------------------------------------------- - -DECLARE_COMPONENT( UTDigitsToUTTELL1Data ) - -UTDigitsToUTTELL1Data::UTDigitsToUTTELL1Data( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer{name, - pSvcLocator, - - {"inputLocation", UTDigitLocation::UTDigits}, - - {"outputLocation", UTTELL1DataLocation::UTSubPeds}} {} - -LHCb::UTTELL1Datas UTDigitsToUTTELL1Data::operator()( const LHCb::UTDigits& digitCont ) const { - - // make a new digits container - UTTELL1Datas outCont; - createTell1Data( &digitCont, &outCont ).orThrow( "Problems creating Tell1 data", "UTDigitsToUTTELL1Data" ).ignore(); - return outCont; -} - -StatusCode UTDigitsToUTTELL1Data::createTell1Data( const UTDigits* digits, UTTELL1Datas* outCont ) const { - - if ( digits->size() != tracker()->nStrip() ) { - return Warning( "Digit cont size does not equal number of detector strips", StatusCode::SUCCESS, 1 ); - } - - // make correct number of output containers - for ( unsigned int i = 0; i < readoutTool()->nBoard(); ++i ) { - UTTell1Board* board = readoutTool()->findByOrder( i ); - UTTELL1Data::Data dataVec; - dataVec.resize( UTDAQ::noptlinks ); - for ( auto& dv : dataVec ) dv.resize( LHCbConstants::nStripsInBeetle ); - UTTELL1Data* tell1Data = new UTTELL1Data( dataVec ); - int key = (int)board->boardID().id(); - outCont->insert( tell1Data, key ); - } // nBoard - - // then its 
just one big loop - for ( const auto& digit : *digits ) { - UTDAQ::chanPair aPair = readoutTool()->offlineChanToDAQ( digit->channelID(), 0.0 ); - UTTELL1Data* adcBank = outCont->object( aPair.first.id() ); - UTTELL1Data::Data& dataVec = adcBank->data(); - const unsigned int beetle = aPair.second / LHCbConstants::nStripsInBeetle; - const unsigned int strip = aPair.second % LHCbConstants::nStripsInBeetle; - dataVec[beetle][strip] = int( digit->depositedCharge() ); - } - - return StatusCode::SUCCESS; -} diff --git a/UT/UTDAQ/src/component/UTErrorDecoding.cpp b/UT/UTDAQ/src/component/UTErrorDecoding.cpp deleted file mode 100644 index 1e5e226f844..00000000000 --- a/UT/UTDAQ/src/component/UTErrorDecoding.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/UTTELL1BoardErrorBank.h" -#include "GaudiAlg/Consumer.h" -#include "UTDecodingBaseAlg.h" - -/** @class UTErrorDecoding UTErrorDecoding.h public/UTErrorDecoding.h - * - * - * @author A Beiter (based on code by Mathias Knecht, M Needham, S Ponce) - * @date 2018-09-04 - */ - -class UTErrorDecoding : public Gaudi::Functional::Consumer<void( const LHCb::RawEvent& ), - Gaudi::Functional::Traits::BaseClass_t<UTDecodingBaseAlg>> { -public: - /// Standard constructor - UTErrorDecoding( const std::string& name, ISvcLocator* pSvcLocator ); - - /// Algorithm execution - void operator()( const LHCb::RawEvent& ) const override; - -private: - Gaudi::Property<bool> m_PrintErrorInfo{this, "PrintErrorInfo", false}; -}; - -using namespace LHCb; - -//----------------------------------------------------------------------------- -// Implementation file for class : UTErrorDecoding -// -// 2007-11-29: Mathias Knecht -// Update 2008 M Needham -// Update 2016 S Ponce -//----------------------------------------------------------------------------- - -// ---------------------------------------------------------------------------- -// Declaration of the Algorithm Factory -DECLARE_COMPONENT( UTErrorDecoding ) - -//============================================================================= -// Standard constructor, initializes variables -//============================================================================= -UTErrorDecoding::UTErrorDecoding( const std::string& name, ISvcLocator* pSvcLocator ) - : Consumer( name, pSvcLocator, - KeyValue{"RawEventLocations", Gaudi::Functional::concat_alternatives( - LHCb::RawEventLocation::Tracker, LHCb::RawEventLocation::Other, - LHCb::RawEventLocation::Default )} ) {} - -//============================================================================= -// Main execution -//============================================================================= -void UTErrorDecoding::operator()( const LHCb::RawEvent& raw ) const { - // in fact all the work is delegated to the base class - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "==> Execute " << endmsg; - auto errorBanks = decodeErrors( raw ); - // print out the error banks - if ( 
m_PrintErrorInfo ) { - for ( const auto& b : *errorBanks ) info() << b << endmsg; - } -} diff --git a/UT/UTDAQ/src/component/UTFullDecoding.cpp b/UT/UTDAQ/src/component/UTFullDecoding.cpp deleted file mode 100644 index eb4cbfd3af6..00000000000 --- a/UT/UTDAQ/src/component/UTFullDecoding.cpp +++ /dev/null @@ -1,335 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTTELL1Data.h" -#include "GaudiAlg/Transformer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTAlgBase.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTRawBankMap.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" -#include "boost/lexical_cast.hpp" -#include <bitset> - -/** @class UTFullDecoding UTFullDecoding.h - * - * Algorithm to decode the NZS UT data in the RawEvent buffer into UUTELL1Data - * objects. Job options: - * - \b PrintErrorInfo: Flag to print out errors from event info. - * - \b InputLocation: Location of RawEvent - * - \b OutputLocation: Location of NZS output data, e.g. UTFull - * - \b EventInfoLocation: Location of event info data - * \sa{https://edms.cern.ch/document/692431/3}. - * - * @author Andy Beiter (based on code by Mathias Knecht, Jeroen van Tilburg) - * @date 2018-09-04 - */ -class UTFullDecoding - : public Gaudi::Functional::MultiTransformer<std::tuple<LHCb::UTTELL1Datas, LHCb::UTTELL1EventInfos>( - const LHCb::RawEvent& ), - Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { - -public: - /// Standard constructor - UTFullDecoding( const std::string& name, ISvcLocator* pSvcLocator ); - StatusCode initialize() override; ///< Algorithm initialization - std::tuple<LHCb::UTTELL1Datas, LHCb::UTTELL1EventInfos> - operator()( const LHCb::RawEvent& ) const override; ///< Algorithm execution - -private: - LHCb::RawBank::BankType m_bankType; - - // job options - Gaudi::Property<bool> m_printErrorInfo{this, "PrintErrorInfo", true}; ///< Flag to print out errors from event info -}; - -using namespace LHCb; -using namespace UTDAQ; - -//----------------------------------------------------------------------------- -// Implementation file for class : UTFullDecoding -// -// 2007-09-11: Mathias Knecht, Jeroen van Tilburg -//----------------------------------------------------------------------------- - -// ---------------------------------------------------------------------------- -// Declaration of the Algorithm Factory -DECLARE_COMPONENT( UTFullDecoding ) - -//============================================================================= -// Standard constructor, initializes variables -//============================================================================= -UTFullDecoding::UTFullDecoding( const std::string& name, ISvcLocator* pSvcLocator ) - : MultiTransformer{name, - pSvcLocator, - {"InputLocation", RawEventLocation::Default}, - {KeyValue{"OutputLocation", UTTELL1DataLocation::UTFull}, - KeyValue{"EventInfoLocation", 
UTTELL1EventInfoLocation::UTEventInfo}}} { - - setForcedInit(); -} - -//============================================================================= -// Initialization -//============================================================================= -StatusCode UTFullDecoding::initialize() { - return UT::AlgBase::initialize().andThen( [&] { - // initialize bank type - m_bankType = UTRawBankMap::stringToType( detType() + "Full" ); - } ); -} - -//============================================================================= -// Main execution -//============================================================================= -std::tuple<UTTELL1Datas, UTTELL1EventInfos> UTFullDecoding::operator()( const LHCb::RawEvent& raw ) const { - // Get the raw data - std::tuple<UTTELL1Datas, UTTELL1EventInfos> output; - // make container of TELL1 boards - auto& [outputData, eventInfos] = output; - - // Initialize some counters - unsigned int L0EvtID = 0; - - // Pick up UTFull bank - const auto& itf = raw.banks( RawBank::BankType( m_bankType ) ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "Starting to decode " << itf.size() << detType() << "Full bank(s)" << endmsg; - } - - int nBank = 0; - for ( const RawBank* p : itf ) { - - // Create an empty tell1 data object - UTTELL1Data::Data tell1Data; - tell1Data.resize( noptlinks ); - for ( auto& i : tell1Data ) i.resize( nports * nstrips, 0 ); - - // Create an empty tell1 header object - UTTELL1Data::Data tell1Header; - tell1Header.resize( noptlinks ); - - for ( auto j = tell1Header.begin(); j != tell1Header.end(); ++j ) { j->resize( nports * nheaders, 0 ); } - - // Create an empty eventInfo object - UTTELL1Data::Info eventInfo; - for ( unsigned int i = 0; i < npp; ++i ) { - UTTELL1EventInfo* evtInfo = new UTTELL1EventInfo(); - eventInfo.push_back( evtInfo ); - eventInfos.insert( evtInfo ); - } - std::vector<unsigned int> sentPP; - - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "Decoding bank number [" << nBank++ << "] of type " << detType() << "Full (TELL1 ID: " << p->sourceID() - << ", Size: " << p->size() << " bytes)" << endmsg; - } - - // Check if the board is valid - UTTell1ID tell1ID = UTTell1ID( (unsigned int)p->sourceID(), detType() == "UT" ); - const UTTell1Board* aBoard = readoutTool()->findByBoardID( tell1ID ); - if ( !aBoard ) { // not a valid b - std::string invalidSource = - "Invalid source ID --> skip bank " + boost::lexical_cast<std::string>( p->sourceID() ); - Warning( invalidSource, StatusCode::SUCCESS, 2 ).ignore(); - ++counter( "Skipped banks" ); - continue; - } - - if ( (unsigned int)p->size() % nwordsFull != 0 ) { - error() << "Wrong bank size for this type!! You should have multiple of " << nwordsFull << " bytes" << endmsg; - } - - // Counters - unsigned int cntWD = 0; // Word counter, resets for each PP. Range 0 to 223. - unsigned int cntPP = 0; // PP-FPGA counter, goes from 0 to 3. 
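// Structure of the NZS bank parsed by the loop below: each PP-FPGA contributes 224
// consecutive 32-bit words. Words 0-215 interleave Beetle header and ADC data (the port,
// word and Beetle indices are derived from cntWD as coded below), while words 216-223
// carry the event-info block (words 0-7 of UTTELL1EventInfo). Every 32-bit word packs
// four 8-bit values, extracted with mask1..mask4 and shifts of 0, 8, 16 and 24 bits.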
- - // Now loop over all WORDS in a bank - - for ( const unsigned int* w = p->begin<unsigned int>(); w != p->end<unsigned int>(); ++w ) { - - if ( cntWD % 224 == 0 ) { // Each 224 words we have a new PP-FPGA - cntWD = 0; - cntPP = ( *( w + 219 ) & UTTELL1EventInfo::ChipAddrMask ) >> UTTELL1EventInfo::ChipAddrBits; - sentPP.push_back( cntPP ); - - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "####### Parsing now data from PP " << cntPP << " ##################" << endmsg; - } - } - - // Set the Tell1 sourceID for each PP - UTTELL1EventInfo* evtInfo = eventInfo[cntPP]; - evtInfo->setSourceID( p->sourceID() ); - - // Unpack the 32-bit word into 8-bit chunks - unsigned int bits1 = 0; - unsigned int bits2 = 8; - unsigned int bits3 = 16; - unsigned int bits4 = 24; - unsigned int p1 = ( *w & mask1 ) >> bits1; - unsigned int p2 = ( *w & mask2 ) >> bits2; - unsigned int p3 = ( *w & mask3 ) >> bits3; - unsigned int p4 = ( *w & mask4 ) >> bits4; - - if ( cntWD < 216 ) { // Words below 216 contain data and header - int iPort = cntWD / ( nbeetles * 36 ); - int iWord = ( cntWD % ( nbeetles * 36 ) ) / nbeetles; - int iBeetle = 2 * ( cntWD % nbeetles ) + nBeetlesPerPPx * cntPP; - - if ( iWord >= 1 && iWord < 4 ) { // Header part - int iHeader = iWord - 1; - tell1Header[iBeetle][iHeader + 3 * iPort] = p1; - tell1Header[iBeetle][iHeader + 3 * ( iPort + 2 )] = p2; - tell1Header[iBeetle + 1][iHeader + 3 * iPort] = p3; - tell1Header[iBeetle + 1][iHeader + 3 * ( iPort + 2 )] = p4; - } else if ( iWord >= 4 && iWord < 36 ) { // Data part - int iChan = iWord - 4; - tell1Data[iBeetle][iChan + 32 * iPort] = p1; - tell1Data[iBeetle][iChan + 32 * ( iPort + 2 )] = p2; - tell1Data[iBeetle + 1][iChan + 32 * iPort] = p3; - tell1Data[iBeetle + 1][iChan + 32 * ( iPort + 2 )] = p4; - } - } else { // Words 216-223 contains Event Info - switch ( cntWD ) { - case 216: { - evtInfo->setWord0( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Event Information (bits): " << std::bitset<8>( evtInfo->EventInformation() ) - << " | Bank List (bits): " << std::bitset<8>( evtInfo->BankList() ) - << " | Detector ID (dec): " << (unsigned int)evtInfo->DetectorID() - << " | Bunch Counter (dec): " << evtInfo->bCnt() << endmsg; - } - break; - } - - case 217: { - evtInfo->setWord1( *w ); - if ( msgLevel( MSG::DEBUG ) ) { debug() << "(Event Info) L0-EventID (dec): " << (int)*w << endmsg; } - if ( L0EvtID == 0 ) { - // For each bank, L0EvtID is initialized. So the first time in the - // bank, L0EvtID is checked. - L0EvtID = (unsigned int)evtInfo->L0EvID(); - } else { - // The rest of the time (for all PPs, all TELL1), there's a check - // that L0EvtID is the same for all. - if ( (unsigned int)evtInfo->L0EvID() != L0EvtID ) { - error() << "L0-Event ID not the same for all!" << endmsg; - } - } - break; - } - case 218: { - evtInfo->setWord2( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Reserved Bits (hex): " << std::hex << evtInfo->R1() - << " | Process Info (bits): " << std::bitset<8>( evtInfo->ProcessInfo() ) - << " | PCN (from Beetle 0) (dec): " << std::dec << (unsigned int)evtInfo->pcn() << endmsg; - } - break; - } - case 219: { - evtInfo->setWord3( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Chip Addr (dec): " << (unsigned int)evtInfo->ChipAddr() - << " | Sync RAM Full (bits): " << std::bitset<6>( evtInfo->SyncRAMFull() ) - << " | TLK Link Loss (bits): " << std::bitset<6>( evtInfo->tlkLnkLoss() ) << endmsg; - debug() << "(Event Info) | Sync Evt Size Err. 
(bits): " << std::bitset<6>( evtInfo->SyncEvtSizeError() ) - << " | Opt. Link Disable (bits): " << std::bitset<6>( evtInfo->OptLnkDisable() ) - << " | Opt. Link NoEvent (bits): " << std::bitset<6>( evtInfo->OptLnkNoEvt() ) << endmsg; - } - if ( m_printErrorInfo ) { - if ( evtInfo->SyncRAMFull() != 0 ) - error() << "Sync RAM Full in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP - << ". Value (One bit per link): " << std::bitset<6>( evtInfo->SyncRAMFull() ) << endmsg; - if ( evtInfo->tlkLnkLoss() != 0 ) - error() << "TLK Link loss in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP - << ". Value (One bit per link): " << std::bitset<6>( evtInfo->tlkLnkLoss() ) << endmsg; - if ( evtInfo->SyncEvtSizeError() != 0 ) - error() << "Sync Event size error in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP - << ". Value (One bit per link): " << std::bitset<6>( evtInfo->SyncEvtSizeError() ) << endmsg; - if ( evtInfo->OptLnkNoEvt() != 0 ) - error() << "Optical Link No Event in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP - << ". Value (One bit per link): " << std::bitset<6>( evtInfo->OptLnkNoEvt() ) << endmsg; - } - break; - } - case 220: { - evtInfo->setWord4( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Reserved bit (bits): " << std::bitset<1>( evtInfo->R2() ) - << " | PCN Error (bits):" << std::bitset<1>( evtInfo->pcnError() ) - << " | Optical Link no clock (bits): " << std::bitset<6>( evtInfo->OptLnkNoClock() ) << endmsg; - debug() << "(Event Info) | Header Pseudo Err. (bits): " << std::bitset<24>( evtInfo->HeaderPseudoError() ) - << endmsg; - } - - if ( m_printErrorInfo ) { - if ( evtInfo->HeaderPseudoError() != 0 ) - error() << "Header Pseudo Error in TELL1 ID " << p->sourceID() << ", PP-FPGA " << cntPP - << ". 
Value (One bit per port=24 bits): " << std::bitset<24>( evtInfo->HeaderPseudoError() ) - << endmsg; - } - break; - } - case 221: { - evtInfo->setWord5( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Beetle3 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle3() - << " | Beetle2 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle2() - << " | Beetle1 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle1() - << " | Beetle0 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle0() << endmsg; - } - break; - } - case 222: { - eventInfo[cntPP]->setWord6( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Reserved bits (hex): " << std::hex << evtInfo->R3() - << " | Beetle5 PCN (dec): " << std::dec << (unsigned int)evtInfo->pcnBeetle5() - << " | Beetle4 PCN (dec): " << (unsigned int)evtInfo->pcnBeetle4() << endmsg; - } - break; - } - case 223: { - eventInfo[cntPP]->setWord7( *w ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "(Event Info) Reserved bits (hex): " << std::hex << (unsigned int)evtInfo->R4() - << " | I Headers: Beet.5 (dec): " << std::dec << (unsigned int)evtInfo->iHeaderBeetle5() - << " | Beet.4: " << (unsigned int)evtInfo->iHeaderBeetle4() - << " | Beet.3: " << (unsigned int)evtInfo->iHeaderBeetle3() - << " | Beet.2: " << (unsigned int)evtInfo->iHeaderBeetle2() - << " | Beet.1: " << (unsigned int)evtInfo->iHeaderBeetle1() - << " | Beet.0: " << (unsigned int)evtInfo->iHeaderBeetle0() << std::dec << endmsg; - } - break; - } - default: - error() << "Not the right number of words: word number " << cntWD << ", you should have 224 words per PP" - << endmsg; - } - } - - cntWD++; - } // Loop over all words - - // make an empty tell1 data object - UTTELL1Data* myData = new UTTELL1Data( tell1Data, tell1Header, sentPP, eventInfo ); - - // put into the container, second argument is TELL1 id - outputData.insert( myData, int( p->sourceID() ) ); - - } // end of loop over banks of a certain type - - return output; -} diff --git a/UT/UTDAQ/src/component/UTLayerSelector.cpp b/UT/UTDAQ/src/component/UTLayerSelector.cpp deleted file mode 100644 index 06ff22077e8..00000000000 --- a/UT/UTDAQ/src/component/UTLayerSelector.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2019 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. 
* -\*****************************************************************************/ -// Include files -// local -#include "UTLayerSelector.h" -// kernel -#include "Kernel/UTChannelID.h" - -// ==================================================================== -namespace { - static const auto s_layerMap = - std::array{std::pair{"UTaX", 9}, std::pair{"UTaU", 10}, std::pair{"UTbV", 17}, std::pair{"UTbX", 18}}; - -} // namespace -// ==================================================================== - -DECLARE_COMPONENT( UTLayerSelector ) - -// ==================================================================== - -// ==================================================================== -bool UTLayerSelector::select( const LHCb::UTChannelID& id ) const { return ( *this )( id ); } -// ==================================================================== - -// ==================================================================== -bool UTLayerSelector::operator()( const LHCb::UTChannelID& id ) const { - // Checks detector - if ( ( m_detType == "UT" && id.isUT() ) ) { - if ( msgLevel( MSG::DEBUG ) ) - debug() << "Excluded layers are in " << m_detType << ". Cluster is in other detector." << endmsg; - - return false; - } - - // Checks layer - - for ( auto it = m_ignoredLayers.begin(); it != m_ignoredLayers.end(); it++ ) { - if ( static_cast<int>( it->find( m_detType ) ) == -1 ) { // Checks if detector and layer agree - continue; - } - auto jt = std::find_if( s_layerMap.begin(), s_layerMap.end(), [&]( const std::pair<const char*, unsigned int>& p ) { - return id.uniqueLayer() == p.second && *it == p.first; - } ); - if ( jt != s_layerMap.end() ) { - if ( msgLevel( MSG::DEBUG ) ) - debug() << "Cluster is in " << m_detType << " layer " << ( *it ) << " and will be removed!" << endmsg; - return true; - } - - if ( msgLevel( MSG::DEBUG ) ) debug() << "Cluster will not be removed!" << endmsg; - } - - return false; -} -// ==================================================================== diff --git a/UT/UTDAQ/src/component/UTLayerSelector.h b/UT/UTDAQ/src/component/UTLayerSelector.h deleted file mode 100644 index ac8172f180b..00000000000 --- a/UT/UTDAQ/src/component/UTLayerSelector.h +++ /dev/null @@ -1,46 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#ifndef UTLAYERSELECTOR_H -#define UTLAYERSELECTOR_H 1 - -// Include files -// from STL -#include <string> -#include <vector> - -// from GaudiAlg -#include "GaudiAlg/GaudiTool.h" - -// from Kernel -#include "Kernel/IUTChannelIDSelector.h" - -/** @class UTLayerSelector UTLayerSelector.h - * - * Algorithm to remove clusters in excluded layers - * - * @author A. Beiter (based on code by Ch. 
Elsasser) - * @date 2018-09-04 - */ - -class UTLayerSelector : public extends<GaudiTool, IUTChannelIDSelector> { - -public: - using extends::extends; - - bool select( const LHCb::UTChannelID& id ) const override; - bool operator()( const LHCb::UTChannelID& id ) const override; - -private: - Gaudi::Property<std::string> m_detType{this, "DetType", "UT"}; - Gaudi::Property<std::vector<std::string>> m_ignoredLayers{this, "IgnoredLayers"}; -}; - -#endif // UTLAYERSELECTOR_H diff --git a/UT/UTDAQ/src/component/UTPedestalDecoding.cpp b/UT/UTDAQ/src/component/UTPedestalDecoding.cpp deleted file mode 100644 index 26adbe6ee50..00000000000 --- a/UT/UTDAQ/src/component/UTPedestalDecoding.cpp +++ /dev/null @@ -1,156 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "Event/RawBank.h" -#include "Kernel/UTAlgBase.h" -//#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "Event/UTTELL1Data.h" -#include "GaudiAlg/Transformer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTRawBankMap.h" -#include "boost/lexical_cast.hpp" - -/** @class UTPedestalDecoding UTPedestalDecoding.h - * - * Algorithm to decode the pedestal bank in the RawEvent buffer into - * UTTELL1Data objects. Job options: - * - \b InputLocation: Location of RawEvent - * - \b OutputLocation: Location of output pedestal data, e.g. TTPedestal - * \sa{http://edms.cern.ch/doc/695007}. 
- * - * @author Andy Beiter (based on code by Mathias Knecht, Jeroen van Tilburg) - * @date 2018-09-04 - */ -class UTPedestalDecoding : public Gaudi::Functional::Transformer<LHCb::UTTELL1Datas( const LHCb::RawEvent& ), - Gaudi::Functional::Traits::BaseClass_t<UT::AlgBase>> { - -public: - /// Standard constructor - UTPedestalDecoding( const std::string& name, ISvcLocator* pSvcLocator ); - - StatusCode initialize() override; ///< Algorithm initialization - LHCb::UTTELL1Datas operator()( const LHCb::RawEvent& ) const override; ///< Algorithm execution - -private: - LHCb::RawBank::BankType m_bankType; -}; - -using namespace LHCb; -using namespace UTDAQ; - -//----------------------------------------------------------------------------- -// Implementation file for class : UTPedestalDecoding -// -// 2007-09-11: Mathias Knecht, Jeroen van Tilburg -//----------------------------------------------------------------------------- - -// ---------------------------------------------------------------------------- -// Declaration of the Algorithm Factory -DECLARE_COMPONENT( UTPedestalDecoding ) - -//============================================================================= -// Standard constructor, initializes variables -//============================================================================= -UTPedestalDecoding::UTPedestalDecoding( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer{name, - pSvcLocator, - {"InputLocation", RawEventLocation::Default}, - {"OutputLocation", UTTELL1DataLocation::UTPedestal}} {} - -//============================================================================= -// Initialization -//============================================================================= -StatusCode UTPedestalDecoding::initialize() { - return Transformer::initialize().andThen( - [&] { m_bankType = UTRawBankMap::stringToType( detType() + "Pedestal" ); } ); -} - -//============================================================================= -// Main execution -//============================================================================= -UTTELL1Datas UTPedestalDecoding::operator()( const RawEvent& raw ) const { - - // make container of TELL1 boards - UTTELL1Datas outputPedestals; - - // Pick up pedestal bank - const auto& itf = raw.banks( RawBank::BankType( m_bankType ) ); - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "Starting to decode " << itf.size() << detType() << "Pedestal bank(s)" << endmsg; - } - - for ( const LHCb::RawBank* p : itf ) { - - if ( msgLevel( MSG::DEBUG ) ) { - debug() << "Decoding bank of type " << detType() << "Pedestal (TELL1 ID: " << p->sourceID() - << ", Size: " << p->size() << " bytes)" << endmsg; - } - - // Check if the board is valid - UTTell1ID tell1ID = UTTell1ID( (unsigned int)( p->sourceID() ), detType() == "UT" ); - const UTTell1Board* aBoard = this->readoutTool()->findByBoardID( tell1ID ); - if ( !aBoard ) { - std::string invalidSource = "Invalid source ID --> skip bank" + boost::lexical_cast<std::string>( p->sourceID() ); - Warning( invalidSource, StatusCode::SUCCESS, 2 ).ignore(); - ++counter( "skipped Banks" ); - continue; - } - - // Create an empty tell1 pedestal object - UTTELL1Data::Data pedestals; - pedestals.resize( noptlinks ); - for ( auto& i : pedestals ) i.resize( nports * nstrips, 0 ); - - if ( (unsigned int)p->size() != sizebankPedestal ) { - error() << "Wrong bank size for this type!! You should have " << sizebankPedestal << " bytes" << endmsg; - } - - // Counters - unsigned int cntWD = 0; // Word counter, resets for each PP. 
Range 0 to 191. - unsigned int cntPP = 0; // PP-FPGA counter, goes from 0 to 3. - - // Now loop over all WORDS in a bank - for ( const unsigned int* w = p->begin<unsigned int>(); w != p->end<unsigned int>(); ++w ) { - - if ( cntWD == 192 ) { // Each 192 words we have a new PP-FPGA - cntWD = 0; - ++cntPP; - } - - if ( cntWD == 0 && msgLevel( MSG::DEBUG ) ) { - debug() << "####### Parsing now data from PP " << cntPP << " ##################" << endmsg; - } - - // Unpack the 32-bit word into 8-bit chunks - unsigned int p1 = ( *w & mask1 ); - unsigned int p2 = ( ( *w & mask2 ) / 0x100 ); - unsigned int p3 = ( ( *w & mask3 ) / 0x10000 ); - unsigned int p4 = ( ( *w & mask4 ) / 0x1000000 ); - - int iPort = cntWD / ( nbeetles * nstrips ); // range 0 to 1 - int iWord = ( cntWD % ( nbeetles * nstrips ) ) / nbeetles; // range: 0 to 32 - int iBeetle = 2 * ( cntWD % nbeetles ) + nBeetlesPerPPx * cntPP; // range: 0 to 22 - - pedestals[iBeetle][iWord + nstrips * iPort] = p1; - pedestals[iBeetle][iWord + nstrips * ( iPort + 2 )] = p2; - pedestals[iBeetle + 1][iWord + nstrips * iPort] = p3; - pedestals[iBeetle + 1][iWord + nstrips * ( iPort + 2 )] = p4; - - ++cntWD; - } // Loop over all words - - // make an empty tell1 data object - // and put into the container, second argument is Tell1 id - outputPedestals.insert( new UTTELL1Data( pedestals ), int( p->sourceID() ) ); - - } // end of loop over banks of a certain type - return outputPedestals; -} diff --git a/UT/UTDAQ/src/component/UTRawBankMonitor.cpp b/UT/UTDAQ/src/component/UTRawBankMonitor.cpp deleted file mode 100644 index 4cb09ed4979..00000000000 --- a/UT/UTDAQ/src/component/UTRawBankMonitor.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -// C++ code for 'LHCb Tracking package(s)' -// -// Author: A. Beiter (based on code by M. Needham) -// Created: 2018-09-04 - -#include "Event/RawBank.h" -#include "Event/RawEvent.h" -#include "GaudiAlg/Consumer.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTHistoAlgBase.h" -#include "Kernel/UTRawBankMap.h" -#include "Kernel/UTTell1ID.h" -#include <string> - -/** @class UTRawBankMonitor UTRawBankMonitor.h - * UTCheckers/UTRawBankMonitor.h - * - * Class for checking UT RAW buffer - * - * @author A. 
Beiter (based on code by M.Needham) - * @date 2018-09-04 - */ - -class UTRawBankMonitor : public Gaudi::Functional::Consumer<void( LHCb::RawEvent const& ), - Gaudi::Functional::Traits::BaseClass_t<UT::HistoAlgBase>> { - -public: - /// constructor - UTRawBankMonitor( const std::string& name, ISvcLocator* pSvcLocator ) - : Consumer{name, - pSvcLocator, - {"RawEventLocations", Gaudi::Functional::concat_alternatives( LHCb::RawEventLocation::Other, - LHCb::RawEventLocation::Default )}} {}; - - /// initialize - StatusCode initialize() override; - - /// execute - void operator()( const LHCb::RawEvent& ) const override; - -private: - StatusCode configureBankType(); - - LHCb::RawBank::BankType m_bankType = LHCb::RawBank::LastType; -}; - -DECLARE_COMPONENT( UTRawBankMonitor ) - -using namespace LHCb; - -//-------------------------------------------------------------------- -// -//-------------------------------------------------------------------- - -StatusCode UTRawBankMonitor::initialize() { - - if ( histoTopDir().empty() ) setHistoTopDir( detType() + "/" ); - - return UT::HistoAlgBase::initialize().andThen( &UTRawBankMonitor::configureBankType, this ); // configure banktype -} - -void UTRawBankMonitor::operator()( const LHCb::RawEvent& rawEvt ) const { - - // execute once per event - - // init counters - unsigned int maxBoardSize = 0; - UTTell1ID hotBoard( 0, detType() == "UT" ); - unsigned int eventDataSize = 0; - - const auto& tBanks = rawEvt.banks( m_bankType ); - for ( const auto* iterBank : tBanks ) { - - // board info.... - size_t bankSize = iterBank->size() / sizeof( char ); - UTTell1ID aBoard( iterBank->sourceID(), detType() == "UT" ); - - // event counters - if ( bankSize > maxBoardSize ) { - maxBoardSize = bankSize; - hotBoard = aBoard; - } - eventDataSize += bankSize; - - // histogram per board - plot( (double)bankSize, "board data size", 0., 200., 200 ); - - // data size per board - // unsigned int id = (aBoard.region()*20) + aBoard.subID(); - // const std::map< unsigned int, unsigned int > & SourceIDToTELLmap = readoutTool()->SourceIDToTELLNumberMap(); - // unsigned int tellNumber = SourceIDToTELLmap.find(iterBank->sourceID())->second; - unsigned int tellNumber = readoutTool()->SourceIDToTELLNumber( iterBank->sourceID() ); - - // These hard coded numbers come from here: https://lbtwiki.cern.ch/bin/view/Online/Tell1PortNum - unsigned int doubleLinkedUTtell1s[] = {1, 2, 3, 4, 5, 6, 8, 9, 10, 13, 14, 15}; - unsigned int numberOfLinks = 1; - - if ( detType() == "UT" ) { - for ( unsigned int i = 0; i < 12; i++ ) { - if ( tellNumber == doubleLinkedUTtell1s[i] ) numberOfLinks = 2; - } - } - - double datasize = bankSize / (double)numberOfLinks; - plot( tellNumber, "data size", 0., 100., 100, datasize ); - plot( tellNumber, "data size unnormalised", 0., 100., 100, (double)bankSize ); - - } // iterBank - - // data size - plot( (double)eventDataSize, 1, "event data size", 0., 10000., 500 ); - - // include standard header HARDCODE !!! - unsigned int headerSize = tBanks.size() * 2u; - plot( (double)( eventDataSize + headerSize ), 2, "total data size", 0., 10000., 500 ); - - plot( (double)maxBoardSize, 3, "hot board size", 0., 200., 200 ); - unsigned int id = ( hotBoard.region() * 20 ) + hotBoard.subID(); - plot( (double)id, 4, "hot board ID", 0., 100., 100 ); -} - -StatusCode UTRawBankMonitor::configureBankType() { - m_bankType = UTRawBankMap::stringToType( detType() ); - return m_bankType != RawBank::Velo ? 
StatusCode::SUCCESS : StatusCode::FAILURE; -} diff --git a/UT/UTDAQ/src/component/UTReadoutTool.cpp b/UT/UTDAQ/src/component/UTReadoutTool.cpp deleted file mode 100644 index d9fdd736927..00000000000 --- a/UT/UTDAQ/src/component/UTReadoutTool.cpp +++ /dev/null @@ -1,515 +0,0 @@ -/*****************************************************************************\ -* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * -* * -* This software is distributed under the terms of the GNU General Public * -* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * -* * -* In applying this licence, CERN does not waive the privileges and immunities * -* granted to it by virtue of its status as an Intergovernmental Organization * -* or submit itself to any jurisdiction. * -\*****************************************************************************/ -#include "DetDesc/Condition.h" -#include "Event/UTCluster.h" -#include "GaudiAlg/GaudiTool.h" -#include "Kernel/IUTReadoutTool.h" -#include "Kernel/UTBoardMapping.h" -#include "Kernel/UTChannelID.h" -#include "Kernel/UTDAQDefinitions.h" -#include "Kernel/UTTell1Board.h" -#include "Kernel/UTTell1ID.h" -#include "Kernel/UTXMLUtils.h" -#include "UTDet/DeUTDetector.h" -#include <algorithm> -#include <fstream> -#include <string> -#include <vector> - -/** - * Concret Class for things related to the Readout of the UT Tell1 Boards - */ - -class UTReadoutTool : public extends<GaudiTool, IUTReadoutTool> { - -public: - /// Constructer - UTReadoutTool( const std::string& type, const std::string& name, const IInterface* parent ); - - /// nBoard - unsigned int nBoard() const override; - - /// return vector of Tell1IDs - std::vector<UTTell1ID> boardIDs() const override; - - /// convert ITChannelID to DAQ ChannelID - UTDAQ::chanPair offlineChanToDAQ( const LHCb::UTChannelID aOfflineChan, double isf ) const override; - - /// convert offline interStripFraction to DAQ interStripFraction - double interStripToDAQ( const LHCb::UTChannelID aOfflineChan, const UTTell1ID aBoardID, - const double isf ) const override; - - bool ADCOfflineToDAQ( const LHCb::UTChannelID aOfflineChan, const UTTell1ID aBoardID, - LHCb::UTCluster::ADCVector& adcs ) const override; - - /// find the Tell1 board given a board ID - UTTell1Board* findByBoardID( const UTTell1ID aBoardID ) const override; - - /// find Tell1 board by storage order - UTTell1Board* findByOrder( const unsigned int aValue ) const override; - - /// Add the mapping of source ID to TELL1 board number - unsigned int SourceIDToTELLNumber( unsigned int sourceID ) const override; - - /** Add the mapping of source ID to board number for UT */ - const std::map<unsigned int, unsigned int>& SourceIDToTELLNumberMap() const override; - - /// list of the readout sector ids on the board - std::vector<LHCb::UTChannelID> sectorIDs( const UTTell1ID board ) const override; - - /// list of the readout sectors - std::vector<DeUTSector*> sectors( const UTTell1ID board ) const override; - - /// service box - unsigned int nServiceBox() const override; - - /// service box number - std::string serviceBox( const LHCb::UTChannelID& aChan ) const override; - - /// list of the readout sectors ids in a service box - std::vector<LHCb::UTChannelID> sectorIDsOnServiceBox( const std::string& serviceBox ) const override; - - /// list of the readout sectors in a service box - std::vector<DeUTSector*> sectorsOnServiceBox( const std::string& serviceBox ) const override; - - /// list of service boxes - const std::vector<std::string>& 
serviceBoxes() const override; - - /// Add the mapping of TELL1 board number to source ID - unsigned int TELLNumberToSourceID( unsigned int TELL ) const override; - - /// print mapping - void printMapping() const override; - - /// write out the mapping as xml - StatusCode writeMappingToXML() const override; - - StatusCode validate() const; - - /// finalize - StatusCode finalize() override; - - /// init - StatusCode initialize() override; - - /// get region - unsigned int region( const LHCb::UTChannelID aChan ) const override; - - /** Add the mapping of board number to source ID for UT */ - const std::map<unsigned int, unsigned int>& TELLNumberToSourceIDMap() const override; - -protected: - void clear(); - - std::string footer() const; - std::string header( const std::string& conString ) const; - std::string strip( const std::string& conString ) const; - - unsigned int m_hybridsPerBoard; - unsigned int m_nBoard{0}; - unsigned int m_nServiceBox; - std::vector<std::unique_ptr<UTTell1Board>> m_boards; - std::map<UTTell1ID, UTTell1Board*> m_boardsMap; - std::vector<std::string> m_serviceBoxes; - std::vector<unsigned int> m_firstBoardInRegion; - - Gaudi::Property<bool> m_printMapping{this, "printMapping", false}; - DeUTDetector* m_tracker = nullptr; - Gaudi::Property<std::string> m_conditionLocation{this, "conditionLocation", - "/dd/Conditions/ReadoutConf/UT/ReadoutMap"}; - -private: - Gaudi::Property<bool> m_writeXML{this, "writeMappingToXML", false}; - Gaudi::Property<std::string> m_footer{this, "footer", "</DDDB>"}; - Gaudi::Property<std::string> m_startTag{this, "startTag", "<condition"}; - Gaudi::Property<std::string> m_outputFileName{this, "outputFile", "ReadoutMap.xml"}; - std::ofstream m_outputFile; - Gaudi::Property<std::string> m_author{this, "author", "Joe Bloggs"}; - Gaudi::Property<std::string> m_tag{this, "tag", "None"}; - Gaudi::Property<std::string> m_desc{this, "description", "BlahBlahBlah"}; - Gaudi::Property<bool> m_removeCondb{this, "removeCondb", false}; - Gaudi::Property<unsigned int> m_precision{this, "precision", 16u}; - Gaudi::Property<unsigned int> m_depth{this, "depths", 3u}; - -private: - StatusCode createBoards(); - StatusCode createTell1Map(); - - unsigned int m_nRegionA = 512; - unsigned int m_firstStation = 512; -}; - -using namespace LHCb; - -DECLARE_COMPONENT( UTReadoutTool ) - -UTReadoutTool::UTReadoutTool( const std::string& type, const std::string& name, const IInterface* parent ) - : base_class( type, name, parent ) { - // constructor - m_boards.reserve( 100 ); // about correct -} - -void UTReadoutTool::clear() { - // clear the boards - m_boards.clear(); - m_nBoard = 0; -} - -StatusCode UTReadoutTool::initialize() { - // initialization phase... 
- StatusCode sc = GaudiTool::initialize(); - if ( sc.isFailure() ) { return Error( "Failed to initialize", sc ); } - - // tracker - m_tracker = getDet<DeUTDetector>( DeUTDetLocation::location() ); - - registerCondition( m_conditionLocation, &UTReadoutTool::createTell1Map ); - - registerCondition( m_conditionLocation, &UTReadoutTool::createBoards ); - - sc = runUpdate(); // force update - if ( sc.isFailure() ) return Error( "Failed first UMS update for readout tool", sc ); - - if ( m_printMapping ) printMapping(); - - return StatusCode::SUCCESS; -} - -StatusCode UTReadoutTool::finalize() { - - if ( m_writeXML ) writeMappingToXML().ignore( /* AUTOMATICALLY ADDED FOR gaudi/Gaudi!763 */ ); - return base_class::finalize(); -} - -StatusCode UTReadoutTool::writeMappingToXML() const { - - // load conditions - Condition* rInfo = getDet<Condition>( m_conditionLocation ); - - std::ofstream outputFile( m_outputFileName.value() ); - if ( outputFile.fail() ) { return Warning( "Failed to open output file", StatusCode::FAILURE ); } - - // write the xml headers - outputFile << header( rInfo->toXml( "", true, m_precision ) ) << '\n'; - - // add comments - std::ostringstream comment; - UT::XMLUtils::fullComment( comment, m_author, m_tag, m_desc ); - outputFile << comment.str() << '\n'; - - std::string temp = strip( rInfo->toXml( "", false, m_precision ) ); - outputFile << temp << "\n\n"; - - // footer - outputFile << footer() << '\n'; - - return StatusCode::SUCCESS; -} - -unsigned int UTReadoutTool::nBoard() const { - // number of boards - return m_nBoard; -} - -unsigned int UTReadoutTool::nServiceBox() const { return m_serviceBoxes.size(); } - -std::string UTReadoutTool::serviceBox( const LHCb::UTChannelID& aChan ) const { - - // find the board - - static const std::string InValidBox = "Unknown"; - bool isFound = false; - unsigned int waferIndex = 999u; - unsigned int iBoard = m_firstBoardInRegion[region( aChan )]; - while ( ( iBoard != m_nBoard ) && ( isFound == false ) ) { - if ( m_boards[iBoard]->isInside( aChan, waferIndex ) ) { - isFound = true; - } else { - ++iBoard; - } - } // iBoard - return ( isFound ? m_boards[iBoard]->serviceBoxes()[waferIndex] : InValidBox ); -} - -std::vector<UTTell1ID> UTReadoutTool::boardIDs() const { - std::vector<UTTell1ID> ids; - ids.reserve( m_boards.size() ); - std::transform( m_boards.begin(), m_boards.end(), std::back_inserter( ids ), - []( const auto& b ) { return b->boardID(); } ); - return ids; -} - -UTDAQ::chanPair UTReadoutTool::offlineChanToDAQ( const UTChannelID aOfflineChan, double isf ) const { - // look up region start..... 
- unsigned int iBoard = m_firstBoardInRegion[region( aOfflineChan )]; - unsigned int waferIndex = 999u; - - bool isFound = false; - while ( ( iBoard != m_nBoard ) && !isFound ) { - if ( m_boards[iBoard]->isInside( aOfflineChan, waferIndex ) ) { - isFound = true; - } else { - ++iBoard; - } - } // iBoard - - if ( !isFound ) { - return {UTTell1ID( UTTell1ID::nullBoard, false ), 0}; - } else { - return {m_boards[iBoard]->boardID(), m_boards[iBoard]->offlineToDAQ( aOfflineChan, waferIndex, isf )}; - } -} - -double UTReadoutTool::interStripToDAQ( const UTChannelID aOfflineChan, const UTTell1ID aBoardID, - const double isf ) const { - unsigned int waferIndex = 999u; - - auto aBoard = findByBoardID( aBoardID ); - double newisf = 0; - - if ( aBoard->isInside( aOfflineChan, waferIndex ) ) { - unsigned int orientation = aBoard->orientation()[waferIndex]; - if ( orientation == 0 && isf > 0.01 ) { - newisf = 1 - isf; - } else { - newisf = isf; - } - } else { // Can not find board! - newisf = -1; - } - - return newisf; -} - -bool UTReadoutTool::ADCOfflineToDAQ( const UTChannelID aOfflineChan, const UTTell1ID aBoardID, - UTCluster::ADCVector& adcs ) const { - unsigned int waferIndex = 999u; - auto aBoard = findByBoardID( aBoardID ); - - if ( !aBoard->isInside( aOfflineChan, waferIndex ) ) return false; // can not find board! - - if ( aBoard->orientation()[waferIndex] == 0 ) { std::reverse( std::begin( adcs ), std::end( adcs ) ); } - return true; -} - -UTTell1Board* UTReadoutTool::findByBoardID( const UTTell1ID aBoardID ) const { - // find by board id - try { - return m_boardsMap.at( aBoardID ); - } catch ( std::out_of_range& e ) { return nullptr; } -} - -UTTell1Board* UTReadoutTool::findByOrder( const unsigned int aValue ) const { - // find by order - return aValue < m_nBoard ? m_boards[aValue].get() : nullptr; -} - -void UTReadoutTool::printMapping() const { - // dump out the readout mapping - info() << "print mapping for: " << name() << " tool" << endmsg; - info() << " Number of boards " << m_nBoard << endmsg; - for ( const auto& b : m_boards ) info() << *b << endmsg; -} - -/// Add the mapping of source ID to TELL1 board number -unsigned int UTReadoutTool::SourceIDToTELLNumber( unsigned int sourceID ) const { - return ( this->SourceIDToTELLNumberMap().find( sourceID ) )->second; -} - -/// Add the mapping of TELL1 board number to source ID -unsigned int UTReadoutTool::TELLNumberToSourceID( unsigned int TELL ) const { - return ( this->TELLNumberToSourceIDMap().find( TELL ) )->second; -} - -StatusCode UTReadoutTool::validate() const { - // validate the map - every sector must go somewhere ! 
- const auto& dSectors = m_tracker->sectors(); - return StatusCode{std::none_of( std::begin( dSectors ), std::end( dSectors ), [this]( const DeUTSector* s ) { - UTChannelID chan = s->elementID(); - auto chanPair = offlineChanToDAQ( chan, 0.0 ); - return chanPair.first == UTTell1ID( UTTell1ID::nullBoard, false ); - } )}; -} - -std::vector<LHCb::UTChannelID> UTReadoutTool::sectorIDs( const UTTell1ID board ) const { - - std::vector<LHCb::UTChannelID> sectors; - sectors.reserve( 8 ); - auto theBoard = findByBoardID( board ); - if ( theBoard ) { - sectors.insert( sectors.begin(), theBoard->sectorIDs().begin(), theBoard->sectorIDs().end() ); - } else { - Error( "Failed to find Board", StatusCode::SUCCESS, 100 ).ignore( /* AUTOMATICALLY ADDED FOR gaudi/Gaudi!763 */ ); - } - return sectors; -} - -std::vector<DeUTSector*> UTReadoutTool::sectors( const UTTell1ID board ) const { - - return m_tracker->findSectors( sectorIDs( board ) ); -} - -std::vector<DeUTSector*> UTReadoutTool::sectorsOnServiceBox( const std::string& serviceBox ) const { - - return m_tracker->findSectors( sectorIDsOnServiceBox( serviceBox ) ); -} - -std::vector<LHCb::UTChannelID> UTReadoutTool::sectorIDsOnServiceBox( const std::string& serviceBox ) const { - // loop over all boards - std::vector<LHCb::UTChannelID> sectors; - sectors.reserve( 16 ); - for ( const auto& board : m_boards ) { - const auto& sectorVec = board->sectorIDs(); - const auto& sBoxes = board->serviceBoxes(); - for ( unsigned int iS = 0u; iS < board->nSectors(); ++iS ) { - if ( sBoxes[iS] == serviceBox ) sectors.push_back( sectorVec[iS] ); - } // iS - } // iterB - return sectors; -} - -const std::vector<std::string>& UTReadoutTool::serviceBoxes() const { return m_serviceBoxes; } - -std::string UTReadoutTool::footer() const { - std::string temp = m_footer; - temp.insert( 0, "</catalog>" ); - return temp; -} - -std::string UTReadoutTool::header( const std::string& conString ) const { - // get the header - auto startpos = conString.find( m_startTag ); - auto temp = conString.substr( 0, startpos ); - temp.insert( startpos, "<catalog name=\"ReadoutSectors\">" ); - - // correct the location of the DTD - if ( m_removeCondb ) { - UT::XMLUtils::replace( temp, "conddb:", "" ); - std::string location; - for ( unsigned int i = 0; i < m_depth; ++i ) location += "../"; - auto pos = temp.find( "/DTD/" ); - temp.insert( pos, location ); - UT::XMLUtils::replace( temp, "//", "/" ); - } - - return temp; -} - -std::string UTReadoutTool::strip( const std::string& conString ) const { - auto startpos = conString.find( m_startTag ); - auto endpos = conString.find( m_footer ); - return conString.substr( startpos, endpos - startpos ); -} - -unsigned int UTReadoutTool::region( const UTChannelID aChan ) const { - // convert channel to region - return aChan.station() == 1 ? 
aChan.layer() - 1 : m_nRegionA + aChan.layer() - 1; -} - -// Add the mapping of source ID to TELL1 board number -const std::map<unsigned int, unsigned int>& UTReadoutTool::SourceIDToTELLNumberMap() const { - return UTBoardMapping::UTSourceIDToNumberMap(); -} - -// Add the mapping of TELL1 board number to source ID -const std::map<unsigned int, unsigned int>& UTReadoutTool::TELLNumberToSourceIDMap() const { - return UTBoardMapping::UTNumberToSourceIDMap(); -} - -StatusCode UTReadoutTool::createTell1Map() { - auto rInfo = getDet<Condition>( m_conditionLocation ); - const auto& layers = rInfo->param<std::vector<std::string>>( "layers" ); - - UTBoardMapping::ClearUTMap(); - - unsigned int sourceIDBase = 0; - for ( unsigned int iReg = 0; iReg < layers.size(); ++iReg ) { - std::string tell1Loc = layers[iReg] + "TELL1"; - if ( rInfo->exists( tell1Loc ) ) { - // printf("Extracting TELL1 map from %s\n", tell1Loc.c_str()); - - const auto& tell1 = rInfo->param<std::vector<int>>( tell1Loc ); - for ( unsigned int i = 0; i < tell1.size(); i++ ) { - UTBoardMapping::AddUTMapEntry( sourceIDBase + i, tell1.at( i ) ); - } - } - sourceIDBase += 64; - } - - return StatusCode::SUCCESS; -} - -StatusCode UTReadoutTool::createBoards() { - - bool isUT = true; - clear(); - - // load conditions - auto rInfo = getDet<Condition>( m_conditionLocation ); - - // vector of layer types - // const std::vector<std::string>& layers = rInfo->paramAsStringVect("layers"); - const auto layers = rInfo->param<std::vector<std::string>>( "layers" ); - const auto nBoards = rInfo->paramAsIntVect( "nBoardsPerLayer" ); - - m_hybridsPerBoard = rInfo->param<int>( "hybridsPerBoard" ); - m_nRegionA = rInfo->param<int>( "nRegionsInUTa" ); - const auto nStripsPerHybrid = UTDAQ::nStripsPerBoard / m_hybridsPerBoard; - - for ( unsigned int iReg = 0; iReg < layers.size(); ++iReg ) { - - assert( iReg < layers.size() ); - assert( iReg < nBoards.size() ); - - m_firstBoardInRegion.push_back( m_boards.size() ); - m_nBoard += nBoards[iReg]; - - const auto& tMap = rInfo->param<std::vector<int>>( layers[iReg] ); - const auto& orientation = rInfo->param<std::vector<int>>( layers[iReg] + "HybridOrientation" ); - const auto& serviceBoxes = rInfo->param<std::vector<std::string>>( layers[iReg] + "ServiceBox" ); - - unsigned int vecLoc = 0; - assert( !tMap.empty() ); - if ( 0 == iReg ) { m_firstStation = UTChannelID( tMap[0] ).station(); } - - for ( unsigned int iBoard = 0; iBoard < (unsigned int)nBoards[iReg]; ++iBoard ) { - - // make new board - const UTTell1ID anID( iReg, iBoard, isUT ); - auto aBoard = std::make_unique<UTTell1Board>( anID, nStripsPerHybrid, "UT" ); - - for ( unsigned iH = 0; iH < m_hybridsPerBoard; ++iH, ++vecLoc ) { - assert( vecLoc < tMap.size() ); - assert( vecLoc < orientation.size() ); - assert( vecLoc < serviceBoxes.size() ); - if ( 0 != tMap[vecLoc] ) { // skip strange 0's in conditions vector !! - UTChannelID sectorID( (unsigned int)tMap[vecLoc] ); - aBoard->addSector( sectorID, (unsigned int)orientation[vecLoc], serviceBoxes[vecLoc] ); - - // add to the list of service boxs if not already there - if ( std::find( m_serviceBoxes.begin(), m_serviceBoxes.end(), serviceBoxes[vecLoc] ) == - m_serviceBoxes.end() ) { - m_serviceBoxes.push_back( serviceBoxes[vecLoc] ); - } - } - } // iH - - m_boards.push_back( std::move( aBoard ) ); - - if ( m_boardsMap.find( anID ) == m_boardsMap.end() ) { m_boardsMap[anID] = m_boards.back().get(); } - - } // boards per region - } // iterS - - // validate the mapping --> all sectors should go somewhere ! 
- const auto sc = validate(); - return ( sc.isFailure() ? Error( "Failed to validate mapping", sc ) : sc ); -} -- GitLab From dc4e701b1bd3161074077a6fc22a15128c6181f0 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 6 May 2020 15:48:26 +0200 Subject: [PATCH 015/111] Add index field to PrMutUTHits and function to calculate the planeCode --- Pr/PrKernel/PrKernel/PrMutUTHits.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index 98e34a72530..67ad2e8f868 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -14,6 +14,12 @@ #include "LHCbMath/SIMDWrapper.h" #include "UTDAQ/UTInfo.h" +/** Mutable UT hit class for internal use in pattern recognition algorithms + * + * @author Michel De Cian + * @date 2020-04-06 + */ + namespace LHCb::Pr::UT { namespace Mut { @@ -30,6 +36,7 @@ namespace LHCb::Pr::UT { alignas( 64 ) std::array<float, max_hits> sins; alignas( 64 ) std::array<float, max_hits> weights; alignas( 64 ) std::array<int, max_hits> channelIDs; + alignas( 64 ) std::array<int, max_hits> indexs; std::array<int, UTInfo::TotalLayers> layerIndices; @@ -41,6 +48,17 @@ namespace LHCb::Pr::UT { SOA_ACCESSOR( sin, sins.data() ) SOA_ACCESSOR( weight, weights.data() ) SOA_ACCESSOR( channelID, channelIDs.data() ) + SOA_ACCESSOR( index, indexs.data() ) + + /// Retrieve the plane code + template <typename T> + T planeCode( int t ) const { + T id = channelID<T>( t ); + T station = ( id & UTInfo::StationMask ) >> UTInfo::StationBits; + T layer = ( id & UTInfo::LayerMask ) >> UTInfo::LayerBits; + + return 2 * ( station - 1 ) + ( layer - 1 ); + } }; } // namespace Mut -- GitLab From f2858fc897854f21e38de11cf4f322fa007a1748 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 6 May 2020 15:51:48 +0200 Subject: [PATCH 016/111] fixed formatting --- Pr/PrKernel/PrKernel/PrMutUTHits.h | 8 ++++---- Pr/PrVeloUT/src/PrVeloUT.cpp | 9 +++++---- Pr/PrVeloUT/src/PrVeloUT.h | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index 67ad2e8f868..0d7afa034ed 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -49,14 +49,14 @@ namespace LHCb::Pr::UT { SOA_ACCESSOR( weight, weights.data() ) SOA_ACCESSOR( channelID, channelIDs.data() ) SOA_ACCESSOR( index, indexs.data() ) - + /// Retrieve the plane code template <typename T> T planeCode( int t ) const { - T id = channelID<T>( t ); + T id = channelID<T>( t ); T station = ( id & UTInfo::StationMask ) >> UTInfo::StationBits; - T layer = ( id & UTInfo::LayerMask ) >> UTInfo::LayerBits; - + T layer = ( id & UTInfo::LayerMask ) >> UTInfo::LayerBits; + return 2 * ( station - 1 ) + ( layer - 1 ); } }; diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 89e2103fe8e..a7845330b00 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -726,10 +726,11 @@ namespace LHCb::Pr { // ============================================================================== // -- Method that finds the hits in a given layer within a certain range // ============================================================================== - inline void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, const simd::float_v& ty, - const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, - 
const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, - const int firstIndex, const int lastIndex ) const { + inline void VeloUT::findHits( const LHCb::Pr::UT::HitHandler& hh, const simd::float_v& yProto, + const simd::float_v& ty, const simd::float_v& tx, const simd::float_v& xOnTrackProto, + const simd::float_v& tolProto, const simd::float_v& xTolNormFact, + LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, const int firstIndex, + const int lastIndex ) const { const LHCb::Pr::UT::Hits& myHits = hh.hits(); diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index bec720f4e25..3b656d59800 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -276,7 +276,7 @@ namespace LHCb::Pr { const simd::float_v& tx, const simd::float_v& xOnTrackProto, const simd::float_v& tolProto, const simd::float_v& xTolNormFact, LHCb::Pr::UT::Mut::Hits& mutHits, const simd::float_v& yTol, const int firstIndex, const int lastIndex ) const; - + template <bool forward> bool formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, TrackHelper& helper ) const; -- GitLab From ada9e9b3400e478f00e4b47e73b9a9861845b4c9 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Sun, 10 May 2020 16:59:00 +0200 Subject: [PATCH 017/111] change the name of hit indices container --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 37 ++++++++++--------- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 4 +- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 34 +++++------------ .../src/SciFiTrackForwarding.cpp | 17 ++++++--- 4 files changed, 43 insertions(+), 49 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 2603451903c..c5027fe8365 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2213,29 +2213,35 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& int uttrack = cand.track(); + /// TO DO: change the LHCbID to be index + std::vector<LHCb::LHCbID> utid; + utid.reserve( 30 ); if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { result.store_trackVP<I>( currentsize, input_tracks.template trackVP<I>( uttrack ) ); result.store_trackUT<I>( currentsize, uttrack ); - const int veloidx = input_tracks.template trackVP<I>( uttrack ).cast(); - const int velohits = ( *velo_ancestors ).template nHits<I>( veloidx ).cast(); - for ( int idx{0}; idx < velohits; ++idx ) { - result.store_velohit_indices<I>( currentsize, idx, ( *velo_ancestors ).template hit<I>( veloidx, idx ) ); + const int vpidx = input_tracks.template trackVP<I>( uttrack ).cast(); + const int vphits = ( *velo_ancestors ).template nHits<I>( vpidx ).cast(); + for ( int idx{0}; idx < vphits; ++idx ) { + result.store_vp_index<I>( currentsize, idx, ( *velo_ancestors ).template hit<I>( vpidx, idx ) ); } + result.store_nVPHits<I>( currentsize, vphits ); /// TO Do: currently for UT hits, LHCbIDs are stored const int uthits = input_tracks.template nHits<I>( uttrack ).cast(); for ( int idx{0}; idx < uthits; ++idx ) { - result.store_uthit_indices<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); + result.store_ut_index<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); } + result.store_nUTHits<I>( currentsize, uthits ); } else { result.store_trackVP<I>( currentsize, uttrack ); result.store_trackUT<I>( currentsize, -1 ); - const int velohits = input_tracks.template nHits<I>( uttrack ).cast(); - 
for ( int idx{0}; idx < velohits; ++idx ) { - result.store_velohit_indices<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); + const int vphits = input_tracks.template nHits<I>( uttrack ).cast(); + for ( int idx{0}; idx < vphits; ++idx ) { + result.store_vp_index<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); } + result.store_nVPHits<I>( currentsize, vphits ); // only used to disable unused warning in the velo track input case // uttrack = input_tracks.size(); } @@ -2255,9 +2261,6 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_statePos<F>( currentsize, pos ); result.store_stateDir<F>( currentsize, dir ); - /// TO DO: change the LHCbID to be indices - std::vector<LHCb::LHCbID> utid; - utid.reserve( 4 ); if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { if ( m_addUTHitsTool.isEnabled() ) { double chi2{0}; @@ -2276,22 +2279,22 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& } } for ( size_t idx{0}; idx < utid.size(); ++idx ) { - result.store_uthit_indices<I>( currentsize, idx, utid[idx].lhcbID() ); + result.store_ut_index<I>( currentsize, idx, utid[idx].lhcbID() ); } + result.store_nUTHits<I>( currentsize, utid.size() ); } //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } - // result.store_nHits<I>( currentsize, id.size() ); + // TO Do: should we save the total hits size of this track + result.store_nHits<I>( currentsize, id.size() ); //== hits indices, max_fthits=15, not sure if we need this. // assert(id.size()<=15 && "Container cannot store more than 15 hits per track") auto const& ihits = cand.ihits(); - result.store_nHits<I>( currentsize, ihits.size() ); - for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_fthit_indices<I>( currentsize, idx, ihits[idx] ); } + result.store_nFTHits<I>( currentsize, ihits.size() ); + for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_ft_index<I>( currentsize, idx, ihits[idx] ); } - // TO Do: not sure which size to be stored - // result.store_nHits<I>( currentsize, (id.size()+utid.size()+veloid.size()) ); result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Forward::Tracks::max_tracks ) ) { // FIXME: find a better way to define diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index 60258df24ea..41b8da4faf5 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -93,9 +93,9 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi /// mark used SciFi Hits for ( int t = 0; t < tracks.size(); t += simd::size ) { - const int nfthits = tracks.nHits<I>( t ).cast(); + const int nfthits = tracks.nFTHits<I>( t ).cast(); for ( int id = 0; id != nfthits; id++ ) { - auto idx = tracks.fthit_indices<I>( t, id ).cast(); + auto idx = tracks.ft_index<I>( t, id ).cast(); if ( idx != 0 ) used[idx] = true; } } diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index 23da3e92cad..682b574dfe7 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -35,6 +35,7 @@ #include "Kernel/AllocatorUtils.h" #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" +#include "boost/dynamic_bitset.hpp" #include <memory> 
//----------------------------------------------------------------------------- @@ -72,41 +73,26 @@ PrResidualVeloTracks::PrResidualVeloTracks( const std::string& name, ISvcLocator LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& tracks, const VeloTracks& velotracks ) const { - // using simd = SIMDWrapper::avx256::types; using simd = SIMDWrapper::scalar::types; using I = SIMDWrapper::scalar::types::int_v; auto tmp = LHCb::make_obj_propagating_allocator<LHCb::Pr::Velo::Tracks>( tracks, Zipping::generateZipIdentifier() ); - /* - if ( tracks.empty() ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; - return tmp; - } - */ - if ( velotracks.empty() ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Velo Track container '" << inputLocation<1>() << "' is empty" << endmsg; return tmp; } - for ( int t = 0; t < velotracks.size(); t += simd::size ) { - // auto loop_mask = simd :: loop_mask (t , velotracks.size()); - - bool usedtrack = false; - for ( int itrack = 0; itrack < tracks.size(); itrack += simd::size ) { - const auto veloidx = tracks.trackVP<I>( itrack ); - if ( t == veloidx ) { - usedtrack = true; - break; - } - } - if ( usedtrack ) continue; - - // auto mask = (!usedtrack) && loop_mask; - auto mask = ( !usedtrack ); + const unsigned int nvelo = velotracks.size(); + boost::dynamic_bitset<> used{nvelo, false}; + for ( int itrack = 0; itrack < tracks.size(); itrack += simd::size ) { + const auto veloidx = tracks.trackVP<I>( itrack ).cast(); + used[veloidx] = true; + } + for ( int t = 0; t < velotracks.size(); t += simd::size ) { + if ( used[t] ) continue; + auto mask = ( !used[t] ); tmp.copy_back<simd>( velotracks, t, mask ); } return tmp; diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 729c4e60fbc..8621f1dd6ca 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -670,25 +670,30 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac // store Velo hit indices LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); - const int velohits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); - for ( auto idx{0}; idx < velohits; ++idx ) { - Output.compressstore_velohit_indices<sI>( i, idx, mask, ( *velo_ancestors ).hit<sI>( velotrack, idx ) ); + const int vphits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); + for ( auto idx{0}; idx < vphits; ++idx ) { + Output.compressstore_vp_index<sI>( i, idx, mask, ( *velo_ancestors ).hit<sI>( velotrack, idx ) ); } + Output.compressstore_nVPHits<sI>( i, mask, vphits ); // TO Do: change the LHCbIDs to hit indices const int uthits = tracks.nHits<sI>( uttrack + tr ).cast(); for ( int idx{0}; idx < uthits; ++idx ) { - Output.compressstore_uthit_indices<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr, idx ) ); + Output.compressstore_ut_index<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr, idx ) ); } + Output.compressstore_nUTHits<sI>( i, mask, uthits ); int n_hits = 0; for ( auto idx{bestcandidate.ids.begin()}; idx != bestcandidate.ids.end(); ++idx, ++n_hits ) { + //To do: store the LHCbIDs of scifi hits, should we save the lhcbIDs of all hits of this tracks or none of them? 
Output.compressstore_hit<sI>( i, n_hits, mask, hithandler.IDs[*idx] ); /// FT hit indices - Output.compressstore_fthit_indices<sI>( i, n_hits, mask, *idx ); + Output.compressstore_ft_index<sI>( i, n_hits, mask, *idx ); } - + Output.compressstore_nFTHits<sI>( i, mask, bestcandidate.ids.size() ); + // TO do: bestcandidate.numHits is the total number of scifi hits, should we save number of all hits of this track? Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits ); + //Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits + uthits + vphits ); // AtT State float const endT_z = cache.LayerZPos[8]; -- GitLab From 139899f13bcda84ea174948da78451c8cb8f6c51 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Sun, 10 May 2020 14:59:48 +0000 Subject: [PATCH 018/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8311656 --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 3 +-- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 4 ++-- Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp | 10 ++++++---- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index c5027fe8365..af0ff635032 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -2287,7 +2287,7 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& //== LHCb ids. for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } // TO Do: should we save the total hits size of this track - result.store_nHits<I>( currentsize, id.size() ); + result.store_nHits<I>( currentsize, id.size() ); //== hits indices, max_fthits=15, not sure if we need this. 
// assert(id.size()<=15 && "Container cannot store more than 15 hits per track") @@ -2295,7 +2295,6 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_nFTHits<I>( currentsize, ihits.size() ); for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_ft_index<I>( currentsize, idx, ihits[idx] ); } - result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Forward::Tracks::max_tracks ) ) { // FIXME: find a better way to define // size of container diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index 682b574dfe7..855dcbdd945 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -83,12 +83,12 @@ LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& track return tmp; } - const unsigned int nvelo = velotracks.size(); + const unsigned int nvelo = velotracks.size(); boost::dynamic_bitset<> used{nvelo, false}; for ( int itrack = 0; itrack < tracks.size(); itrack += simd::size ) { const auto veloidx = tracks.trackVP<I>( itrack ).cast(); - used[veloidx] = true; + used[veloidx] = true; } for ( int t = 0; t < velotracks.size(); t += simd::size ) { if ( used[t] ) continue; diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 8621f1dd6ca..637c32f56aa 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -670,7 +670,7 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac // store Velo hit indices LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); - const int vphits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); + const int vphits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); for ( auto idx{0}; idx < vphits; ++idx ) { Output.compressstore_vp_index<sI>( i, idx, mask, ( *velo_ancestors ).hit<sI>( velotrack, idx ) ); } @@ -685,15 +685,17 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac int n_hits = 0; for ( auto idx{bestcandidate.ids.begin()}; idx != bestcandidate.ids.end(); ++idx, ++n_hits ) { - //To do: store the LHCbIDs of scifi hits, should we save the lhcbIDs of all hits of this tracks or none of them? + // To do: store the LHCbIDs of scifi hits, should we save the lhcbIDs of all hits of this tracks or none of + // them? Output.compressstore_hit<sI>( i, n_hits, mask, hithandler.IDs[*idx] ); /// FT hit indices Output.compressstore_ft_index<sI>( i, n_hits, mask, *idx ); } Output.compressstore_nFTHits<sI>( i, mask, bestcandidate.ids.size() ); - // TO do: bestcandidate.numHits is the total number of scifi hits, should we save number of all hits of this track? + // TO do: bestcandidate.numHits is the total number of scifi hits, should we save number of all hits of this + // track? 
Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits ); - //Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits + uthits + vphits ); + // Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits + uthits + vphits ); // AtT State float const endT_z = cache.LayerZPos[8]; -- GitLab From afbc9e765ba096a8ab736a095e44e9c5883f6a1b Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 12 May 2020 22:15:11 +0200 Subject: [PATCH 019/111] update matching --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 149 ++++++++++++++++++++++-------- Pr/PrAlgorithms/src/PrMatchNN.h | 89 ++++++++++++++++-- 2 files changed, 193 insertions(+), 45 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 92042cf3665..e4d25025cf9 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,8 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"VeloHitsLocation", "Raw/VP/Hits"}, + KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,11 +49,14 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; @@ -65,9 +69,50 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + for ( int v = 0; v != velos.size(); v++ ) { + + auto mlpCounterBuf = m_tracksMLP.buffer(); + auto chi2CounterBuf = m_tracksChi2.buffer(); + + const int EndVelo = 1; + auto velo_pos = velos.statePos<F>( v, EndVelo ); + auto velo_dir = velos.stateDir<F>( v, EndVelo ); + + const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); + + const int EndT3 = 3; + for ( int s = 0; s != seeds.size(); s++ ) { + auto seed_pos = seeds.statePos<F>( s, EndT3 ); + auto seed_dir = seeds.stateDir<F>( s, EndT3 ); + + const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + + const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); + + if ( chi2 < m_maxChi2 ) { + + const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); + mlpCounterBuf += mlp; + chi2CounterBuf += chi2; + if ( mlp > m_minNN ) { + auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); + + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( match ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + } + } + } + 
} + + /* std::vector<MatchCandidate> cands; cands.reserve( seeds.size() ); + + + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // -- make pairs of Velo track and state @@ -76,8 +121,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - - /* + + for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -182,34 +227,37 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track m_tracksCount += matches.size(); return matches; } + //============================================================================= // -float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const { - const float tx2 = vState.tx() * vState.tx(); - const float ty2 = vState.ty() * vState.ty(); +float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { + + const float tx2 = vState_dir.x.cast() * vState_dir.x.cast(); + const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); - const float dSlope = vState.tx() - sState.tx(); + const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); if ( std::abs( dSlope ) > 1.5 ) return 99.; - const float dSlopeY = vState.ty() - sState.ty(); + const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); if ( std::abs( dSlopeY ) > 0.15 ) return 99.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + - m_zMagParams[3] * std::abs( sState.x() ) + m_zMagParams[4] * vState.tx() * vState.tx(); + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + + m_zMagParams[4] * vState_dir.x.cast() * vState_dir.x.cast(); const float dxTol2 = m_dxTol * m_dxTol; const float dxTolSlope2 = m_dxTolSlope * m_dxTolSlope; - const float xV = vState.x() + ( zForX - vState.z() ) * vState.tx(); + const float xV = vState_pos.x.cast() + ( zForX - vState_pos.z.cast() ) * vState_dir.x.cast(); // -- This is the function that calculates the 'bending' in y-direction // -- The parametrisation can be derived with the MatchFitParams package - const float yV = vState.y() + ( m_zMatchY - vState.z() ) * vState.ty() + - vState.ty() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); + const float yV = vState_pos.y.cast() + ( m_zMatchY - vState_pos.z.cast() ) * vState_dir.y.cast() + + vState_dir.y.cast() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); - const float xS = sState.x() + ( zForX - sState.z() ) * sState.tx(); - const float yS = sState.y() + ( m_zMatchY - sState.z() ) * sState.ty(); + const float xS = sState_pos.x.cast() + ( zForX - sState_pos.z.cast() ) * sState_dir.x.cast(); + const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; if ( std::abs( distX ) > 400 ) return 99.; @@ -237,40 +285,67 @@ float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sSt return chi2; } -PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMatchNN::Track& seed ) const { +PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) 
const { auto output = Track{}; - output.addToAncestors( velo ); - output.addToAncestors( seed ); + + // output.addToAncestors( velo ); + // output.addToAncestors( seed ); + //== Adjust flags output.setType( Track::Type::Long ); output.setHistory( Track::History::PrMatch ); output.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); + //== copy LHCbIDs - output.addToLhcbIDs( velo.lhcbIDs(), LHCb::Tag::Sorted ); - output.addToLhcbIDs( seed.lhcbIDs(), LHCb::Tag::Sorted ); + int nSeedHits = seeds.nHits<I>( s ).cast(); + std::vector<LHCb::LHCbID> seedlhcbIDs; + seedlhcbIDs.reserve( nSeedHits ); + + for ( int i = 0; i < nSeedHits; ++i ) { seedlhcbIDs.emplace_back( seeds.hit<I>( s, i ).cast() ); } + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Sorted ); + + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Unordered ); + output.addToLhcbIDs( velos.lhcbIDs( v, veloHits ), LHCb::Tag::Unordered ); + //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; newstates.reserve( 6 ); - if ( velo.hasStateAt( LHCb::State::Location::ClosestToBeam ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::ClosestToBeam ) ); - if ( velo.hasStateAt( LHCb::State::Location::FirstMeasurement ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::FirstMeasurement ) ); - if ( velo.hasStateAt( LHCb::State::Location::EndVelo ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::EndVelo ) ); - newstates.push_back( seed.closestState( StateParameters::ZBegT ) ); - newstates.push_back( seed.closestState( StateParameters::ZMidT ) ); + auto state_beam = getVeloState( velos, v, 0 ); + state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_endvelo = getVeloState( velos, v, 1 ); + state_endvelo.setLocation( LHCb::State::Location::EndVelo ); + + auto state_firstmeas = getVeloState( velos, v, 2 ); + state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_beam ); + newstates.push_back( state_endvelo ); + newstates.push_back( state_firstmeas ); + + auto state_begT = getSeedState( seeds, s, 0 ); + state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_midT = getSeedState( seeds, s, 1 ); + state_midT.setLocation( LHCb::State::Location::EndVelo ); + + auto state_endT = getSeedState( seeds, s, 2 ); + state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_begT ); + + newstates.push_back( state_midT ); // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } - newstates.push_back( seed.closestState( StateParameters::ZEndT ) ); + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; + + newstates.push_back( state_endT ); // make sure we don't include same state twice if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } //== estimate q/p - double qOverP, sigmaQOverP; - bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); - const LHCb::State& vState = velo.closestState( 0. 
); - const LHCb::State& sState = seed.closestState( m_zMatchY ); - StatusCode sc = m_fastMomentumTool->calculate( &vState, &sState, qOverP, sigmaQOverP, cubicFit ); + double qOverP, sigmaQOverP; + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + + StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { Warning( "momentum determination failed!", sc ).ignore(); // assume the Velo/T station standalone reco do something reasonable @@ -282,7 +357,9 @@ PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMat st.setQOverP( qOverP ); } } + //== add copied states to output track output.addToStates( newstates, LHCb::Tag::Unordered ); + return output; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 50071ecad47..922921cc620 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -13,9 +13,9 @@ // Include files // from Gaudi -#include "Event/Track_v2.h" -#include "Event/PrVeloTracks.h" #include "Event/PrSeedTracks.h" +#include "Event/PrVeloTracks.h" +#include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" @@ -39,7 +39,76 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { +namespace { + using dType = SIMDWrapper::scalar::types; + using I = dType::int_v; + using F = dType::float_v; + + using SeedTracks = LHCb::Pr::Seeding::Tracks; + using VeloTracks = LHCb::Pr::Velo::Tracks; + using Hits = LHCb::Pr::Velo::Hits; + + LHCb::State getVeloState( VeloTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + Vec3<F> covX = tracks.stateCovX<F>( t, index ); + Vec3<F> covY = tracks.stateCovY<F>( t, index ); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( 0. 
); + + c( 0, 0 ) = covX.x.cast(); + c( 2, 0 ) = covX.y.cast(); + c( 2, 2 ) = covX.z.cast(); + c( 1, 1 ) = covY.x.cast(); + c( 3, 1 ) = covY.y.cast(); + c( 3, 3 ) = covY.z.cast(); + c( 4, 4 ) = 1.f; + + state.setState( s ); + + state.setCovariance( c ); + + return state; + } + LHCb::State getSeedState( SeedTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + auto const qop = tracks.QoP<F>( t ).cast(); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( qop ); + + state.setState( s ); + + return state; + } + +} // namespace + +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, const LHCb::Pr::Seeding::Tracks& )> { + using Track = LHCb::Event::v2::Track; public: @@ -50,7 +119,8 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; + std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, + const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * @@ -79,12 +149,13 @@ public: }; private: - /// calculate matching chi^2 - float getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const; + // calculate matching chi^2 + float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; - /// merge velo and seed segment to output track - Track makeTrack( const Track& velo, const Track& seed ) const; + // merge velo and seed segment to output track + Track makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) const; Gaudi::Property<std::vector<double>> m_zMagParams{ this, "ZMagnetParams", {5287.6, -7.98878, 317.683, 0.0119379, -1418.42}}; -- GitLab From 0f85aa41b421c6cd1e96491d94503d04d596a145 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 13 May 2020 16:35:30 +0200 Subject: [PATCH 020/111] Add UT hit index to output of PrVeloUT --- Pr/PrVeloUT/src/PrVeloUT.cpp | 33 ++++++++++++++++++++++----------- Pr/PrVeloUT/src/PrVeloUT.h | 4 +++- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index a7845330b00..7cf61971303 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -407,16 +407,17 @@ namespace LHCb::Pr { int nHits = 0; // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 - for ( auto hitIndex : helper.bestIndices ) { - pTracks.store_x<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].xs[hitIndex] ); - pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitIndex] ); - pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitIndex] ); + for ( auto hitI : helper.bestIndices ) { + pTracks.store_x<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].xs[hitI] ); + pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitI] ); + 
pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitI] ); - scalar::float_v weight = ( hitIndex == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitIndex]; + scalar::float_v weight = ( hitIndex == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitI]; pTracks.store_weight<scalar::float_v>( trackIndex, nHits, weight ); - LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitIndex] ) ); + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitI] ) ); pTracks.store_id<scalar::int_v>( trackIndex, nHits, id.lhcbID() ); // not sure if correct + pTracks.store_hitIndex<scalar::int_v>( trackIndex, nHits, hitsInLayers[t2].indexs[hitI] ); // not sure if correct nHits++; } @@ -604,8 +605,8 @@ namespace LHCb::Pr { // -- and one without a correct sector, in which case the track will not be masked off. // -- However, these cases should happen very rarely simd::int_v sect = ( layerIndex < 2 ) - ? geom.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) - : geom.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); + ? geom.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) + : geom.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 @@ -767,6 +768,7 @@ namespace LHCb::Pr { myHits.cos<simd::float_v>( i ) * -1.0f * myHits.dxDy<simd::float_v>( i ) ); mutHits.compressstore_weight( index, mask, myHits.weight<simd::float_v>( i ) ); mutHits.compressstore_channelID( index, mask, myHits.channelID<simd::int_v>( i ) ); + mutHits.compressstore_index( index, mask, simd::indices(i)); // fill the index in the original hit container mutHits.size += simd::popcount( mask ); } } @@ -1001,16 +1003,22 @@ namespace LHCb::Pr { TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); - simd::int_v nHits{0}; + // -- this should count the hits in the Velo + simd::int_v nHits = outputTracks.nHits<simd::int_v>(t); + simd::int_v nUTHits{0}; for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); + simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); + outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); + nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); nHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, nHits ); + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nHits ); } // -- from here on, go over each track individually to find and add the overlap hits @@ -1045,10 +1053,13 @@ namespace LHCb::Pr { LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - int nHits = outputTracks.nHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + int nHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); if ( nHits > 30 ) continue; 
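As an aside on the per-layer bookkeeping used throughout this hunk: every proto track owns one slot per UT layer, a layer without a hit is flagged by weight 0, and the number of genuine UT hits is then recovered with a select over the "weight above threshold" mask (nUTHits += select( emptyHitMask, 1, 0 )). A scalar sketch of that pattern follows — the four-layer count and the 0.0001f threshold are taken from the code above, everything else is illustrative and not the real container:

    #include <array>
    #include <iostream>

    // One fixed slot per UT layer; weight 0 marks "no hit found in this layer".
    constexpr int nLayers = 4;

    // Scalar equivalent of nUTHits += select( weight > threshold, 1, 0 ).
    int countFilledLayers( const std::array<float, nLayers>& weights ) {
      int n = 0;
      for ( float w : weights ) n += ( w > 0.0001f ) ? 1 : 0;
      return n;
    }

    int main() {
      std::array<float, nLayers> weights{1.3f, 0.f, 0.8f, 2.1f}; // layer 1 empty
      std::cout << countFilledLayers( weights ) << " UT hits\n"; // prints "3 UT hits"
      return 0;
    }
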
outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); + outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, hitsInLayers[hitContIndex].hitIndexs[index2] ); outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); + outputTracks.compressstore_nUTHits<scalar::int_v>( trackIndex + trackIndex2, true, nUTHits + 1 ); // only one overlap hit // break; } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 3b656d59800..21cecd4c7d2 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -142,6 +142,7 @@ namespace LHCb::Pr { std::array<float, 4 * batchSize> weightss{}; // this needs to be zero-initialized std::array<float, 4 * batchSize> sins; std::array<int, 4 * batchSize> ids; + std::array<int, 4 * batchSize> hitIndexs; // -- this is the output of the fit std::array<float, batchSize> qps; @@ -162,7 +163,7 @@ namespace LHCb::Pr { std::array<float, batchSize> covys; std::array<float, batchSize> covzs; - // -- and this and index to find the hit containers + // -- and this an index to find the hit containers std::array<int, batchSize> hitContIndexs; std::size_t size{0}; @@ -171,6 +172,7 @@ namespace LHCb::Pr { SOA_ACCESSOR_VAR( weight, &( weightss[pos * batchSize] ), int pos ) SOA_ACCESSOR_VAR( sin, &( sins[pos * batchSize] ), int pos ) SOA_ACCESSOR_VAR( id, &( ids[pos * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( hitIndex, &( hitIndexs[hitIndex * batchSize] ), int pos ) SOA_ACCESSOR( qp, qps.data() ) SOA_ACCESSOR( chi2TT, chi2TTs.data() ) -- GitLab From 71f6fa1282ac6baa360802bbd8d877b8cc7a5976 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 13 May 2020 16:35:55 +0200 Subject: [PATCH 021/111] fix formatting --- Pr/PrVeloUT/src/PrVeloUT.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 7cf61971303..90a5d11e0a3 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -417,7 +417,8 @@ namespace LHCb::Pr { LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitI] ) ); pTracks.store_id<scalar::int_v>( trackIndex, nHits, id.lhcbID() ); // not sure if correct - pTracks.store_hitIndex<scalar::int_v>( trackIndex, nHits, hitsInLayers[t2].indexs[hitI] ); // not sure if correct + pTracks.store_hitIndex<scalar::int_v>( trackIndex, nHits, + hitsInLayers[t2].indexs[hitI] ); // not sure if correct nHits++; } @@ -605,8 +606,8 @@ namespace LHCb::Pr { // -- and one without a correct sector, in which case the track will not be masked off. // -- However, these cases should happen very rarely simd::int_v sect = ( layerIndex < 2 ) - ? geom.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) - : geom.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); + ? 
geom.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) + : geom.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 @@ -768,7 +769,7 @@ namespace LHCb::Pr { myHits.cos<simd::float_v>( i ) * -1.0f * myHits.dxDy<simd::float_v>( i ) ); mutHits.compressstore_weight( index, mask, myHits.weight<simd::float_v>( i ) ); mutHits.compressstore_channelID( index, mask, myHits.channelID<simd::int_v>( i ) ); - mutHits.compressstore_index( index, mask, simd::indices(i)); // fill the index in the original hit container + mutHits.compressstore_index( index, mask, simd::indices( i ) ); // fill the index in the original hit container mutHits.size += simd::popcount( mask ); } } @@ -1004,7 +1005,7 @@ namespace LHCb::Pr { txArray.store_txUT<simd::float_v>( 0, txUT ); // -- this should count the hits in the Velo - simd::int_v nHits = outputTracks.nHits<simd::int_v>(t); + simd::int_v nHits = outputTracks.nHits<simd::int_v>( t ); simd::int_v nUTHits{0}; for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { @@ -1054,10 +1055,11 @@ namespace LHCb::Pr { LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - int nHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + int nHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); if ( nHits > 30 ) continue; outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); - outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, hitsInLayers[hitContIndex].hitIndexs[index2] ); + outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, + hitsInLayers[hitContIndex].hitIndexs[index2] ); outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); outputTracks.compressstore_nUTHits<scalar::int_v>( trackIndex + trackIndex2, true, nUTHits + 1 ); // only one overlap hit -- GitLab From 98fc19217e8800551a167b3718347b0f0a1ac720 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 13 May 2020 17:11:14 +0200 Subject: [PATCH 022/111] fix typo, move statement outside of loop --- Pr/PrVeloUT/src/PrVeloUT.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 90a5d11e0a3..ab6c06bf343 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -1004,10 +1004,11 @@ namespace LHCb::Pr { TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); + + simd::int_v nUTHits{0}; // -- this should count the hits in the Velo simd::int_v nHits = outputTracks.nHits<simd::int_v>( t ); - simd::int_v nUTHits{0}; - + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); @@ -1018,10 +1019,11 @@ namespace LHCb::Pr { outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); nHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 
nUTHits ); - outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nHits ); } + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); + outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, nHits ); + // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... -- GitLab From a1f67d2487855b44ba3bc6be6f8021830f38c33e Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Thu, 14 May 2020 17:11:05 +0200 Subject: [PATCH 023/111] fix some typos with the hit indices --- Pr/PrVeloUT/src/PrVeloUT.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index ab6c06bf343..7bc8ee43394 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -412,7 +412,7 @@ namespace LHCb::Pr { pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitI] ); pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitI] ); - scalar::float_v weight = ( hitIndex == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitI]; + scalar::float_v weight = ( hitI == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitI]; pTracks.store_weight<scalar::float_v>( trackIndex, nHits, weight ); LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitI] ) ); @@ -1014,7 +1014,7 @@ namespace LHCb::Pr { simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); + //simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); @@ -1057,11 +1057,11 @@ namespace LHCb::Pr { LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - int nHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + int nHits = outputTracks.nHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); if ( nHits > 30 ) continue; outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, - hitsInLayers[hitContIndex].hitIndexs[index2] ); + hitsInLayers[hitContIndex].indexs[index2] ); outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); outputTracks.compressstore_nUTHits<scalar::int_v>( trackIndex + trackIndex2, true, nUTHits + 1 ); // only one overlap hit -- GitLab From 8e1bc4333092f6d82a85d54fc2bcd1d528145482 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Thu, 14 May 2020 17:20:16 +0200 Subject: [PATCH 024/111] more messing with nHits and nUTHits, etc. 
new copyBack function to store elements in their own array with a mask --- Pr/PrVeloUT/src/PrVeloUT.cpp | 39 +++++++----------------------------- Pr/PrVeloUT/src/PrVeloUT.h | 19 +++++++++++++++++- 2 files changed, 25 insertions(+), 33 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 7bc8ee43394..b95886d3804 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -642,27 +642,9 @@ namespace LHCb::Pr { // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - simd::float_v x = filteredStates.x<simd::float_v>( t ); - simd::float_v y = filteredStates.y<simd::float_v>( t ); - simd::float_v z = filteredStates.z<simd::float_v>( t ); - simd::float_v tx = filteredStates.tx<simd::float_v>( t ); - simd::float_v ty = filteredStates.ty<simd::float_v>( t ); - simd::float_v covx = filteredStates.covx<simd::float_v>( t ); - simd::float_v covy = filteredStates.covy<simd::float_v>( t ); - simd::float_v covz = filteredStates.covz<simd::float_v>( t ); - simd::int_v trackIndex = filteredStates.index<simd::int_v>( t ); - - auto index = filteredStates.size; - filteredStates.compressstore_x<simd::float_v>( index, compressMask, x ); - filteredStates.compressstore_y<simd::float_v>( index, compressMask, y ); - filteredStates.compressstore_z<simd::float_v>( index, compressMask, z ); - filteredStates.compressstore_tx<simd::float_v>( index, compressMask, tx ); - filteredStates.compressstore_ty<simd::float_v>( index, compressMask, ty ); - filteredStates.compressstore_covx<simd::float_v>( index, compressMask, covx ); - filteredStates.compressstore_covy<simd::float_v>( index, compressMask, covy ); - filteredStates.compressstore_covz<simd::float_v>( index, compressMask, covz ); - filteredStates.compressstore_index<simd::int_v>( index, compressMask, trackIndex ); - filteredStates.size += simd::popcount( compressMask ); + filteredStates.copyBack(t, compressMask); + + } return compBoundsArray; @@ -1006,24 +988,20 @@ namespace LHCb::Pr { simd::int_v nUTHits{0}; - // -- this should count the hits in the Velo - simd::int_v nHits = outputTracks.nHits<simd::int_v>( t ); for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); - simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); + //simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - //simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); + // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - nHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); } outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); - outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, nHits ); - + // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... 
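As an aside on the idiom behind the new copyBack helper called a few lines above: a masked compress-store keeps only the lanes whose mask bit is set and packs them contiguously at the write position, which is what keeps the filtered states in sync with the sectors without nine separate compressstore calls. A minimal scalar stand-in, using std::array and std::bitset rather than the SIMDWrapper types (all names here are illustrative, not the real API), could look like this:

    #include <array>
    #include <bitset>
    #include <cstddef>
    #include <iostream>

    // Scalar stand-in for a masked compress-store: for every lane whose mask bit
    // is set, copy src[at + lane] to dst[pos + k], with k counting only survivors.
    // Assumes the write region trails the read region (pos <= at), as in copyBack.
    template <std::size_t Width, typename T, std::size_t N>
    std::size_t compress_store( const std::array<T, N>& src, std::size_t at, std::bitset<Width> mask,
                                std::array<T, N>& dst, std::size_t pos ) {
      std::size_t written = 0;
      for ( std::size_t lane = 0; lane < Width; ++lane )
        if ( mask[lane] ) dst[pos + written++] = src[at + lane];
      return written; // the popcount of the mask
    }

    int main() {
      // One column of a structure-of-arrays container, compressed in place.
      std::array<float, 8> xs{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
      std::size_t          size = 0;      // accepted entries so far
      std::bitset<4>       keep{0b1010};  // lanes 1 and 3 survive the filter
      size += compress_store( xs, 4, keep, xs, size );
      std::cout << "kept " << size << " values: " << xs[0] << ", " << xs[1] << '\n'; // 5, 7
      return 0;
    }
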
@@ -1057,13 +1035,10 @@ namespace LHCb::Pr { LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - int nHits = outputTracks.nHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); if ( nHits > 30 ) continue; - outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); + //outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, hitsInLayers[hitContIndex].indexs[index2] ); - outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); - outputTracks.compressstore_nUTHits<scalar::int_v>( trackIndex + trackIndex2, true, nUTHits + 1 ); // only one overlap hit // break; } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 21cecd4c7d2..32f07328b32 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -101,6 +101,23 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) + + // -- Copy back the entries, but with a filtering mask + void copyBack(std::size_t at, simd::mask_v mask){ + simd::float_v( &xs[at]).compressstore( mask, &xs[size] ); + simd::float_v( &ys[at]).compressstore( mask, &ys[size] ); + simd::float_v( &zs[at]).compressstore( mask, &zs[size] ); + simd::float_v( &txs[at]).compressstore( mask, &txs[size] ); + simd::float_v( &tys[at]).compressstore( mask, &tys[size] ); + simd::float_v( &covxs[at]).compressstore( mask, &covxs[size] ); + simd::float_v( &covys[at]).compressstore( mask, &covys[size] ); + simd::float_v( &covzs[at]).compressstore( mask, &covzs[size] ); + simd::int_v( &indexs[at]).compressstore( mask, &indexs[size] ); + size += simd::popcount( mask ); + } + + + }; struct ExtrapolatedStates final { @@ -172,7 +189,7 @@ namespace LHCb::Pr { SOA_ACCESSOR_VAR( weight, &( weightss[pos * batchSize] ), int pos ) SOA_ACCESSOR_VAR( sin, &( sins[pos * batchSize] ), int pos ) SOA_ACCESSOR_VAR( id, &( ids[pos * batchSize] ), int pos ) - SOA_ACCESSOR_VAR( hitIndex, &( hitIndexs[hitIndex * batchSize] ), int pos ) + SOA_ACCESSOR_VAR( hitIndex, &( hitIndexs[pos * batchSize] ), int pos ) SOA_ACCESSOR( qp, qps.data() ) SOA_ACCESSOR( chi2TT, chi2TTs.data() ) -- GitLab From 5aea31fbb31bce24969b5c7a1f0dcbabc67380b6 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Fri, 15 May 2020 18:31:28 +0200 Subject: [PATCH 025/111] add indices/lhcbIDs to tracks --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 61 +++++++++++-------- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 6 +- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 4 +- Pr/PrAlgorithms/src/PrUpstreamFromVelo.cpp | 5 +- .../src/TrackCompactVertexToV1Vertex.cpp | 2 +- .../src/fromPrFittedTrackTrackv2.cpp | 20 +++--- Pr/PrKernel/PrKernel/PrPixelFastKalman.h | 6 +- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 11 +++- Pr/PrPixel/src/VeloKalman.cpp | 6 +- Pr/PrPixel/src/VeloKalmanHelpers.h | 14 ++--- .../src/SciFiTrackForwarding.cpp | 34 +++++------ Tr/TrackUtils/src/TracksUTConverter.cpp | 4 +- Tr/TrackUtils/src/TracksVPConverter.cpp | 6 +- 13 files changed, 95 insertions(+), 84 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 
af0ff635032..868ed1cb703 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -20,7 +20,7 @@ #include "GaudiKernel/extends.h" // from LHCb -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/StateParameters.h" #include "Event/Track.h" #include "Event/Track_v2.h" @@ -531,13 +531,13 @@ namespace { template <typename T> class PrForwardTracking - : public Gaudi::Functional::Transformer<LHCb::Pr::Forward::Tracks( SciFiHits::PrSciFiHits const&, T const&, + : public Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( SciFiHits::PrSciFiHits const&, T const&, ZoneCache const& ), LHCb::DetDesc::usesConditions<ZoneCache>> { public: using PrSciFiHits = SciFiHits::PrSciFiHits; using base_class_t = - Gaudi::Functional::Transformer<LHCb::Pr::Forward::Tracks( PrSciFiHits const&, T const&, ZoneCache const& ), + Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( PrSciFiHits const&, T const&, ZoneCache const& ), LHCb::DetDesc::usesConditions<ZoneCache>>; using base_class_t::addConditionDerivation; using base_class_t::debug; @@ -589,7 +589,7 @@ public: } /// main call - LHCb::Pr::Forward::Tracks operator()( PrSciFiHits const&, T const&, ZoneCache const& ) const override final; + LHCb::Pr::Long::Tracks operator()( PrSciFiHits const&, T const&, ZoneCache const& ) const override final; private: // Parameters for debugging @@ -713,7 +713,7 @@ private: // save good tracks template <typename Container> - LHCb::Pr::Forward::Tracks makeLHCbTracks( Container const& trackCandidates, + LHCb::Pr::Long::Tracks makeLHCbTracks( Container const& trackCandidates, std::vector<std::vector<LHCb::LHCbID>> ids, T const& ) const; // ==================================================================================== @@ -776,7 +776,7 @@ DECLARE_COMPONENT_WITH_ID( PrForwardTracking<LHCb::Pr::Velo::Tracks>, "PrForward // Main execution //============================================================================= template <typename T> -LHCb::Pr::Forward::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSciFiHits, T const& input_tracks, +LHCb::Pr::Long::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSciFiHits, T const& input_tracks, ZoneCache const& cache ) const { if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Execute" << endmsg; @@ -2201,11 +2201,11 @@ bool PrForwardTracking<T>::selectStereoHits( PrForwardTrack<>& track, const PrSc //========================================================================= template <typename T> template <typename Container> -LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& trackCandidates, +LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& trackCandidates, std::vector<std::vector<LHCb::LHCbID>> ids, T const& input_tracks ) const { auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); - LHCb::Pr::Forward::Tracks result( velo_ancestors, upstream_ancestors ); + LHCb::Pr::Long::Tracks result( velo_ancestors, upstream_ancestors ); for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids ) ) { int const currentsize = result.size(); @@ -2216,30 +2216,38 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& /// TO DO: change the LHCbID to be index std::vector<LHCb::LHCbID> utid; utid.reserve( 30 ); + auto n_vphits = 0; + auto n_uthits = 0; if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { result.store_trackVP<I>( currentsize, 
input_tracks.template trackVP<I>( uttrack ) ); result.store_trackUT<I>( currentsize, uttrack ); const int vpidx = input_tracks.template trackVP<I>( uttrack ).cast(); const int vphits = ( *velo_ancestors ).template nHits<I>( vpidx ).cast(); - for ( int idx{0}; idx < vphits; ++idx ) { - result.store_vp_index<I>( currentsize, idx, ( *velo_ancestors ).template hit<I>( vpidx, idx ) ); - } - result.store_nVPHits<I>( currentsize, vphits ); - /// TO Do: currently for UT hits, LHCbIDs are stored const int uthits = input_tracks.template nHits<I>( uttrack ).cast(); - for ( int idx{0}; idx < uthits; ++idx ) { - result.store_ut_index<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); - } + n_vphits = vphits; + n_uthits = uthits; + result.store_nVPHits<I>( currentsize, vphits ); result.store_nUTHits<I>( currentsize, uthits ); + for ( auto idx{0}; idx < vphits; ++idx ) { + result.store_vp_index<I>( currentsize, idx, input_tracks.template vp_index<I>( vpidx, idx ) ); + } + for ( auto idx{0}; idx < uthits; ++idx ) { + result.store_ut_index<I>( currentsize, idx, input_tracks.template ut_index<I>( uttrack, idx ) ); + } + for ( auto idx{0}; idx < vphits + uthits; ++idx ) { + result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); + } } else { result.store_trackVP<I>( currentsize, uttrack ); result.store_trackUT<I>( currentsize, -1 ); const int vphits = input_tracks.template nHits<I>( uttrack ).cast(); + n_vphits = vphits; for ( int idx{0}; idx < vphits; ++idx ) { - result.store_vp_index<I>( currentsize, idx, input_tracks.template hit<I>( uttrack, idx ) ); + result.store_vp_index<I>( currentsize, idx, input_tracks.template vp_index<I>( uttrack, idx ) ); + result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); } result.store_nVPHits<I>( currentsize, vphits ); // only used to disable unused warning in the velo track input case @@ -2268,7 +2276,7 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& vState.setState( cand.seed().x0, cand.seed().y0, cand.seed().z0, cand.seed().tx, cand.seed().ty, qOverP ); auto uthits = m_addUTHitsTool->returnUTHits( vState, chi2, vState.p() ); // There are candidates with more than 8 UT hits. To be understood. Better protect this.... - if ( uthits.size() < 3 || uthits.size() > 20 ) { + if ( uthits.size() < 3 || uthits.size() > 16 ) { if ( msgLevel( MSG::DEBUG ) ) debug() << " Failure in adding UT hits to track" << endmsg; } else { for ( auto const hit : uthits ) id.emplace_back( hit.HitPtr->chanID() ); @@ -2280,23 +2288,24 @@ LHCb::Pr::Forward::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& } for ( size_t idx{0}; idx < utid.size(); ++idx ) { result.store_ut_index<I>( currentsize, idx, utid[idx].lhcbID() ); + result.store_lhcbID<I>( currentsize, n_vphits + idx, utid[idx].lhcbID() ); } + n_uthits = utid.size(); result.store_nUTHits<I>( currentsize, utid.size() ); } - //== LHCb ids. - for ( size_t idx{0}; idx < id.size(); ++idx ) { result.store_hit<I>( currentsize, idx, id[idx].lhcbID() ); } - // TO Do: should we save the total hits size of this track - result.store_nHits<I>( currentsize, id.size() ); - //== hits indices, max_fthits=15, not sure if we need this. 
- // assert(id.size()<=15 && "Container cannot store more than 15 hits per track") + assert(id.size()<=15 && "Container cannot store more than 15 SciFi hits per track"); + auto const& ihits = cand.ihits(); result.store_nFTHits<I>( currentsize, ihits.size() ); - for ( size_t idx{0}; idx < ihits.size(); ++idx ) { result.store_ft_index<I>( currentsize, idx, ihits[idx] ); } + for ( size_t idx{0}; idx < ihits.size(); ++idx ) { + result.store_ft_index<I>( currentsize, idx, ihits[idx] ); + result.store_lhcbID<I>( currentsize, n_vphits + n_uthits + idx, id[idx].lhcbID() ); + } result.size() += 1; - if ( UNLIKELY( result.size() == LHCb::Pr::Forward::Tracks::max_tracks ) ) { // FIXME: find a better way to define + if ( UNLIKELY( result.size() == LHCb::Pr::Long::Tracks::max_tracks ) ) { // FIXME: find a better way to define // size of container ++m_maxTracksErr; break; // FIXME: do something smarter than this diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index 41b8da4faf5..f083cdafb98 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -10,7 +10,7 @@ \*****************************************************************************/ // Include files #include "Event/ODIN.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/Track.h" #include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" @@ -43,8 +43,8 @@ namespace { } class PrResidualSciFiHits - : public Gaudi::Functional::Transformer<PrSciFiHits( const LHCb::Pr::Forward::Tracks&, const PrSciFiHits& )> { - using Tracks = LHCb::Pr::Forward::Tracks; + : public Gaudi::Functional::Transformer<PrSciFiHits( const LHCb::Pr::Long::Tracks&, const PrSciFiHits& )> { + using Tracks = LHCb::Pr::Long::Tracks; public: PrResidualSciFiHits( const std::string& name, ISvcLocator* pSvcLocator ); diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index 855dcbdd945..f51bb816ee3 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -24,7 +24,7 @@ #include "PrKernel/VeloPixelInfo.h" #include "VPDet/DeVP.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" @@ -46,7 +46,7 @@ // //----------------------------------------------------------------------------- -typedef LHCb::Pr::Forward::Tracks LongTracks; +typedef LHCb::Pr::Long::Tracks LongTracks; typedef LHCb::Pr::Velo::Tracks VeloTracks; class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( const LongTracks&, const VeloTracks& )> { diff --git a/Pr/PrAlgorithms/src/PrUpstreamFromVelo.cpp b/Pr/PrAlgorithms/src/PrUpstreamFromVelo.cpp index f0e4e39c7ca..2909df9ce31 100644 --- a/Pr/PrAlgorithms/src/PrUpstreamFromVelo.cpp +++ b/Pr/PrAlgorithms/src/PrUpstreamFromVelo.cpp @@ -43,7 +43,8 @@ namespace Pr { // Assign q/p assuming q=+1 and pT is 'AssumedPT' auto txy2 = dir.x * dir.x + dir.y * dir.y; auto qop = invAssumedPT * sqrt( txy2 / ( 1 + txy2 ) ); - outputTracks.compressstore_nHits<I>( i, mask, 0 ); + outputTracks.compressstore_nUTHits<I>( i, mask, 0 ); + outputTracks.compressstore_nVPHits<I>( i, mask, 0 ); outputTracks.compressstore_trackVP<I>( i, mask, dType::indices( i ) ); outputTracks.compressstore_statePos<F>( i, mask, pos ); outputTracks.compressstore_stateDir<F>( i, mask, dir ); @@ -58,4 +59,4 @@ namespace Pr { }; } // namespace Pr 
-DECLARE_COMPONENT_WITH_ID( Pr::UpstreamFromVelo, "PrUpstreamFromVelo" ) \ No newline at end of file +DECLARE_COMPONENT_WITH_ID( Pr::UpstreamFromVelo, "PrUpstreamFromVelo" ) diff --git a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp index 694f71bcf0d..247f5424789 100644 --- a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp +++ b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp @@ -119,7 +119,7 @@ namespace LHCb::Converters::TrackCompactVertex { auto converted_vertex = create_vertex( vertex ); ; for ( int i = 0; i < 2; ++i ) { - auto ids = tracks.lhcbIDs( vertex.child_relations()[i].index(), velo_hits ); + auto ids = tracks.lhcbIDs( vertex.child_relations()[i].index() ); // The LHCb::Event::v1::Track::containsLhcbIDs method implicitly // assumes that the input IDs are sorted; ordering is not guaranteed // by the fitted tracks so we must do that here diff --git a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp index 2933f6b4e83..1c5016f1027 100644 --- a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp +++ b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp @@ -19,9 +19,8 @@ #include "Event/Track.h" #include "Event/PrFittedForwardTracks.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/PrUpstreamTracks.h" -#include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" #include "Event/PrZip.h" #include "SelKernel/TrackZips.h" @@ -66,9 +65,8 @@ namespace { return state; } - std::vector<LHCb::Event::v2::Track> convert_tracks( LHCb::Pr::Forward::Tracks const& forward_tracks, + std::vector<LHCb::Event::v2::Track> convert_tracks( LHCb::Pr::Long::Tracks const& forward_tracks, LHCb::Pr::Fitted::Forward::Tracks const& fitted_tracks, - LHCb::Pr::Velo::Hits const& velo_hits, std::array<float, 5> const covarianceValues ) { std::vector<LHCb::Event::v2::Track> out; out.reserve( fitted_tracks.size() ); @@ -110,7 +108,7 @@ namespace { fitted_tracks.chi2nDof<I>( t ).cast()} ); // If we rely on pointers internally stored in the classes we can take it from fitted tracks - auto lhcbids = fitted_tracks.lhcbIDs( t, velo_hits ); + auto lhcbids = fitted_tracks.lhcbIDs( t ); newTrack.addToLhcbIDs( lhcbids, LHCb::Tag::Unordered_tag{} ); } return out; @@ -121,20 +119,20 @@ namespace { namespace LHCb::Converters::Track::v2 { template <typename FittedTrackType> class fromPrFittedForwardTrack : public Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( - const FittedTrackType&, const Pr::Velo::Hits& )> { + const FittedTrackType& )> { public: using base_class = - Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( const FittedTrackType&, const Pr::Velo::Hits& )>; + Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( const FittedTrackType& )>; using KeyValue = typename base_class::KeyValue; fromPrFittedForwardTrack( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class( name, pSvcLocator, {KeyValue{"FittedTracks", ""}, KeyValue{"VeloHits", ""}}, + : base_class( name, pSvcLocator, {KeyValue{"FittedTracks", ""}}, KeyValue{"OutputTracks", ""} ) {} Gaudi::Property<std::array<float, 5>> m_covarianceValues{this, "covarianceValues", default_covarianceValues}; - std::vector<Event::v2::Track> operator()( const FittedTrackType& fitted_tracks_like, - const Pr::Velo::Hits& velo_hits ) const override { + std::vector<Event::v2::Track> operator()( const FittedTrackType& fitted_tracks_like ) const override { + auto const& fitted_tracks = 
get_fitted_tracks( fitted_tracks_like ); auto const* forward_tracks = fitted_tracks.getForwardAncestors(); if ( forward_tracks == nullptr ) { @@ -144,7 +142,7 @@ namespace LHCb::Converters::Track::v2 { return std::vector<Event::v2::Track>{}; } std::vector<Event::v2::Track> out = - convert_tracks( *forward_tracks, fitted_tracks, velo_hits, m_covarianceValues ); + convert_tracks( *forward_tracks, fitted_tracks, m_covarianceValues ); m_nbTracksCounter += out.size(); return out; } diff --git a/Pr/PrKernel/PrKernel/PrPixelFastKalman.h b/Pr/PrKernel/PrKernel/PrPixelFastKalman.h index a68d1f9e934..712ddd3d6b6 100644 --- a/Pr/PrKernel/PrKernel/PrPixelFastKalman.h +++ b/Pr/PrKernel/PrKernel/PrPixelFastKalman.h @@ -51,7 +51,7 @@ namespace PrPixel { const F wx = err * err; const F wy = wx; - I idxHit0 = tracksVP.gather_hit<I>( idxVP, 0 ); + I idxHit0 = tracksVP.gather_vp_index<I>( idxVP, 0 ); PrPixel::SimpleState<F> state; state.tx = tracksVP.gather_stateDir<F>( idxVP, 0 ).x; state.ty = tracksVP.gather_stateDir<F>( idxVP, 0 ).y; @@ -69,7 +69,7 @@ namespace PrPixel { for ( int i = 1; i < nHits.hmax(); i++ ) { // TODO: hit mask (for avx2/avx512) - I idxHit = tracksVP.gather_hit<I>( idxVP, i ); + I idxHit = tracksVP.gather_vp_index<I>( idxVP, i ); Vec3<F> hit = hits.gather_pos<F>( idxHit ); chi2 = chi2 + filter_with_momentum( state.pos.z, state.pos.x, state.tx, state.covXX, state.covXTx, @@ -157,4 +157,4 @@ namespace PrPixel { } // namespace PrPixel -#endif \ No newline at end of file +#endif diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index d5d77ac13ae..a5a44bab37c 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -27,6 +27,7 @@ #include "Kernel/VPChannelID.h" #include "VPDet/DeVP.h" #include "VPDet/VPDetPaths.h" +#include "Kernel/LHCbID.h" // Local #include "VPClusCache.h" @@ -854,7 +855,10 @@ namespace LHCb::Pr::Velo { tracksBackward.compressstore_stateDir( i, 0, backwards, dir ); for ( int h = 0; h < max_hits; h++ ) { - tracksBackward.compressstore_hit( i, h, backwards, tracks.hit<I>( t, h ) ); + tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); + auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + const auto lhcbid = hits.ChannelId<I>( hit_index ); + tracksForward.compressstore_lhcbID( i, h, backwards, lhcbid ); } tracksBackward.size() += simd::popcount( backwards ); @@ -872,7 +876,10 @@ namespace LHCb::Pr::Velo { tracksForward.compressstore_stateDir( i, 1, forwards, dir ); for ( int h = 0; h < max_hits; h++ ) { - tracksForward.compressstore_hit( i, h, forwards, tracks.hit<I>( t, h ) ); + tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); + auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + const auto lhcbid = hits.ChannelId<I>( hit_index ) ; + tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } tracksForward.size() += simd::popcount( forwards ); diff --git a/Pr/PrPixel/src/VeloKalman.cpp b/Pr/PrPixel/src/VeloKalman.cpp index 46fa19761d8..ec251e61785 100644 --- a/Pr/PrPixel/src/VeloKalman.cpp +++ b/Pr/PrPixel/src/VeloKalman.cpp @@ -19,7 +19,7 @@ #include "Event/Track.h" #include "Event/PrFittedForwardTracks.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" @@ -35,9 +35,9 @@ namespace LHCb::Pr::Velo { class Kalman : public 
Gaudi::Functional::Transformer<Fitted::Forward::Tracks( - const EventContext&, const Hits&, const Tracks&, const Forward::Tracks& )> { + const EventContext&, const Hits&, const Tracks&, const Long::Tracks& )> { using TracksVP = Tracks; - using TracksFT = Forward::Tracks; + using TracksFT = Long::Tracks; using TracksFit = Fitted::Forward::Tracks; using simd = SIMDWrapper::avx256::types; using I = simd::int_v; diff --git a/Pr/PrPixel/src/VeloKalmanHelpers.h b/Pr/PrPixel/src/VeloKalmanHelpers.h index e4fb554133d..7d8d63475e2 100644 --- a/Pr/PrPixel/src/VeloKalmanHelpers.h +++ b/Pr/PrPixel/src/VeloKalmanHelpers.h @@ -113,7 +113,7 @@ inline FittedState<F> fitBackward( const M track_mask, const LHCb::Pr::Velo::Tra const LHCb::Pr::Velo::Hits& hits, const int state_id ) { I nHits = tracks.nHits<I>( t ); int maxHits = nHits.hmax( track_mask ); - I idxHit0 = tracks.hit<I>( t, 0 ); + I idxHit0 = tracks.vp_index<I>( t, 0 ); Vec3<F> dir = tracks.stateDir<F>( t, state_id ); Vec3<F> pos = hits.maskgather_pos<F>( idxHit0, track_mask, 0.f ); @@ -124,7 +124,7 @@ inline FittedState<F> fitBackward( const M track_mask, const LHCb::Pr::Velo::Tra for ( int i = 1; i < maxHits; i++ ) { auto mask = track_mask && ( I( i ) < nHits ); - I idxHit = tracks.hit<I>( t, i ); + I idxHit = tracks.vp_index<I>( t, i ); Vec3<F> hit = hits.maskgather_pos<F>( idxHit, mask, 0.f ); s.covTxTx = select( mask, s.covTxTx + noise2PerLayer, s.covTxTx ); @@ -147,7 +147,7 @@ inline FittedState<F> fitForward( const M track_mask, const LHCb::Pr::Velo::Trac I nHits = tracks.nHits<I>( t ); int maxHits = nHits.hmax( track_mask ); auto mask = track_mask && I( maxHits - 1 ) < nHits; - I idxHit0 = tracks.hit<I>( t, maxHits - 1 ); + I idxHit0 = tracks.vp_index<I>( t, maxHits - 1 ); Vec3<F> dir = tracks.stateDir<F>( t, state_id ); Vec3<F> pos = hits.maskgather_pos<F>( idxHit0, mask, 0.f ); @@ -158,7 +158,7 @@ inline FittedState<F> fitForward( const M track_mask, const LHCb::Pr::Velo::Trac for ( int i = maxHits - 2; i >= 0; i-- ) { auto mask = track_mask && ( I( i ) < nHits ); - I idxHit = tracks.hit<I>( t, i ); + I idxHit = tracks.vp_index<I>( t, i ); Vec3<F> hit = hits.maskgather_pos<F>( idxHit, mask, 0.f ); s.covTxTx = select( mask, s.covTxTx + noise2PerLayer, s.covTxTx ); @@ -250,7 +250,7 @@ fitBackwardWithMomentum( const M track_mask, const LHCb::Pr::Velo::Tracks& track I nHits = tracks.maskgather_nHits<I, I>( idxVP, track_mask, 0 ); int maxHits = nHits.hmax( track_mask ); - I idxHit0 = tracks.maskgather_hit<I, I>( idxVP, track_mask, 0, 0 ); + I idxHit0 = tracks.maskgather_vp_index<I, I>( idxVP, track_mask, 0, 0 ); Vec3<F> dir = tracks.maskgather_stateDir<F, I>( idxVP, track_mask, 0.f, state_id ); Vec3<F> pos = hits.maskgather_pos<F, I>( idxHit0, track_mask, 0.f ); @@ -260,7 +260,7 @@ fitBackwardWithMomentum( const M track_mask, const LHCb::Pr::Velo::Tracks& track for ( int i = 1; i < maxHits; i++ ) { auto mask = track_mask && ( I( i ) < nHits ); - I idxHit = tracks.maskgather_hit<I, I>( idxVP, mask, I( 0 ), i ); + I idxHit = tracks.maskgather_vp_index<I, I>( idxVP, mask, I( 0 ), i ); Vec3<F> hit = hits.maskgather_pos<F, I>( idxHit, mask, 0.f ); chi2 = select( @@ -285,4 +285,4 @@ fitBackwardWithMomentum( const M track_mask, const LHCb::Pr::Velo::Tracks& track s.transportTo( s.zBeam() ); return {s, chi2, 2 * nHits - 4}; -} \ No newline at end of file +} diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 637c32f56aa..4fead11cc16 100644 --- 
a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -13,7 +13,7 @@ #include "DetDesc/ConditionAccessorHolder.h" #include "GaudiAlg/Transformer.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/PrUpstreamTracks.h" #include "Event/StateParameters.h" #include "Event/Track_v2.h" @@ -95,7 +95,7 @@ namespace { } using TracksUT = LHCb::Pr::Upstream::Tracks; - using TracksFT = LHCb::Pr::Forward::Tracks; + using TracksFT = LHCb::Pr::Long::Tracks; // constants for extrapolation polynomials from x hit in S3L0 // to the corresponding x hit in other stations and layers @@ -668,34 +668,30 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac Output.compressstore_stateQoP<sF>( i, mask, qop ); // store Velo hit indices - LHCb::Pr::Velo::Tracks const* velo_ancestors = tracks.getVeloAncestors(); - const int velotrack = tracks.trackVP<sI>( uttrack + tr ).cast(); - const int vphits = ( *velo_ancestors ).nHits<sI>( velotrack ).cast(); + const int vphits = tracks.nVPHits<sI>( uttrack + tr ).cast(); + const int uthits = tracks.nUTHits<sI>( uttrack + tr ).cast(); + Output.compressstore_nVPHits<sI>( i, mask, vphits ); + Output.compressstore_nUTHits<sI>( i, mask, uthits ); + for ( auto idx{0}; idx < vphits; ++idx ) { - Output.compressstore_vp_index<sI>( i, idx, mask, ( *velo_ancestors ).hit<sI>( velotrack, idx ) ); + Output.compressstore_vp_index<sI>( i, idx, mask, tracks.vp_index<sI>( uttrack + tr, idx ) ); } - Output.compressstore_nVPHits<sI>( i, mask, vphits ); - // TO Do: change the LHCbIDs to hit indices - const int uthits = tracks.nHits<sI>( uttrack + tr ).cast(); - for ( int idx{0}; idx < uthits; ++idx ) { - Output.compressstore_ut_index<sI>( i, idx, mask, tracks.hit<sI>( uttrack + tr, idx ) ); + + for ( auto idx{0}; idx < uthits; ++idx ) { + Output.compressstore_ut_index<sI>( i, idx, mask, tracks.ut_index<sI>( uttrack + tr, idx ) ); + } + for ( auto idx{0}; idx < vphits + uthits; ++idx ) { + Output.compressstore_lhcbID<sI>( i, idx, mask, tracks.lhcbID<sI>( uttrack + tr, idx ) ); } - Output.compressstore_nUTHits<sI>( i, mask, uthits ); int n_hits = 0; for ( auto idx{bestcandidate.ids.begin()}; idx != bestcandidate.ids.end(); ++idx, ++n_hits ) { - // To do: store the LHCbIDs of scifi hits, should we save the lhcbIDs of all hits of this tracks or none of - // them? - Output.compressstore_hit<sI>( i, n_hits, mask, hithandler.IDs[*idx] ); + Output.compressstore_lhcbID<sI>( i, vphits + uthits + n_hits, mask, hithandler.IDs[*idx] ); /// FT hit indices Output.compressstore_ft_index<sI>( i, n_hits, mask, *idx ); } Output.compressstore_nFTHits<sI>( i, mask, bestcandidate.ids.size() ); - // TO do: bestcandidate.numHits is the total number of scifi hits, should we save number of all hits of this - // track? 
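For orientation on the index bookkeeping in this patch: each long track ends up with per-detector hit counts and index slots plus one flat lhcbID list laid out as the VP block first, then the UT block starting at offset n_vphits, then the FT block starting at n_vphits + n_uthits — which is why the stores above use store_lhcbID( track, n_vphits + idx, ... ) and store_lhcbID( track, n_vphits + n_uthits + idx, ... ). A rough stand-alone sketch of that layout (a plain struct with an assumed capacity, not the real SoA track container):

    #include <array>
    #include <cstdint>
    #include <iostream>

    // Illustrative flat layout for one long track: hit counts per subdetector and
    // a single lhcbID list arranged as [VP hits][UT hits][FT hits].
    struct LongTrackIds {
      int                           nVP{0}, nUT{0}, nFT{0};
      std::array<std::uint32_t, 48> lhcbID{}; // capacity is an assumption of this sketch

      std::uint32_t* vpBlock() { return lhcbID.data(); }              // offset 0
      std::uint32_t* utBlock() { return lhcbID.data() + nVP; }        // offset nVP
      std::uint32_t* ftBlock() { return lhcbID.data() + nVP + nUT; }  // offset nVP + nUT
    };

    int main() {
      LongTrackIds t;
      t.nVP = 3; t.nUT = 2; t.nFT = 4; // counts stored first, then the blocks are filled
      for ( int i = 0; i < t.nVP; ++i ) t.vpBlock()[i] = 100 + i;
      for ( int i = 0; i < t.nUT; ++i ) t.utBlock()[i] = 200 + i;
      for ( int i = 0; i < t.nFT; ++i ) t.ftBlock()[i] = 300 + i;
      std::cout << "first FT id sits at offset " << t.nVP + t.nUT << '\n'; // 5
      return 0;
    }
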
- Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits ); - // Output.compressstore_nHits<sI>( i, mask, bestcandidate.numHits + uthits + vphits ); // AtT State float const endT_z = cache.LayerZPos[8]; diff --git a/Tr/TrackUtils/src/TracksUTConverter.cpp b/Tr/TrackUtils/src/TracksUTConverter.cpp index 4883f889a11..d94d1f34204 100644 --- a/Tr/TrackUtils/src/TracksUTConverter.cpp +++ b/Tr/TrackUtils/src/TracksUTConverter.cpp @@ -65,9 +65,9 @@ public: for ( auto& state : newTrack.states() ) state.setQOverP( tracksUT.stateQoP<F>( t ).cast() ); // Add LHCbIds - int n_hits = tracksUT.nHits<I>( t ).cast(); + I n_hits = tracksUT.template nHits<I>( t ); for ( int i = 0; i < n_hits; i++ ) { - int lhcbid = tracksUT.hit<I>( t, i ).cast(); + int lhcbid = tracksUT.lhcbID<I>( t, i ).cast(); newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); } diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index 818ed5a31ce..41fd709816e 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -106,7 +106,7 @@ public: for ( int t = 0; t < tracks.size(); t++ ) { auto& newTrack = out.emplace_back(); - newTrack.setLhcbIDs( tracks.lhcbIDs( t, hits ), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( tracks.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.states().reserve( 2 ); auto state_beam = getState( tracks, t, 0 ); state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); @@ -153,7 +153,7 @@ public: for ( int t = 0; t < fwd_tracks.size(); t++ ) { auto& newTrack = out.emplace_back(); - newTrack.setLhcbIDs( fwd_tracks.lhcbIDs( t, hits ), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( fwd_tracks.lhcbIDs( t), LHCb::Tag::Unordered ); newTrack.states().reserve( 2 ); auto state_beam = getState( fwd_tracks, t, 0 ); @@ -170,7 +170,7 @@ public: for ( int t = 0; t < bwd_tracks.size(); t++ ) { auto& newTrack = out.emplace_back(); - newTrack.setLhcbIDs( bwd_tracks.lhcbIDs( t, hits ), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( bwd_tracks.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.states().reserve( 1 ); auto state_beam = getState( bwd_tracks, t, 0 ); state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); -- GitLab From b828be42f62ded7bc2242336bf475f7becf94a3d Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Fri, 15 May 2020 16:32:15 +0000 Subject: [PATCH 026/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8398928 --- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 35 +++++++++---------- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 4 +-- .../src/fromPrFittedTrackTrackv2.cpp | 17 ++++----- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 10 +++--- Pr/PrPixel/src/VeloKalman.cpp | 4 +-- .../src/SciFiTrackForwarding.cpp | 2 +- Tr/TrackUtils/src/TracksVPConverter.cpp | 2 +- 7 files changed, 35 insertions(+), 39 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 868ed1cb703..21a41e1a93f 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -530,10 +530,9 @@ namespace { } // namespace template <typename T> -class PrForwardTracking - : public Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( SciFiHits::PrSciFiHits const&, T const&, - ZoneCache const& ), - LHCb::DetDesc::usesConditions<ZoneCache>> { +class PrForwardTracking : public Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( SciFiHits::PrSciFiHits const&, + T const&, ZoneCache const& ), + 
LHCb::DetDesc::usesConditions<ZoneCache>> { public: using PrSciFiHits = SciFiHits::PrSciFiHits; using base_class_t = @@ -713,8 +712,8 @@ private: // save good tracks template <typename Container> - LHCb::Pr::Long::Tracks makeLHCbTracks( Container const& trackCandidates, - std::vector<std::vector<LHCb::LHCbID>> ids, T const& ) const; + LHCb::Pr::Long::Tracks makeLHCbTracks( Container const& trackCandidates, std::vector<std::vector<LHCb::LHCbID>> ids, + T const& ) const; // ==================================================================================== // -- DEBUG HELPERS @@ -777,7 +776,7 @@ DECLARE_COMPONENT_WITH_ID( PrForwardTracking<LHCb::Pr::Velo::Tracks>, "PrForward //============================================================================= template <typename T> LHCb::Pr::Long::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSciFiHits, T const& input_tracks, - ZoneCache const& cache ) const { + ZoneCache const& cache ) const { if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Execute" << endmsg; @@ -2202,8 +2201,8 @@ bool PrForwardTracking<T>::selectStereoHits( PrForwardTrack<>& track, const PrSc template <typename T> template <typename Container> LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& trackCandidates, - std::vector<std::vector<LHCb::LHCbID>> ids, - T const& input_tracks ) const { + std::vector<std::vector<LHCb::LHCbID>> ids, + T const& input_tracks ) const { auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); LHCb::Pr::Long::Tracks result( velo_ancestors, upstream_ancestors ); @@ -2225,8 +2224,8 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& const int vpidx = input_tracks.template trackVP<I>( uttrack ).cast(); const int vphits = ( *velo_ancestors ).template nHits<I>( vpidx ).cast(); const int uthits = input_tracks.template nHits<I>( uttrack ).cast(); - n_vphits = vphits; - n_uthits = uthits; + n_vphits = vphits; + n_uthits = uthits; result.store_nVPHits<I>( currentsize, vphits ); result.store_nUTHits<I>( currentsize, uthits ); for ( auto idx{0}; idx < vphits; ++idx ) { @@ -2244,7 +2243,7 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_trackUT<I>( currentsize, -1 ); const int vphits = input_tracks.template nHits<I>( uttrack ).cast(); - n_vphits = vphits; + n_vphits = vphits; for ( int idx{0}; idx < vphits; ++idx ) { result.store_vp_index<I>( currentsize, idx, input_tracks.template vp_index<I>( uttrack, idx ) ); result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); @@ -2290,23 +2289,23 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_ut_index<I>( currentsize, idx, utid[idx].lhcbID() ); result.store_lhcbID<I>( currentsize, n_vphits + idx, utid[idx].lhcbID() ); } - n_uthits = utid.size(); + n_uthits = utid.size(); result.store_nUTHits<I>( currentsize, utid.size() ); } //== hits indices, max_fthits=15, not sure if we need this. 
- assert(id.size()<=15 && "Container cannot store more than 15 SciFi hits per track"); + assert( id.size() <= 15 && "Container cannot store more than 15 SciFi hits per track" ); auto const& ihits = cand.ihits(); result.store_nFTHits<I>( currentsize, ihits.size() ); - for ( size_t idx{0}; idx < ihits.size(); ++idx ) { - result.store_ft_index<I>( currentsize, idx, ihits[idx] ); - result.store_lhcbID<I>( currentsize, n_vphits + n_uthits + idx, id[idx].lhcbID() ); + for ( size_t idx{0}; idx < ihits.size(); ++idx ) { + result.store_ft_index<I>( currentsize, idx, ihits[idx] ); + result.store_lhcbID<I>( currentsize, n_vphits + n_uthits + idx, id[idx].lhcbID() ); } result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Long::Tracks::max_tracks ) ) { // FIXME: find a better way to define - // size of container + // size of container ++m_maxTracksErr; break; // FIXME: do something smarter than this } diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index f51bb816ee3..78ac365797e 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -46,8 +46,8 @@ // //----------------------------------------------------------------------------- -typedef LHCb::Pr::Long::Tracks LongTracks; -typedef LHCb::Pr::Velo::Tracks VeloTracks; +typedef LHCb::Pr::Long::Tracks LongTracks; +typedef LHCb::Pr::Velo::Tracks VeloTracks; class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( const LongTracks&, const VeloTracks& )> { diff --git a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp index 1c5016f1027..64aea9606d0 100644 --- a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp +++ b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp @@ -65,7 +65,7 @@ namespace { return state; } - std::vector<LHCb::Event::v2::Track> convert_tracks( LHCb::Pr::Long::Tracks const& forward_tracks, + std::vector<LHCb::Event::v2::Track> convert_tracks( LHCb::Pr::Long::Tracks const& forward_tracks, LHCb::Pr::Fitted::Forward::Tracks const& fitted_tracks, std::array<float, 5> const covarianceValues ) { std::vector<LHCb::Event::v2::Track> out; @@ -118,17 +118,15 @@ namespace { namespace LHCb::Converters::Track::v2 { template <typename FittedTrackType> - class fromPrFittedForwardTrack : public Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( - const FittedTrackType& )> { + class fromPrFittedForwardTrack + : public Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( const FittedTrackType& )> { public: - using base_class = - Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( const FittedTrackType& )>; - using KeyValue = typename base_class::KeyValue; + using base_class = Gaudi::Functional::Transformer<std::vector<Event::v2::Track>( const FittedTrackType& )>; + using KeyValue = typename base_class::KeyValue; fromPrFittedForwardTrack( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class( name, pSvcLocator, {KeyValue{"FittedTracks", ""}}, - KeyValue{"OutputTracks", ""} ) {} + : base_class( name, pSvcLocator, {KeyValue{"FittedTracks", ""}}, KeyValue{"OutputTracks", ""} ) {} Gaudi::Property<std::array<float, 5>> m_covarianceValues{this, "covarianceValues", default_covarianceValues}; std::vector<Event::v2::Track> operator()( const FittedTrackType& fitted_tracks_like ) const override { @@ -141,8 +139,7 @@ namespace LHCb::Converters::Track::v2 { << endmsg; return std::vector<Event::v2::Track>{}; } - 
std::vector<Event::v2::Track> out = - convert_tracks( *forward_tracks, fitted_tracks, m_covarianceValues ); + std::vector<Event::v2::Track> out = convert_tracks( *forward_tracks, fitted_tracks, m_covarianceValues ); m_nbTracksCounter += out.size(); return out; } diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index a5a44bab37c..7c8b6508297 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -23,11 +23,11 @@ #include "Event/PrVeloTracks.h" #include "Event/RawEvent.h" #include "Event/StateParameters.h" +#include "Kernel/LHCbID.h" #include "Kernel/STLExtensions.h" #include "Kernel/VPChannelID.h" #include "VPDet/DeVP.h" #include "VPDet/VPDetPaths.h" -#include "Kernel/LHCbID.h" // Local #include "VPClusCache.h" @@ -856,8 +856,8 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); - auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); - const auto lhcbid = hits.ChannelId<I>( hit_index ); + auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + const auto lhcbid = hits.ChannelId<I>( hit_index ); tracksForward.compressstore_lhcbID( i, h, backwards, lhcbid ); } @@ -877,8 +877,8 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); - auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); - const auto lhcbid = hits.ChannelId<I>( hit_index ) ; + auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + const auto lhcbid = hits.ChannelId<I>( hit_index ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } diff --git a/Pr/PrPixel/src/VeloKalman.cpp b/Pr/PrPixel/src/VeloKalman.cpp index ec251e61785..919b1b058af 100644 --- a/Pr/PrPixel/src/VeloKalman.cpp +++ b/Pr/PrPixel/src/VeloKalman.cpp @@ -34,8 +34,8 @@ */ namespace LHCb::Pr::Velo { - class Kalman : public Gaudi::Functional::Transformer<Fitted::Forward::Tracks( - const EventContext&, const Hits&, const Tracks&, const Long::Tracks& )> { + class Kalman : public Gaudi::Functional::Transformer<Fitted::Forward::Tracks( const EventContext&, const Hits&, + const Tracks&, const Long::Tracks& )> { using TracksVP = Tracks; using TracksFT = Long::Tracks; using TracksFit = Fitted::Forward::Tracks; diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 4fead11cc16..a8b617cddbc 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -681,7 +681,7 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac Output.compressstore_ut_index<sI>( i, idx, mask, tracks.ut_index<sI>( uttrack + tr, idx ) ); } for ( auto idx{0}; idx < vphits + uthits; ++idx ) { - Output.compressstore_lhcbID<sI>( i, idx, mask, tracks.lhcbID<sI>( uttrack + tr, idx ) ); + Output.compressstore_lhcbID<sI>( i, idx, mask, tracks.lhcbID<sI>( uttrack + tr, idx ) ); } int n_hits = 0; diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index 41fd709816e..bd70bba1b14 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -153,7 +153,7 @@ public: for ( int t = 0; t < fwd_tracks.size(); t++ ) { auto& newTrack = out.emplace_back(); - 
newTrack.setLhcbIDs( fwd_tracks.lhcbIDs( t), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( fwd_tracks.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.states().reserve( 2 ); auto state_beam = getState( fwd_tracks, t, 0 ); -- GitLab From 7cfe5baef05243c5c857d56e79001ad73d697257 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Sun, 17 May 2020 19:37:18 +0200 Subject: [PATCH 027/111] more changes for the new PrUpstreamTracks class --- Pr/PrVeloUT/src/PrVeloUT.cpp | 64 +++++++++++++++++++++++++----------- Pr/PrVeloUT/src/PrVeloUT.h | 3 +- 2 files changed, 46 insertions(+), 21 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index b95886d3804..d12f2159643 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -425,7 +425,7 @@ namespace LHCb::Pr { pTracks.size++; } - prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, bdlTable ); + prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } m_tracksCounter += outputTracks.size(); @@ -482,8 +482,8 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum - outputTracks.compressstore_nHits<simd::int_v>( i, outMask, 0 ); // no hits - + outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits + outputTracks.size() += simd::popcount( outMask ); } } @@ -852,7 +852,8 @@ namespace LHCb::Pr { template <typename BdlTable> void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const { + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + const BdlTable& bdlTable ) const { for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { @@ -969,6 +970,8 @@ namespace LHCb::Pr { simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; + // ========================================================================================== + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); auto pos = protoTracks.pos<simd::float_v>( t ); auto dir = protoTracks.dir<simd::float_v>( t ); @@ -980,28 +983,41 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); + + const simd::int_v nVPHits = inputTracks.maskgather_nHits<simd::int_v>( ancestor, validTrackMask, 0 ); + outputTracks.compressstore_nVPHits<simd::int_v>( trackIndex, validTrackMask, nVPHits ); + + // -- Store the hit information of the VP track in the Upstream track + for( std::size_t t2 = 0; t2 < simd::size; ++t2){ + if ( !testbit( validTrackMask, t2 ) ) continue; + // -- At this moment, we only have VP hits, so nHits == nVPHits + const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2).cast(); + const int nHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); + for(int iHit = 0; iHit < nHits; ++iHit){ + outputTracks.store_lhcbID<scalar::int_v>( trackIndex + t2, iHit, inputTracks.lhcbID<scalar::int_v>( iAncestor, iHit)); + outputTracks.store_vp_index<scalar::int_v>( trackIndex + t2, iHit, inputTracks.vp_index<scalar::int_v>( 
iAncestor, iHit)); + } + } + + // =========================================================================================== + // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); - // a simple helper class that facilitates changing from simd to scalar for the slope TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); - simd::int_v nUTHits{0}; for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); - simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); - //simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - - // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); - outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); + + simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); + outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); } outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); - // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... @@ -1010,9 +1026,17 @@ namespace LHCb::Pr { int trackIndex2 = 0; for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { if ( !testbit( validTrackMask, t2 ) ) continue; - + const std::size_t tscalar = t + t2; + const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2).cast(); + const int nVPHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); + + const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); + outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, iLayer + nVPHits, id ); + + // -- + const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); @@ -1031,14 +1055,14 @@ namespace LHCb::Pr { const float xextrap = xhit + txUTS * ( zohit - zhit ); if ( xohit - xextrap < -m_overlapTol ) continue; if ( xohit - xextrap > m_overlapTol ) break; - + + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + if ( nUTHits > 16 ) continue; // get this number from PrUpstreamTracks!!! 
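// -- Sketch of the overlap-hit append that follows (trk stands for trackIndex + trackIndex2): the new hit takes
// -- slot nUTHits of the ut_index block and slot nVPHits + nUTHits of the shared lhcbID block, and
// -- store_nUTHits( trk, nUTHits + 1 ) bumps the counter afterwards, which is why the capacity check above has to
// -- happen before the stores.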
+ + outputTracks.store_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, hitsInLayers[hitContIndex].indexs[index2] ); LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - - int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - if ( nHits > 30 ) continue; - //outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); - outputTracks.compressstore_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, true, - hitsInLayers[hitContIndex].indexs[index2] ); + outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, nVPHits + nUTHits, oid.lhcbID() ); + outputTracks.store_nUTHits<scalar::int_v>( trackIndex + trackIndex2, nUTHits+1 ); // only one overlap hit // break; } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 32f07328b32..ab731660ee0 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -302,7 +302,8 @@ namespace LHCb::Pr { template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const; + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From fe9f1fb94559cb98d6f2d63324a3e53d842aef5a Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Sun, 17 May 2020 19:57:27 +0200 Subject: [PATCH 028/111] oh pipeline though shalt not fail again --- Pr/PrVeloUT/src/PrVeloUT.cpp | 64 ++++++++++++++++++------------------ Pr/PrVeloUT/src/PrVeloUT.h | 27 +++++++-------- 2 files changed, 44 insertions(+), 47 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index d12f2159643..ea12c1c815b 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -482,8 +482,8 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum - outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits - + outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits + outputTracks.size() += simd::popcount( outMask ); } } @@ -642,9 +642,7 @@ namespace LHCb::Pr { // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - filteredStates.copyBack(t, compressMask); - - + filteredStates.copyBack( t, compressMask ); } return compBoundsArray; @@ -852,7 +850,7 @@ namespace LHCb::Pr { template <typename BdlTable> void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const { for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { @@ -971,7 +969,7 @@ namespace LHCb::Pr { simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; // ========================================================================================== - + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); auto pos = protoTracks.pos<simd::float_v>( t ); auto dir = protoTracks.dir<simd::float_v>( t ); @@ -983,36 
+981,37 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); - + const simd::int_v nVPHits = inputTracks.maskgather_nHits<simd::int_v>( ancestor, validTrackMask, 0 ); outputTracks.compressstore_nVPHits<simd::int_v>( trackIndex, validTrackMask, nVPHits ); - + // -- Store the hit information of the VP track in the Upstream track - for( std::size_t t2 = 0; t2 < simd::size; ++t2){ + for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { if ( !testbit( validTrackMask, t2 ) ) continue; // -- At this moment, we only have VP hits, so nHits == nVPHits - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2).cast(); - const int nHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); - for(int iHit = 0; iHit < nHits; ++iHit){ - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + t2, iHit, inputTracks.lhcbID<scalar::int_v>( iAncestor, iHit)); - outputTracks.store_vp_index<scalar::int_v>( trackIndex + t2, iHit, inputTracks.vp_index<scalar::int_v>( iAncestor, iHit)); + const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); + const int nHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); + for ( int iHit = 0; iHit < nHits; ++iHit ) { + outputTracks.store_lhcbID<scalar::int_v>( trackIndex + t2, iHit, + inputTracks.lhcbID<scalar::int_v>( iAncestor, iHit ) ); + outputTracks.store_vp_index<scalar::int_v>( trackIndex + t2, iHit, + inputTracks.vp_index<scalar::int_v>( iAncestor, iHit ) ); } } - - // =========================================================================================== + // =========================================================================================== // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); - + simd::int_v nUTHits{0}; - + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); - - simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - + + simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); + outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); } @@ -1026,15 +1025,15 @@ namespace LHCb::Pr { int trackIndex2 = 0; for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { if ( !testbit( validTrackMask, t2 ) ) continue; - + const std::size_t tscalar = t + t2; - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2).cast(); - const int nVPHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); + const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); + const int nVPHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, iLayer + nVPHits, id ); - + // -- const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); @@ -1055,14 +1054,15 @@ namespace LHCb::Pr { const float xextrap = xhit + txUTS * ( zohit - zhit ); if ( xohit - xextrap < -m_overlapTol ) continue; if ( xohit - xextrap > m_overlapTol ) break; - - int nUTHits = 
outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); if ( nUTHits > 16 ) continue; // get this number from PrUpstreamTracks!!! - - outputTracks.store_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, hitsInLayers[hitContIndex].indexs[index2] ); + + outputTracks.store_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, + hitsInLayers[hitContIndex].indexs[index2] ); LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, nVPHits + nUTHits, oid.lhcbID() ); - outputTracks.store_nUTHits<scalar::int_v>( trackIndex + trackIndex2, nUTHits+1 ); + outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, nVPHits + nUTHits, oid.lhcbID() ); + outputTracks.store_nUTHits<scalar::int_v>( trackIndex + trackIndex2, nUTHits + 1 ); // only one overlap hit // break; } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index ab731660ee0..67a917067d8 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -102,22 +102,19 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) - // -- Copy back the entries, but with a filtering mask - void copyBack(std::size_t at, simd::mask_v mask){ - simd::float_v( &xs[at]).compressstore( mask, &xs[size] ); - simd::float_v( &ys[at]).compressstore( mask, &ys[size] ); - simd::float_v( &zs[at]).compressstore( mask, &zs[size] ); - simd::float_v( &txs[at]).compressstore( mask, &txs[size] ); - simd::float_v( &tys[at]).compressstore( mask, &tys[size] ); - simd::float_v( &covxs[at]).compressstore( mask, &covxs[size] ); - simd::float_v( &covys[at]).compressstore( mask, &covys[size] ); - simd::float_v( &covzs[at]).compressstore( mask, &covzs[size] ); - simd::int_v( &indexs[at]).compressstore( mask, &indexs[size] ); + // -- Copy back the entries, but with a filtering mask + void copyBack( std::size_t at, simd::mask_v mask ) { + simd::float_v( &xs[at] ).compressstore( mask, &xs[size] ); + simd::float_v( &ys[at] ).compressstore( mask, &ys[size] ); + simd::float_v( &zs[at] ).compressstore( mask, &zs[size] ); + simd::float_v( &txs[at] ).compressstore( mask, &txs[size] ); + simd::float_v( &tys[at] ).compressstore( mask, &tys[size] ); + simd::float_v( &covxs[at] ).compressstore( mask, &covxs[size] ); + simd::float_v( &covys[at] ).compressstore( mask, &covys[size] ); + simd::float_v( &covzs[at] ).compressstore( mask, &covzs[size] ); + simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] ); size += simd::popcount( mask ); } - - - }; struct ExtrapolatedStates final { @@ -302,7 +299,7 @@ namespace LHCb::Pr { template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From 7e6600a15131f9b2a0e824e1d410d82efd246711 Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Tue, 19 May 2020 13:00:17 +0000 Subject: [PATCH 029/111] Apply suggestion to Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 7c8b6508297..97e259e6019 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -857,7 +857,7 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); - const auto lhcbid = hits.ChannelId<I>( hit_index ); + const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); tracksForward.compressstore_lhcbID( i, h, backwards, lhcbid ); } -- GitLab From 427c70a520abd9addbd5cd5082c1c5f2d70a5e4e Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Tue, 19 May 2020 13:00:23 +0000 Subject: [PATCH 030/111] Apply suggestion to Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 97e259e6019..ea439b8a269 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -877,7 +877,7 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); - auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + auto hit_index = min( tracks.hit<I>( t, h ), I{2048*26}); const auto lhcbid = hits.ChannelId<I>( hit_index ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } -- GitLab From 21b27bddfdbd272f56f0bf2d8081ed8b741293ac Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Tue, 19 May 2020 13:00:31 +0000 Subject: [PATCH 031/111] Apply suggestion to Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index ea439b8a269..7a7200372f2 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -878,7 +878,7 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); auto hit_index = min( tracks.hit<I>( t, h ), I{2048*26}); - const auto lhcbid = hits.ChannelId<I>( hit_index ); + const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } -- GitLab From 6ef1eaa4552afed734ad40a808b746941d8ef2da Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 19 May 2020 22:37:30 +0200 Subject: [PATCH 032/111] more adapting to storing index, simplifications in data structures --- Pr/PrVeloUT/src/PrVeloUT.cpp | 153 ++++++++++++++--------------------- Pr/PrVeloUT/src/PrVeloUT.h | 53 +++++------- 2 files changed, 79 insertions(+), 127 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index ea12c1c815b..4d5ef91725f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -75,11 +75,15 @@ namespace LHCb::Pr { const float zMidUT, const simd::float_v qpxz2p, const int t, simd::mask_v& goodFitMask ) { - const simd::float_v x = protoTracks.xState<simd::float_v>( t ); - const simd::float_v y = protoTracks.yState<simd::float_v>( t ); - 
const simd::float_v z = protoTracks.zState<simd::float_v>( t ); - const simd::float_v tx = protoTracks.txState<simd::float_v>( t ); - const simd::float_v ty = protoTracks.tyState<simd::float_v>( t ); + const Vec3<simd::float_v> pos = protoTracks.pos<simd::float_v>( t ); + const Vec3<simd::float_v> dir = protoTracks.dir<simd::float_v>( t ); + + + const simd::float_v x = pos.x; + const simd::float_v y = pos.y; + const simd::float_v z = pos.z; + const simd::float_v tx = dir.x; + const simd::float_v ty = dir.y; const simd::float_v zKink = magFieldParams[0] - ty * ty * magFieldParams[1] - ty * ty * ty * ty * magFieldParams[2]; const simd::float_v xMidField = x + tx * ( zKink - z ); @@ -302,8 +306,6 @@ namespace LHCb::Pr { /// Initialization StatusCode VeloUT::initialize() { - // std::cout << "initialize" << std::endl; - return Transformer::initialize().andThen( [&] { return m_PrUTMagnetTool.retrieve(); } ).andThen( [&] { // m_zMidUT is a position of normalization plane which should to be close to z middle of UT ( +- 5 cm ). // Cached once in VeloUTTool at initialization. No need to update with small UT movement. @@ -379,21 +381,22 @@ namespace LHCb::Pr { if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } if ( helper.bestIndices[0] == -1 ) continue; - scalar::float_v covx = filteredStates.covx<scalar::float_v>( tEff ); - scalar::float_v covy = filteredStates.covy<scalar::float_v>( tEff ); - scalar::float_v covz = filteredStates.covz<scalar::float_v>( tEff ); scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); int trackIndex = pTracks.size; // -- manual compressstore to keep everything in sync and fill the registers in the last function - pTracks.store_xState<scalar::float_v>( trackIndex, x ); - pTracks.store_yState<scalar::float_v>( trackIndex, y ); - pTracks.store_zState<scalar::float_v>( trackIndex, z ); - pTracks.store_txState<scalar::float_v>( trackIndex, tx ); - pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); - pTracks.store_covx<scalar::float_v>( trackIndex, covx ); - pTracks.store_covy<scalar::float_v>( trackIndex, covy ); - pTracks.store_covz<scalar::float_v>( trackIndex, covz ); + //pTracks.store_xState<scalar::float_v>( trackIndex, x ); + //pTracks.store_yState<scalar::float_v>( trackIndex, y ); + //pTracks.store_zState<scalar::float_v>( trackIndex, z ); + //pTracks.store_txState<scalar::float_v>( trackIndex, tx ); + //pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); + + Vec3<scalar::float_v> pos = Vec3<scalar::float_v>(x,y,z); + Vec3<scalar::float_v> dir = Vec3<scalar::float_v>(tx,ty,1.0f); + + pTracks.store_pos<scalar::float_v>( trackIndex, pos); + pTracks.store_dir<scalar::float_v>( trackIndex, dir); + pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); @@ -482,8 +485,8 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum - outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits - + outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits + outputTracks.size() += simd::popcount( outMask ); } } @@ -574,6 +577,9 @@ namespace LHCb::Pr { simd::float_v xTol = eStatesArray[layerIndex].xTol<simd::float_v>( t ); simd::float_v tx = eStatesArray[layerIndex].tx<simd::float_v>( t ); + + + 
simd::mask_v mask = UTDAQ::findSectors( layerIndex, xLayer, yLayer, xTol - abs( tx ) * m_intraLayerDist.value(), m_yTol.value() + m_yTolSlope.value() * abs( xTol ), geom.layers[layerIndex], subcolmin, subcolmax, subrowmin, subrowmax ); @@ -642,7 +648,9 @@ namespace LHCb::Pr { // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - filteredStates.copyBack( t, compressMask ); + filteredStates.copyBack(t, compressMask); + + } return compBoundsArray; @@ -850,18 +858,18 @@ namespace LHCb::Pr { template <typename BdlTable> void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const { for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { //== Handle states. copy Velo one, add TT. const simd::float_v zOrigin = - select( protoTracks.tyState<simd::float_v>( t ) > 0.001f, - protoTracks.zState<simd::float_v>( t ) - - protoTracks.yState<simd::float_v>( t ) / protoTracks.tyState<simd::float_v>( t ), - protoTracks.zState<simd::float_v>( t ) - - protoTracks.xState<simd::float_v>( t ) / protoTracks.txState<simd::float_v>( t ) ); + select( protoTracks.dir<simd::float_v>( t ).y > 0.001f, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).y / protoTracks.dir<simd::float_v>( t ).y, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).x / protoTracks.dir<simd::float_v>( t ).x ); auto loopMask = simd::loop_mask( t, protoTracks.size ); // -- this is to filter tracks where the fit had a too large chi2 @@ -873,7 +881,7 @@ namespace LHCb::Pr { // -- FIXME: these rely on the internal details of PrTableForFunction!!! // and should at least be put back in there, and used from here // to make sure everything _stays_ consistent... - auto var = std::array{protoTracks.tyState<simd::float_v>( t ), zOrigin, protoTracks.zState<simd::float_v>( t )}; + auto var = std::array{protoTracks.dir<simd::float_v>( t ).y, zOrigin, protoTracks.pos<simd::float_v>( t ).z}; simd::int_v index1 = min( max( simd::int_v{( var[0] + 0.3f ) / 0.6f * 30}, 0 ), 30 ); simd::int_v index2 = min( max( simd::int_v{( var[1] + 250 ) / 500 * 10}, 0 ), 10 ); @@ -909,8 +917,8 @@ namespace LHCb::Pr { // -- order is: x, tx, y, chi2 std::array<simd::float_v, 4> finalParams = { protoTracks.xTT<simd::float_v>( t ), protoTracks.xSlopeTT<simd::float_v>( t ), - protoTracks.yState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * ( m_zMidUT - protoTracks.zState<simd::float_v>( t ) ), + protoTracks.pos<simd::float_v>( t ).y + + protoTracks.dir<simd::float_v>( t ).y * ( m_zMidUT - protoTracks.pos<simd::float_v>( t ).z ), protoTracks.chi2TT<simd::float_v>( t )}; const simd::float_v qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; @@ -918,8 +926,8 @@ namespace LHCb::Pr { simd::float_v qp = m_finalFit ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) : protoTracks.qp<simd::float_v>( t ) * - rsqrt( 1.0f + protoTracks.tyState<simd::float_v>( t ) * - protoTracks.tyState<simd::float_v>( t ) ); // is this correct? + rsqrt( 1.0f + protoTracks.dir<simd::float_v>( t ).y * + protoTracks.dir<simd::float_v>( t ).y ); // is this correct? 
qp = select( fitMask, qp, protoTracks.qp<simd::float_v>( t ) ); const simd::float_v qop = select( abs( bdl ) < 1.e-8f, simd::float_v{1000.0f}, qp * qpxz2p ); @@ -928,8 +936,8 @@ namespace LHCb::Pr { // -- Beware of the momentum resolution! const simd::float_v p = abs( 1.0f / qop ); const simd::float_v pt = - p * sqrt( protoTracks.txState<simd::float_v>( t ) * protoTracks.txState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * protoTracks.tyState<simd::float_v>( t ) ); + p * sqrt( protoTracks.dir<simd::float_v>( t ).x * protoTracks.dir<simd::float_v>( t ).x + + protoTracks.dir<simd::float_v>( t ).y * protoTracks.dir<simd::float_v>( t ).y ); const simd::mask_v pPTMask = ( p > m_minMomentumFinal.value() && pt > m_minPTFinal.value() ); const simd::float_v xUT = finalParams[0]; @@ -969,71 +977,32 @@ namespace LHCb::Pr { simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; // ========================================================================================== - + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); - auto pos = protoTracks.pos<simd::float_v>( t ); - auto dir = protoTracks.dir<simd::float_v>( t ); - auto covX = protoTracks.cov<simd::float_v>( t ); - - int trackIndex = outputTracks.size(); - outputTracks.compressstore_trackVP<simd::int_v>( trackIndex, validTrackMask, ancestor ); - outputTracks.compressstore_statePos<simd::float_v>( trackIndex, validTrackMask, pos ); - outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); - outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); + const int trackIndex = outputTracks.size(); + outputTracks.copyVeloInformation<simd>( inputTracks, ancestor, validTrackMask ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); - - const simd::int_v nVPHits = inputTracks.maskgather_nHits<simd::int_v>( ancestor, validTrackMask, 0 ); - outputTracks.compressstore_nVPHits<simd::int_v>( trackIndex, validTrackMask, nVPHits ); - - // -- Store the hit information of the VP track in the Upstream track - for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { - if ( !testbit( validTrackMask, t2 ) ) continue; - // -- At this moment, we only have VP hits, so nHits == nVPHits - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); - const int nHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); - for ( int iHit = 0; iHit < nHits; ++iHit ) { - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + t2, iHit, - inputTracks.lhcbID<scalar::int_v>( iAncestor, iHit ) ); - outputTracks.store_vp_index<scalar::int_v>( trackIndex + t2, iHit, - inputTracks.vp_index<scalar::int_v>( iAncestor, iHit ) ); - } - } - - // =========================================================================================== - - // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 0 ); + TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); - - simd::int_v nUTHits{0}; - - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { - simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); - - simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - - outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); - nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - } 
- - outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); + // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { int trackIndex2 = 0; for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { if ( !testbit( validTrackMask, t2 ) ) continue; - + const std::size_t tscalar = t + t2; - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); - const int nVPHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); - + const bool goodHit = ( protoTracks.weight<scalar::float_v>( tscalar, iLayer ).cast() > 0.0001f ); + const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, iLayer + nVPHits, id ); - + + if( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); + // -- const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); @@ -1054,22 +1023,18 @@ namespace LHCb::Pr { const float xextrap = xhit + txUTS * ( zohit - zhit ); if ( xohit - xextrap < -m_overlapTol ) continue; if ( xohit - xextrap > m_overlapTol ) break; - - int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - if ( nUTHits > 16 ) continue; // get this number from PrUpstreamTracks!!! - - outputTracks.store_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, - hitsInLayers[hitContIndex].indexs[index2] ); + + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + if ( nUTHits >= LHCb::Pr::Upstream::Tracks::max_uthits ) continue; // get this number from PrUpstreamTracks!!! 
LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, nVPHits + nUTHits, oid.lhcbID() ); - outputTracks.store_nUTHits<scalar::int_v>( trackIndex + trackIndex2, nUTHits + 1 ); + outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), hitsInLayers[hitContIndex].indexs[index2] ); // only one overlap hit // break; } trackIndex2++; } } - outputTracks.size() += simd::popcount( validTrackMask ); + //outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information } } } // namespace LHCb::Pr diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 67a917067d8..dbd0f1b8ad3 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -102,19 +102,22 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) - // -- Copy back the entries, but with a filtering mask - void copyBack( std::size_t at, simd::mask_v mask ) { - simd::float_v( &xs[at] ).compressstore( mask, &xs[size] ); - simd::float_v( &ys[at] ).compressstore( mask, &ys[size] ); - simd::float_v( &zs[at] ).compressstore( mask, &zs[size] ); - simd::float_v( &txs[at] ).compressstore( mask, &txs[size] ); - simd::float_v( &tys[at] ).compressstore( mask, &tys[size] ); - simd::float_v( &covxs[at] ).compressstore( mask, &covxs[size] ); - simd::float_v( &covys[at] ).compressstore( mask, &covys[size] ); - simd::float_v( &covzs[at] ).compressstore( mask, &covzs[size] ); - simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] ); + // -- Copy back the entries, but with a filtering mask + void copyBack(std::size_t at, simd::mask_v mask){ + simd::float_v( &xs[at]).compressstore( mask, &xs[size] ); + simd::float_v( &ys[at]).compressstore( mask, &ys[size] ); + simd::float_v( &zs[at]).compressstore( mask, &zs[size] ); + simd::float_v( &txs[at]).compressstore( mask, &txs[size] ); + simd::float_v( &tys[at]).compressstore( mask, &tys[size] ); + simd::float_v( &covxs[at]).compressstore( mask, &covxs[size] ); + simd::float_v( &covys[at]).compressstore( mask, &covys[size] ); + simd::float_v( &covzs[at]).compressstore( mask, &covzs[size] ); + simd::int_v( &indexs[at]).compressstore( mask, &indexs[size] ); size += simd::popcount( mask ); } + + + }; struct ExtrapolatedStates final { @@ -166,17 +169,10 @@ namespace LHCb::Pr { std::array<float, batchSize> ys; // -- and this the original state (in the Velo) - std::array<float, batchSize> xStates; - std::array<float, batchSize> yStates; - std::array<float, batchSize> zStates; - std::array<float, batchSize> txStates; - std::array<float, batchSize> tyStates; + std::array<float, 3*batchSize> statePoss; + std::array<float, 3*batchSize> stateDirs; std::array<int, batchSize> indexs; - std::array<float, batchSize> covxs; - std::array<float, batchSize> covys; - std::array<float, batchSize> covzs; - // -- and this an index to find the hit containers std::array<int, batchSize> hitContIndexs; @@ -194,19 +190,10 @@ namespace LHCb::Pr { SOA_ACCESSOR( xSlopeTT, xSlopeTTs.data() ) SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( xState, xStates.data() ) - SOA_ACCESSOR( yState, yStates.data() ) - SOA_ACCESSOR( zState, zStates.data() ) - SOA_ACCESSOR( txState, txStates.data() ) - SOA_ACCESSOR( tyState, tyStates.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) - SOA_ACCESSOR( index, indexs.data() 
) + SOA_ACCESSOR( index, indexs.data() ) SOA_ACCESSOR( hitContIndex, hitContIndexs.data() ) - VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) - VEC3_SOA_ACCESSOR( pos, xStates.data(), yStates.data(), zStates.data() ) - VEC3_XY_SOA_ACCESSOR( dir, txStates.data(), tyStates.data(), 1.0f ) + VEC3_SOA_ACCESSOR( pos, (float*)&(statePoss[0]), (float*)&(statePoss[batchSize]), (float*)&(statePoss[2*batchSize]) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&(stateDirs[0]), (float*)&(stateDirs[batchSize]), 1.0f ) }; struct TxStorage final { @@ -299,7 +286,7 @@ namespace LHCb::Pr { template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From 6743d0d051c5fa7c200d2d47994c8b587aa67740 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 19 May 2020 22:39:40 +0200 Subject: [PATCH 033/111] fix formatting --- Pr/PrVeloUT/src/PrVeloUT.cpp | 83 +++++++++++++++++------------------- Pr/PrVeloUT/src/PrVeloUT.h | 40 +++++++++-------- 2 files changed, 58 insertions(+), 65 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 4d5ef91725f..e1d6de99d3f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -77,7 +77,6 @@ namespace LHCb::Pr { const Vec3<simd::float_v> pos = protoTracks.pos<simd::float_v>( t ); const Vec3<simd::float_v> dir = protoTracks.dir<simd::float_v>( t ); - const simd::float_v x = pos.x; const simd::float_v y = pos.y; @@ -381,22 +380,22 @@ namespace LHCb::Pr { if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } if ( helper.bestIndices[0] == -1 ) continue; - scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); int trackIndex = pTracks.size; // -- manual compressstore to keep everything in sync and fill the registers in the last function - //pTracks.store_xState<scalar::float_v>( trackIndex, x ); - //pTracks.store_yState<scalar::float_v>( trackIndex, y ); - //pTracks.store_zState<scalar::float_v>( trackIndex, z ); - //pTracks.store_txState<scalar::float_v>( trackIndex, tx ); - //pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); - - Vec3<scalar::float_v> pos = Vec3<scalar::float_v>(x,y,z); - Vec3<scalar::float_v> dir = Vec3<scalar::float_v>(tx,ty,1.0f); - - pTracks.store_pos<scalar::float_v>( trackIndex, pos); - pTracks.store_dir<scalar::float_v>( trackIndex, dir); - + // pTracks.store_xState<scalar::float_v>( trackIndex, x ); + // pTracks.store_yState<scalar::float_v>( trackIndex, y ); + // pTracks.store_zState<scalar::float_v>( trackIndex, z ); + // pTracks.store_txState<scalar::float_v>( trackIndex, tx ); + // pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); + + Vec3<scalar::float_v> pos = Vec3<scalar::float_v>( x, y, z ); + Vec3<scalar::float_v> dir = Vec3<scalar::float_v>( tx, ty, 1.0f ); + + pTracks.store_pos<scalar::float_v>( trackIndex, pos ); + pTracks.store_dir<scalar::float_v>( trackIndex, dir ); + pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); @@ -485,8 +484,8 @@ namespace LHCb::Pr { 
outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum - outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits - + outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits + outputTracks.size() += simd::popcount( outMask ); } } @@ -577,9 +576,6 @@ namespace LHCb::Pr { simd::float_v xTol = eStatesArray[layerIndex].xTol<simd::float_v>( t ); simd::float_v tx = eStatesArray[layerIndex].tx<simd::float_v>( t ); - - - simd::mask_v mask = UTDAQ::findSectors( layerIndex, xLayer, yLayer, xTol - abs( tx ) * m_intraLayerDist.value(), m_yTol.value() + m_yTolSlope.value() * abs( xTol ), geom.layers[layerIndex], subcolmin, subcolmax, subrowmin, subrowmax ); @@ -648,9 +644,7 @@ namespace LHCb::Pr { // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - filteredStates.copyBack(t, compressMask); - - + filteredStates.copyBack( t, compressMask ); } return compBoundsArray; @@ -858,7 +852,7 @@ namespace LHCb::Pr { template <typename BdlTable> void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const { for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { @@ -923,11 +917,10 @@ namespace LHCb::Pr { const simd::float_v qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; simd::mask_v fitMask = simd::mask_true(); - simd::float_v qp = m_finalFit - ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) - : protoTracks.qp<simd::float_v>( t ) * - rsqrt( 1.0f + protoTracks.dir<simd::float_v>( t ).y * - protoTracks.dir<simd::float_v>( t ).y ); // is this correct? + simd::float_v qp = m_finalFit ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) + : protoTracks.qp<simd::float_v>( t ) * + rsqrt( 1.0f + protoTracks.dir<simd::float_v>( t ).y * + protoTracks.dir<simd::float_v>( t ).y ); // is this correct? qp = select( fitMask, qp, protoTracks.qp<simd::float_v>( t ) ); const simd::float_v qop = select( abs( bdl ) < 1.e-8f, simd::float_v{1000.0f}, qp * qpxz2p ); @@ -977,16 +970,16 @@ namespace LHCb::Pr { simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; // ========================================================================================== - - const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); - const int trackIndex = outputTracks.size(); + + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); + const int trackIndex = outputTracks.size(); outputTracks.copyVeloInformation<simd>( inputTracks, ancestor, validTrackMask ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 0 ); - + TxStorage txArray; txArray.store_txUT<simd::float_v>( 0, txUT ); - + // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... 
for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { @@ -994,15 +987,15 @@ namespace LHCb::Pr { int trackIndex2 = 0; for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { if ( !testbit( validTrackMask, t2 ) ) continue; - + const std::size_t tscalar = t + t2; const bool goodHit = ( protoTracks.weight<scalar::float_v>( tscalar, iLayer ).cast() > 0.0001f ); - const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); - const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); - - if( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); - + const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); + const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); + + if ( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); + // -- const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); @@ -1023,18 +1016,20 @@ namespace LHCb::Pr { const float xextrap = xhit + txUTS * ( zohit - zhit ); if ( xohit - xextrap < -m_overlapTol ) continue; if ( xohit - xextrap > m_overlapTol ) break; - - int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - if ( nUTHits >= LHCb::Pr::Upstream::Tracks::max_uthits ) continue; // get this number from PrUpstreamTracks!!! + + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + if ( nUTHits >= LHCb::Pr::Upstream::Tracks::max_uthits ) + continue; // get this number from PrUpstreamTracks!!! LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), hitsInLayers[hitContIndex].indexs[index2] ); + outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), + hitsInLayers[hitContIndex].indexs[index2] ); // only one overlap hit // break; } trackIndex2++; } } - //outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information + // outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information } } } // namespace LHCb::Pr diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index dbd0f1b8ad3..6e655afb6e6 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -102,22 +102,19 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) - // -- Copy back the entries, but with a filtering mask - void copyBack(std::size_t at, simd::mask_v mask){ - simd::float_v( &xs[at]).compressstore( mask, &xs[size] ); - simd::float_v( &ys[at]).compressstore( mask, &ys[size] ); - simd::float_v( &zs[at]).compressstore( mask, &zs[size] ); - simd::float_v( &txs[at]).compressstore( mask, &txs[size] ); - simd::float_v( &tys[at]).compressstore( mask, &tys[size] ); - simd::float_v( &covxs[at]).compressstore( mask, &covxs[size] ); - simd::float_v( &covys[at]).compressstore( mask, &covys[size] ); - simd::float_v( &covzs[at]).compressstore( mask, &covzs[size] ); - simd::int_v( &indexs[at]).compressstore( mask, &indexs[size] ); + // -- Copy back the entries, but with a filtering mask + void copyBack( std::size_t at, simd::mask_v mask ) { + simd::float_v( &xs[at] ).compressstore( mask, &xs[size] ); + simd::float_v( &ys[at] ).compressstore( mask, &ys[size] ); + simd::float_v( &zs[at] ).compressstore( mask, &zs[size] ); + simd::float_v( &txs[at] ).compressstore( mask, &txs[size] 
); + simd::float_v( &tys[at] ).compressstore( mask, &tys[size] ); + simd::float_v( &covxs[at] ).compressstore( mask, &covxs[size] ); + simd::float_v( &covys[at] ).compressstore( mask, &covys[size] ); + simd::float_v( &covzs[at] ).compressstore( mask, &covzs[size] ); + simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] ); size += simd::popcount( mask ); } - - - }; struct ExtrapolatedStates final { @@ -169,9 +166,9 @@ namespace LHCb::Pr { std::array<float, batchSize> ys; // -- and this the original state (in the Velo) - std::array<float, 3*batchSize> statePoss; - std::array<float, 3*batchSize> stateDirs; - std::array<int, batchSize> indexs; + std::array<float, 3 * batchSize> statePoss; + std::array<float, 3 * batchSize> stateDirs; + std::array<int, batchSize> indexs; // -- and this an index to find the hit containers std::array<int, batchSize> hitContIndexs; @@ -190,10 +187,11 @@ namespace LHCb::Pr { SOA_ACCESSOR( xSlopeTT, xSlopeTTs.data() ) SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( index, indexs.data() ) + SOA_ACCESSOR( index, indexs.data() ) SOA_ACCESSOR( hitContIndex, hitContIndexs.data() ) - VEC3_SOA_ACCESSOR( pos, (float*)&(statePoss[0]), (float*)&(statePoss[batchSize]), (float*)&(statePoss[2*batchSize]) ) - VEC3_XY_SOA_ACCESSOR( dir, (float*)&(stateDirs[0]), (float*)&(stateDirs[batchSize]), 1.0f ) + VEC3_SOA_ACCESSOR( pos, (float*)&( statePoss[0] ), (float*)&( statePoss[batchSize] ), + (float*)&( statePoss[2 * batchSize] ) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&( stateDirs[0] ), (float*)&( stateDirs[batchSize] ), 1.0f ) }; struct TxStorage final { @@ -286,7 +284,7 @@ namespace LHCb::Pr { template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From 3b28c21947826ead4325995aeecb4042b66c5076 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 20 May 2020 15:27:55 +0200 Subject: [PATCH 034/111] working PrMatchNN with SOA containers --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 185 +++++++----------------------- Pr/PrAlgorithms/src/PrMatchNN.h | 1 + 2 files changed, 43 insertions(+), 143 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index e4d25025cf9..5722d4c8198 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -53,7 +53,7 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; - matches.reserve( 200 ); + matches.reserve( velos.size() * 1.5 ); std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; @@ -63,12 +63,22 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + if ( veloHits.size() == 0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Hit container '" << inputLocation<1>() << "' is empty" << endmsg; + return matches; + } + if ( seeds.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; + debug() << "Track container '" << inputLocation<2>() << "' is empty" << endmsg; return matches; } + seedMLPPairs seedMLP; + + seedMLP.reserve( seeds.size() ); + for ( int v = 
0; v != velos.size(); v++ ) { auto mlpCounterBuf = m_tracksMLP.buffer(); @@ -80,12 +90,14 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); - const int EndT3 = 3; + const int EndT3 = 2; + for ( int s = 0; s != seeds.size(); s++ ) { auto seed_pos = seeds.statePos<F>( s, EndT3 ); auto seed_dir = seeds.stateDir<F>( s, EndT3 ); const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + if ( posYApproxS > posYApproxV + m_fastYTol ) continue; const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); @@ -94,143 +106,34 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); mlpCounterBuf += mlp; chi2CounterBuf += chi2; - if ( mlp > m_minNN ) { - auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); - - if ( m_addUTHitsTool.isEnabled() ) { - StatusCode sc = m_addUTHitsTool->addUTHits( match ); - if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); - } - } - } - } - } - - /* - std::vector<MatchCandidate> cands; - cands.reserve( seeds.size() ); - - - - - std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; - - // -- make pairs of Velo track and state - // -- TrackStatePair is std::pair<const Track*, const LHCb::State*> - // -- TrackStatePairs is std::vector<TrackStatePair> - // -- typedef in header file - TrackStatePairs veloPairs; - veloPairs.reserve( velos.size() ); - - - for ( auto const& vTr : velos ) { - if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; - if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; - const LHCb::State* vState = vTr.stateAt( LHCb::State::Location::EndVelo ); - assert( vState != nullptr ); - veloPairs.emplace_back( &vTr, vState ); - } - - // -- sort according to approx y position - // -- We don't know deltaSlope, so we just extrapolate linearly - std::sort( veloPairs.begin(), veloPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( 0.0 - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( 0.0 - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - - // -- make pairs of Seed track and state - TrackStatePairs seedPairs; - seedPairs.reserve( seeds.size() ); - - for ( auto const& sTr : seeds ) { - if ( sTr.checkFlag( Track::Flag::Invalid ) ) continue; - const LHCb::State& sState = sTr.closestState( m_zMatchY ); - seedPairs.emplace_back( &sTr, &sState ); - } - - // -- sort according to approx y position - std::sort( seedPairs.begin(), seedPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( m_zMatchY - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( m_zMatchY - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - auto mlpCounterBuf = m_tracksMLP.buffer(); - auto chi2CounterBuf = m_tracksChi2.buffer(); - for ( auto const& vP : veloPairs ) { - cands.clear(); - - const float posYApproxV = vP.second->y() + ( m_zMatchY - vP.second->z() ) * vP.second->ty(); - // -- The TrackStatePairs are sorted according to the approximate extrapolated y position - // -- We can use a binary search to find the starting point from where we need to calculate the chi2 - // -- The tolerance should be large enough such 
that it is essentially losseless, but speeds things up - // significantly. - auto it = std::lower_bound( - seedPairs.begin(), seedPairs.end(), m_fastYTol, [&]( const TrackStatePair& sP, const float tol ) { - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - return posYApproxS < posYApproxV - tol; - } ); - - // -- The loop to calculate the chi2 between Velo and Seed track - for ( ; it < seedPairs.end(); ++it ) { - TrackStatePair sP = *it; - - // -- Stop the loop at the upper end of the tolerance interval - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - if ( posYApproxS > posYApproxV + m_fastYTol ) break; - - const float chi2 = getChi2Match( *vP.second, *sP.second, mLPReaderInput ); - - if ( m_matchDebugTool.isEnabled() ) { - std::vector<float> v( std::begin( mLPReaderInput ), std::end( mLPReaderInput ) ); - /// TODO: This needs to be updated with Track_v2 (PrMCTools/src/PrDebugMatchTool.{h,cpp} and - /// PrKernel/PrKernel/IPrDebugMatchTool.h) - // m_matchDebugTool->fillTuple( *vP.first, *sP.first, v ); - } - - if ( chi2 < m_maxChi2 ) { - const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); - mlpCounterBuf += mlp; - chi2CounterBuf += chi2; - if ( mlp > m_minNN ) cands.emplace_back( vP.first, sP.first, mlp ); + if ( mlp > m_minNN ) { seedMLP.emplace_back( s, mlp ); } } } - std::sort( cands.begin(), cands.end(), - []( const MatchCandidate& lhs, const MatchCandidate& rhs ) { return lhs.dist() > rhs.dist(); } ); + std::sort( seedMLP.begin(), seedMLP.end(), [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { + return sP1.second > sP2.second; + } ); - // convert unused match candidates to tracks - for ( const MatchCandidate& cand : cands ) { + for ( unsigned int sm = 0; sm != seedMLP.size(); sm++ ) { - if ( cands[0].dist() - cand.dist() > m_maxdDist ) break; + if ( seedMLP[0].second - seedMLP[sm].second > m_maxdDist ) break; - const Track* vTr = cand.vTr(); - const Track* sTr = cand.sTr(); - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) { - debug() << " Candidate" - << " Seed chi2 " << cand.dist() << endmsg; - } - - auto& match = matches.emplace_back( makeTrack( *vTr, *sTr ) ); + auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, seedMLP[sm].first ) ); if ( m_addUTHitsTool.isEnabled() ) { StatusCode sc = m_addUTHitsTool->addUTHits( match ); if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); } - } // end loop match cands - } // end loop velo tracks - - */ + } + seedMLP.clear(); + } m_tracksCount += matches.size(); return matches; } //============================================================================= -// - float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { @@ -238,10 +141,10 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); - if ( std::abs( dSlope ) > 1.5 ) return 99.; + if ( std::abs( dSlope ) > 1.5 ) return 9999.; const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); - if ( std::abs( dSlopeY ) > 0.15 ) return 99.; + if ( std::abs( dSlopeY ) > 0.15 ) return 9999.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + @@ -260,17 
+163,16 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; - if ( std::abs( distX ) > 400 ) return 99.; + if ( std::abs( distX ) > 400 ) return 9999.; const float distY = yS - yV; - if ( std::abs( distX ) > 250 ) return 99.; + if ( std::abs( distX ) > 250 ) return 9999.; const float teta2 = tx2 + ty2; const float tolX = dxTol2 + dSlope * dSlope * dxTolSlope2; const float tolY = m_dyTol * m_dyTol + teta2 * m_dyTolSlope * m_dyTolSlope; - float chi2 = distX * distX / tolX + distY * distY / tolY; + float chi2 = ( tolX != 0 and tolY != 0 ? distX * distX / tolX + distY * distY / tolY : 9999. ); - // chi2 += dslY * dslY / sState.errTy2() / 16.; chi2 += dSlopeY * dSlopeY * 10000 * 0.0625; if ( m_maxChi2 < chi2 ) return chi2; @@ -285,6 +187,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di return chi2; } +//============================================================================= PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { auto output = Track{}; @@ -310,40 +213,35 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; - newstates.reserve( 6 ); + newstates.reserve( 5 ); auto state_beam = getVeloState( velos, v, 0 ); state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_beam ); auto state_endvelo = getVeloState( velos, v, 1 ); state_endvelo.setLocation( LHCb::State::Location::EndVelo ); - - auto state_firstmeas = getVeloState( velos, v, 2 ); - state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_beam ); newstates.push_back( state_endvelo ); - newstates.push_back( state_firstmeas ); auto state_begT = getSeedState( seeds, s, 0 ); state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_begT ); auto state_midT = getSeedState( seeds, s, 1 ); state_midT.setLocation( LHCb::State::Location::EndVelo ); + newstates.push_back( state_midT ); + + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) + newstates.pop_back(); // make sure we don't include same state twice auto state_endT = getSeedState( seeds, s, 2 ); state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_begT ); - - newstates.push_back( state_midT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; - newstates.push_back( state_endT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. 
) + newstates.pop_back(); // make sure we don't include same state twice //== estimate q/p double qOverP, sigmaQOverP; - // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { @@ -363,3 +261,4 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int return output; } +//============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 922921cc620..180983d4e74 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -187,6 +187,7 @@ private: typedef std::pair<const Track*, const LHCb::State*> TrackStatePair; typedef std::vector<TrackStatePair> TrackStatePairs; + typedef std::vector<std::pair<unsigned int, float>> seedMLPPairs; }; #endif // PRMATCH_H -- GitLab From 97e7aa64efea475b60d81a4247736830263531af Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 20 May 2020 23:17:07 +0200 Subject: [PATCH 035/111] more use of Vec3<F>, simplifies code a bit --- Pr/PrVeloUT/src/PrVeloUT.cpp | 52 +++++++++++----------------- Pr/PrVeloUT/src/PrVeloUT.h | 66 +++++++++++++++--------------------- 2 files changed, 46 insertions(+), 72 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index e1d6de99d3f..7c83b816132 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -362,18 +362,15 @@ namespace LHCb::Pr { if ( !getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[t2], tEff ) ) continue; // -- this is a temporary solution to gradually adapt the algo - scalar::float_v x = filteredStates.x<scalar::float_v>( tEff ); - scalar::float_v y = filteredStates.y<scalar::float_v>( tEff ); - scalar::float_v z = filteredStates.z<scalar::float_v>( tEff ); - scalar::float_v tx = filteredStates.tx<scalar::float_v>( tEff ); - scalar::float_v ty = filteredStates.ty<scalar::float_v>( tEff ); + Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); + Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); MiniState trState; - trState.x = x.cast(); - trState.y = y.cast(); - trState.z = z.cast(); - trState.tx = tx.cast(); - trState.ty = ty.cast(); + trState.x = pos.x.cast(); + trState.y = pos.y.cast(); + trState.z = pos.z.cast(); + trState.tx = dir.x.cast(); + trState.ty = dir.y.cast(); TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); @@ -384,18 +381,8 @@ namespace LHCb::Pr { int trackIndex = pTracks.size; // -- manual compressstore to keep everything in sync and fill the registers in the last function - // pTracks.store_xState<scalar::float_v>( trackIndex, x ); - // pTracks.store_yState<scalar::float_v>( trackIndex, y ); - // pTracks.store_zState<scalar::float_v>( trackIndex, z ); - // pTracks.store_txState<scalar::float_v>( trackIndex, tx ); - // pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); - - Vec3<scalar::float_v> pos = Vec3<scalar::float_v>( x, y, z ); - Vec3<scalar::float_v> dir = Vec3<scalar::float_v>( tx, ty, 1.0f ); - pTracks.store_pos<scalar::float_v>( trackIndex, pos ); pTracks.store_dir<scalar::float_v>( trackIndex, dir ); - pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); @@ -463,14 +450,9 @@ namespace 
LHCb::Pr { simd::mask_v csMask = loopMask && !mask && ( !passTracks || !passHoleMask ); int index = filteredStates.size; - filteredStates.compressstore_x<simd::float_v>( index, csMask, pos.x ); - filteredStates.compressstore_y<simd::float_v>( index, csMask, pos.y ); - filteredStates.compressstore_z<simd::float_v>( index, csMask, pos.z ); - filteredStates.compressstore_tx<simd::float_v>( index, csMask, dir.x ); - filteredStates.compressstore_ty<simd::float_v>( index, csMask, dir.y ); - filteredStates.compressstore_covx<simd::float_v>( index, csMask, covX.x ); - filteredStates.compressstore_covy<simd::float_v>( index, csMask, covX.y ); - filteredStates.compressstore_covz<simd::float_v>( index, csMask, covX.z ); + filteredStates.compressstore_pos<simd::float_v>( index, csMask, pos ); + filteredStates.compressstore_dir<simd::float_v>( index, csMask, dir ); + filteredStates.compressstore_cov<simd::float_v>( index, csMask, covX ); filteredStates.compressstore_index<simd::int_v>( index, csMask, simd::indices( t ) ); filteredStates.size += simd::popcount( csMask ); @@ -977,8 +959,11 @@ namespace LHCb::Pr { outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 0 ); - TxStorage txArray; - txArray.store_txUT<simd::float_v>( 0, txUT ); + float txArray[simd::size]; + txUT.store( txArray ); + + // TxStorage txArray; + // txArray.store_txUT<simd::float_v>( 0, txUT ); // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... @@ -998,9 +983,10 @@ namespace LHCb::Pr { // -- - const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); - const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); - const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); + const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); + // const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const float txUTS = txArray[t2]; int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 6e655afb6e6..951fa468792 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -75,43 +75,36 @@ namespace LHCb::Pr { struct MiniStatesArray final { - constexpr static int max_tracks = align_size( 1024 ); - std::array<float, max_tracks> xs; - std::array<float, max_tracks> ys; - std::array<float, max_tracks> zs; - std::array<float, max_tracks> txs; - std::array<float, max_tracks> tys; - std::array<int, max_tracks> indexs; - - std::array<float, max_tracks> covxs; - std::array<float, max_tracks> covys; - std::array<float, max_tracks> covzs; - - std::size_t size{0}; - - SOA_ACCESSOR( x, xs.data() ) - SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( z, zs.data() ) - SOA_ACCESSOR( tx, txs.data() ) - SOA_ACCESSOR( ty, tys.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) + constexpr static int max_tracks = align_size( 1024 ); + std::array<float, 3 * max_tracks> poss; + std::array<float, 2 * max_tracks> dirs; + std::array<float, 3 * max_tracks> covs; + std::array<int, max_tracks> indexs; + std::size_t size{0}; + + SOA_ACCESSOR( x, &( poss[0] ) ) + SOA_ACCESSOR( y, &( poss[max_tracks] ) ) + SOA_ACCESSOR( z, &( poss[2 * max_tracks] ) ) + SOA_ACCESSOR( tx, &( 
dirs[0] ) )
+    SOA_ACCESSOR( ty, &( dirs[max_tracks] ) )
+    SOA_ACCESSOR( covx, &( covs[0] ) )
+    SOA_ACCESSOR( covy, &( covs[max_tracks] ) )
+    SOA_ACCESSOR( covz, &( covs[2 * max_tracks] ) )
     SOA_ACCESSOR( index, indexs.data() )
-    VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() )
-    VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() )
-    VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f )
+    VEC3_SOA_ACCESSOR( pos, (float*)&( poss[0] ), (float*)&( poss[max_tracks] ), (float*)&( poss[2 * max_tracks] ) )
+    VEC3_XY_SOA_ACCESSOR( dir, (float*)&( dirs[0] ), (float*)&( dirs[max_tracks] ), 1.0f )
+    VEC3_SOA_ACCESSOR( cov, (float*)&( covs[0] ), (float*)&( covs[max_tracks] ), (float*)&( covs[2 * max_tracks] ) )

     // -- Copy back the entries, but with a filtering mask
     void copyBack( std::size_t at, simd::mask_v mask ) {
-      simd::float_v( &xs[at] ).compressstore( mask, &xs[size] );
-      simd::float_v( &ys[at] ).compressstore( mask, &ys[size] );
-      simd::float_v( &zs[at] ).compressstore( mask, &zs[size] );
-      simd::float_v( &txs[at] ).compressstore( mask, &txs[size] );
-      simd::float_v( &tys[at] ).compressstore( mask, &tys[size] );
-      simd::float_v( &covxs[at] ).compressstore( mask, &covxs[size] );
-      simd::float_v( &covys[at] ).compressstore( mask, &covys[size] );
-      simd::float_v( &covzs[at] ).compressstore( mask, &covzs[size] );
+      simd::float_v( &poss[at] ).compressstore( mask, &poss[size] );
+      simd::float_v( &poss[at + max_tracks] ).compressstore( mask, &poss[size + max_tracks] );
+      simd::float_v( &poss[at + 2 * max_tracks] ).compressstore( mask, &poss[size + 2 * max_tracks] );
+      simd::float_v( &dirs[at] ).compressstore( mask, &dirs[size] );
+      simd::float_v( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] );
+      simd::float_v( &covs[at] ).compressstore( mask, &covs[size] );
+      simd::float_v( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] );
+      simd::float_v( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] );
       simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] );
       size += simd::popcount( mask );
     }
@@ -167,7 +160,7 @@ namespace LHCb::Pr {

     // -- and this the original state (in the Velo)
     std::array<float, 3 * batchSize> statePoss;
-    std::array<float, 3 * batchSize> stateDirs;
+    std::array<float, 2 * batchSize> stateDirs;
     std::array<int, batchSize>       indexs;

     // -- and this an index to find the hit containers
@@ -194,11 +187,6 @@ namespace LHCb::Pr {
       VEC3_XY_SOA_ACCESSOR( dir, (float*)&( stateDirs[0] ), (float*)&( stateDirs[batchSize] ), 1.0f )
     };

-    struct TxStorage final {
-      std::array<float, simd::size> txUTs;
-      SOA_ACCESSOR( txUT, txUTs.data() )
-    };
-
     struct TrackHelper final {
       TrackHelper( const MiniState& miniState, const float zKink, const float sigmaVeloSlope, const float maxPseudoChi2 )
           : state( miniState ), bestParams{{0.0f, maxPseudoChi2, 0.0f, 0.0f}} {
--
GitLab


From 504eb1e1c01db50e70c9a193d85d3fd1be1a3bc7 Mon Sep 17 00:00:00 2001
From: Michel De Cian <michel.de.cian@cern.ch>
Date: Fri, 22 May 2020 18:00:51 +0200
Subject: [PATCH 036/111] Reduce the usage of magic numbers, put them in 'UTInfo'

---
 Pr/PrVeloUT/src/PrVeloUT.cpp | 29 +++++++++++++++--------------
 Pr/PrVeloUT/src/PrVeloUT.h   |  6 ++++--
 2 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp
index 7c83b816132..cdf082b8260 100644
--- a/Pr/PrVeloUT/src/PrVeloUT.cpp
+++ b/Pr/PrVeloUT/src/PrVeloUT.cpp
@@ -181,7 +181,7 @@ namespace LHCb::Pr {

     // -- bubble sort is 
slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) - void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& helper, + void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers>& helper, const int start ) { for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { @@ -193,7 +193,7 @@ namespace LHCb::Pr { // -- not sure that is the smartest solution // -- but I could not come up with anything better // -- inspired by: https://lemire.me/blog/2017/04/10/removing-duplicates-from-lists-quickly/ - simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& out, int start, + simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers>& out, int start, size_t len ) { simd::int_v pos = start + 1; simd::int_v oldv = out[start]; @@ -533,9 +533,9 @@ namespace LHCb::Pr { int contSize = filteredStates.size; filteredStates.size = 0; - std::array<simd::int_v, UTInfo::TotalLayers> posArray; - std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors - std::array<int, UTInfo::TotalLayers> maxColsRows; + std::array<simd::int_v, UTInfo::TotalLayers> posArray; + std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors + std::array<int, UTInfo::TotalLayers> maxColsRows; // -- This now works with up to 9 sectors for ( int t = 0; t < contSize; t += simd::size ) { @@ -566,8 +566,8 @@ namespace LHCb::Pr { // -- Determine the maximum number of rows and columns we have to take into account // -- maximum 3 - const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); - const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 3 ); + const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, maxNumCols ); + const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, maxNumRows ); maxColsRows[layerIndex] = maxCols * maxRows; @@ -582,7 +582,7 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + 28 * realSC; + simd::int_v sectorIndex = realSR + UTInfo::EffectiveSectorsPerColumn * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number @@ -595,15 +595,16 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * 98 - 1; + helperArray[maxNumSectors * layerIndex + counter] = + sect + ( layerIndex * UTInfo::Regions + region ) * UTInfo::MaxSectorsPerRegion - 1; counter++; } } // -- This is sorting - bubbleSortSIMD( maxCols * maxRows, helperArray, maxSectors * layerIndex ); + bubbleSortSIMD( maxCols * maxRows, helperArray, maxNumSectors * layerIndex ); // -- This is uniquifying - posArray[layerIndex] = makeUniqueSIMD( helperArray, maxSectors * layerIndex, maxCols * maxRows ); + posArray[layerIndex] = makeUniqueSIMD( helperArray, maxNumSectors * layerIndex, maxCols * maxRows ); // -- count the number of layers which are 'valid' nLayers += select( mask, simd::int_v{1}, simd::int_v{0} ); } @@ -615,12 +616,12 @@ namespace LHCb::Pr 
{ int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<simd::int_v>( index, iSector, compressMask, - helperArray[maxSectors * iLayer + iSector] ); + helperArray[maxNumSectors * iLayer + iSector] ); } simd::float_v xTol = eStatesArray[iLayer].xTol<simd::float_v>( t ); compBoundsArray[iLayer].compressstore_xTol<simd::float_v>( index, compressMask, xTol ); compBoundsArray[iLayer].compressstore_nPos<simd::int_v>( index, compressMask, - posArray[iLayer] - maxSectors * iLayer ); + posArray[iLayer] - maxNumSectors * iLayer ); compBoundsArray[iLayer].size += simd::popcount( compressMask ); } @@ -666,7 +667,7 @@ namespace LHCb::Pr { const simd::float_v tolProto{m_yTol.value()}; const simd::float_v xTol{xTolS}; - std::array<int, maxSectors + 1> sectors{0}; + std::array<int, maxNumSectors + 1> sectors{0}; for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(); } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 951fa468792..972488bc51d 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -63,8 +63,10 @@ namespace LHCb::Pr { - constexpr static int batchSize = align_size( 48 ); - constexpr static int maxSectors = 9; // if needed, algo can be templated with this + constexpr static int batchSize = align_size( 48 ); + constexpr static int maxNumCols = 3; // if needed, algo can be templated with this + constexpr static int maxNumRows = 3; // if needed, algo can be templated with this + constexpr static int maxNumSectors = maxNumCols * maxNumRows; // if needed, algo can be templated with this using simd = SIMDWrapper::avx2::types; using scalar = SIMDWrapper::scalar::types; -- GitLab From c8ce1a4f2a73560aafe2bbe8b0c14b84fc1a2387 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 25 May 2020 00:22:38 +0200 Subject: [PATCH 037/111] first version of PrAddUTHits with PrUTHithandler --- Muon/MuonID/src/component/MuonIDHlt1Alg.cpp | 2 +- .../tests/options/test_functors.py | 2 +- .../SelAlgorithms/src/InstantiateFunctors.cpp | 4 +- Pr/PrAlgorithms/src/IPrAddUTHitsTool.h | 14 +- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 839 +++++++++++------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 174 ++-- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 80 +- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 21 +- Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp | 6 +- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 8 +- Pr/PrVeloUT/src/PrVeloUT.cpp | 166 ++-- Pr/PrVeloUT/src/PrVeloUT.h | 91 +- Tr/TrackUtils/src/TracksFTConverter.cpp | 28 +- Tr/TrackUtils/src/TracksVPConverter.cpp | 1 - 14 files changed, 805 insertions(+), 631 deletions(-) diff --git a/Muon/MuonID/src/component/MuonIDHlt1Alg.cpp b/Muon/MuonID/src/component/MuonIDHlt1Alg.cpp index 68f0d3743a1..59642953b8d 100644 --- a/Muon/MuonID/src/component/MuonIDHlt1Alg.cpp +++ b/Muon/MuonID/src/component/MuonIDHlt1Alg.cpp @@ -470,5 +470,5 @@ private: using MuonIDHlt1Alg_v2 = MuonIDHlt1Alg<v2_Tracks_Zip, v2_MuonPIDs>; DECLARE_COMPONENT_WITH_ID( MuonIDHlt1Alg_v2, "MuonIDHlt1Alg" ) -using MuonIDHlt1Alg_pr = MuonIDHlt1Alg<LHCb::Pr::Forward::Tracks, LHCb::Pr::Muon::PIDs>; +using MuonIDHlt1Alg_pr = MuonIDHlt1Alg<LHCb::Pr::Long::Tracks, LHCb::Pr::Muon::PIDs>; DECLARE_COMPONENT_WITH_ID( MuonIDHlt1Alg_pr, "MuonIDHlt1AlgPr" ) diff --git a/Phys/FunctorCore/tests/options/test_functors.py b/Phys/FunctorCore/tests/options/test_functors.py index 6cf54892225..16b2097f3ec 100644 --- 
a/Phys/FunctorCore/tests/options/test_functors.py
+++ b/Phys/FunctorCore/tests/options/test_functors.py
@@ -205,7 +205,7 @@ test_pr(
     only_unwrapped_functors=scalar_track_functors)
 forward_functors = generic_functors + all_track_functors + only_long_track_functors
 test_pr(
-    'PrForwardTracks',
+    'PrLongTracks',
     forward_functors + all_new_eventmodel_track_functors +
     only_long_track_functors_except_track_v2,
     only_unwrapped_functors=scalar_track_functors +
diff --git a/Phys/SelAlgorithms/src/InstantiateFunctors.cpp b/Phys/SelAlgorithms/src/InstantiateFunctors.cpp
index 6ff42cbc940..ca2e7319914 100644
--- a/Phys/SelAlgorithms/src/InstantiateFunctors.cpp
+++ b/Phys/SelAlgorithms/src/InstantiateFunctors.cpp
@@ -120,7 +120,7 @@ DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<Pr::Selection<LHCb::Event::v2::Tr
 DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Velo::Tracks>, "InstantiateFunctors__PrVeloTracks" )
 DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Iterable::Scalar::Velo::Tracks>,
                            "InstantiateFunctors__PrVeloTracks__Unwrapped" )
-DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Forward::Tracks>, "InstantiateFunctors__PrForwardTracks" )
+DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Long::Tracks>, "InstantiateFunctors__PrForwardTracks" )
 DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Iterable::Scalar::Forward::Tracks>,
                            "InstantiateFunctors__PrForwardTracks__Unwrapped" )
 DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Fitted::Forward::Tracks>,
@@ -143,4 +143,4 @@ using TrackCombination__ScalarFittedWithMuonID__2 =
 using vector__TrackCombination__ScalarFittedWithMuonID__2 = std::vector<TrackCombination__ScalarFittedWithMuonID__2>;
 DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<vector__TrackCombination__ScalarFittedWithMuonID__2>,
                            "InstantiateFunctors__vector__TrackCombination__ScalarFittedWithMuonID__2" )
-DECLARE_COMPONENT_WITH_ID( InstantiateVoidFunctors, "InstantiateFunctors__void" )
\ No newline at end of file
+DECLARE_COMPONENT_WITH_ID( InstantiateVoidFunctors, "InstantiateFunctors__void" )
diff --git a/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h b/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h
index 062bb59eb21..01c18f6a938 100644
--- a/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h
+++ b/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h
@@ -18,7 +18,8 @@
 // from Gaudi
 #include "Event/Track_v2.h"
 #include "GaudiKernel/IAlgTool.h"
-#include "PrKernel/UTHit.h"
+#include "PrKernel/PrMutUTHits.h"
+#include "Event/PrLongTracks.h"

 /** @class IPrAddUTHitsTool IPrAddUTHitsTool.h TrackInterfaces/IPrAddUTHitsTool.h
  *
@@ -27,18 +28,17 @@
  */

 // forward declaration
-namespace LHCb {
-  class State;
+namespace LHCb{
+  class State;
 }

 class IPrAddUTHitsTool : public extend_interfaces<IAlgTool> {
-  using Track = LHCb::Event::v2::Track;
+  using Tracks = LHCb::Pr::Long::Tracks;

 public:
   DeclareInterfaceID( IPrAddUTHitsTool, 2, 0 );
-  /// Add UT clusters to matched tracks
-  virtual StatusCode addUTHits( Track& track ) const = 0;
-  virtual UT::Mut::Hits returnUTHits( LHCb::State& state, double& finalChi2, double p ) const = 0;
+  /// Add UT clusters to Long tracks
+  virtual StatusCode addUTHits( Tracks& tracks ) const = 0;
 };

 #endif // TRACKINTERFACES_IPRADDUTHITSTOOL_H
diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp
index 016ca3b619b..5c31e35002e 100644
--- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp
+++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp
@@ -10,12 +10,17 @@
 \*****************************************************************************/ 
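// -- The SIMD rewrite below leans heavily on the "compress-store" idiom: keep only the vector
// -- lanes selected by a mask and append them contiguously to the output arrays. A scalar sketch
// -- of the idea (placeholder names, standard C++ only, not LHCb code):
// --
// --   #include <array>
// --   #include <cstddef>
// --   template <typename T, std::size_t N>
// --   std::size_t compressStore( const std::array<T, N>& in, const std::array<bool, N>& mask, T* out,
// --                              std::size_t size ) {
// --     for ( std::size_t i = 0; i < N; ++i )
// --       if ( mask[i] ) out[size++] = in[i]; // selected lanes end up packed; size is the new fill level
// --     return size;
// --   }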
#include <algorithm> #include <array> +#include <boost/container/small_vector.hpp> +#include <numeric> // Include files // from Gaudi #include "GaudiKernel/SystemOfUnits.h" -#include "PrAddUTHitsTool.h" #include "UTDAQ/UTDAQHelper.h" +#include "PrAddUTHitsTool.h" +#include "LHCbMath/SIMDWrapper.h" +#include "LHCbMath/GeomFun.h" +#include "Kernel/LHCbID.h" //----------------------------------------------------------------------------- // Implementation file for class : PrAddUTHitsTool @@ -23,367 +28,567 @@ // 2016-05-11 : Michel De Cian // //----------------------------------------------------------------------------- +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID(LHCb::Pr::PrAddUTHitsTool, "PrAddUTHitsTool" ) +namespace LHCb::Pr{ + + namespace{ + // -- bubble sort is slow, but we never have more than 9 elements (horizontally) + // -- and can act on 8 elements at once vertically (with AVX) + void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& helper, + const int start ) { + for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { + for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { + swap( helper[start + j] > helper[start + j + 1], helper[start + j], helper[start + j + 1] ); + } + } + } -DECLARE_COMPONENT( PrAddUTHitsTool ) - -using ROOT::Math::CholeskyDecomp; - -//========================================================================= -// -//========================================================================= -StatusCode PrAddUTHitsTool::initialize() { - return GaudiTool::initialize().andThen( [&] { - m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); - // Make sure we precompute z positions/sizes of the layers/sectors - registerCondition( m_utDet->geometry(), &PrAddUTHitsTool::recomputeGeometry ); - } ); -} + // remove duplicated sectors + simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& out, int start, + size_t len ) { + simd::int_v pos = start + 1; + simd::int_v oldv = out[start]; + for ( size_t j = start + 1; j < start + len; ++j ) { + simd::int_v newv = out[j]; + simd::mask_v blendMask = ( newv == oldv ); + for ( size_t k = j + 1; k < start + len; ++k ) { out[k - 1] = select( blendMask, out[k], out[k - 1] ); } + oldv = newv; + pos = pos + select( blendMask, simd::int_v{0}, simd::int_v{1} ); + } + return pos; + } + } // namespace -StatusCode PrAddUTHitsTool::recomputeGeometry() { - m_geomcache = LHCb::UTDAQ::computeGeometry( *m_utDet ); - return StatusCode::SUCCESS; -} -//========================================================================= -// Add the TT hits on the track, only the ids. 
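// -- A scalar sketch of what the bubbleSortSIMD and makeUniqueSIMD helpers above do for each SIMD
// -- lane: sort the (at most 9) candidate sector indices and drop duplicates (placeholder names,
// -- standard library only, shown for clarity):
// --
// --   #include <algorithm>
// --   #include <array>
// --   int sortAndUniquifySectors( std::array<int, 9>& sectors, int n ) {
// --     std::sort( sectors.begin(), sectors.begin() + n );               // what bubbleSortSIMD does per lane
// --     auto last = std::unique( sectors.begin(), sectors.begin() + n ); // what makeUniqueSIMD does per lane
// --     return static_cast<int>( last - sectors.begin() );               // number of distinct sectors
// --   }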
-//========================================================================= -StatusCode PrAddUTHitsTool::addUTHits( Track& track ) const { + using ROOT::Math::CholeskyDecomp; - LHCb::State state = track.closestState( p_zUTProj ); - double chi2 = 0; + //========================================================================= + // + //========================================================================= + StatusCode PrAddUTHitsTool::initialize() { + return GaudiTool::initialize().andThen( [&] { + m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); + // Make sure we precompute z positions/sizes of the layers/sectors + registerCondition( m_utDet->geometry(), &PrAddUTHitsTool::recomputeGeometry ); + } ); + } - UT::Mut::Hits myUTHits = returnUTHits( state, chi2, track.p() ); + StatusCode PrAddUTHitsTool::recomputeGeometry() { + m_geomcache = LHCb::UTDAQ::computeGeometry( *m_utDet ); + return StatusCode::SUCCESS; + } - // -- Only add hits if there are 3 or more - if ( myUTHits.size() < 3 ) return StatusCode::SUCCESS; + //========================================================================= + // Add the TT hits on the track, only the ids. + //========================================================================= + StatusCode PrAddUTHitsTool::addUTHits( Tracks& tracks ) const { + + MiniStates filteredStates ; + auto compBoundsArray = findAllSectors(tracks, filteredStates); + + for ( auto t = 0; t < int( filteredStates.size ); t++ ) { + + auto myUTHits = returnUTHits( filteredStates, compBoundsArray, t); + if ((myUTHits.size < 3 ) ) continue; + assert( myUTHits.size <= LHCb::Pr::Upstream::Tracks::max_uthits && "Container cannot store more than 16 UT hits per track" ); + + int itr = filteredStates.index<sI>( t ).cast(); + const int nVPHits = tracks.nVPHits<sI>( itr ).cast(); + const int nFTHits = tracks.nFTHits<sI>( itr ).cast(); + tracks.store_nUTHits<sI>( itr, int(myUTHits.size) ); + + for ( auto i = 0; i < int( myUTHits.size ); i++ ) { + // ---------------------------------- + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "--- Adding Hit in Layer: " << myUTHits.planeCode<sI>( i ) << " with projection: " << myUTHits.projections[i] + << endmsg; + // ---------------------------------- + // add ut hit indices and lhcbIDs to the long track + const int idxhit = myUTHits.indexs[i]; + LHCb::LHCbID lhcbid( LHCb::UTChannelID( myUTHits.channelIDs[i] ) ); + tracks.store_ut_index<sI>( itr, i, idxhit ); + tracks.store_lhcbID<sI>( itr, nVPHits + nFTHits + i, lhcbid.lhcbID() ); + } + } + return StatusCode::SUCCESS; + } - for ( const auto& hit : myUTHits ) { + ///======================================================================= + // find all sections + ///======================================================================= + std::array<Boundaries, UTInfo::TotalLayers> + PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, MiniStates& filteredStates ) const{ + + std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; + int contSize = tracks.size(); + filteredStates.size = 0; + std::array<simd::int_v, UTInfo::TotalLayers> posArray; + std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors + std::array<int, UTInfo::TotalLayers> maxColsRows; + + //--- This now works with up to 9 sectors + const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); + for ( int t = 0; t < contSize; t += simd::size ) { + auto loopMask = simd::loop_mask( t, contSize ); + simd::int_v nLayers{0}; + + //---Define the tolerance parameters + 
const F qoverp = tracks.stateQoP<F>( t ); + const F p = abs( 1 / qoverp ); + const F yTol = p_yTolSlope.value() / p; + const F xTol = p_xTol.value() + p_xTolSlope.value() / p; + + auto pos = tracks.vStatePos<F>( t ); + auto dir = tracks.vStateDir<F>( t ); + const F stateX = pos.x; + const F stateY = pos.y; + const F stateZ = pos.z; + const F stateTx = dir.x; + const F stateTy = dir.y; + + const F bendParam = p_utParam.value() * -1 * signedReCur * qoverp; + + for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + + const F zLayer = m_geomcache.layers[layerIndex].z; + const F yPredLay = stateY + ( zLayer - stateZ ) * stateTy; + const F xPredLay = stateX + ( zLayer - stateZ ) * stateTx + bendParam * ( zLayer - p_zUTField.value() ); + + const simd::int_v regionBoundary1 = ( 2 * m_geomcache.layers[layerIndex].nColsPerSide + 3 ); + const simd::int_v regionBoundary2 = ( 2 * m_geomcache.layers[layerIndex].nColsPerSide - 5 ); + + simd::int_v subcolmin{0}; + simd::int_v subcolmax{0}; + simd::int_v subrowmin{0}; + simd::int_v subrowmax{0}; + + simd::mask_v mask = LHCb::UTDAQ::findSectors( layerIndex, xPredLay, yPredLay, xTol, yTol, + m_geomcache.layers[layerIndex], subcolmin, subcolmax, subrowmin, subrowmax ); + + const simd::mask_v gathermask = loopMask && mask; + + // -- Determine the maximum number of rows and columns we have to take into account + // -- maximum 3 + const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); + const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 3 ); + + maxColsRows[layerIndex] = maxCols * maxRows; + + int counter = 0; + for ( int sc = 0; sc < maxCols; sc++ ) { + simd::int_v realSC = min( subcolmax, subcolmin + sc ); + simd::int_v region = select( realSC > regionBoundary1, simd::int_v{1}, simd::int_v{0} ) + + select( realSC > regionBoundary2, simd::int_v{1}, simd::int_v{0} ); + + for ( int sr = 0; sr < maxRows; sr++ ) { + + simd::int_v realSR = min( subrowmax, subrowmin + sr ); + simd::int_v sectorIndex = realSR + 28 * realSC; + + // -- only gather when we are not outside the acceptance + // -- if we are outside, fill 1 which is the lowest possible sector number + // -- We need to fill a valid number, as one can have 3 layers with a correct sector + // -- and one without a correct sector, in which case the track will not be masked off. + // -- However, these cases should happen very rarely + simd::int_v sect = ( layerIndex < 2 ) + ? 
m_geomcache.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) + : m_geomcache.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); + + // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 + // -- The regions are already calculated with a -1 + helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * 98 - 1; + counter++; + } + } + // sorting + bubbleSortSIMD( maxCols * maxRows, helperArray, maxSectors * layerIndex ); + // uniquifying + posArray[layerIndex] = makeUniqueSIMD( helperArray, maxSectors * layerIndex, maxCols * maxRows ); + // count the number of `valid` layers + nLayers += select( mask, simd::int_v{1}, simd::int_v{0} ); + } + //-- we need at least three layers + const simd::mask_v compressMask = ( nLayers > 2 ) && loopMask; + + for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + int index = compBoundsArray[iLayer].size; + for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { + compBoundsArray[iLayer].compressstore_sect<I>( index, iSector, compressMask, + helperArray[maxSectors * iLayer + iSector] ); + } + compBoundsArray[iLayer].compressstore_xTol<F>( index, compressMask, xTol ); + compBoundsArray[iLayer].compressstore_nPos<I>( index, compressMask, + posArray[iLayer] - maxSectors * iLayer ); + compBoundsArray[iLayer].size += simd::popcount( compressMask ); + } - // ---------------------------------- - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "--- Adding Hit in Layer: " << hit.HitPtr->planeCode() << " with projection: " << hit.projection - << endmsg; - // ---------------------------------- + // -- Now need to compress the filtered states, such that they are + // -- in sync with the sectors + + int stateidx = filteredStates.size; + filteredStates.compressstore_x<F>( stateidx, compressMask, pos.x ); + filteredStates.compressstore_y<F>( stateidx, compressMask, pos.y ); + filteredStates.compressstore_z<F>( stateidx, compressMask, pos.z ); + filteredStates.compressstore_tx<F>( stateidx, compressMask, dir.x ); + filteredStates.compressstore_ty<F>( stateidx, compressMask, dir.y ); + filteredStates.compressstore_qop<F>( stateidx, compressMask, qoverp ); + filteredStates.compressstore_p<F>( stateidx, compressMask, p ); + filteredStates.compressstore_index<I>( stateidx, compressMask, simd::indices( t ) ); + filteredStates.size += simd::popcount( compressMask ); + } - track.addToLhcbIDs( hit.HitPtr->lhcbID() ); + return compBoundsArray; } - return StatusCode::SUCCESS; -} -//========================================================================= -// Return the TT hits -//========================================================================= -UT::Mut::Hits PrAddUTHitsTool::returnUTHits( LHCb::State& state, double& finalChi2, double p ) const { - // -- If no momentum is given, use the one from the state - if ( p < 1e-10 ) { p = state.p(); } - - UT::Mut::Hits UTHits; - UTHits.reserve( 8 ); - - double bestChi2 = p_maxChi2Tol + p_maxChi2Slope / ( p - p_maxChi2POffset ); - double chi2 = 0.; - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "--- Entering returnUTHits ---" << endmsg; - - // -- Get the container with all the hits compatible with the tack - UT::Mut::Hits selected = selectHits( state, p ); - // -- If only two hits are selected, end algorithm - if ( selected.size() < 3 ) { - UTHits = selected; - finalChi2 = 0; - return UTHits; - } + //========================================================================= + // Return the TT hits + 
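// -- The flat sector index filled into helperArray above combines the sector number from the
// -- lookup table with the layer and region, using the constants that appear in the code
// -- (3 regions per layer, 98 sectors per region). A sketch of the encoding:
// --
// --   int flatSectorIndex( int layerIndex, int region, int sectorFromLUT ) {
// --     return sectorFromLUT + ( layerIndex * 3 + region ) * 98 - 1;
// --   }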
//========================================================================= + LHCb::Pr::UT::Mut::Hits PrAddUTHitsTool::returnUTHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, std::size_t t) const { - std::sort( selected.begin(), selected.end(), UT::Mut::IncreaseByProj ); + LHCb::Pr::UT::Mut::Hits UTHits; + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "--- Entering returnUTHits ---" << endmsg; - // -- Loop over all hits and make "groups" of hits to form a candidate - for ( auto itBeg = selected.cbegin(); itBeg + 2 < selected.end(); ++itBeg ) { + // -- Get the container with all the hits compatible with the track + LHCb::Pr::UT::Mut::Hits hitsInLayers; + hitsInLayers.size = 0 ; + for ( auto& it : hitsInLayers.layerIndices ) it = -1; + + bool findHits = selectHits( filteredStates, compBoundsArray, hitsInLayers, t ); - const double firstProj = ( *itBeg ).projection; - UT::Mut::Hits goodUT; - goodUT.reserve( 4 ); - int nbPlane = 0; - std::array<int, 4> firedPlanes{}; - auto itEnd = itBeg; + // -- If less three layer or only two hits are selected, end algorithm + if ( !findHits || int (hitsInLayers.size ) < 3 ) return UTHits; - // -- If |firstProj| > m_majAxProj, the sqrt is ill defined - double maxProj = firstProj; - if ( fabs( firstProj ) < p_majAxProj ) { - // -- m_invMajAxProj2 = 1/(m_majAxProj*m_majAxProj), but it's faster like this - maxProj = firstProj + sqrt( p_minAxProj * p_minAxProj * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); - } + const auto p = filteredStates.p<sF>( t ).cast(); - // -- This means that there would be less than 3 hits, which does not work, so we can skip this right away - if ( ( *( itBeg + 2 ) ).projection > maxProj ) continue; + float bestChi2 = p_maxChi2Tol.value() + p_maxChi2Slope.value() / ( p - p_maxChi2POffset.value() ); - // -- Make "group" of hits which are within a certain distance to the first hit of the group - while ( itEnd != selected.end() ) { + // -- Loop over all hits and make "groups" of hits to form a candidate + for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size); ++itBeg ) { - if ( ( *itEnd ).projection > maxProj ) break; + const float firstProj = hitsInLayers.projections[itBeg]; - if ( 0 == firedPlanes[( *itEnd ).HitPtr->planeCode()] ) { - firedPlanes[( *itEnd ).HitPtr->planeCode()] = 1; // -- Count number of fired planes - ++nbPlane; - } + LHCb::Pr::UT::Mut::Hits goodUT; - goodUT.push_back( *itEnd++ ); - } + int nbPlane = 0; + std::array<int, 4> firedPlanes{}; - if ( 3 > nbPlane ) continue; // -- Need at least hits in 3 planes - // -- group of hits has to be at least as large than best group at this stage - if ( UTHits.size() > goodUT.size() ) continue; + // -- If |firstProj| > m_majAxProj, the sqrt is ill defined + float maxProj = firstProj; + if ( fabs( firstProj ) < p_majAxProj.value() ) { + // -- m_invMajAxProj2 = 1/(m_majAxProj*m_majAxProj), but it's faster like this + maxProj = firstProj + sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); + } - // ---------------------------------- - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Start fit, first proj " << firstProj << " nbPlane " << nbPlane << " size " << goodUT.size() << endmsg; - // ---------------------------------- + // -- This means that there would be less than 3 hits, which does not work, so we can skip this right away + if ( ( hitsInLayers.projections[itBeg+2]) > maxProj ) continue; - // -- Set variables for the chi2 calculation + // -- Make 
"group" of hits which are within a certain distance to the first hit of the group + for ( auto itEnd = itBeg; itEnd < int(hitsInLayers.size); itEnd++ ) { - double dist = 0; - chi2 = 1.e20; + auto index = goodUT.size; + if ( hitsInLayers.projections[itEnd] > maxProj ) break; - calculateChi2( chi2, bestChi2, dist, p, goodUT ); + if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { + firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] = 1; // -- Count number of fired planes + ++nbPlane; + } + + goodUT.xs[index] = hitsInLayers.xs[itEnd] ; + goodUT.zs[index] = hitsInLayers.zs[itEnd] ; + goodUT.coss[index] = hitsInLayers.coss[itEnd] ; + goodUT.sins[index] = hitsInLayers.sins[itEnd] ; + goodUT.weights[index] = hitsInLayers.weights[itEnd] ; + goodUT.projections[index] = hitsInLayers.projections[itEnd] ; + goodUT.channelIDs[index] = hitsInLayers.channelIDs[itEnd] ; + goodUT.indexs[index] = hitsInLayers.indexs[itEnd] ; + goodUT.size += 1; + } - // -- If this group has a better chi2 than all the others - // -- and is at least as large as all the others, then make this group the new candidate - if ( bestChi2 > chi2 && goodUT.size() >= UTHits.size() ) { + if ( 3 > nbPlane ) continue; // -- Need at least hits in 3 planes + // -- group of hits has to be at least as large than best group at this stage + if ( UTHits.size > goodUT.size ) continue; // ---------------------------------- - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, state, goodUT ); - // ---------------------------------- - UTHits = goodUT; - bestChi2 = chi2; + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Start fit, first proj " << firstProj << " nbPlane " << nbPlane << " size " << goodUT.size << endmsg; + // -- Set variables for the chi2 calculation + float dist = 0; + float chi2 = 1.e20; + + calculateChi2( chi2, bestChi2, dist, p, goodUT ); + + // -- If this group has a better chi2 than all the others + // -- and is at least as large as all the others, then make this group the new candidate + if ( bestChi2 > chi2 && goodUT.size >= UTHits.size ) { + + UTHits.size = 0; + // ---------------------------------- + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, goodUT ); + // ---------------------------------- + for( auto i =0; i < int( goodUT.size ); i++ ) { + //auto loopmask = simd::loop_mask(i, goodUT.size ); + //UTHits[t].copy_back( goodUT, i, loopmask ); + UTHits.xs[i] = goodUT.xs[i] ; + UTHits.zs[i] = goodUT.zs[i] ; + UTHits.coss[i] = goodUT.coss[i] ; + UTHits.sins[i] = goodUT.sins[i] ; + UTHits.weights[i] = goodUT.weights[i] ; + UTHits.projections[i] = goodUT.projections[i] ; + UTHits.channelIDs[i] = goodUT.channelIDs[i] ; + UTHits.indexs[i] = goodUT.indexs[i] ; + UTHits.size += 1; + } + + bestChi2 = chi2; + } } - } - // -- Assign the final hit container and chi2 to the variables which are returned. - finalChi2 = bestChi2; - - if ( UTHits.size() > 2 ) { - m_hitsAddedCounter += UTHits.size(); - m_tracksWithHitsCounter++; + // -- Assign the final hit container and chi2 to the variables which are returned. 
+ //finalChi2 = bestChi2; + if ( UTHits.size > 2 ) { + m_hitsAddedCounter += UTHits.size; + m_tracksWithHitsCounter++; + } + return UTHits; } - return UTHits; -} -//========================================================================= -// Select the hits in a certain window -//========================================================================= -UT::Mut::Hits PrAddUTHitsTool::selectHits( const LHCb::State& state, const double p ) const { - - // -- Define the tolerance parameters - const double yTol = p_yTolSlope / p; - const double xTol = p_xTol + p_xTolSlope / p; - UT::Mut::Hits selected; - selected.reserve( 10 ); - - // -- Define the parameter that describes the bending - // -- in principle the call m_magFieldSvc->signedRelativeCurrent() is not needed for every track... - const double bendParam = p_utParam * -1 * m_magFieldSvc->signedRelativeCurrent() * state.qOverP(); - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "State z " << state.z() << " x " << state.x() << " y " << state.y() << " tx " << state.tx() << " ty " - << state.ty() << " p " << p << endmsg; - - const double stateX = state.x(); - const double stateZ = state.z(); - const double stateTy = state.ty(); - const double stateY = state.y(); - const double stateTx = state.tx(); - - boost::container::small_vector<std::pair<int, int>, 9> sectors; - - for ( int iStation = 0; iStation < 2; ++iStation ) { - for ( int iLayer = 0; iLayer < 2; ++iLayer ) { - - const unsigned int layerIndex = 2 * iStation + iLayer; - const float zLayer = m_geomcache.layers[layerIndex].z; - const double yPredLay = stateY + ( zLayer - stateZ ) * stateTy; - const double xPredLay = stateX + ( zLayer - stateZ ) * stateTx + bendParam * ( zLayer - p_zUTField ); - - LHCb::UTDAQ::findSectors( layerIndex, xPredLay, yPredLay, xTol, yTol, m_geomcache.layers[layerIndex], sectors ); - std::pair prevSector{-1, -1}; - for ( auto& sector : sectors ) { - // sectors can be duplicated in the list, but they are ordered - if ( sector == prevSector ) continue; - prevSector = sector; - for ( auto& hit : m_HitHandler.get()->hits( iStation + 1, iLayer + 1, sector.first, sector.second ) ) { - const double yPred = stateY + ( hit.zAtYEq0() - stateZ ) * stateTy; - - if ( !hit.isYCompatible( yPred, yTol ) ) continue; - - const auto y = stateY + ( hit.zAtYEq0() - stateZ ) * stateTy; - auto xx = hit.xAt( y ); - - const double xPred = - stateX + ( hit.zAtYEq0() - stateZ ) * stateTx + bendParam * ( hit.zAtYEq0() - p_zUTField ); - if ( xx > xPred + xTol ) break; - if ( xx < xPred - xTol ) continue; - - const double projDist = ( xPred - xx ) * ( p_zUTProj - p_zMSPoint ) / ( hit.zAtYEq0() - p_zMSPoint ); - selected.emplace_back( &hit, xx, hit.zAtYEq0(), projDist, Tf::HitBase::UsedByPatMatch ); + //========================================================================= + // Select the hits in a certain window + //========================================================================= + bool PrAddUTHitsTool::selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { + + // -- Define the parameter that describes the bending + // -- in principle the call m_magFieldSvc->signedRelativeCurrent() is not needed for every track... 
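// -- The x prediction used below is a straight line from the state plus one effective magnetic
// -- kick at z = zUTField, scaled by q/p through bendParam (sketch with placeholder names):
// --
// --   float predictX( float x0, float tx, float z0, float zHit, float bendParam, float zUTField ) {
// --     return x0 + tx * ( zHit - z0 ) + bendParam * ( zHit - zUTField );
// --   }
// --   // with bendParam = utParam * ( -1 ) * signedRelativeCurrent * qOverP, as computed below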
+ const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); + hitsInLayers.size =0; + + const float stateX = filteredStates.x<sF>( t ).cast(); + const float stateY = filteredStates.y<sF>( t ).cast(); + const float stateZ = filteredStates.z<sF>( t ).cast(); + const float stateTx = filteredStates.tx<sF>( t ).cast(); + const float stateTy = filteredStates.ty<sF>( t ).cast(); + const float p = filteredStates.p<sF>( t ).cast(); + const float qop = filteredStates.qop<sF>( t ).cast(); + const float bendParam = p_utParam.value() * -1 * signedReCur * qop; + + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "selectHits: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " + << stateTy << " p " << p << endmsg; + + std::size_t nSize = 0; + std::size_t nLayers = 0; + const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); + for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; + + // -- Define the tolerance parameters + const F yTol = p_yTolSlope.value() / p; + const F xTol = p_xTol.value() + p_xTolSlope.value() / p; + + const int nPos = compBoundsArray[layerIndex].nPos<sI>( t ).cast(); + std::array<int, maxSectors + 1> sectors{0}; + for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<sI>( t, i ).cast(); } + + for ( int j = 0; j < nPos; j++ ) { + const std::pair<int, int>& temp = m_HitHandler.get()->indices( sectors[j] ); + const std::pair<int, int>& temp2 = m_HitHandler.get()->indices( sectors[j + 1] ); + const int firstIndex = temp.first; + const int shift = ( temp2.first == temp.second ); + const int lastIndex = ( shift == 1 ) ? temp2.second : temp.second; + j += shift; + + for( int i = firstIndex; i < lastIndex; i += simd::size){ + auto loopMask = simd::loop_mask( i, lastIndex); + const F yPred = stateY + (myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; + + const auto yMin = min( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); + const auto yMax = max( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); + const auto yy = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; + auto xx = myHits.xAtYEq0<F>( i ) + yy * myHits.dxDy<F>( i ); + F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ) + bendParam * (myHits.zAtYEq0<F>( i ) - p_zUTField.value()); + F absdx = abs( xx - xPred ); + + if ( none( absdx < xTol ) ) continue; + + auto mask = (yMin - yTol < yPred && yPred < yMax + yTol ) && ( absdx < xTol ) && loopMask; + + if ( none( mask ) ) continue; + const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / ( myHits.zAtYEq0<F>( i ) - p_zMSPoint.value() ); + + // save the selected hits + auto index = hitsInLayers.size; + + if ( ( index + simd::size ) >= LHCb::Pr::UT::Mut::Hits::max_hits ) { + error() << "Reached maximum number of hits. 
This is a temporary limitation and needs to be fixed" << endmsg; + break; + } + hitsInLayers.compressstore_x( index, mask, xx ); + hitsInLayers.compressstore_z( index, mask, myHits.zAtYEq0<F>( i ) ); + hitsInLayers.compressstore_cos( index, mask, myHits.cos<F>( i ) ); + hitsInLayers.compressstore_sin( index, mask, + myHits.cos<F>( i ) * -1.0f * myHits.dxDy<F>( i ) ); + hitsInLayers.compressstore_weight( index, mask, myHits.weight<F>( i ) ); + hitsInLayers.compressstore_projection( index, mask, projDist ); + hitsInLayers.compressstore_channelID( index, mask, myHits.channelID<I>( i ) ); + hitsInLayers.compressstore_index( index, mask, simd::indices( i ) ); // fill the index in the original hit container + hitsInLayers.size += simd::popcount( mask ); + } } - // -- would not have hits in 3 layers like this - if ( iStation == 1 && selected.empty() ) break; + nLayers += int( nSize != hitsInLayers.size ); + hitsInLayers.layerIndices[layerIndex] = nSize; + nSize = hitsInLayers.size; } + return nLayers > 2 ; } - return selected; -} -//========================================================================= -// Calculate Chi2 -//========================================================================= -void PrAddUTHitsTool::calculateChi2( double& chi2, const double& bestChi2, double& finalDist, const double& p, - UT::Mut::Hits& goodUT ) const { - - // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - - UT::Mut::Hits::iterator worst; - - double dist = 0; - chi2 = 1.e20; - - const double xTol = p_xTol + p_xTolSlope / p; - const double fixedWeight = 9. / ( xTol * xTol ); - - unsigned int nHits = goodUT.size(); - const unsigned int maxIterations = nHits; - unsigned int counter = 0; - - // -- Loop until chi2 has a reasonable value or no more outliers can be removed to improve it - // -- (with the counter as a sanity check to avoid infinite loops). - - unsigned int nDoF = 0; - std::array<unsigned int, 4> differentPlanes; - differentPlanes.fill( 0 ); - double worstDiff = -1.0; - double mat[6], rhs[3]; - - mat[0] = fixedWeight; // -- Fix X = 0 with fixedWeight - mat[1] = 0.; - mat[2] = fixedWeight * ( p_zUTProj - p_zMSPoint ) * - ( p_zUTProj - p_zMSPoint ); // -- Fix slope by point at multiple scattering point - mat[3] = 0.; - mat[4] = 0.; - mat[5] = fixedWeight; // -- Fix Y = 0 with fixedWeight - rhs[0] = 0.; - rhs[1] = 0.; - rhs[2] = 0.; - - for ( const auto& ut : goodUT ) { - const double w = ut.HitPtr->weight(); - const double dz = ut.z - p_zUTProj; - const double t = ut.HitPtr->sinT(); - const double dist2 = ut.projection; - mat[0] += w; - mat[1] += w * dz; - mat[2] += w * dz * dz; - mat[3] += w * t; - mat[4] += w * dz * t; - mat[5] += w * t * t; - rhs[0] += w * dist2; - rhs[1] += w * dist2 * dz; - rhs[2] += w * dist2 * t; - - if ( 0 == differentPlanes[ut.HitPtr->planeCode()]++ ) ++nDoF; - } + //========================================================================= + // Calculate Chi2 + //========================================================================= + void PrAddUTHitsTool::calculateChi2( float& chi2, const float& bestChi2, float& finalDist, const float& p, + LHCb::Pr::UT::Mut::Hits& goodUT ) const { - // -- Loop to remove outliers - // -- Don't loop more often than number of hits in the selection - // -- The counter protects infinite loops in very rare occasions. 
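// -- The loop below repeatedly solves the 3x3 weighted normal equations M * a = rhs for the line
// -- parameters a = (offset, slope, offsetY) and, if the fit is bad, removes the worst hit.
// -- With w = weight, dz = z - zUTProj, t = sinT and d = projection, the accumulation above builds
// -- (lower triangle packed into mat[6]; CholeskyDecomp::Solve overwrites rhs with the solution):
// --
// --   M = | sum(w)      sum(w*dz)     sum(w*t)    |      rhs = | sum(w*d)    |
// --       | sum(w*dz)   sum(w*dz*dz)  sum(w*dz*t) |            | sum(w*d*dz) |
// --       | sum(w*t)    sum(w*dz*t)   sum(w*t*t)  |            | sum(w*d*t)  |
// --
// -- plus the fixedWeight terms, which act as pseudo-measurements pulling offset, offsetY and the
// -- slope evaluated at the multiple-scattering point towards zero.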
- while ( chi2 > 1e10 && counter < maxIterations ) { + // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - worstDiff = -1.0; - - // -- This is needed since 'CholeskyDecomp' overwrites rhs - // -- which is needed later on - const double saveRhs[3] = {rhs[0], rhs[1], rhs[2]}; + auto worst = 0; + float dist = 0; + chi2 = 1.e20; - CholeskyDecomp<double, 3> decomp( mat ); - if ( UNLIKELY( !decomp ) ) { - chi2 = 1e42; - break; - } else { - decomp.Solve( rhs ); + const float xTol = p_xTol.value() + p_xTolSlope.value() / p; + const float fixedWeight = 9. / ( xTol * xTol ); + + unsigned int nHits = goodUT.size; + const unsigned int maxIterations = nHits; + unsigned int counter = 0; + + // -- Loop until chi2 has a reasonable value or no more outliers can be removed to improve it + // -- (with the counter as a sanity check to avoid infinite loops). + + unsigned int nDoF = 0; + std::array<unsigned int, 4> differentPlanes; + differentPlanes.fill( 0 ); + float worstDiff = -1.0; + float mat[6], rhs[3]; + + mat[0] = fixedWeight; // -- Fix X = 0 with fixedWeight + mat[1] = 0.; + mat[2] = fixedWeight * ( p_zUTProj - p_zMSPoint ) * + ( p_zUTProj - p_zMSPoint ); // -- Fix slope by point at multiple scattering point + mat[3] = 0.; + mat[4] = 0.; + mat[5] = fixedWeight; // -- Fix Y = 0 with fixedWeight + rhs[0] = 0.; + rhs[1] = 0.; + rhs[2] = 0.; + + for ( auto i = 0; i < int( goodUT.size ); i++ ) { + const float w = goodUT.weights[i]; + const float dz = goodUT.zs[i] - p_zUTProj; + const float t = goodUT.sins[i]; + const float dist2 = goodUT.projections[i]; + mat[0] += w; + mat[1] += w * dz; + mat[2] += w * dz * dz; + mat[3] += w * t; + mat[4] += w * dz * t; + mat[5] += w * t * t; + rhs[0] += w * dist2; + rhs[1] += w * dist2 * dz; + rhs[2] += w * dist2 * t; + + if ( 0 == differentPlanes[goodUT.planeCode<sI>( i ).cast()]++ ) ++nDoF; } - const double offset = rhs[0]; - const double slope = rhs[1]; - const double offsetY = rhs[2]; - - rhs[0] = saveRhs[0]; - rhs[1] = saveRhs[1]; - rhs[2] = saveRhs[2]; - - chi2 = fixedWeight * ( offset * offset + offsetY * offsetY + - ( p_zUTProj - p_zMSPoint ) * ( p_zUTProj - p_zMSPoint ) * slope * slope ); - - for ( auto itSel = goodUT.begin(); goodUT.end() != itSel; ++itSel ) { - const auto ut = *itSel; - const double w = ut.HitPtr->weight(); - const double dz = ut.z - p_zUTProj; - dist = ut.projection - offset - slope * dz - offsetY * ut.HitPtr->sinT(); - if ( ( 1 < differentPlanes[ut.HitPtr->planeCode()] || nDoF == nHits ) && worstDiff < w * dist * dist ) { - worstDiff = w * dist * dist; - worst = itSel; + // -- Loop to remove outliers + // -- Don't loop more often than number of hits in the selection + // -- The counter protects infinite loops in very rare occasions. + while ( chi2 > 1e10 && counter < maxIterations ) { + + worstDiff = -1.0; + + // -- This is needed since 'CholeskyDecomp' overwrites rhs + // -- which is needed later on + const double saveRhs[3] = {rhs[0], rhs[1], rhs[2]}; + + CholeskyDecomp<double, 3> decomp( mat ); + if ( UNLIKELY( !decomp ) ) { + chi2 = 1e42; + break; + } else { + decomp.Solve( rhs ); } - chi2 += w * dist * dist; - } - chi2 /= nDoF; + const double offset = rhs[0]; + const double slope = rhs[1]; + const double offsetY = rhs[2]; - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) && worstDiff > 0. 
) { - info() << format( " chi2 %10.2f nDoF%2d wors %8.2f proj %6.2f offset %8.3f slope %10.6f offsetY %10.6f", chi2, - nDoF, worstDiff, ( *worst ).projection, offset, slope, offsetY ) - << endmsg; - } + rhs[0] = saveRhs[0]; + rhs[1] = saveRhs[1]; + rhs[2] = saveRhs[2]; - // -- Remove last point (outlier) if bad fit... - if ( worstDiff > 0. && bestChi2 < chi2 && nHits > 3 ) { - - const auto ut = *worst; - const double w = ut.HitPtr->weight(); - const double dz = ut.z - p_zUTProj; - const double t = ut.HitPtr->sinT(); - const double dist2 = ut.projection; - mat[0] -= w; - mat[1] -= w * dz; - mat[2] -= w * dz * dz; - mat[3] -= w * t; - mat[4] -= w * dz * t; - mat[5] -= w * t * t; - rhs[0] -= w * dist2; - rhs[1] -= w * dist2 * dz; - rhs[2] -= w * dist2 * t; - - if ( 1 == differentPlanes[ut.HitPtr->planeCode()]-- ) --nDoF; - --nHits; - - goodUT.erase( worst ); - chi2 = 1.e11; // --Start new iteration - } + chi2 = fixedWeight * ( offset * offset + offsetY * offsetY + + ( p_zUTProj.value() - p_zMSPoint.value() ) * ( p_zUTProj.value() - p_zMSPoint.value() ) * slope * slope ); + + for ( auto it = 0; it < int( goodUT.size ); it++) { + const float w = goodUT.weights[it]; + const float dz = goodUT.zs[it] - p_zUTProj; + dist = goodUT.projections[it] - offset - slope * dz - offsetY * goodUT.sins[it]; + if ( ( 1 < differentPlanes[goodUT.planeCode<sI>( it ).cast()] || nDoF == nHits ) && worstDiff < w * dist * dist ) { + worstDiff = w * dist * dist; + worst = it; + } + chi2 += w * dist * dist; + } + + chi2 /= nDoF; - // -- Increase the sanity check counter - ++counter; + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) && worstDiff > 0. ) { + info() << format( " chi2 %10.2f nDoF%2d wors %8.2f proj %6.2f offset %8.3f slope %10.6f offsetY %10.6f", chi2, + nDoF, worstDiff, goodUT.projections[worst], offset, slope, offsetY ) + << endmsg; + } + // -- Remove last point (outlier) if bad fit... + if ( worstDiff > 0. 
&& bestChi2 < chi2 && nHits > 3 ) { + + const double w = goodUT.weights[worst]; + const double dz = goodUT.zs[worst] - p_zUTProj; + const double t = goodUT.sins[worst]; + const double dist2 = goodUT.projections[worst]; + mat[0] -= w; + mat[1] -= w * dz; + mat[2] -= w * dz * dz; + mat[3] -= w * t; + mat[4] -= w * dz * t; + mat[5] -= w * t * t; + rhs[0] -= w * dist2; + rhs[1] -= w * dist2 * dz; + rhs[2] -= w * dist2 * t; + + if ( 1 == differentPlanes[goodUT.planeCode<sI>( worst ).cast()]-- ) --nDoF; + --nHits; + + } + // -- Increase the sanity check counter + ++counter; + } + + finalDist = dist; } - finalDist = dist; -} - -//========================================================================= -// Print out info -//========================================================================= -void PrAddUTHitsTool::printInfo( double dist, double chi2, const LHCb::State& state, - const UT::Mut::Hits& goodUT ) const { - - // -- Print some information at the end - info() << "*** Store this candidate, nbTT = " << goodUT.size() << " chi2 " << chi2 << endmsg; - for ( const auto& ut : goodUT ) { - double z = ut.z; - double mPred = ut.x + dist; - info() << ut.HitPtr->planeCode() - << format( " z%7.0f x straight %7.2f pred %7.2f x %7.2f diff %7.2f ", z, - state.x() + state.tx() * ( z - state.z() ), mPred, ut.HitPtr->xAtYMid(), dist ) - << endmsg; + //========================================================================= + // Print out info + //========================================================================= + void PrAddUTHitsTool::printInfo( float dist, float chi2, + const LHCb::Pr::UT::Mut::Hits& goodUT ) const { + + // -- Print some information at the end + info() << "*** Store this candidate, nbTT = " << goodUT.size << " chi2 " << chi2 << endmsg; + for ( auto i = 0; i < int( goodUT.size ); i +=simd::size ){ + sF z = goodUT.z<sF>( i ); + sF mPred = goodUT.x<sF>( i ) + dist; + info() << goodUT.planeCode<sI>( i ) + << format( " z%7.0f pred %7.2f diff %7.2f ", z, mPred, dist ) + << endmsg; + } } -} + +} //namespace LHCb::Pr diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index df8b5393c04..c1d1b43462a 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -11,16 +11,22 @@ #ifndef PRADDUTHITSTOOL_H #define PRADDUTHITSTOOL_H 1 -#include "PrKernel/UTHitHandler.h" +#include "PrKernel/PrUTHitHandler.h" #include "Event/State.h" #include "Event/Track_v2.h" #include "IPrAddUTHitsTool.h" // Interface #include "Kernel/ILHCbMagnetSvc.h" #include "UTDAQ/UTDAQHelper.h" +#include "UTDAQ/UTInfo.h" +#include "LHCbMath/SIMDWrapper.h" #include "GaudiAlg/GaudiTool.h" #include <GaudiKernel/DataObjectHandle.h> +#include "Event/PrLongTracks.h" +#include "PrKernel/PrMutUTHits.h" +#include "vdt/log.h" +#include "vdt/sqrt.h" /* * @class PrAddUTHitsTool PrAddUTHitsTool.h @@ -46,64 +52,112 @@ * @date 2016-05-11 * */ - -class PrAddUTHitsTool : public extends<GaudiTool, IPrAddUTHitsTool> { - using Track = LHCb::Event::v2::Track; - -public: +namespace LHCb::Pr{ + + using simd = SIMDWrapper::avx2::types; + using I = simd::int_v; + using F = simd::float_v; + using scalar = SIMDWrapper::scalar::types; + using sI = scalar::int_v; + using sF = scalar::float_v; + + constexpr static int max_tracks= align_size( 1024 ); + constexpr static int maxSectors = 9; + + struct MiniStates final { + std::array<float, max_tracks> xs; + std::array<float, max_tracks> ys; + std::array<float, max_tracks> zs; + std::array<float, max_tracks> txs; + 
std::array<float, max_tracks> tys; + std::array<float, max_tracks> qops; + std::array<float, max_tracks> ps; + std::array<int, max_tracks> indexs; + + std::size_t size{0}; + + SOA_ACCESSOR( x, xs.data() ) + SOA_ACCESSOR( y, ys.data() ) + SOA_ACCESSOR( z, zs.data() ) + SOA_ACCESSOR( tx, txs.data() ) + SOA_ACCESSOR( ty, tys.data() ) + SOA_ACCESSOR( qop, qops.data() ) + SOA_ACCESSOR( p, ps.data() ) + SOA_ACCESSOR( index, indexs.data() ) + }; + struct Boundaries final { + + std::array<int, 9 * max_tracks> sects; + std::array<float, max_tracks> xTols; + std::array<int, max_tracks> nPoss; + + std::size_t size{0}; + SOA_ACCESSOR_VAR( sect, &( sects[pos * max_tracks] ), int pos ) + SOA_ACCESSOR( xTol, xTols.data() ) + SOA_ACCESSOR( nPos, nPoss.data() ) + }; + + class PrAddUTHitsTool : public extends<GaudiTool, IPrAddUTHitsTool> { + + using Tracks = LHCb::Pr::Long::Tracks; + + public: /// Standard constructor - using extends::extends; - - StatusCode initialize() override; - - /** @brief Add UT clusters to matched tracks. This calls returnUTHits internally - @param track Track to add the UT hits to - */ - StatusCode addUTHits( Track& track ) const override; - - /** Return UT hits without adding them. - @param state State closest to UT for extrapolation (normally Velo state) - @param ttHits Container to fill UT hits in - @param finalChi2 internal chi2 of the UT hit adding - @param p momentum estimate. If none given, the one from the state will be taken - */ - UT::Mut::Hits returnUTHits( LHCb::State& state, double& finalChi2, double p = 0 ) const override; - -private: - StatusCode recomputeGeometry(); - - DeUTDetector* m_utDet = nullptr; - /// information about the different layers - LHCb::UTDAQ::GeomCache m_geomcache; - - DataObjectReadHandle<UT::HitHandler> m_HitHandler{this, "UTHitsLocation", UT::Info::HitLocation}; - - Gaudi::Property<double> p_zUTField{this, "ZUTField", 1740. * Gaudi::Units::mm}; - Gaudi::Property<double> p_zMSPoint{this, "ZMSPoint", 400. * Gaudi::Units::mm}; - Gaudi::Property<double> p_utParam{this, "UTParam", 29.}; - Gaudi::Property<double> p_zUTProj{this, "ZUTProj", 2500. 
* Gaudi::Units::mm}; - Gaudi::Property<double> p_maxChi2Tol{this, "MaxChi2Tol", 2.0}; - Gaudi::Property<double> p_maxChi2Slope{this, "MaxChi2Slope", 25000}; - Gaudi::Property<double> p_maxChi2POffset{this, "MaxChi2POffset", 100}; - Gaudi::Property<double> p_yTolSlope{this, "YTolSlope", 20000.}; - Gaudi::Property<double> p_xTol{this, "XTol", 1.0}; - Gaudi::Property<double> p_xTolSlope{this, "XTolSlope", 30000.0}; - double m_invMajAxProj2 = 0.0; - Gaudi::Property<double> p_majAxProj{ - this, "MajAxProj", 20.0 * Gaudi::Units::mm, - [=]( Property& ) { this->m_invMajAxProj2 = 1 / ( this->p_majAxProj * this->p_majAxProj ); }, - Gaudi::Details::Property::ImmediatelyInvokeHandler{true}}; - Gaudi::Property<double> p_minAxProj{this, "MinAxProj", 2.0 * Gaudi::Units::mm}; - - mutable Gaudi::Accumulators::SummingCounter<unsigned int> m_hitsAddedCounter{this, "#UT hits added"}; - mutable Gaudi::Accumulators::Counter<> m_tracksWithHitsCounter{this, "#tracks with hits added"}; - - ServiceHandle<ILHCbMagnetSvc> m_magFieldSvc{this, "MagneticField", "MagneticFieldSvc"}; - - UT::Mut::Hits selectHits( const LHCb::State& state, const double p ) const; - void calculateChi2( double& chi2, const double& bestChi2, double& finalDist, const double& p, - UT::Mut::Hits& goodUT ) const; - void printInfo( double dist, double chi2, const LHCb::State& state, const UT::Mut::Hits& goodUT ) const; -}; - + using extends::extends; + + StatusCode initialize() override; + + /** @brief Add UT clusters to matched tracks. This calls returnUTHits internally + @param track Track to add the UT hits to + */ + StatusCode addUTHits( Tracks& longtracks ) const override; + + /** Return UT hits without adding them. + @param state State closest to UT for extrapolation (normally Velo state) + @param ttHits Container to fill UT hits in + @param finalChi2 internal chi2 of the UT hit adding + @param p momentum estimate. If none given, the one from the state will be taken + */ + + //LHCb::Pr::UT::Mut::Hits returnUTHits( LHCb::State& state, float& finalChi2, float p = 0 ) const; + + private: + StatusCode recomputeGeometry(); + + DeUTDetector* m_utDet = nullptr; + /// information about the different layers + LHCb::UTDAQ::GeomCache m_geomcache; + + DataObjectReadHandle<LHCb::Pr::UT::HitHandler> m_HitHandler{this, "UTHitsLocation", "PrUTHitHandler"}; + + Gaudi::Property<float> p_zUTField{this, "ZUTField", 1740. * Gaudi::Units::mm}; + Gaudi::Property<float> p_zMSPoint{this, "ZMSPoint", 400. * Gaudi::Units::mm}; + Gaudi::Property<float> p_utParam{this, "UTParam", 29.}; + Gaudi::Property<float> p_zUTProj{this, "ZUTProj", 2500. 
* Gaudi::Units::mm}; + Gaudi::Property<float> p_maxChi2Tol{this, "MaxChi2Tol", 2.0}; + Gaudi::Property<float> p_maxChi2Slope{this, "MaxChi2Slope", 25000}; + Gaudi::Property<float> p_maxChi2POffset{this, "MaxChi2POffset", 100}; + Gaudi::Property<float> p_yTolSlope{this, "YTolSlope", 20000.}; + Gaudi::Property<float> p_xTol{this, "XTol", 1.0}; + Gaudi::Property<float> p_xTolSlope{this, "XTolSlope", 30000.0}; + float m_invMajAxProj2 = 0.0; + Gaudi::Property<float> p_majAxProj{ + this, "MajAxProj", 20.0 * Gaudi::Units::mm, + [=]( Property& ) { this->m_invMajAxProj2 = 1 / ( this->p_majAxProj * this->p_majAxProj ); }, + Gaudi::Details::Property::ImmediatelyInvokeHandler{true}}; + Gaudi::Property<float> p_minAxProj{this, "MinAxProj", 2.0 * Gaudi::Units::mm}; + + mutable Gaudi::Accumulators::SummingCounter<unsigned int> m_hitsAddedCounter{this, "#UT hits added"}; + mutable Gaudi::Accumulators::Counter<> m_tracksWithHitsCounter{this, "#tracks with hits added"}; + + ServiceHandle<ILHCbMagnetSvc> m_magFieldSvc{this, "MagneticField", "MagneticFieldSvc"}; + + std::array<LHCb::Pr::Boundaries, UTInfo::TotalLayers> findAllSectors( Tracks& tracks, MiniStates& filteredStates) const; + LHCb::Pr::UT::Mut::Hits returnUTHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, std::size_t t) const; + bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const; + void calculateChi2( float& chi2, const float & bestChi2, float& finalDist, const float& p, + LHCb::Pr::UT::Mut::Hits& goodUT ) const; + void printInfo( float dist, float chi2, const LHCb::Pr::UT::Mut::Hits& goodUT ) const; + }; +} //namespace LHCb::Pr #endif // PRADDUTHITSTOOL_H diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 21a41e1a93f..046c25acc86 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -567,6 +567,7 @@ public: if ( sc.isFailure() ) return sc; // error printed already by GaudiAlgorithm if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Initialize" << endmsg; + //info()<<"......DEBUGS Initialize Forward BEGIN" <<endmsg; // Initialise stuff we imported from PrForwardTool @@ -779,6 +780,7 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSc ZoneCache const& cache ) const { if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Execute" << endmsg; + //info()<<"......DEBUGS Forward BEGIN" <<endmsg; if ( UNLIKELY( input_tracks.size() == 0 ) ) { auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); @@ -2212,30 +2214,25 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& int uttrack = cand.track(); - /// TO DO: change the LHCbID to be index - std::vector<LHCb::LHCbID> utid; - utid.reserve( 30 ); auto n_vphits = 0; auto n_uthits = 0; if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { result.store_trackVP<I>( currentsize, input_tracks.template trackVP<I>( uttrack ) ); result.store_trackUT<I>( currentsize, uttrack ); - const int vpidx = input_tracks.template trackVP<I>( uttrack ).cast(); - const int vphits = ( *velo_ancestors ).template nHits<I>( vpidx ).cast(); - const int uthits = input_tracks.template nHits<I>( uttrack ).cast(); + const int vphits = input_tracks.template nVPHits<I>( uttrack ).cast(); + const int uthits = input_tracks.template nUTHits<I>( uttrack ).cast(); n_vphits = vphits; n_uthits = 
uthits; result.store_nVPHits<I>( currentsize, vphits ); result.store_nUTHits<I>( currentsize, uthits ); for ( auto idx{0}; idx < vphits; ++idx ) { - result.store_vp_index<I>( currentsize, idx, input_tracks.template vp_index<I>( vpidx, idx ) ); + result.store_vp_index<I>( currentsize, idx, input_tracks.template vp_index<I>( uttrack, idx ) ); + result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); } for ( auto idx{0}; idx < uthits; ++idx ) { result.store_ut_index<I>( currentsize, idx, input_tracks.template ut_index<I>( uttrack, idx ) ); - } - for ( auto idx{0}; idx < vphits + uthits; ++idx ) { - result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); + result.store_lhcbID<I>( currentsize, vphits + idx, input_tracks.template lhcbID<I>( uttrack, vphits + idx ) ); } } else { @@ -2249,8 +2246,18 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); } result.store_nVPHits<I>( currentsize, vphits ); - // only used to disable unused warning in the velo track input case - // uttrack = input_tracks.size(); + result.store_nUTHits<I>( currentsize, 0); + result.store_ut_index<I>( currentsize, 0, -1 ); + } + + //== hits indices, max_fthits=15, not sure if this number is reasonable. + assert( id.size() <= LHCb::Pr::Long::Tracks::max_fthits && "Container cannot store more than 15 SciFi hits per track" ); + + auto const& ihits = cand.ihits(); + result.store_nFTHits<I>( currentsize, ihits.size() ); + for ( size_t idx{0}; idx < ihits.size(); ++idx ) { + result.store_ft_index<I>( currentsize, idx, ihits[idx] ); + result.store_lhcbID<I>( currentsize, n_vphits + n_uthits + idx, id[idx].lhcbID() ); } const double qOverP = cand.getQoP(); @@ -2268,40 +2275,12 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_statePos<F>( currentsize, pos ); result.store_stateDir<F>( currentsize, dir ); - if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { - if ( m_addUTHitsTool.isEnabled() ) { - double chi2{0}; - LHCb::State vState; - vState.setState( cand.seed().x0, cand.seed().y0, cand.seed().z0, cand.seed().tx, cand.seed().ty, qOverP ); - auto uthits = m_addUTHitsTool->returnUTHits( vState, chi2, vState.p() ); - // There are candidates with more than 8 UT hits. To be understood. Better protect this.... - if ( uthits.size() < 3 || uthits.size() > 16 ) { - if ( msgLevel( MSG::DEBUG ) ) debug() << " Failure in adding UT hits to track" << endmsg; - } else { - for ( auto const hit : uthits ) id.emplace_back( hit.HitPtr->chanID() ); - std::sort( id.begin(), id.end() ); - // TO do: change the LHCbIDs to indices - for ( auto const hit : uthits ) utid.emplace_back( hit.HitPtr->chanID() ); - std::sort( utid.begin(), utid.end() ); - } - } - for ( size_t idx{0}; idx < utid.size(); ++idx ) { - result.store_ut_index<I>( currentsize, idx, utid[idx].lhcbID() ); - result.store_lhcbID<I>( currentsize, n_vphits + idx, utid[idx].lhcbID() ); - } - n_uthits = utid.size(); - result.store_nUTHits<I>( currentsize, utid.size() ); - } - - //== hits indices, max_fthits=15, not sure if we need this. 
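As a side note on the bookkeeping above: the lhcbID slot of a SciFi hit is offset by the VP and UT hits already stored, so per track the ID list is the concatenation [ VP | UT | SciFi ]. A plain-array sketch of that convention follows; the real LHCb::Pr::Long::Tracks container uses its store_* accessors instead, and the names below are purely illustrative.

#include <cstdint>
#include <vector>

// one track's concatenated hit IDs: [ VP hits | UT hits | SciFi hits ]
struct TrackIdRecord {
  std::vector<std::uint32_t> lhcbIDs;
  int nVP{0}, nUT{0}, nFT{0};
};

// mirrors the store_lhcbID pattern above: the idx-th SciFi ID lands at
// slot nVP + nUT + idx, after everything written for the upstream segments
inline void appendSciFiIds( TrackIdRecord& track, const std::vector<std::uint32_t>& ftIds ) {
  track.nFT = static_cast<int>( ftIds.size() );
  track.lhcbIDs.resize( track.nVP + track.nUT + track.nFT );
  for ( int idx = 0; idx < track.nFT; ++idx ) track.lhcbIDs[track.nVP + track.nUT + idx] = ftIds[idx];
}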
- assert( id.size() <= 15 && "Container cannot store more than 15 SciFi hits per track" ); - - auto const& ihits = cand.ihits(); - result.store_nFTHits<I>( currentsize, ihits.size() ); - for ( size_t idx{0}; idx < ihits.size(); ++idx ) { - result.store_ft_index<I>( currentsize, idx, ihits[idx] ); - result.store_lhcbID<I>( currentsize, n_vphits + n_uthits + idx, id[idx].lhcbID() ); - } + LHCb::State vState; + vState.setState( cand.seed().x0, cand.seed().y0, cand.seed().z0, cand.seed().tx, cand.seed().ty, qOverP ); + auto velopos = Vec3<F>( vState.x(), vState.y(), vState.z() ); + auto velodir = Vec3<F>( vState.tx(), vState.ty(), 1.f ); + result.store_vStatePos<F>( currentsize, velopos ); + result.store_vStateDir<F>( currentsize, velodir ); result.size() += 1; if ( UNLIKELY( result.size() == LHCb::Pr::Long::Tracks::max_tracks ) ) { // FIXME: find a better way to define @@ -2314,5 +2293,14 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& if ( msgLevel( MSG::DEBUG ) ) debug() << "Store track quality " << cand.quality() << endmsg; // -- < Debug -------- } // next candidate + + // add UT hits into the tracks + if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { + if ( m_addUTHitsTool.isEnabled() ) { + auto sc = m_addUTHitsTool->addUTHits( result ); + if ( sc.isFailure() ) info()<< "adding UT clusters failed!" <<endmsg; + } + } + return result; } diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index b54edb2f959..c7c6a585744 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -91,26 +91,21 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit // info() <<"total UT Hits " << uthithandler.nbHits() <<endmsg; for ( auto& track : tracks ) { - auto& ids = track.lhcbIDs(); - for ( auto& id : ids ) { + for ( auto& id : track.lhcbIDs() ) { if ( !( id.isUT() ) ) continue; usedUTHits.emplace_back( id.utID().channelID() ); } } + // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; - for ( int iStation = 1; iStation < 3; ++iStation ) { - for ( int iLayer = 1; iLayer < 3; ++iLayer ) { - for ( int iRegion = 1; iRegion < 4; ++iRegion ) { - for ( int iSector = 1; iSector < 99; ++iSector ) { + for ( int iStation = 1; iStation <= UTInfo::Stations; ++iStation ) { + for ( int iLayer = 1; iLayer <= UTInfo::Layers; ++iLayer ) { + for ( int iRegion = 1; iRegion <= UTInfo::Regions; ++iRegion ) { + for ( int iSector = 1; iSector <= UTInfo::Sectors; ++iSector ) { for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { - bool used = false; - for ( auto& id : usedUTHits ) { - if ( uthit.chanID().channelID() == id ) { - used = true; - break; - } - } + bool used = std::any_of(usedUTHits.begin(), usedUTHits.end(), [utid = uthit.chanID().channelID()](const auto& id) { return utid == id;}); + if ( used ) continue; const unsigned int fullChanIdx = UT::HitHandler::HitsInUT::idx( iStation, iLayer, iRegion, iSector ); const auto* aSector = m_utDet->getSector( uthit.chanID() ); diff --git a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp index 78ac365797e..9f6fb4ba994 100644 --- a/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp +++ b/Pr/PrAlgorithms/src/PrResidualVeloTracks.cpp @@ -46,8 +46,8 @@ // //----------------------------------------------------------------------------- -typedef LHCb::Pr::Long::Tracks LongTracks; -typedef LHCb::Pr::Velo::Tracks VeloTracks; +using LongTracks = 
LHCb::Pr::Long::Tracks; +using VeloTracks = LHCb::Pr::Velo::Tracks; class PrResidualVeloTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Velo::Tracks( const LongTracks&, const VeloTracks& )> { @@ -79,7 +79,7 @@ LHCb::Pr::Velo::Tracks PrResidualVeloTracks::operator()( const LongTracks& track if ( velotracks.empty() ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Velo Track container '" << inputLocation<1>() << "' is empty" << endmsg; + debug() << "Velo Track container '" << inputLocation<VeloTracks>() << "' is empty" << endmsg; return tmp; } diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 7a7200372f2..7f95738d1e3 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -856,9 +856,9 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); - auto hit_index = tracks.hit<SIMDWrapper::scalar::types::int_v>( t, h ).cast(); + auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); - tracksForward.compressstore_lhcbID( i, h, backwards, lhcbid ); + tracksBackward.compressstore_lhcbID( i, h, backwards, lhcbid ); } tracksBackward.size() += simd::popcount( backwards ); @@ -877,8 +877,8 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); - auto hit_index = min( tracks.hit<I>( t, h ), I{2048*26}); - const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); + auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); + const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index ea12c1c815b..7c83b816132 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -75,11 +75,14 @@ namespace LHCb::Pr { const float zMidUT, const simd::float_v qpxz2p, const int t, simd::mask_v& goodFitMask ) { - const simd::float_v x = protoTracks.xState<simd::float_v>( t ); - const simd::float_v y = protoTracks.yState<simd::float_v>( t ); - const simd::float_v z = protoTracks.zState<simd::float_v>( t ); - const simd::float_v tx = protoTracks.txState<simd::float_v>( t ); - const simd::float_v ty = protoTracks.tyState<simd::float_v>( t ); + const Vec3<simd::float_v> pos = protoTracks.pos<simd::float_v>( t ); + const Vec3<simd::float_v> dir = protoTracks.dir<simd::float_v>( t ); + + const simd::float_v x = pos.x; + const simd::float_v y = pos.y; + const simd::float_v z = pos.z; + const simd::float_v tx = dir.x; + const simd::float_v ty = dir.y; const simd::float_v zKink = magFieldParams[0] - ty * ty * magFieldParams[1] - ty * ty * ty * ty * magFieldParams[2]; const simd::float_v xMidField = x + tx * ( zKink - z ); @@ -302,8 +305,6 @@ namespace LHCb::Pr { /// Initialization StatusCode VeloUT::initialize() { - // std::cout << "initialize" << std::endl; - return Transformer::initialize().andThen( [&] { return m_PrUTMagnetTool.retrieve(); } ).andThen( [&] { // m_zMidUT is a position of normalization plane which should to be close to z middle of UT ( +- 5 cm ). // Cached once in VeloUTTool at initialization. No need to update with small UT movement. 
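On the VeloClusterTrackingSIMD change above: replacing the raw hit index with select( h < n_hits, tracks.hit<I>( t, h ), 0 ) makes lanes beyond a track's last hit read a harmless element instead of out-of-range memory before the gather. A scalar sketch of the same idea, written without the SIMDWrapper types (the array-of-lanes layout here is only illustrative):

#include <array>
#include <cstddef>
#include <vector>

// scalar stand-in for a masked gather: invalid lanes get their index redirected
// to element 0, so the lookup never reads past the end of the table; the caller
// discards those lanes afterwards via its store mask
template <std::size_t Lanes>
std::array<int, Lanes> guardedGather( const std::vector<int>& table, const std::array<int, Lanes>& index,
                                      const std::array<bool, Lanes>& laneValid ) {
  std::array<int, Lanes> out{};
  for ( std::size_t l = 0; l < Lanes; ++l ) {
    const int safeIndex = laneValid[l] ? index[l] : 0; // select( h < n_hits, hit, 0 )
    out[l]              = table[safeIndex];
  }
  return out;
}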
@@ -361,39 +362,27 @@ namespace LHCb::Pr { if ( !getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[t2], tEff ) ) continue; // -- this is a temporary solution to gradually adapt the algo - scalar::float_v x = filteredStates.x<scalar::float_v>( tEff ); - scalar::float_v y = filteredStates.y<scalar::float_v>( tEff ); - scalar::float_v z = filteredStates.z<scalar::float_v>( tEff ); - scalar::float_v tx = filteredStates.tx<scalar::float_v>( tEff ); - scalar::float_v ty = filteredStates.ty<scalar::float_v>( tEff ); + Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); + Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); MiniState trState; - trState.x = x.cast(); - trState.y = y.cast(); - trState.z = z.cast(); - trState.tx = tx.cast(); - trState.ty = ty.cast(); + trState.x = pos.x.cast(); + trState.y = pos.y.cast(); + trState.z = pos.z.cast(); + trState.tx = dir.x.cast(); + trState.ty = dir.y.cast(); TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } if ( helper.bestIndices[0] == -1 ) continue; - scalar::float_v covx = filteredStates.covx<scalar::float_v>( tEff ); - scalar::float_v covy = filteredStates.covy<scalar::float_v>( tEff ); - scalar::float_v covz = filteredStates.covz<scalar::float_v>( tEff ); - scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); int trackIndex = pTracks.size; // -- manual compressstore to keep everything in sync and fill the registers in the last function - pTracks.store_xState<scalar::float_v>( trackIndex, x ); - pTracks.store_yState<scalar::float_v>( trackIndex, y ); - pTracks.store_zState<scalar::float_v>( trackIndex, z ); - pTracks.store_txState<scalar::float_v>( trackIndex, tx ); - pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); - pTracks.store_covx<scalar::float_v>( trackIndex, covx ); - pTracks.store_covy<scalar::float_v>( trackIndex, covy ); - pTracks.store_covz<scalar::float_v>( trackIndex, covz ); + pTracks.store_pos<scalar::float_v>( trackIndex, pos ); + pTracks.store_dir<scalar::float_v>( trackIndex, dir ); pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); @@ -461,14 +450,9 @@ namespace LHCb::Pr { simd::mask_v csMask = loopMask && !mask && ( !passTracks || !passHoleMask ); int index = filteredStates.size; - filteredStates.compressstore_x<simd::float_v>( index, csMask, pos.x ); - filteredStates.compressstore_y<simd::float_v>( index, csMask, pos.y ); - filteredStates.compressstore_z<simd::float_v>( index, csMask, pos.z ); - filteredStates.compressstore_tx<simd::float_v>( index, csMask, dir.x ); - filteredStates.compressstore_ty<simd::float_v>( index, csMask, dir.y ); - filteredStates.compressstore_covx<simd::float_v>( index, csMask, covX.x ); - filteredStates.compressstore_covy<simd::float_v>( index, csMask, covX.y ); - filteredStates.compressstore_covz<simd::float_v>( index, csMask, covX.z ); + filteredStates.compressstore_pos<simd::float_v>( index, csMask, pos ); + filteredStates.compressstore_dir<simd::float_v>( index, csMask, dir ); + filteredStates.compressstore_cov<simd::float_v>( index, csMask, covX ); filteredStates.compressstore_index<simd::int_v>( index, csMask, simd::indices( t ) ); filteredStates.size += simd::popcount( csMask ); @@ -857,11 +841,11 @@ namespace LHCb::Pr { //== 
Handle states. copy Velo one, add TT. const simd::float_v zOrigin = - select( protoTracks.tyState<simd::float_v>( t ) > 0.001f, - protoTracks.zState<simd::float_v>( t ) - - protoTracks.yState<simd::float_v>( t ) / protoTracks.tyState<simd::float_v>( t ), - protoTracks.zState<simd::float_v>( t ) - - protoTracks.xState<simd::float_v>( t ) / protoTracks.txState<simd::float_v>( t ) ); + select( protoTracks.dir<simd::float_v>( t ).y > 0.001f, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).y / protoTracks.dir<simd::float_v>( t ).y, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).x / protoTracks.dir<simd::float_v>( t ).x ); auto loopMask = simd::loop_mask( t, protoTracks.size ); // -- this is to filter tracks where the fit had a too large chi2 @@ -873,7 +857,7 @@ namespace LHCb::Pr { // -- FIXME: these rely on the internal details of PrTableForFunction!!! // and should at least be put back in there, and used from here // to make sure everything _stays_ consistent... - auto var = std::array{protoTracks.tyState<simd::float_v>( t ), zOrigin, protoTracks.zState<simd::float_v>( t )}; + auto var = std::array{protoTracks.dir<simd::float_v>( t ).y, zOrigin, protoTracks.pos<simd::float_v>( t ).z}; simd::int_v index1 = min( max( simd::int_v{( var[0] + 0.3f ) / 0.6f * 30}, 0 ), 30 ); simd::int_v index2 = min( max( simd::int_v{( var[1] + 250 ) / 500 * 10}, 0 ), 10 ); @@ -909,17 +893,16 @@ namespace LHCb::Pr { // -- order is: x, tx, y, chi2 std::array<simd::float_v, 4> finalParams = { protoTracks.xTT<simd::float_v>( t ), protoTracks.xSlopeTT<simd::float_v>( t ), - protoTracks.yState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * ( m_zMidUT - protoTracks.zState<simd::float_v>( t ) ), + protoTracks.pos<simd::float_v>( t ).y + + protoTracks.dir<simd::float_v>( t ).y * ( m_zMidUT - protoTracks.pos<simd::float_v>( t ).z ), protoTracks.chi2TT<simd::float_v>( t )}; const simd::float_v qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; simd::mask_v fitMask = simd::mask_true(); - simd::float_v qp = m_finalFit - ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) - : protoTracks.qp<simd::float_v>( t ) * - rsqrt( 1.0f + protoTracks.tyState<simd::float_v>( t ) * - protoTracks.tyState<simd::float_v>( t ) ); // is this correct? + simd::float_v qp = m_finalFit ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) + : protoTracks.qp<simd::float_v>( t ) * + rsqrt( 1.0f + protoTracks.dir<simd::float_v>( t ).y * + protoTracks.dir<simd::float_v>( t ).y ); // is this correct? qp = select( fitMask, qp, protoTracks.qp<simd::float_v>( t ) ); const simd::float_v qop = select( abs( bdl ) < 1.e-8f, simd::float_v{1000.0f}, qp * qpxz2p ); @@ -928,8 +911,8 @@ namespace LHCb::Pr { // -- Beware of the momentum resolution! 
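The index1/index2/index3 lines above bin ty, zOrigin and z into the magnet-kick lookup table with hard-coded ranges (the FIXME notes that they duplicate PrTableForFunction internals). A scalar sketch of that clamped binning; the 0.3 / 0.6 / 30 numbers are copied from the code above, and the exact rounding behaviour of the simd::int_v conversion is not reproduced here:

#include <algorithm>

// map a value from [lo, lo + width] onto bins 0..nBins, clamping anything outside the range
inline int clampedBin( float value, float lo, float width, int nBins ) {
  const int raw = static_cast<int>( ( value - lo ) / width * nBins );
  return std::clamp( raw, 0, nBins );
}

// e.g. the ty binning above corresponds to clampedBin( ty, -0.3f, 0.6f, 30 ):
//   ty = 0.0 -> bin 15, and any ty outside the table range sticks to bin 0 or bin 30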
const simd::float_v p = abs( 1.0f / qop ); const simd::float_v pt = - p * sqrt( protoTracks.txState<simd::float_v>( t ) * protoTracks.txState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * protoTracks.tyState<simd::float_v>( t ) ); + p * sqrt( protoTracks.dir<simd::float_v>( t ).x * protoTracks.dir<simd::float_v>( t ).x + + protoTracks.dir<simd::float_v>( t ).y * protoTracks.dir<simd::float_v>( t ).y ); const simd::mask_v pPTMask = ( p > m_minMomentumFinal.value() && pt > m_minPTFinal.value() ); const simd::float_v xUT = finalParams[0]; @@ -970,56 +953,20 @@ namespace LHCb::Pr { // ========================================================================================== - const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); - auto pos = protoTracks.pos<simd::float_v>( t ); - auto dir = protoTracks.dir<simd::float_v>( t ); - auto covX = protoTracks.cov<simd::float_v>( t ); - - int trackIndex = outputTracks.size(); - outputTracks.compressstore_trackVP<simd::int_v>( trackIndex, validTrackMask, ancestor ); - outputTracks.compressstore_statePos<simd::float_v>( trackIndex, validTrackMask, pos ); - outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); - outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); + const int trackIndex = outputTracks.size(); + outputTracks.copyVeloInformation<simd>( inputTracks, ancestor, validTrackMask ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 0 ); - const simd::int_v nVPHits = inputTracks.maskgather_nHits<simd::int_v>( ancestor, validTrackMask, 0 ); - outputTracks.compressstore_nVPHits<simd::int_v>( trackIndex, validTrackMask, nVPHits ); - - // -- Store the hit information of the VP track in the Upstream track - for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { - if ( !testbit( validTrackMask, t2 ) ) continue; - // -- At this moment, we only have VP hits, so nHits == nVPHits - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); - const int nHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); - for ( int iHit = 0; iHit < nHits; ++iHit ) { - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + t2, iHit, - inputTracks.lhcbID<scalar::int_v>( iAncestor, iHit ) ); - outputTracks.store_vp_index<scalar::int_v>( trackIndex + t2, iHit, - inputTracks.vp_index<scalar::int_v>( iAncestor, iHit ) ); - } - } + float txArray[simd::size]; + txUT.store( txArray ); - // =========================================================================================== + // TxStorage txArray; + // txArray.store_txUT<simd::float_v>( 0, txUT ); - // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); - TxStorage txArray; - txArray.store_txUT<simd::float_v>( 0, txUT ); - - simd::int_v nUTHits{0}; - - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { - simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); - - simd::int_v hitIndex = protoTracks.hitIndex<simd::int_v>( t, iLayer ); - - outputTracks.compressstore_ut_index<simd::int_v>( trackIndex, iLayer, validTrackMask, hitIndex ); - nUTHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - } - - outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, nUTHits ); // -- from here on, go over each 
track individually to find and add the overlap hits // -- this is not particularly elegant... - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { int trackIndex2 = 0; @@ -1028,17 +975,18 @@ namespace LHCb::Pr { const std::size_t tscalar = t + t2; - const int iAncestor = outputTracks.trackVP<scalar::int_v>( trackIndex + t2 ).cast(); - const int nVPHits = inputTracks.nHits<scalar::int_v>( iAncestor ).cast(); + const bool goodHit = ( protoTracks.weight<scalar::float_v>( tscalar, iLayer ).cast() > 0.0001f ); + const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); + const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); - const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, iLayer + nVPHits, id ); + if ( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); // -- - const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); - const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); - const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); + const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); + // const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const float txUTS = txArray[t2]; int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); @@ -1056,20 +1004,18 @@ namespace LHCb::Pr { if ( xohit - xextrap > m_overlapTol ) break; int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - if ( nUTHits > 16 ) continue; // get this number from PrUpstreamTracks!!! - - outputTracks.store_ut_index<scalar::int_v>( trackIndex + trackIndex2, nUTHits, - hitsInLayers[hitContIndex].indexs[index2] ); + if ( nUTHits >= LHCb::Pr::Upstream::Tracks::max_uthits ) + continue; // get this number from PrUpstreamTracks!!! 
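A scalar sketch of the overlap-hit scan pattern used above: hits in a layer container are taken as ordered in x, so the loop can stop at the first hit beyond the positive tolerance, and a cap keeps the per-track hit count below the container maximum. Function and variable names here are illustrative, not the PrVeloUT interface:

#include <cmath>
#include <vector>

// collect indices of hits whose x lies within +-tol of the extrapolated prediction;
// xs is assumed sorted, so the first hit too far to the right ends the scan
inline std::vector<int> hitsNearPrediction( const std::vector<float>& xs, float xPred, float tol, int maxHits ) {
  std::vector<int> found;
  for ( int i = 0; i < static_cast<int>( xs.size() ); ++i ) {
    if ( xs[i] - xPred > tol ) break;               // nothing further right can match
    if ( std::fabs( xs[i] - xPred ) > tol ) continue;
    found.push_back( i );
    if ( static_cast<int>( found.size() ) >= maxHits ) break; // mirrors the max_uthits cap above
  }
  return found;
}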
LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - outputTracks.store_lhcbID<scalar::int_v>( trackIndex + trackIndex2, nVPHits + nUTHits, oid.lhcbID() ); - outputTracks.store_nUTHits<scalar::int_v>( trackIndex + trackIndex2, nUTHits + 1 ); + outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), + hitsInLayers[hitContIndex].indexs[index2] ); // only one overlap hit // break; } trackIndex2++; } } - outputTracks.size() += simd::popcount( validTrackMask ); + // outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information } } } // namespace LHCb::Pr diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 67a917067d8..951fa468792 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -75,43 +75,36 @@ namespace LHCb::Pr { struct MiniStatesArray final { - constexpr static int max_tracks = align_size( 1024 ); - std::array<float, max_tracks> xs; - std::array<float, max_tracks> ys; - std::array<float, max_tracks> zs; - std::array<float, max_tracks> txs; - std::array<float, max_tracks> tys; - std::array<int, max_tracks> indexs; - - std::array<float, max_tracks> covxs; - std::array<float, max_tracks> covys; - std::array<float, max_tracks> covzs; - - std::size_t size{0}; - - SOA_ACCESSOR( x, xs.data() ) - SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( z, zs.data() ) - SOA_ACCESSOR( tx, txs.data() ) - SOA_ACCESSOR( ty, tys.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) + constexpr static int max_tracks = align_size( 1024 ); + std::array<float, 3 * max_tracks> poss; + std::array<float, 2 * max_tracks> dirs; + std::array<float, 3 * max_tracks> covs; + std::array<int, max_tracks> indexs; + std::size_t size{0}; + + SOA_ACCESSOR( x, &( poss[0] ) ) + SOA_ACCESSOR( y, &( poss[max_tracks] ) ) + SOA_ACCESSOR( z, &( poss[2 * max_tracks] ) ) + SOA_ACCESSOR( tx, &( dirs[0] ) ) + SOA_ACCESSOR( ty, &( dirs[max_tracks] ) ) + SOA_ACCESSOR( covx, &( covs[0] ) ) + SOA_ACCESSOR( covy, &( covs[max_tracks] ) ) + SOA_ACCESSOR( covz, &( covs[2 * max_tracks] ) ) SOA_ACCESSOR( index, indexs.data() ) - VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) - VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) - VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) + VEC3_SOA_ACCESSOR( pos, (float*)&( poss[0] ), (float*)&( poss[max_tracks] ), (float*)&( poss[2 * max_tracks] ) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&( dirs[0] ), (float*)&( dirs[max_tracks] ), 1.0f ) + VEC3_SOA_ACCESSOR( cov, (float*)&( covs[0] ), (float*)&( covs[max_tracks] ), (float*)&( covs[2 * max_tracks] ) ) // -- Copy back the entries, but with a filtering mask void copyBack( std::size_t at, simd::mask_v mask ) { - simd::float_v( &xs[at] ).compressstore( mask, &xs[size] ); - simd::float_v( &ys[at] ).compressstore( mask, &ys[size] ); - simd::float_v( &zs[at] ).compressstore( mask, &zs[size] ); - simd::float_v( &txs[at] ).compressstore( mask, &txs[size] ); - simd::float_v( &tys[at] ).compressstore( mask, &tys[size] ); - simd::float_v( &covxs[at] ).compressstore( mask, &covxs[size] ); - simd::float_v( &covys[at] ).compressstore( mask, &covys[size] ); - simd::float_v( &covzs[at] ).compressstore( mask, &covzs[size] ); + simd::float_v( &poss[at] ).compressstore( mask, &poss[size] ); + simd::float_v( &poss[at + max_tracks] ).compressstore( mask, &poss[size + max_tracks] ); + simd::float_v( &poss[at + 2 * max_tracks] ).compressstore( 
mask, &poss[size + 2 * max_tracks] ); + simd::float_v( &dirs[at] ).compressstore( mask, &dirs[size] ); + simd::float_v( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] ); + simd::float_v( &covs[at + max_tracks] ).compressstore( mask, &covs[size] ); + simd::float_v( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] ); + simd::float_v( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] ); simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] ); size += simd::popcount( mask ); } @@ -166,16 +159,9 @@ namespace LHCb::Pr { std::array<float, batchSize> ys; // -- and this the original state (in the Velo) - std::array<float, batchSize> xStates; - std::array<float, batchSize> yStates; - std::array<float, batchSize> zStates; - std::array<float, batchSize> txStates; - std::array<float, batchSize> tyStates; - std::array<int, batchSize> indexs; - - std::array<float, batchSize> covxs; - std::array<float, batchSize> covys; - std::array<float, batchSize> covzs; + std::array<float, 3 * batchSize> statePoss; + std::array<float, 2 * batchSize> stateDirs; + std::array<int, batchSize> indexs; // -- and this an index to find the hit containers std::array<int, batchSize> hitContIndexs; @@ -194,24 +180,11 @@ namespace LHCb::Pr { SOA_ACCESSOR( xSlopeTT, xSlopeTTs.data() ) SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( xState, xStates.data() ) - SOA_ACCESSOR( yState, yStates.data() ) - SOA_ACCESSOR( zState, zStates.data() ) - SOA_ACCESSOR( txState, txStates.data() ) - SOA_ACCESSOR( tyState, tyStates.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) SOA_ACCESSOR( index, indexs.data() ) SOA_ACCESSOR( hitContIndex, hitContIndexs.data() ) - VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) - VEC3_SOA_ACCESSOR( pos, xStates.data(), yStates.data(), zStates.data() ) - VEC3_XY_SOA_ACCESSOR( dir, txStates.data(), tyStates.data(), 1.0f ) - }; - - struct TxStorage final { - std::array<float, simd::size> txUTs; - SOA_ACCESSOR( txUT, txUTs.data() ) + VEC3_SOA_ACCESSOR( pos, (float*)&( statePoss[0] ), (float*)&( statePoss[batchSize] ), + (float*)&( statePoss[2 * batchSize] ) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&( stateDirs[0] ), (float*)&( stateDirs[batchSize] ), 1.0f ) }; struct TrackHelper final { diff --git a/Tr/TrackUtils/src/TracksFTConverter.cpp b/Tr/TrackUtils/src/TracksFTConverter.cpp index ec3c08e52af..49f9d760e23 100644 --- a/Tr/TrackUtils/src/TracksFTConverter.cpp +++ b/Tr/TrackUtils/src/TracksFTConverter.cpp @@ -18,8 +18,12 @@ // LHCb #include "Event/StateParameters.h" #include "Event/Track.h" +#include "Kernel/LHCbID.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/VPChannelID.h" +#include "Kernel/FTChannelID.h" -#include "Event/PrForwardTracks.h" +#include "Event/PrLongTracks.h" #include "Event/PrVeloTracks.h" /** @@ -41,9 +45,9 @@ namespace { } // namespace class TracksFTConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const std::vector<LHCb::Event::v2::Track>&, const LHCb::Pr::Forward::Tracks& )> { + const std::vector<LHCb::Event::v2::Track>&, const LHCb::Pr::Long::Tracks& )> { using Track = LHCb::Event::v2::Track; - using Tracks = LHCb::Pr::Forward::Tracks; + using Tracks = LHCb::Pr::Long::Tracks; // From PrGeometryTool in PrAlgorithms public: @@ -97,14 +101,24 @@ public: newTrack.addToStates( state ); // Add LHCbIds - int n_hits = tracksFT.nHits<I>( t ).cast(); - for ( int i = 0; i < n_hits; i++ ) { - 
int lhcbid = tracksFT.hit<I>( t, i ).cast(); + + int n_vphits = tracksFT.nVPHits<I>( t ).cast(); + int n_fthits = tracksFT.nFTHits<I>( t ).cast(); + int n_uthits = tracksFT.nUTHits<I>( t ).cast(); + + //info()<<"FT converter ........vp "<< n_vphits <<"...ft "<< n_fthits <<"..ut "<< n_uthits<<endmsg; + /* + for ( int i = n_vphits; i < n_vphits + n_fthits + n_uthits; i++ ) { + auto lhcbid = tracksFT.lhcbID<I>( t, i ).cast(); + //if(i <n_vphits) info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID(LHCb::VPChannelID( lhcbid ))<< " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; + //else info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID( lhcbid )<< " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); } + */ + newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered); newTrack.setType( Track::Type::Long ); - newTrack.setHistory( Track::History::PrForward ); + newTrack.setHistory( Track::History::PrForward); newTrack.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); } diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index bd70bba1b14..b58dc4888b3 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -169,7 +169,6 @@ public: for ( int t = 0; t < bwd_tracks.size(); t++ ) { auto& newTrack = out.emplace_back(); - newTrack.setLhcbIDs( bwd_tracks.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.states().reserve( 1 ); auto state_beam = getState( bwd_tracks, t, 0 ); -- GitLab From 21eded71ecbe26d37528340a719dde81db47451d Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Sun, 24 May 2020 22:23:31 +0000 Subject: [PATCH 038/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8502166 --- Pr/PrAlgorithms/src/IPrAddUTHitsTool.h | 6 +- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 252 +++++++++++----------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 40 ++-- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 19 +- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 5 +- Tr/TrackUtils/src/TracksFTConverter.cpp | 29 +-- 6 files changed, 182 insertions(+), 169 deletions(-) diff --git a/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h b/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h index 01c18f6a938..1ee4619e1bc 100644 --- a/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/IPrAddUTHitsTool.h @@ -16,10 +16,10 @@ #include <vector> // from Gaudi +#include "Event/PrLongTracks.h" #include "Event/Track_v2.h" #include "GaudiKernel/IAlgTool.h" #include "PrKernel/PrMutUTHits.h" -#include "Event/PrLongTracks.h" /** @class IPrAddUTHitsTool IPrAddUTHitsTool.h TrackInterfaces/IPrAddUTHitsTool.h * @@ -28,7 +28,7 @@ */ // forward declaration -namespace LHCb{ +namespace LHCb { class state; } @@ -39,6 +39,6 @@ public: DeclareInterfaceID( IPrAddUTHitsTool, 2, 0 ); /// Add UT clusters to Long tracks - virtual StatusCode addUTHits(Tracks& tracks ) const = 0; + virtual StatusCode addUTHits( Tracks& tracks ) const = 0; }; #endif // TRACKINTERFACES_IPRADDUTHITSTOOL_H diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 5c31e35002e..32722c6cef6 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -16,11 +16,11 @@ // Include files // from Gaudi #include "GaudiKernel/SystemOfUnits.h" -#include "UTDAQ/UTDAQHelper.h" -#include "PrAddUTHitsTool.h" -#include "LHCbMath/SIMDWrapper.h" -#include "LHCbMath/GeomFun.h" #include "Kernel/LHCbID.h" +#include "LHCbMath/GeomFun.h" 
+#include "LHCbMath/SIMDWrapper.h" +#include "PrAddUTHitsTool.h" +#include "UTDAQ/UTDAQHelper.h" //----------------------------------------------------------------------------- // Implementation file for class : PrAddUTHitsTool @@ -29,10 +29,10 @@ // //----------------------------------------------------------------------------- // Declaration of the Algorithm Factory -DECLARE_COMPONENT_WITH_ID(LHCb::Pr::PrAddUTHitsTool, "PrAddUTHitsTool" ) -namespace LHCb::Pr{ +DECLARE_COMPONENT_WITH_ID( LHCb::Pr::PrAddUTHitsTool, "PrAddUTHitsTool" ) +namespace LHCb::Pr { - namespace{ + namespace { // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& helper, @@ -60,7 +60,6 @@ namespace LHCb::Pr{ } } // namespace - using ROOT::Math::CholeskyDecomp; //========================================================================= @@ -84,28 +83,29 @@ namespace LHCb::Pr{ //========================================================================= StatusCode PrAddUTHitsTool::addUTHits( Tracks& tracks ) const { - MiniStates filteredStates ; - auto compBoundsArray = findAllSectors(tracks, filteredStates); - + MiniStates filteredStates; + auto compBoundsArray = findAllSectors( tracks, filteredStates ); + for ( auto t = 0; t < int( filteredStates.size ); t++ ) { - auto myUTHits = returnUTHits( filteredStates, compBoundsArray, t); - if ((myUTHits.size < 3 ) ) continue; - assert( myUTHits.size <= LHCb::Pr::Upstream::Tracks::max_uthits && "Container cannot store more than 16 UT hits per track" ); + auto myUTHits = returnUTHits( filteredStates, compBoundsArray, t ); + if ( ( myUTHits.size < 3 ) ) continue; + assert( myUTHits.size <= LHCb::Pr::Upstream::Tracks::max_uthits && + "Container cannot store more than 16 UT hits per track" ); - int itr = filteredStates.index<sI>( t ).cast(); + int itr = filteredStates.index<sI>( t ).cast(); const int nVPHits = tracks.nVPHits<sI>( itr ).cast(); const int nFTHits = tracks.nFTHits<sI>( itr ).cast(); - tracks.store_nUTHits<sI>( itr, int(myUTHits.size) ); + tracks.store_nUTHits<sI>( itr, int( myUTHits.size ) ); for ( auto i = 0; i < int( myUTHits.size ); i++ ) { // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "--- Adding Hit in Layer: " << myUTHits.planeCode<sI>( i ) << " with projection: " << myUTHits.projections[i] - << endmsg; + debug() << "--- Adding Hit in Layer: " << myUTHits.planeCode<sI>( i ) + << " with projection: " << myUTHits.projections[i] << endmsg; // ---------------------------------- // add ut hit indices and lhcbIDs to the long track - const int idxhit = myUTHits.indexs[i]; + const int idxhit = myUTHits.indexs[i]; LHCb::LHCbID lhcbid( LHCb::UTChannelID( myUTHits.channelIDs[i] ) ); tracks.store_ut_index<sI>( itr, i, idxhit ); tracks.store_lhcbID<sI>( itr, nVPHits + nFTHits + i, lhcbid.lhcbID() ); @@ -115,10 +115,10 @@ namespace LHCb::Pr{ } ///======================================================================= - // find all sections + // find all sections ///======================================================================= - std::array<Boundaries, UTInfo::TotalLayers> - PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, MiniStates& filteredStates ) const{ + std::array<Boundaries, UTInfo::TotalLayers> PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, + MiniStates& filteredStates ) const { 
std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; int contSize = tracks.size(); @@ -127,20 +127,20 @@ namespace LHCb::Pr{ std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors std::array<int, UTInfo::TotalLayers> maxColsRows; - //--- This now works with up to 9 sectors + //--- This now works with up to 9 sectors const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); for ( int t = 0; t < contSize; t += simd::size ) { - auto loopMask = simd::loop_mask( t, contSize ); + auto loopMask = simd::loop_mask( t, contSize ); simd::int_v nLayers{0}; //---Define the tolerance parameters - const F qoverp = tracks.stateQoP<F>( t ); - const F p = abs( 1 / qoverp ); - const F yTol = p_yTolSlope.value() / p; - const F xTol = p_xTol.value() + p_xTolSlope.value() / p; + const F qoverp = tracks.stateQoP<F>( t ); + const F p = abs( 1 / qoverp ); + const F yTol = p_yTolSlope.value() / p; + const F xTol = p_xTol.value() + p_xTolSlope.value() / p; - auto pos = tracks.vStatePos<F>( t ); - auto dir = tracks.vStateDir<F>( t ); + auto pos = tracks.vStatePos<F>( t ); + auto dir = tracks.vStateDir<F>( t ); const F stateX = pos.x; const F stateY = pos.y; const F stateZ = pos.z; @@ -151,9 +151,9 @@ namespace LHCb::Pr{ for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { - const F zLayer = m_geomcache.layers[layerIndex].z; - const F yPredLay = stateY + ( zLayer - stateZ ) * stateTy; - const F xPredLay = stateX + ( zLayer - stateZ ) * stateTx + bendParam * ( zLayer - p_zUTField.value() ); + const F zLayer = m_geomcache.layers[layerIndex].z; + const F yPredLay = stateY + ( zLayer - stateZ ) * stateTy; + const F xPredLay = stateX + ( zLayer - stateZ ) * stateTx + bendParam * ( zLayer - p_zUTField.value() ); const simd::int_v regionBoundary1 = ( 2 * m_geomcache.layers[layerIndex].nColsPerSide + 3 ); const simd::int_v regionBoundary2 = ( 2 * m_geomcache.layers[layerIndex].nColsPerSide - 5 ); @@ -163,8 +163,9 @@ namespace LHCb::Pr{ simd::int_v subrowmin{0}; simd::int_v subrowmax{0}; - simd::mask_v mask = LHCb::UTDAQ::findSectors( layerIndex, xPredLay, yPredLay, xTol, yTol, - m_geomcache.layers[layerIndex], subcolmin, subcolmax, subrowmin, subrowmax ); + simd::mask_v mask = + LHCb::UTDAQ::findSectors( layerIndex, xPredLay, yPredLay, xTol, yTol, m_geomcache.layers[layerIndex], + subcolmin, subcolmax, subrowmin, subrowmax ); const simd::mask_v gathermask = loopMask && mask; @@ -191,9 +192,10 @@ namespace LHCb::Pr{ // -- We need to fill a valid number, as one can have 3 layers with a correct sector // -- and one without a correct sector, in which case the track will not be masked off. // -- However, these cases should happen very rarely - simd::int_v sect = ( layerIndex < 2 ) - ? m_geomcache.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) - : m_geomcache.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); + simd::int_v sect = + ( layerIndex < 2 ) + ? 
m_geomcache.sectorLUT.maskgather_station1<simd::int_v>( sectorIndex, gathermask, 1 ) + : m_geomcache.sectorLUT.maskgather_station2<simd::int_v>( sectorIndex, gathermask, 1 ); // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 @@ -201,9 +203,9 @@ namespace LHCb::Pr{ counter++; } } - // sorting + // sorting bubbleSortSIMD( maxCols * maxRows, helperArray, maxSectors * layerIndex ); - // uniquifying + // uniquifying posArray[layerIndex] = makeUniqueSIMD( helperArray, maxSectors * layerIndex, maxCols * maxRows ); // count the number of `valid` layers nLayers += select( mask, simd::int_v{1}, simd::int_v{0} ); @@ -215,11 +217,10 @@ namespace LHCb::Pr{ int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<I>( index, iSector, compressMask, - helperArray[maxSectors * iLayer + iSector] ); + helperArray[maxSectors * iLayer + iSector] ); } compBoundsArray[iLayer].compressstore_xTol<F>( index, compressMask, xTol ); - compBoundsArray[iLayer].compressstore_nPos<I>( index, compressMask, - posArray[iLayer] - maxSectors * iLayer ); + compBoundsArray[iLayer].compressstore_nPos<I>( index, compressMask, posArray[iLayer] - maxSectors * iLayer ); compBoundsArray[iLayer].size += simd::popcount( compressMask ); } @@ -235,7 +236,7 @@ namespace LHCb::Pr{ filteredStates.compressstore_qop<F>( stateidx, compressMask, qoverp ); filteredStates.compressstore_p<F>( stateidx, compressMask, p ); filteredStates.compressstore_index<I>( stateidx, compressMask, simd::indices( t ) ); - filteredStates.size += simd::popcount( compressMask ); + filteredStates.size += simd::popcount( compressMask ); } return compBoundsArray; @@ -244,27 +245,30 @@ namespace LHCb::Pr{ //========================================================================= // Return the TT hits //========================================================================= - LHCb::Pr::UT::Mut::Hits PrAddUTHitsTool::returnUTHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, std::size_t t) const { + LHCb::Pr::UT::Mut::Hits + PrAddUTHitsTool::returnUTHits( MiniStates& filteredStates, + const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + std::size_t t ) const { LHCb::Pr::UT::Mut::Hits UTHits; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "--- Entering returnUTHits ---" << endmsg; // -- Get the container with all the hits compatible with the track LHCb::Pr::UT::Mut::Hits hitsInLayers; - hitsInLayers.size = 0 ; + hitsInLayers.size = 0; for ( auto& it : hitsInLayers.layerIndices ) it = -1; - + bool findHits = selectHits( filteredStates, compBoundsArray, hitsInLayers, t ); // -- If less three layer or only two hits are selected, end algorithm - if ( !findHits || int (hitsInLayers.size ) < 3 ) return UTHits; + if ( !findHits || int( hitsInLayers.size ) < 3 ) return UTHits; const auto p = filteredStates.p<sF>( t ).cast(); float bestChi2 = p_maxChi2Tol.value() + p_maxChi2Slope.value() / ( p - p_maxChi2POffset.value() ); // -- Loop over all hits and make "groups" of hits to form a candidate - for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size); ++itBeg ) { + for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size ); ++itBeg ) { const float firstProj = hitsInLayers.projections[itBeg]; @@ -277,14 +281,15 @@ namespace LHCb::Pr{ float maxProj = firstProj; if ( fabs( firstProj ) < p_majAxProj.value() ) { // -- m_invMajAxProj2 = 
1/(m_majAxProj*m_majAxProj), but it's faster like this - maxProj = firstProj + sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); + maxProj = firstProj + + sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); } // -- This means that there would be less than 3 hits, which does not work, so we can skip this right away - if ( ( hitsInLayers.projections[itBeg+2]) > maxProj ) continue; + if ( ( hitsInLayers.projections[itBeg + 2] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group - for ( auto itEnd = itBeg; itEnd < int(hitsInLayers.size); itEnd++ ) { + for ( auto itEnd = itBeg; itEnd < int( hitsInLayers.size ); itEnd++ ) { auto index = goodUT.size; if ( hitsInLayers.projections[itEnd] > maxProj ) break; @@ -293,15 +298,15 @@ namespace LHCb::Pr{ firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] = 1; // -- Count number of fired planes ++nbPlane; } - - goodUT.xs[index] = hitsInLayers.xs[itEnd] ; - goodUT.zs[index] = hitsInLayers.zs[itEnd] ; - goodUT.coss[index] = hitsInLayers.coss[itEnd] ; - goodUT.sins[index] = hitsInLayers.sins[itEnd] ; - goodUT.weights[index] = hitsInLayers.weights[itEnd] ; - goodUT.projections[index] = hitsInLayers.projections[itEnd] ; - goodUT.channelIDs[index] = hitsInLayers.channelIDs[itEnd] ; - goodUT.indexs[index] = hitsInLayers.indexs[itEnd] ; + + goodUT.xs[index] = hitsInLayers.xs[itEnd]; + goodUT.zs[index] = hitsInLayers.zs[itEnd]; + goodUT.coss[index] = hitsInLayers.coss[itEnd]; + goodUT.sins[index] = hitsInLayers.sins[itEnd]; + goodUT.weights[index] = hitsInLayers.weights[itEnd]; + goodUT.projections[index] = hitsInLayers.projections[itEnd]; + goodUT.channelIDs[index] = hitsInLayers.channelIDs[itEnd]; + goodUT.indexs[index] = hitsInLayers.indexs[itEnd]; goodUT.size += 1; } @@ -314,7 +319,7 @@ namespace LHCb::Pr{ debug() << "Start fit, first proj " << firstProj << " nbPlane " << nbPlane << " size " << goodUT.size << endmsg; // -- Set variables for the chi2 calculation float dist = 0; - float chi2 = 1.e20; + float chi2 = 1.e20; calculateChi2( chi2, bestChi2, dist, p, goodUT ); @@ -326,26 +331,26 @@ namespace LHCb::Pr{ // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, goodUT ); // ---------------------------------- - for( auto i =0; i < int( goodUT.size ); i++ ) { - //auto loopmask = simd::loop_mask(i, goodUT.size ); - //UTHits[t].copy_back( goodUT, i, loopmask ); - UTHits.xs[i] = goodUT.xs[i] ; - UTHits.zs[i] = goodUT.zs[i] ; - UTHits.coss[i] = goodUT.coss[i] ; - UTHits.sins[i] = goodUT.sins[i] ; - UTHits.weights[i] = goodUT.weights[i] ; - UTHits.projections[i] = goodUT.projections[i] ; - UTHits.channelIDs[i] = goodUT.channelIDs[i] ; - UTHits.indexs[i] = goodUT.indexs[i] ; + for ( auto i = 0; i < int( goodUT.size ); i++ ) { + // auto loopmask = simd::loop_mask(i, goodUT.size ); + // UTHits[t].copy_back( goodUT, i, loopmask ); + UTHits.xs[i] = goodUT.xs[i]; + UTHits.zs[i] = goodUT.zs[i]; + UTHits.coss[i] = goodUT.coss[i]; + UTHits.sins[i] = goodUT.sins[i]; + UTHits.weights[i] = goodUT.weights[i]; + UTHits.projections[i] = goodUT.projections[i]; + UTHits.channelIDs[i] = goodUT.channelIDs[i]; + UTHits.indexs[i] = goodUT.indexs[i]; UTHits.size += 1; } - + bestChi2 = chi2; } } // -- Assign the final hit container and chi2 to the variables which are returned. 
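The maxProj computation above defines a half-ellipse window for grouping hits: a group started at firstProj may extend by minAxProj * sqrt(1 - firstProj^2 / majAxProj^2), so groups far from the axis stay tighter. A standalone sketch using the default property values declared in this patch (MajAxProj = 20 mm, MinAxProj = 2 mm):

#include <cmath>

// upper edge of the hit group that starts at projection firstProj, as in returnUTHits above
inline float maxProjection( float firstProj, float majAxProj = 20.f, float minAxProj = 2.f ) {
  float maxProj = firstProj;
  if ( std::fabs( firstProj ) < majAxProj ) {
    const float invMajAxProj2 = 1.f / ( majAxProj * majAxProj );
    maxProj += std::sqrt( minAxProj * minAxProj * ( 1.f - firstProj * firstProj * invMajAxProj2 ) );
  }
  return maxProj;
}

// e.g. firstProj =  0 mm -> window extends by 2.00 mm
//      firstProj = 10 mm -> window extends by about 1.73 mm
//      |firstProj| >= 20 mm -> no extension beyond the first projection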
- //finalChi2 = bestChi2; + // finalChi2 = bestChi2; if ( UTHits.size > 2 ) { m_hitsAddedCounter += UTHits.size; m_tracksWithHitsCounter++; @@ -355,37 +360,39 @@ namespace LHCb::Pr{ //========================================================================= // Select the hits in a certain window //========================================================================= - bool PrAddUTHitsTool::selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { + bool PrAddUTHitsTool::selectHits( MiniStates& filteredStates, + const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { // -- Define the parameter that describes the bending // -- in principle the call m_magFieldSvc->signedRelativeCurrent() is not needed for every track... const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); - hitsInLayers.size =0; - - const float stateX = filteredStates.x<sF>( t ).cast(); - const float stateY = filteredStates.y<sF>( t ).cast(); - const float stateZ = filteredStates.z<sF>( t ).cast(); - const float stateTx = filteredStates.tx<sF>( t ).cast(); - const float stateTy = filteredStates.ty<sF>( t ).cast(); - const float p = filteredStates.p<sF>( t ).cast(); - const float qop = filteredStates.qop<sF>( t ).cast(); + hitsInLayers.size = 0; + + const float stateX = filteredStates.x<sF>( t ).cast(); + const float stateY = filteredStates.y<sF>( t ).cast(); + const float stateZ = filteredStates.z<sF>( t ).cast(); + const float stateTx = filteredStates.tx<sF>( t ).cast(); + const float stateTy = filteredStates.ty<sF>( t ).cast(); + const float p = filteredStates.p<sF>( t ).cast(); + const float qop = filteredStates.qop<sF>( t ).cast(); const float bendParam = p_utParam.value() * -1 * signedReCur * qop; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "selectHits: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " << stateTy << " p " << p << endmsg; - std::size_t nSize = 0; - std::size_t nLayers = 0; - const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); + std::size_t nSize = 0; + std::size_t nLayers = 0; + const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; // -- Define the tolerance parameters - const F yTol = p_yTolSlope.value() / p; - const F xTol = p_xTol.value() + p_xTolSlope.value() / p; + const F yTol = p_yTolSlope.value() / p; + const F xTol = p_xTol.value() + p_xTolSlope.value() / p; - const int nPos = compBoundsArray[layerIndex].nPos<sI>( t ).cast(); + const int nPos = compBoundsArray[layerIndex].nPos<sI>( t ).cast(); std::array<int, maxSectors + 1> sectors{0}; for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<sI>( t, i ).cast(); } @@ -397,23 +404,25 @@ namespace LHCb::Pr{ const int lastIndex = ( shift == 1 ) ? 
temp2.second : temp.second; j += shift; - for( int i = firstIndex; i < lastIndex; i += simd::size){ - auto loopMask = simd::loop_mask( i, lastIndex); - const F yPred = stateY + (myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; + for ( int i = firstIndex; i < lastIndex; i += simd::size ) { + auto loopMask = simd::loop_mask( i, lastIndex ); + const F yPred = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; - const auto yMin = min( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); - const auto yMax = max( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); - const auto yy = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; - auto xx = myHits.xAtYEq0<F>( i ) + yy * myHits.dxDy<F>( i ); - F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ) + bendParam * (myHits.zAtYEq0<F>( i ) - p_zUTField.value()); - F absdx = abs( xx - xPred ); + const auto yMin = min( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); + const auto yMax = max( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); + const auto yy = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; + auto xx = myHits.xAtYEq0<F>( i ) + yy * myHits.dxDy<F>( i ); + F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ ) + + bendParam * ( myHits.zAtYEq0<F>( i ) - p_zUTField.value() ); + F absdx = abs( xx - xPred ); if ( none( absdx < xTol ) ) continue; - auto mask = (yMin - yTol < yPred && yPred < yMax + yTol ) && ( absdx < xTol ) && loopMask; + auto mask = ( yMin - yTol < yPred && yPred < yMax + yTol ) && ( absdx < xTol ) && loopMask; if ( none( mask ) ) continue; - const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / ( myHits.zAtYEq0<F>( i ) - p_zMSPoint.value() ); + const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / + ( myHits.zAtYEq0<F>( i ) - p_zMSPoint.value() ); // save the selected hits auto index = hitsInLayers.size; @@ -425,32 +434,31 @@ namespace LHCb::Pr{ hitsInLayers.compressstore_x( index, mask, xx ); hitsInLayers.compressstore_z( index, mask, myHits.zAtYEq0<F>( i ) ); hitsInLayers.compressstore_cos( index, mask, myHits.cos<F>( i ) ); - hitsInLayers.compressstore_sin( index, mask, - myHits.cos<F>( i ) * -1.0f * myHits.dxDy<F>( i ) ); + hitsInLayers.compressstore_sin( index, mask, myHits.cos<F>( i ) * -1.0f * myHits.dxDy<F>( i ) ); hitsInLayers.compressstore_weight( index, mask, myHits.weight<F>( i ) ); hitsInLayers.compressstore_projection( index, mask, projDist ); hitsInLayers.compressstore_channelID( index, mask, myHits.channelID<I>( i ) ); - hitsInLayers.compressstore_index( index, mask, simd::indices( i ) ); // fill the index in the original hit container + hitsInLayers.compressstore_index( index, mask, + simd::indices( i ) ); // fill the index in the original hit container hitsInLayers.size += simd::popcount( mask ); - } } nLayers += int( nSize != hitsInLayers.size ); hitsInLayers.layerIndices[layerIndex] = nSize; - nSize = hitsInLayers.size; + nSize = hitsInLayers.size; } - return nLayers > 2 ; + return nLayers > 2; } //========================================================================= // Calculate Chi2 //========================================================================= void PrAddUTHitsTool::calculateChi2( float& chi2, const float& bestChi2, float& finalDist, const float& p, - LHCb::Pr::UT::Mut::Hits& goodUT ) const { + LHCb::Pr::UT::Mut::Hits& goodUT ) const { // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - auto worst = 0; - float dist = 0; + auto worst = 0; + float dist = 0; chi2 = 
1.e20; const float xTol = p_xTol.value() + p_xTolSlope.value() / p; @@ -526,13 +534,15 @@ namespace LHCb::Pr{ rhs[2] = saveRhs[2]; chi2 = fixedWeight * ( offset * offset + offsetY * offsetY + - ( p_zUTProj.value() - p_zMSPoint.value() ) * ( p_zUTProj.value() - p_zMSPoint.value() ) * slope * slope ); + ( p_zUTProj.value() - p_zMSPoint.value() ) * ( p_zUTProj.value() - p_zMSPoint.value() ) * + slope * slope ); - for ( auto it = 0; it < int( goodUT.size ); it++) { + for ( auto it = 0; it < int( goodUT.size ); it++ ) { const float w = goodUT.weights[it]; const float dz = goodUT.zs[it] - p_zUTProj; dist = goodUT.projections[it] - offset - slope * dz - offsetY * goodUT.sins[it]; - if ( ( 1 < differentPlanes[goodUT.planeCode<sI>( it ).cast()] || nDoF == nHits ) && worstDiff < w * dist * dist ) { + if ( ( 1 < differentPlanes[goodUT.planeCode<sI>( it ).cast()] || nDoF == nHits ) && + worstDiff < w * dist * dist ) { worstDiff = w * dist * dist; worst = it; } @@ -565,30 +575,26 @@ namespace LHCb::Pr{ if ( 1 == differentPlanes[goodUT.planeCode<sI>( worst ).cast()]-- ) --nDoF; --nHits; - } // -- Increase the sanity check counter ++counter; } - + finalDist = dist; } //========================================================================= // Print out info //========================================================================= - void PrAddUTHitsTool::printInfo( float dist, float chi2, - const LHCb::Pr::UT::Mut::Hits& goodUT ) const { + void PrAddUTHitsTool::printInfo( float dist, float chi2, const LHCb::Pr::UT::Mut::Hits& goodUT ) const { // -- Print some information at the end info() << "*** Store this candidate, nbTT = " << goodUT.size << " chi2 " << chi2 << endmsg; - for ( auto i = 0; i < int( goodUT.size ); i +=simd::size ){ + for ( auto i = 0; i < int( goodUT.size ); i += simd::size ) { sF z = goodUT.z<sF>( i ); sF mPred = goodUT.x<sF>( i ) + dist; - info() << goodUT.planeCode<sI>( i ) - << format( " z%7.0f pred %7.2f diff %7.2f ", z, mPred, dist ) - << endmsg; + info() << goodUT.planeCode<sI>( i ) << format( " z%7.0f pred %7.2f diff %7.2f ", z, mPred, dist ) << endmsg; } } -} //namespace LHCb::Pr +} // namespace LHCb::Pr diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index c1d1b43462a..4af9ea8c355 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -17,16 +17,16 @@ #include "Event/Track_v2.h" #include "IPrAddUTHitsTool.h" // Interface #include "Kernel/ILHCbMagnetSvc.h" +#include "LHCbMath/SIMDWrapper.h" #include "UTDAQ/UTDAQHelper.h" #include "UTDAQ/UTInfo.h" -#include "LHCbMath/SIMDWrapper.h" -#include "GaudiAlg/GaudiTool.h" -#include <GaudiKernel/DataObjectHandle.h> #include "Event/PrLongTracks.h" +#include "GaudiAlg/GaudiTool.h" #include "PrKernel/PrMutUTHits.h" #include "vdt/log.h" #include "vdt/sqrt.h" +#include <GaudiKernel/DataObjectHandle.h> /* * @class PrAddUTHitsTool PrAddUTHitsTool.h @@ -52,16 +52,16 @@ * @date 2016-05-11 * */ -namespace LHCb::Pr{ +namespace LHCb::Pr { using simd = SIMDWrapper::avx2::types; - using I = simd::int_v; - using F = simd::float_v; + using I = simd::int_v; + using F = simd::float_v; using scalar = SIMDWrapper::scalar::types; - using sI = scalar::int_v; - using sF = scalar::float_v; + using sI = scalar::int_v; + using sF = scalar::float_v; - constexpr static int max_tracks= align_size( 1024 ); + constexpr static int max_tracks = align_size( 1024 ); constexpr static int maxSectors = 9; struct MiniStates final { @@ -102,7 +102,7 @@ namespace LHCb::Pr{ using Tracks = 
LHCb::Pr::Long::Tracks; public: - /// Standard constructor + /// Standard constructor using extends::extends; StatusCode initialize() override; @@ -119,7 +119,7 @@ namespace LHCb::Pr{ @param p momentum estimate. If none given, the one from the state will be taken */ - //LHCb::Pr::UT::Mut::Hits returnUTHits( LHCb::State& state, float& finalChi2, float p = 0 ) const; + // LHCb::Pr::UT::Mut::Hits returnUTHits( LHCb::State& state, float& finalChi2, float p = 0 ) const; private: StatusCode recomputeGeometry(); @@ -152,12 +152,16 @@ namespace LHCb::Pr{ ServiceHandle<ILHCbMagnetSvc> m_magFieldSvc{this, "MagneticField", "MagneticFieldSvc"}; - std::array<LHCb::Pr::Boundaries, UTInfo::TotalLayers> findAllSectors( Tracks& tracks, MiniStates& filteredStates) const; - LHCb::Pr::UT::Mut::Hits returnUTHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, std::size_t t) const; - bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const; - void calculateChi2( float& chi2, const float & bestChi2, float& finalDist, const float& p, - LHCb::Pr::UT::Mut::Hits& goodUT ) const; - void printInfo( float dist, float chi2, const LHCb::Pr::UT::Mut::Hits& goodUT ) const; + std::array<LHCb::Pr::Boundaries, UTInfo::TotalLayers> findAllSectors( Tracks& tracks, + MiniStates& filteredStates ) const; + LHCb::Pr::UT::Mut::Hits returnUTHits( MiniStates& filteredStates, + const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + std::size_t t ) const; + bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const; + void calculateChi2( float& chi2, const float& bestChi2, float& finalDist, const float& p, + LHCb::Pr::UT::Mut::Hits& goodUT ) const; + void printInfo( float dist, float chi2, const LHCb::Pr::UT::Mut::Hits& goodUT ) const; }; -} //namespace LHCb::Pr +} // namespace LHCb::Pr #endif // PRADDUTHITSTOOL_H diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index 046c25acc86..ed898072339 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -567,7 +567,7 @@ public: if ( sc.isFailure() ) return sc; // error printed already by GaudiAlgorithm if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Initialize" << endmsg; - //info()<<"......DEBUGS Initialize Forward BEGIN" <<endmsg; + // info()<<"......DEBUGS Initialize Forward BEGIN" <<endmsg; // Initialise stuff we imported from PrForwardTool @@ -780,7 +780,7 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSc ZoneCache const& cache ) const { if ( msgLevel( MSG::DEBUG ) ) debug() << "==> Execute" << endmsg; - //info()<<"......DEBUGS Forward BEGIN" <<endmsg; + // info()<<"......DEBUGS Forward BEGIN" <<endmsg; if ( UNLIKELY( input_tracks.size() == 0 ) ) { auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); @@ -2246,12 +2246,13 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_lhcbID<I>( currentsize, idx, input_tracks.template lhcbID<I>( uttrack, idx ) ); } result.store_nVPHits<I>( currentsize, vphits ); - result.store_nUTHits<I>( currentsize, 0); + result.store_nUTHits<I>( currentsize, 0 ); result.store_ut_index<I>( currentsize, 0, -1 ); } //== hits indices, max_fthits=15, not sure if this number is 
reasonable. - assert( id.size() <= LHCb::Pr::Long::Tracks::max_fthits && "Container cannot store more than 15 SciFi hits per track" ); + assert( id.size() <= LHCb::Pr::Long::Tracks::max_fthits && + "Container cannot store more than 15 SciFi hits per track" ); auto const& ihits = cand.ihits(); result.store_nFTHits<I>( currentsize, ihits.size() ); @@ -2293,14 +2294,14 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& if ( msgLevel( MSG::DEBUG ) ) debug() << "Store track quality " << cand.quality() << endmsg; // -- < Debug -------- } // next candidate - + // add UT hits into the tracks if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { - if ( m_addUTHitsTool.isEnabled() ) { - auto sc = m_addUTHitsTool->addUTHits( result ); - if ( sc.isFailure() ) info()<< "adding UT clusters failed!" <<endmsg; + if ( m_addUTHitsTool.isEnabled() ) { + auto sc = m_addUTHitsTool->addUTHits( result ); + if ( sc.isFailure() ) info() << "adding UT clusters failed!" << endmsg; } } - + return result; } diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index c7c6a585744..e22eb6cc150 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -96,7 +96,7 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit usedUTHits.emplace_back( id.utID().channelID() ); } } - + // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; for ( int iStation = 1; iStation <= UTInfo::Stations; ++iStation ) { @@ -104,7 +104,8 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit for ( int iRegion = 1; iRegion <= UTInfo::Regions; ++iRegion ) { for ( int iSector = 1; iSector <= UTInfo::Sectors; ++iSector ) { for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { - bool used = std::any_of(usedUTHits.begin(), usedUTHits.end(), [utid = uthit.chanID().channelID()](const auto& id) { return utid == id;}); + bool used = std::any_of( usedUTHits.begin(), usedUTHits.end(), + [utid = uthit.chanID().channelID()]( const auto& id ) { return utid == id; } ); if ( used ) continue; const unsigned int fullChanIdx = UT::HitHandler::HitsInUT::idx( iStation, iLayer, iRegion, iSector ); diff --git a/Tr/TrackUtils/src/TracksFTConverter.cpp b/Tr/TrackUtils/src/TracksFTConverter.cpp index 49f9d760e23..f53b89399cc 100644 --- a/Tr/TrackUtils/src/TracksFTConverter.cpp +++ b/Tr/TrackUtils/src/TracksFTConverter.cpp @@ -18,10 +18,10 @@ // LHCb #include "Event/StateParameters.h" #include "Event/Track.h" +#include "Kernel/FTChannelID.h" #include "Kernel/LHCbID.h" #include "Kernel/UTChannelID.h" #include "Kernel/VPChannelID.h" -#include "Kernel/FTChannelID.h" #include "Event/PrLongTracks.h" #include "Event/PrVeloTracks.h" @@ -101,24 +101,25 @@ public: newTrack.addToStates( state ); // Add LHCbIds - + int n_vphits = tracksFT.nVPHits<I>( t ).cast(); int n_fthits = tracksFT.nFTHits<I>( t ).cast(); int n_uthits = tracksFT.nUTHits<I>( t ).cast(); - - //info()<<"FT converter ........vp "<< n_vphits <<"...ft "<< n_fthits <<"..ut "<< n_uthits<<endmsg; - /* - for ( int i = n_vphits; i < n_vphits + n_fthits + n_uthits; i++ ) { - auto lhcbid = tracksFT.lhcbID<I>( t, i ).cast(); - //if(i <n_vphits) info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID(LHCb::VPChannelID( lhcbid ))<< " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; - //else info()<<i <<" FTconverter ...lhcbid ........ 
"<< LHCb::LHCbID( lhcbid )<< " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; - newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); - } - */ - newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered); + + // info()<<"FT converter ........vp "<< n_vphits <<"...ft "<< n_fthits <<"..ut "<< n_uthits<<endmsg; + /* + for ( int i = n_vphits; i < n_vphits + n_fthits + n_uthits; i++ ) { + auto lhcbid = tracksFT.lhcbID<I>( t, i ).cast(); + //if(i <n_vphits) info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID(LHCb::VPChannelID( lhcbid ))<< + " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; + //else info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID( lhcbid )<< " "<< tracksFT.lhcbID<I>( t, + i ) << endmsg; newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); + } + */ + newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); - newTrack.setHistory( Track::History::PrForward); + newTrack.setHistory( Track::History::PrForward ); newTrack.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); } -- GitLab From 82c9f0e1ad6ecfe4ede7980de987ae0072c0386c Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 26 May 2020 23:43:31 +0200 Subject: [PATCH 039/111] Remove TrackHelper, and other small changes --- Pr/PrVeloUT/src/PrVeloUT.cpp | 149 +++++++++++++++++------------------ Pr/PrVeloUT/src/PrVeloUT.h | 66 +++++++++------- 2 files changed, 107 insertions(+), 108 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index cdf082b8260..7eca170eb8f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -235,24 +235,21 @@ namespace LHCb::Pr { rhs[1] += wi * ui * dz; } template <std::size_t N> - void simpleFit( const std::array<int, N>& indices, const LHCb::Pr::UT::Mut::Hits& hits, TrackHelper& helper, - float zMidUT, float zKink, float invSigmaVeloSlope ) { + void simpleFit( const std::array<int, N>& indices, const LHCb::Pr::UT::Mut::Hits& hits, ProtoTracks& pTracks, + const int trackIndex, float zMidUT, float zKink, float invSigmaVeloSlope ) { static_assert( N == 3 || N == 4 ); - // commented, as the threshold bit might / will be removed - // -- Veto hit combinations with no high threshold hit - // -- = likely spillover - // const int nHighThres = std::count_if( hits.begin(), hits.end(), - // []( const UT::Mut::Hit* hit ){ return hit && hit->HitPtr->highThreshold(); - // }); - - // if( nHighThres < m_minHighThres ) return; - // -- Scale the z-component, to not run into numerical problems // -- with floats + const float wb = pTracks.wb<scalar::float_v>( 0 ).cast(); + const float xMidField = pTracks.xMidField<scalar::float_v>( 0 ).cast(); + const float invKinkVeloDist = pTracks.invKinkVeloDist<scalar::float_v>( 0 ).cast(); + const float stateX = pTracks.pos<scalar::float_v>( trackIndex ).x.cast(); + const float stateTx = pTracks.dir<scalar::float_v>( trackIndex ).x.cast(); + const float zDiff = 0.001f * ( zKink - zMidUT ); - auto mat = std::array{helper.wb, helper.wb * zDiff, helper.wb * zDiff * zDiff}; - auto rhs = std::array{helper.wb * helper.xMidField, helper.wb * helper.xMidField * zDiff}; + auto mat = std::array{wb, wb * zDiff, wb * zDiff * zDiff}; + auto rhs = std::array{wb * xMidField, wb * xMidField * zDiff}; std::for_each( indices.begin(), indices.end(), [&]( const auto index ) { addHit( mat, rhs, hits, index, zMidUT ); } ); @@ -267,8 +264,8 @@ namespace LHCb::Pr { // new VELO slope x const float xb = xTTFit + xSlopeTTFit * ( zKink - zMidUT ); - const float 
xSlopeVeloFit = ( xb - helper.state.x ) * helper.invKinkVeloDist; - const float chi2VeloSlope = ( helper.state.tx - xSlopeVeloFit ) * invSigmaVeloSlope; + const float xSlopeVeloFit = ( xb - stateX ) * invKinkVeloDist; + const float chi2VeloSlope = ( stateTx - xSlopeVeloFit ) * invSigmaVeloSlope; const float chi2TT = std::accumulate( indices.begin(), indices.end(), chi2VeloSlope * chi2VeloSlope, @@ -278,17 +275,20 @@ namespace LHCb::Pr { } ) / ( N + 1 - 2 ); - if ( chi2TT < helper.bestParams[1] ) { + if ( chi2TT < pTracks.chi2TT<scalar::float_v>( trackIndex ).cast() ) { // calculate q/p const float sinInX = xSlopeVeloFit * vdt::fast_isqrtf( 1.0f + xSlopeVeloFit * xSlopeVeloFit ); const float sinOutX = xSlopeTTFit * vdt::fast_isqrtf( 1.0f + xSlopeTTFit * xSlopeTTFit ); const float qp = ( sinInX - sinOutX ); - helper.bestParams = {qp, chi2TT, xTTFit, xSlopeTTFit}; + pTracks.store_chi2TT<scalar::float_v>( trackIndex, chi2TT ); + pTracks.store_qp<scalar::float_v>( trackIndex, qp ); + pTracks.store_xTT<scalar::float_v>( trackIndex, xTTFit ); + pTracks.store_xSlopeTT<scalar::float_v>( trackIndex, xSlopeTTFit ); - std::copy( indices.begin(), indices.end(), helper.bestIndices.begin() ); - if constexpr ( N == 3 ) { helper.bestIndices[3] = -1; } + for ( std::size_t i = 0; i < N; i++ ) { pTracks.store_hitIndex<scalar::int_v>( trackIndex, i, indices[i] ); } + if constexpr ( N == 3 ) { pTracks.store_hitIndex<scalar::int_v>( trackIndex, 3, -1 ); } } } } // namespace @@ -346,74 +346,60 @@ namespace LHCb::Pr { // -- We cannot put all found hits in an array, as otherwise the stack overflows // -- so we just do the whole thing in batches - for ( std::size_t t = 0; t < filteredStates.size; t += batchSize ) { - - for ( std::size_t m = 0; m < batchSize; ++m ) { - for ( auto& it : hitsInLayers[m].layerIndices ) it = -1; + const std::size_t filteredStatesSize = filteredStates.size; + + for ( std::size_t t = 0; t < filteredStatesSize; t += batchSize ) { + + // -- This is scalar, as the hits are found in a scalar way + filteredStates.size = 0; + for ( std::size_t t2 = 0; t2 < batchSize && t2 + t < filteredStatesSize; ++t2 ) { + for ( auto& it : hitsInLayers[filteredStates.size].layerIndices ) it = -1; + hitsInLayers[filteredStates.size].size = 0; + const bool foundHits = + getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[filteredStates.size], t + t2 ); + filteredStates.copyBack<scalar>( t + t2, foundHits ); } pTracks.size = 0; + for ( std::size_t tEff = 0; tEff < filteredStates.size; tEff++ ) { - for ( std::size_t t2 = 0; t2 < batchSize && t2 + t < filteredStates.size; t2++ ) { - - std::size_t tEff = t + t2; - hitsInLayers[t2].size = 0; - - if ( !getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[t2], tEff ) ) continue; - - // -- this is a temporary solution to gradually adapt the algo Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); - MiniState trState; - trState.x = pos.x.cast(); - trState.y = pos.y.cast(); - trState.z = pos.z.cast(); - trState.tx = dir.x.cast(); - trState.ty = dir.y.cast(); - - TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); - - if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } - if ( helper.bestIndices[0] == -1 ) continue; - - scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); - int trackIndex = pTracks.size; - // -- manual compressstore to keep everything in 
sync and fill the registers in the last function + pTracks.fillHelperParams<scalar>( pos, dir, c_zKink, c_sigmaVeloSlope ); pTracks.store_pos<scalar::float_v>( trackIndex, pos ); pTracks.store_dir<scalar::float_v>( trackIndex, dir ); - pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); - pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); + pTracks.store_chi2TT<scalar::float_v>( trackIndex, m_maxPseudoChi2.value() ); + + pTracks.store_hitIndex<scalar::int_v>( trackIndex, 0, -1 ); + if ( !formClusters<true>( hitsInLayers[tEff], pTracks, trackIndex ) ) { + formClusters<false>( hitsInLayers[tEff], pTracks, trackIndex ); + } + if ( pTracks.hitIndex<scalar::int_v>( trackIndex, 0 ).cast() == -1 ) continue; - // -- another temporary thing: Put the clusters in an array - // -- order is: - pTracks.store_xTT<scalar::float_v>( trackIndex, helper.bestParams[2] ); - pTracks.store_xSlopeTT<scalar::float_v>( trackIndex, helper.bestParams[3] ); - pTracks.store_qp<scalar::float_v>( trackIndex, helper.bestParams[0] ); - pTracks.store_chi2TT<scalar::float_v>( trackIndex, helper.bestParams[1] ); + scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); + pTracks.store_hitContIndex<scalar::int_v>( trackIndex, tEff ); - int nHits = 0; // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 - for ( auto hitI : helper.bestIndices ) { - pTracks.store_x<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].xs[hitI] ); - pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitI] ); - pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitI] ); - - scalar::float_v weight = ( hitI == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitI]; - pTracks.store_weight<scalar::float_v>( trackIndex, nHits, weight ); - - LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitI] ) ); - pTracks.store_id<scalar::int_v>( trackIndex, nHits, id.lhcbID() ); // not sure if correct - pTracks.store_hitIndex<scalar::int_v>( trackIndex, nHits, - hitsInLayers[t2].indexs[hitI] ); // not sure if correct - nHits++; + for ( int i = 0; i < 4; ++i ) { + const int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); + pTracks.store_x<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].xs[hitI] ); + pTracks.store_z<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].zs[hitI] ); + pTracks.store_sin<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].sins[hitI] ); + + scalar::float_v weight = ( hitI == -1 ) ? 
0.0f : hitsInLayers[tEff].weights[hitI]; + pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); + + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[tEff].channelIDs[hitI] ) ); + pTracks.store_id<scalar::int_v>( trackIndex, i, id.lhcbID() ); // not sure if correct + pTracks.store_hitIndex<scalar::int_v>( trackIndex, i, + hitsInLayers[tEff].indexs[hitI] ); // not sure if correct } - pTracks.size++; } - prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } @@ -627,7 +613,7 @@ namespace LHCb::Pr { // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - filteredStates.copyBack( t, compressMask ); + filteredStates.copyBack<simd>( t, compressMask ); } return compBoundsArray; @@ -742,7 +728,8 @@ namespace LHCb::Pr { // Form clusters //========================================================================= template <bool forward> - bool VeloUT::formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, TrackHelper& helper ) const { + bool VeloUT::formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, ProtoTracks& pTracks, + const int trackIndex ) const { const int begin0 = forward ? hitsInLayers.layerIndices[0] : hitsInLayers.layerIndices[3]; const int end0 = forward ? hitsInLayers.layerIndices[1] : hitsInLayers.size; @@ -758,6 +745,8 @@ namespace LHCb::Pr { bool fourLayerSolution = false; + const float stateTx = pTracks.dir<scalar::float_v>( trackIndex ).x.cast(); + // -- this is scalar for the moment for ( int i0 = begin0; i0 < end0; ++i0 ) { @@ -772,7 +761,7 @@ namespace LHCb::Pr { const float tx = ( xhitLayer2 - xhitLayer0 ) / ( zhitLayer2 - zhitLayer0 ); - if ( std::abs( tx - helper.state.tx ) > m_deltaTx2 ) continue; + if ( std::abs( tx - stateTx ) > m_deltaTx2 ) continue; int bestHit1Index = -1; float hitTol = m_hitTol2; @@ -808,21 +797,25 @@ namespace LHCb::Pr { // -- All hits found if ( bestHit1Index != -1 && bestHit3Index != -1 ) { - simpleFit( std::array{i0, bestHit1Index, i2, bestHit3Index}, hitsInLayers, helper, m_zMidUT, c_zKink, - c_invSigmaVeloSlope ); + simpleFit( std::array{i0, bestHit1Index, i2, bestHit3Index}, hitsInLayers, pTracks, trackIndex, m_zMidUT, + c_zKink, c_invSigmaVeloSlope ); - if ( !fourLayerSolution && helper.bestIndices[0] != -1 ) { fourLayerSolution = true; } + if ( !fourLayerSolution && pTracks.hitIndex<scalar::int_v>( trackIndex, 0 ).cast() != -1 ) { + fourLayerSolution = true; + } continue; } // -- Nothing found in layer 3 if ( !fourLayerSolution && bestHit1Index != -1 ) { - simpleFit( std::array{i0, bestHit1Index, i2}, hitsInLayers, helper, m_zMidUT, c_zKink, c_invSigmaVeloSlope ); + simpleFit( std::array{i0, bestHit1Index, i2}, hitsInLayers, pTracks, trackIndex, m_zMidUT, c_zKink, + c_invSigmaVeloSlope ); continue; } // -- Noting found in layer 1 if ( !fourLayerSolution && bestHit3Index != -1 ) { - simpleFit( std::array{i0, bestHit3Index, i2}, hitsInLayers, helper, m_zMidUT, c_zKink, c_invSigmaVeloSlope ); + simpleFit( std::array{i0, bestHit3Index, i2}, hitsInLayers, pTracks, trackIndex, m_zMidUT, c_zKink, + c_invSigmaVeloSlope ); continue; } } diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 972488bc51d..ec2bf7b6e40 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -71,10 +71,6 @@ namespace LHCb::Pr { using simd = SIMDWrapper::avx2::types; using scalar = SIMDWrapper::scalar::types; - struct MiniState final { - float x, y, z, tx, ty; - }; - struct MiniStatesArray final { constexpr static int max_tracks = 
align_size( 1024 ); @@ -98,17 +94,22 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( cov, (float*)&( covs[0] ), (float*)&( covs[max_tracks] ), (float*)&( covs[2 * max_tracks] ) ) // -- Copy back the entries, but with a filtering mask - void copyBack( std::size_t at, simd::mask_v mask ) { - simd::float_v( &poss[at] ).compressstore( mask, &poss[size] ); - simd::float_v( &poss[at + max_tracks] ).compressstore( mask, &poss[size + max_tracks] ); - simd::float_v( &poss[at + 2 * max_tracks] ).compressstore( mask, &poss[size + 2 * max_tracks] ); - simd::float_v( &dirs[at] ).compressstore( mask, &dirs[size] ); - simd::float_v( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] ); - simd::float_v( &covs[at + max_tracks] ).compressstore( mask, &covs[size] ); - simd::float_v( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] ); - simd::float_v( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] ); - simd::int_v( &indexs[at] ).compressstore( mask, &indexs[size] ); - size += simd::popcount( mask ); + template <typename dType> + void copyBack( std::size_t at, typename dType::mask_v mask ) { + + using F = typename dType::float_v; + using I = typename dType::int_v; + + F( &poss[at] ).compressstore( mask, &poss[size] ); + F( &poss[at + max_tracks] ).compressstore( mask, &poss[size + max_tracks] ); + F( &poss[at + 2 * max_tracks] ).compressstore( mask, &poss[size + 2 * max_tracks] ); + F( &dirs[at] ).compressstore( mask, &dirs[size] ); + F( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] ); + F( &covs[at + max_tracks] ).compressstore( mask, &covs[size] ); + F( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] ); + F( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] ); + I( &indexs[at] ).compressstore( mask, &indexs[size] ); + size += dType::popcount( mask ); } }; @@ -144,6 +145,10 @@ namespace LHCb::Pr { struct ProtoTracks final { + std::array<float, simd::size> wbs; + std::array<float, simd::size> xMidFields; + std::array<float, simd::size> invKinkVeloDists; + // -- this is for the hits // -- this does _not_ include overlap hits, so only 4 per track std::array<float, 4 * batchSize> xs; @@ -151,7 +156,7 @@ namespace LHCb::Pr { std::array<float, 4 * batchSize> weightss{}; // this needs to be zero-initialized std::array<float, 4 * batchSize> sins; std::array<int, 4 * batchSize> ids; - std::array<int, 4 * batchSize> hitIndexs; + std::array<int, 4 * batchSize> hitIndexs{-1}; // -- this is the output of the fit std::array<float, batchSize> qps; @@ -187,21 +192,22 @@ namespace LHCb::Pr { VEC3_SOA_ACCESSOR( pos, (float*)&( statePoss[0] ), (float*)&( statePoss[batchSize] ), (float*)&( statePoss[2 * batchSize] ) ) VEC3_XY_SOA_ACCESSOR( dir, (float*)&( stateDirs[0] ), (float*)&( stateDirs[batchSize] ), 1.0f ) - }; - struct TrackHelper final { - TrackHelper( const MiniState& miniState, const float zKink, const float sigmaVeloSlope, const float maxPseudoChi2 ) - : state( miniState ), bestParams{{0.0f, maxPseudoChi2, 0.0f, 0.0f}} { - xMidField = state.x + state.tx * ( zKink - state.z ); - const float a = sigmaVeloSlope * ( zKink - state.z ); - wb = 1.0f / ( a * a ); - invKinkVeloDist = 1.0f / ( zKink - state.z ); - } + SOA_ACCESSOR( wb, wbs.data() ) + SOA_ACCESSOR( xMidField, xMidFields.data() ) + SOA_ACCESSOR( invKinkVeloDist, invKinkVeloDists.data() ) + + template <typename dType> + void fillHelperParams( Vec3<typename dType::float_v> pos, Vec3<typename dType::float_v> dir, const 
float zKink, + const float sigmaVeloSlope ) { - MiniState state; - std::array<int, 4> bestIndices = {-1, -1, -1, -1}; - std::array<float, 4> bestParams; - float wb, invKinkVeloDist, xMidField; + using F = typename dType::float_v; + + F( pos.x + dir.x * ( zKink - pos.z ) ).store( xMidFields.data() ); + F a = sigmaVeloSlope * ( zKink - pos.z ); + F( 1.0f / ( a * a ) ).store( wbs.data() ); + F( 1.0f / ( zKink - pos.z ) ).store( invKinkVeloDists.data() ); + } }; class VeloUT : public Gaudi::Functional::Transformer<Upstream::Tracks( const EventContext&, const Velo::Tracks&, @@ -269,7 +275,7 @@ namespace LHCb::Pr { const simd::float_v& yTol, const int firstIndex, const int lastIndex ) const; template <bool forward> - bool formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, TrackHelper& helper ) const; + bool formClusters( const LHCb::Pr::UT::Mut::Hits& hitsInLayers, ProtoTracks& pTracks, const int trackIndex ) const; template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, -- GitLab From 3cebd0e3cdb5c5a9dbd21acfac96dce7746371aa Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 27 May 2020 17:14:45 +0200 Subject: [PATCH 040/111] remove unnecessary hit tolerance variable --- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ++-- Pr/PrVeloUT/src/PrVeloUT.h | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 7eca170eb8f..fd5c0f9f4ed 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -764,7 +764,7 @@ namespace LHCb::Pr { if ( std::abs( tx - stateTx ) > m_deltaTx2 ) continue; int bestHit1Index = -1; - float hitTol = m_hitTol2; + float hitTol = m_hitTol; for ( int i1 = begin1; i1 < end1; ++i1 ) { @@ -781,7 +781,7 @@ namespace LHCb::Pr { if ( fourLayerSolution && bestHit1Index == -1 ) continue; int bestHit3Index = -1; - hitTol = m_hitTol2; + hitTol = m_hitTol; for ( int i3 = begin3; i3 < end3; ++i3 ) { const float xhitLayer3 = hitsInLayers.xs[i3]; diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index ec2bf7b6e40..3bbdba96e35 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -232,8 +232,7 @@ namespace LHCb::Pr { Gaudi::Property<float> m_maxPseudoChi2{this, "maxPseudoChi2", 1280.}; Gaudi::Property<float> m_yTol{this, "YTolerance", 0.5 * Gaudi::Units::mm}; // 0.8 Gaudi::Property<float> m_yTolSlope{this, "YTolSlope", 0.08}; // 0.2 - Gaudi::Property<float> m_hitTol1{this, "HitTol1", 6.0 * Gaudi::Units::mm}; - Gaudi::Property<float> m_hitTol2{this, "HitTol2", 0.8 * Gaudi::Units::mm}; // 0.8 + Gaudi::Property<float> m_hitTol{this, "HitTol", 0.8 * Gaudi::Units::mm}; // 0.8 Gaudi::Property<float> m_deltaTx1{this, "DeltaTx1", 0.035}; Gaudi::Property<float> m_deltaTx2{this, "DeltaTx2", 0.018}; // 0.02 Gaudi::Property<float> m_maxXSlope{this, "MaxXSlope", 0.350}; -- GitLab From d06bab65c57200429a08b88de671958bcdcdbcfc Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 2 Jun 2020 10:41:46 +0200 Subject: [PATCH 041/111] fix bugs in porting of code --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 66 +++++++++++++++++-------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 2 +- Pr/PrKernel/PrKernel/PrMutUTHits.h | 3 ++ 3 files changed, 49 insertions(+), 22 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 32722c6cef6..a29fdbef6da 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ 
-58,6 +58,22 @@ namespace LHCb::Pr { } return pos; } + void ProjSortSIMD( LHCb::Pr::UT::Mut::Hits& muthits ){ + for(int i = 0; i < int (muthits.size)-1; i++){ + for(int j = 0; j < int (muthits.size)-i-1; j++){ + if( muthits.projections[j] > muthits.projections[j+1] ) { + std::swap ( muthits.projections[j], muthits.projections[j+1] ); + std::swap ( muthits.indexs[j], muthits.indexs[j+1]); + std::swap ( muthits.channelIDs[j], muthits.channelIDs[j+1]); + std::swap ( muthits.xs[j], muthits.xs[j+1]); + std::swap ( muthits.zs[j], muthits.zs[j+1]); + std::swap ( muthits.coss[j], muthits.coss[j+1]); + std::swap ( muthits.sins[j], muthits.sins[j+1]); + std::swap ( muthits.weights[j], muthits.weights[j+1]); + } + } + } + } } // namespace using ROOT::Math::CholeskyDecomp; @@ -79,7 +95,7 @@ namespace LHCb::Pr { } //========================================================================= - // Add the TT hits on the track, only the ids. + // Add the UT hits on the track, only the ids. //========================================================================= StatusCode PrAddUTHitsTool::addUTHits( Tracks& tracks ) const { @@ -89,6 +105,7 @@ namespace LHCb::Pr { for ( auto t = 0; t < int( filteredStates.size ); t++ ) { auto myUTHits = returnUTHits( filteredStates, compBoundsArray, t ); + if ( ( myUTHits.size < 3 ) ) continue; assert( myUTHits.size <= LHCb::Pr::Upstream::Tracks::max_uthits && "Container cannot store more than 16 UT hits per track" ); @@ -97,7 +114,7 @@ namespace LHCb::Pr { const int nVPHits = tracks.nVPHits<sI>( itr ).cast(); const int nFTHits = tracks.nFTHits<sI>( itr ).cast(); tracks.store_nUTHits<sI>( itr, int( myUTHits.size ) ); - + for ( auto i = 0; i < int( myUTHits.size ); i++ ) { // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) @@ -121,7 +138,6 @@ namespace LHCb::Pr { MiniStates& filteredStates ) const { std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; - int contSize = tracks.size(); filteredStates.size = 0; std::array<simd::int_v, UTInfo::TotalLayers> posArray; std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors @@ -129,8 +145,8 @@ namespace LHCb::Pr { //--- This now works with up to 9 sectors const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); - for ( int t = 0; t < contSize; t += simd::size ) { - auto loopMask = simd::loop_mask( t, contSize ); + for ( int t = 0; t < tracks.size(); t += simd::size ) { + auto loopMask = simd::loop_mask( t, int(tracks.size())); simd::int_v nLayers{0}; //---Define the tolerance parameters @@ -199,7 +215,7 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * 98 - 1; + helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * UTInfo::Sectors- 1; counter++; } } @@ -267,6 +283,9 @@ namespace LHCb::Pr { float bestChi2 = p_maxChi2Tol.value() + p_maxChi2Slope.value() / ( p - p_maxChi2POffset.value() ); + // sort of hits in increasing projection + ProjSortSIMD(hitsInLayers); + // -- Loop over all hits and make "groups" of hits to form a candidate for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size ); ++itBeg ) { @@ -284,14 +303,12 @@ namespace LHCb::Pr { maxProj = firstProj + sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); } - - // -- This means that there would be less than 3 hits, 
which does not work, so we can skip this right away + //TODO -- This means that there would be less than 3 hits, which does not work, so we can skip this right away if ( ( hitsInLayers.projections[itBeg + 2] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group for ( auto itEnd = itBeg; itEnd < int( hitsInLayers.size ); itEnd++ ) { - auto index = goodUT.size; if ( hitsInLayers.projections[itEnd] > maxProj ) break; if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { @@ -299,6 +316,7 @@ namespace LHCb::Pr { ++nbPlane; } + auto index = goodUT.size; goodUT.xs[index] = hitsInLayers.xs[itEnd]; goodUT.zs[index] = hitsInLayers.zs[itEnd]; goodUT.coss[index] = hitsInLayers.coss[itEnd]; @@ -308,6 +326,7 @@ namespace LHCb::Pr { goodUT.channelIDs[index] = hitsInLayers.channelIDs[itEnd]; goodUT.indexs[index] = hitsInLayers.indexs[itEnd]; goodUT.size += 1; + } if ( 3 > nbPlane ) continue; // -- Need at least hits in 3 planes @@ -332,8 +351,6 @@ namespace LHCb::Pr { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, goodUT ); // ---------------------------------- for ( auto i = 0; i < int( goodUT.size ); i++ ) { - // auto loopmask = simd::loop_mask(i, goodUT.size ); - // UTHits[t].copy_back( goodUT, i, loopmask ); UTHits.xs[i] = goodUT.xs[i]; UTHits.zs[i] = goodUT.zs[i]; UTHits.coss[i] = goodUT.coss[i]; @@ -379,7 +396,7 @@ namespace LHCb::Pr { const float bendParam = p_utParam.value() * -1 * signedReCur * qop; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "selectHits: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " + debug() << "State z: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " << stateTy << " p " << p << endmsg; std::size_t nSize = 0; @@ -412,7 +429,7 @@ namespace LHCb::Pr { const auto yMax = max( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); const auto yy = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; auto xx = myHits.xAtYEq0<F>( i ) + yy * myHits.dxDy<F>( i ); - F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ ) + + F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ ) + bendParam * ( myHits.zAtYEq0<F>( i ) - p_zUTField.value() ); F absdx = abs( xx - xPred ); @@ -421,16 +438,12 @@ namespace LHCb::Pr { auto mask = ( yMin - yTol < yPred && yPred < yMax + yTol ) && ( absdx < xTol ) && loopMask; if ( none( mask ) ) continue; - const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / + const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / ( myHits.zAtYEq0<F>( i ) - p_zMSPoint.value() ); // save the selected hits auto index = hitsInLayers.size; - if ( ( index + simd::size ) >= LHCb::Pr::UT::Mut::Hits::max_hits ) { - error() << "Reached maximum number of hits. 
This is a temporary limitation and needs to be fixed" << endmsg; - break; - } hitsInLayers.compressstore_x( index, mask, xx ); hitsInLayers.compressstore_z( index, mask, myHits.zAtYEq0<F>( i ) ); hitsInLayers.compressstore_cos( index, mask, myHits.cos<F>( i ) ); @@ -438,8 +451,7 @@ namespace LHCb::Pr { hitsInLayers.compressstore_weight( index, mask, myHits.weight<F>( i ) ); hitsInLayers.compressstore_projection( index, mask, projDist ); hitsInLayers.compressstore_channelID( index, mask, myHits.channelID<I>( i ) ); - hitsInLayers.compressstore_index( index, mask, - simd::indices( i ) ); // fill the index in the original hit container + hitsInLayers.compressstore_index( index, mask, simd::indices(i)) ; hitsInLayers.size += simd::popcount( mask ); } } @@ -457,7 +469,7 @@ namespace LHCb::Pr { // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - auto worst = 0; + int worst = 0; float dist = 0; chi2 = 1.e20; @@ -574,7 +586,19 @@ namespace LHCb::Pr { rhs[2] -= w * dist2 * t; if ( 1 == differentPlanes[goodUT.planeCode<sI>( worst ).cast()]-- ) --nDoF; + // remove the worst hit + goodUT.xs[worst] = goodUT.xs[nHits-1]; + goodUT.zs[worst] = goodUT.zs[nHits-1]; + goodUT.coss[worst] = goodUT.coss[nHits-1]; + goodUT.sins[worst] = goodUT.sins[nHits-1]; + goodUT.weights[worst] = goodUT.weights[nHits-1]; + goodUT.projections[worst] = goodUT.projections[nHits-1]; + goodUT.channelIDs[worst] = goodUT.channelIDs[nHits-1]; + goodUT.indexs[worst] = goodUT.indexs[nHits-1]; + goodUT.size = goodUT.size -1; + --nHits; + chi2 = 1.e11; // S--Start new iteration } // -- Increase the sanity check counter ++counter; diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index 4af9ea8c355..0eb29505ea2 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -87,7 +87,7 @@ namespace LHCb::Pr { }; struct Boundaries final { - std::array<int, 9 * max_tracks> sects; + std::array<int, maxSectors * max_tracks> sects; std::array<float, max_tracks> xTols; std::array<int, max_tracks> nPoss; diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index 0d7afa034ed..fc0e60f04d0 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -35,6 +35,7 @@ namespace LHCb::Pr::UT { alignas( 64 ) std::array<float, max_hits> coss; alignas( 64 ) std::array<float, max_hits> sins; alignas( 64 ) std::array<float, max_hits> weights; + alignas( 64 ) std::array<float, max_hits> projections; alignas( 64 ) std::array<int, max_hits> channelIDs; alignas( 64 ) std::array<int, max_hits> indexs; @@ -47,6 +48,7 @@ namespace LHCb::Pr::UT { // at some point one needs to calculate the sin, we'll see if calculating or storing it is faster SOA_ACCESSOR( sin, sins.data() ) SOA_ACCESSOR( weight, weights.data() ) + SOA_ACCESSOR( projection, projections.data() ) SOA_ACCESSOR( channelID, channelIDs.data() ) SOA_ACCESSOR( index, indexs.data() ) @@ -59,6 +61,7 @@ namespace LHCb::Pr::UT { return 2 * ( station - 1 ) + ( layer - 1 ); } + }; } // namespace Mut -- GitLab From 9c52b2b24402a6e9bdb9a0e46a3c6c78591ab1ef Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 2 Jun 2020 12:12:40 +0200 Subject: [PATCH 042/111] partial vectorisation --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 31 +++++-------------------- Pr/PrKernel/PrKernel/PrMutUTHits.h | 18 ++++++++++++++ 2 files changed, 24 insertions(+), 25 deletions(-) diff --git 
a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index a29fdbef6da..6cc11a437a5 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -315,18 +315,8 @@ namespace LHCb::Pr { firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] = 1; // -- Count number of fired planes ++nbPlane; } - - auto index = goodUT.size; - goodUT.xs[index] = hitsInLayers.xs[itEnd]; - goodUT.zs[index] = hitsInLayers.zs[itEnd]; - goodUT.coss[index] = hitsInLayers.coss[itEnd]; - goodUT.sins[index] = hitsInLayers.sins[itEnd]; - goodUT.weights[index] = hitsInLayers.weights[itEnd]; - goodUT.projections[index] = hitsInLayers.projections[itEnd]; - goodUT.channelIDs[index] = hitsInLayers.channelIDs[itEnd]; - goodUT.indexs[index] = hitsInLayers.indexs[itEnd]; - goodUT.size += 1; - + scalar::mask_v mask = 1; + goodUT.copy_from<scalar>(hitsInLayers, itEnd, mask); } if ( 3 > nbPlane ) continue; // -- Need at least hits in 3 planes @@ -350,24 +340,15 @@ namespace LHCb::Pr { // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, goodUT ); // ---------------------------------- - for ( auto i = 0; i < int( goodUT.size ); i++ ) { - UTHits.xs[i] = goodUT.xs[i]; - UTHits.zs[i] = goodUT.zs[i]; - UTHits.coss[i] = goodUT.coss[i]; - UTHits.sins[i] = goodUT.sins[i]; - UTHits.weights[i] = goodUT.weights[i]; - UTHits.projections[i] = goodUT.projections[i]; - UTHits.channelIDs[i] = goodUT.channelIDs[i]; - UTHits.indexs[i] = goodUT.indexs[i]; - UTHits.size += 1; + for ( auto i = 0; i < int( goodUT.size ); i +=simd::size ) { + auto loopmask = simd::loop_mask(i, goodUT.size ); + UTHits.copy_from<simd>( goodUT, i, loopmask ); } - bestChi2 = chi2; } } // -- Assign the final hit container and chi2 to the variables which are returned. 
- // finalChi2 = bestChi2; if ( UTHits.size > 2 ) { m_hitsAddedCounter += UTHits.size; m_tracksWithHitsCounter++; @@ -469,7 +450,6 @@ namespace LHCb::Pr { // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - int worst = 0; float dist = 0; chi2 = 1.e20; @@ -524,6 +504,7 @@ namespace LHCb::Pr { while ( chi2 > 1e10 && counter < maxIterations ) { worstDiff = -1.0; + int worst = -1; // -- This is needed since 'CholeskyDecomp' overwrites rhs // -- which is needed later on diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index fc0e60f04d0..d694ff58626 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -61,7 +61,25 @@ namespace LHCb::Pr::UT { return 2 * ( station - 1 ) + ( layer - 1 ); } + + template <typename simd, typename MaskT> + void copy_from( const Hits& hits, int from, MaskT mask ){ + using I = typename simd::int_v; + using F = typename simd::float_v; + assert( from + simd::popcount( mask ) < max_hits ); + + F(&hits.xs[from]).compressstore(mask, &xs[size]); + F(&hits.zs[from]).compressstore(mask, &zs[size]); + F(&hits.coss[from]).compressstore(mask, &coss[size]); + F(&hits.sins[from]).compressstore(mask, &sins[size]); + F(&hits.weights[from]).compressstore(mask, &weights[size]); + F(&hits.projections[from]).compressstore(mask, &projections[size]); + I(&hits.channelIDs[from]).compressstore(mask, &channelIDs[size]); + I(&hits.indexs[from]).compressstore(mask, &indexs[size]); + + size += simd::popcount( mask ); + } }; } // namespace Mut -- GitLab From e09a1e4d62772d78c791a4b393f844d42e6d12b1 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 2 Jun 2020 10:13:29 +0000 Subject: [PATCH 043/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8618805 --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 76 ++++++++++++------------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 4 +- Pr/PrKernel/PrKernel/PrMutUTHits.h | 19 +++---- 3 files changed, 49 insertions(+), 50 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 6cc11a437a5..246b02f84ce 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -58,21 +58,21 @@ namespace LHCb::Pr { } return pos; } - void ProjSortSIMD( LHCb::Pr::UT::Mut::Hits& muthits ){ - for(int i = 0; i < int (muthits.size)-1; i++){ - for(int j = 0; j < int (muthits.size)-i-1; j++){ - if( muthits.projections[j] > muthits.projections[j+1] ) { - std::swap ( muthits.projections[j], muthits.projections[j+1] ); - std::swap ( muthits.indexs[j], muthits.indexs[j+1]); - std::swap ( muthits.channelIDs[j], muthits.channelIDs[j+1]); - std::swap ( muthits.xs[j], muthits.xs[j+1]); - std::swap ( muthits.zs[j], muthits.zs[j+1]); - std::swap ( muthits.coss[j], muthits.coss[j+1]); - std::swap ( muthits.sins[j], muthits.sins[j+1]); - std::swap ( muthits.weights[j], muthits.weights[j+1]); + void ProjSortSIMD( LHCb::Pr::UT::Mut::Hits& muthits ) { + for ( int i = 0; i < int( muthits.size ) - 1; i++ ) { + for ( int j = 0; j < int( muthits.size ) - i - 1; j++ ) { + if ( muthits.projections[j] > muthits.projections[j + 1] ) { + std::swap( muthits.projections[j], muthits.projections[j + 1] ); + std::swap( muthits.indexs[j], muthits.indexs[j + 1] ); + std::swap( muthits.channelIDs[j], muthits.channelIDs[j + 1] ); + std::swap( muthits.xs[j], muthits.xs[j + 1] ); + std::swap( muthits.zs[j], muthits.zs[j + 1] ); + std::swap( 
muthits.coss[j], muthits.coss[j + 1] ); + std::swap( muthits.sins[j], muthits.sins[j + 1] ); + std::swap( muthits.weights[j], muthits.weights[j + 1] ); } } - } + } } } // namespace @@ -114,7 +114,7 @@ namespace LHCb::Pr { const int nVPHits = tracks.nVPHits<sI>( itr ).cast(); const int nFTHits = tracks.nFTHits<sI>( itr ).cast(); tracks.store_nUTHits<sI>( itr, int( myUTHits.size ) ); - + for ( auto i = 0; i < int( myUTHits.size ); i++ ) { // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) @@ -138,7 +138,7 @@ namespace LHCb::Pr { MiniStates& filteredStates ) const { std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; - filteredStates.size = 0; + filteredStates.size = 0; std::array<simd::int_v, UTInfo::TotalLayers> posArray; std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors std::array<int, UTInfo::TotalLayers> maxColsRows; @@ -146,7 +146,7 @@ namespace LHCb::Pr { //--- This now works with up to 9 sectors const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); for ( int t = 0; t < tracks.size(); t += simd::size ) { - auto loopMask = simd::loop_mask( t, int(tracks.size())); + auto loopMask = simd::loop_mask( t, int( tracks.size() ) ); simd::int_v nLayers{0}; //---Define the tolerance parameters @@ -215,7 +215,7 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * UTInfo::Sectors- 1; + helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * UTInfo::Sectors - 1; counter++; } } @@ -284,7 +284,7 @@ namespace LHCb::Pr { float bestChi2 = p_maxChi2Tol.value() + p_maxChi2Slope.value() / ( p - p_maxChi2POffset.value() ); // sort of hits in increasing projection - ProjSortSIMD(hitsInLayers); + ProjSortSIMD( hitsInLayers ); // -- Loop over all hits and make "groups" of hits to form a candidate for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size ); ++itBeg ) { @@ -303,7 +303,7 @@ namespace LHCb::Pr { maxProj = firstProj + sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); } - //TODO -- This means that there would be less than 3 hits, which does not work, so we can skip this right away + // TODO -- This means that there would be less than 3 hits, which does not work, so we can skip this right away if ( ( hitsInLayers.projections[itBeg + 2] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group @@ -316,7 +316,7 @@ namespace LHCb::Pr { ++nbPlane; } scalar::mask_v mask = 1; - goodUT.copy_from<scalar>(hitsInLayers, itEnd, mask); + goodUT.copy_from<scalar>( hitsInLayers, itEnd, mask ); } if ( 3 > nbPlane ) continue; // -- Need at least hits in 3 planes @@ -340,8 +340,8 @@ namespace LHCb::Pr { // ---------------------------------- if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) printInfo( dist, chi2, goodUT ); // ---------------------------------- - for ( auto i = 0; i < int( goodUT.size ); i +=simd::size ) { - auto loopmask = simd::loop_mask(i, goodUT.size ); + for ( auto i = 0; i < int( goodUT.size ); i += simd::size ) { + auto loopmask = simd::loop_mask( i, goodUT.size ); UTHits.copy_from<simd>( goodUT, i, loopmask ); } bestChi2 = chi2; @@ -377,8 +377,8 @@ namespace LHCb::Pr { const float bendParam = p_utParam.value() * -1 * signedReCur * qop; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - 
debug() << "State z: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " - << stateTy << " p " << p << endmsg; + debug() << "State z: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " << stateTy + << " p " << p << endmsg; std::size_t nSize = 0; std::size_t nLayers = 0; @@ -410,7 +410,7 @@ namespace LHCb::Pr { const auto yMax = max( myHits.yBegin<F>( i ), myHits.yEnd<F>( i ) ); const auto yy = stateY + ( myHits.zAtYEq0<F>( i ) - stateZ ) * stateTy; auto xx = myHits.xAtYEq0<F>( i ) + yy * myHits.dxDy<F>( i ); - F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ ) + + F xPred = stateX + stateTx * ( myHits.zAtYEq0<F>( i ) - stateZ ) + bendParam * ( myHits.zAtYEq0<F>( i ) - p_zUTField.value() ); F absdx = abs( xx - xPred ); @@ -419,7 +419,7 @@ namespace LHCb::Pr { auto mask = ( yMin - yTol < yPred && yPred < yMax + yTol ) && ( absdx < xTol ) && loopMask; if ( none( mask ) ) continue; - const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / + const F projDist = ( xPred - xx ) * ( p_zUTProj.value() - p_zMSPoint.value() ) / ( myHits.zAtYEq0<F>( i ) - p_zMSPoint.value() ); // save the selected hits @@ -432,7 +432,7 @@ namespace LHCb::Pr { hitsInLayers.compressstore_weight( index, mask, myHits.weight<F>( i ) ); hitsInLayers.compressstore_projection( index, mask, projDist ); hitsInLayers.compressstore_channelID( index, mask, myHits.channelID<I>( i ) ); - hitsInLayers.compressstore_index( index, mask, simd::indices(i)) ; + hitsInLayers.compressstore_index( index, mask, simd::indices( i ) ); hitsInLayers.size += simd::popcount( mask ); } } @@ -450,8 +450,8 @@ namespace LHCb::Pr { // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - float dist = 0; - chi2 = 1.e20; + float dist = 0; + chi2 = 1.e20; const float xTol = p_xTol.value() + p_xTolSlope.value() / p; const float fixedWeight = 9. 
/ ( xTol * xTol ); @@ -568,15 +568,15 @@ namespace LHCb::Pr { if ( 1 == differentPlanes[goodUT.planeCode<sI>( worst ).cast()]-- ) --nDoF; // remove the worst hit - goodUT.xs[worst] = goodUT.xs[nHits-1]; - goodUT.zs[worst] = goodUT.zs[nHits-1]; - goodUT.coss[worst] = goodUT.coss[nHits-1]; - goodUT.sins[worst] = goodUT.sins[nHits-1]; - goodUT.weights[worst] = goodUT.weights[nHits-1]; - goodUT.projections[worst] = goodUT.projections[nHits-1]; - goodUT.channelIDs[worst] = goodUT.channelIDs[nHits-1]; - goodUT.indexs[worst] = goodUT.indexs[nHits-1]; - goodUT.size = goodUT.size -1; + goodUT.xs[worst] = goodUT.xs[nHits - 1]; + goodUT.zs[worst] = goodUT.zs[nHits - 1]; + goodUT.coss[worst] = goodUT.coss[nHits - 1]; + goodUT.sins[worst] = goodUT.sins[nHits - 1]; + goodUT.weights[worst] = goodUT.weights[nHits - 1]; + goodUT.projections[worst] = goodUT.projections[nHits - 1]; + goodUT.channelIDs[worst] = goodUT.channelIDs[nHits - 1]; + goodUT.indexs[worst] = goodUT.indexs[nHits - 1]; + goodUT.size = goodUT.size - 1; --nHits; chi2 = 1.e11; // S--Start new iteration diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index 0eb29505ea2..bbbd55f2407 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -88,8 +88,8 @@ namespace LHCb::Pr { struct Boundaries final { std::array<int, maxSectors * max_tracks> sects; - std::array<float, max_tracks> xTols; - std::array<int, max_tracks> nPoss; + std::array<float, max_tracks> xTols; + std::array<int, max_tracks> nPoss; std::size_t size{0}; SOA_ACCESSOR_VAR( sect, &( sects[pos * max_tracks] ), int pos ) diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index d694ff58626..70f1ec57f90 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -62,21 +62,20 @@ namespace LHCb::Pr::UT { return 2 * ( station - 1 ) + ( layer - 1 ); } - template <typename simd, typename MaskT> - void copy_from( const Hits& hits, int from, MaskT mask ){ + void copy_from( const Hits& hits, int from, MaskT mask ) { using I = typename simd::int_v; using F = typename simd::float_v; assert( from + simd::popcount( mask ) < max_hits ); - F(&hits.xs[from]).compressstore(mask, &xs[size]); - F(&hits.zs[from]).compressstore(mask, &zs[size]); - F(&hits.coss[from]).compressstore(mask, &coss[size]); - F(&hits.sins[from]).compressstore(mask, &sins[size]); - F(&hits.weights[from]).compressstore(mask, &weights[size]); - F(&hits.projections[from]).compressstore(mask, &projections[size]); - I(&hits.channelIDs[from]).compressstore(mask, &channelIDs[size]); - I(&hits.indexs[from]).compressstore(mask, &indexs[size]); + F( &hits.xs[from] ).compressstore( mask, &xs[size] ); + F( &hits.zs[from] ).compressstore( mask, &zs[size] ); + F( &hits.coss[from] ).compressstore( mask, &coss[size] ); + F( &hits.sins[from] ).compressstore( mask, &sins[size] ); + F( &hits.weights[from] ).compressstore( mask, &weights[size] ); + F( &hits.projections[from] ).compressstore( mask, &projections[size] ); + I( &hits.channelIDs[from] ).compressstore( mask, &channelIDs[size] ); + I( &hits.indexs[from] ).compressstore( mask, &indexs[size] ); size += simd::popcount( mask ); } -- GitLab From 702829c1f07d0e67dc7ac3a028f97d498e677757 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 2 Jun 2020 23:31:44 +0200 Subject: [PATCH 044/111] Remove unnecessary parameter --- Pr/PrVeloUT/src/PrVeloUT.cpp | 2 +- Pr/PrVeloUT/src/PrVeloUT.h | 3 +-- 2 files changed, 2 
insertions(+), 3 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index fd5c0f9f4ed..fc5a4df5228 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -761,7 +761,7 @@ namespace LHCb::Pr { const float tx = ( xhitLayer2 - xhitLayer0 ) / ( zhitLayer2 - zhitLayer0 ); - if ( std::abs( tx - stateTx ) > m_deltaTx2 ) continue; + if ( std::abs( tx - stateTx ) > m_deltaTx ) continue; int bestHit1Index = -1; float hitTol = m_hitTol; diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 3bbdba96e35..64259d9bce5 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -233,8 +233,7 @@ namespace LHCb::Pr { Gaudi::Property<float> m_yTol{this, "YTolerance", 0.5 * Gaudi::Units::mm}; // 0.8 Gaudi::Property<float> m_yTolSlope{this, "YTolSlope", 0.08}; // 0.2 Gaudi::Property<float> m_hitTol{this, "HitTol", 0.8 * Gaudi::Units::mm}; // 0.8 - Gaudi::Property<float> m_deltaTx1{this, "DeltaTx1", 0.035}; - Gaudi::Property<float> m_deltaTx2{this, "DeltaTx2", 0.018}; // 0.02 + Gaudi::Property<float> m_deltaTx{this, "DeltaTx", 0.018}; // 0.02 Gaudi::Property<float> m_maxXSlope{this, "MaxXSlope", 0.350}; Gaudi::Property<float> m_maxYSlope{this, "MaxYSlope", 0.300}; Gaudi::Property<float> m_centralHoleSize{this, "centralHoleSize", 33. * Gaudi::Units::mm}; -- GitLab From 8cb652d9d5f4cdb527927f0818b8e8988caf1315 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 2 Jun 2020 23:37:20 +0200 Subject: [PATCH 045/111] Adapt for change in UTInfo (enum to enum class) --- Pr/PrVeloUT/src/PrVeloUT.cpp | 56 ++++++++++++++++++++---------------- Pr/PrVeloUT/src/PrVeloUT.h | 10 +++---- 2 files changed, 36 insertions(+), 30 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index fc5a4df5228..5160571a193 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -181,8 +181,10 @@ namespace LHCb::Pr { // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) - void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers>& helper, - const int start ) { + void bubbleSortSIMD( + const int maxColsMaxRows, + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& helper, + const int start ) { for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { swap( helper[start + j] > helper[start + j + 1], helper[start + j], helper[start + j + 1] ); @@ -193,8 +195,9 @@ namespace LHCb::Pr { // -- not sure that is the smartest solution // -- but I could not come up with anything better // -- inspired by: https://lemire.me/blog/2017/04/10/removing-duplicates-from-lists-quickly/ - simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers>& out, int start, - size_t len ) { + simd::int_v makeUniqueSIMD( + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& out, + int start, size_t len ) { simd::int_v pos = start + 1; simd::int_v oldv = out[start]; for ( size_t j = start + 1; j < start + len; ++j ) { @@ -462,13 +465,14 @@ namespace LHCb::Pr { //============================================================================= // Extrapolate the states //============================================================================= - std::array<ExtrapolatedStates, UTInfo::TotalLayers> 
VeloUT::extrapStates( const MiniStatesArray& filteredStates, - const UTDAQ::GeomCache& geom ) const { + std::array<ExtrapolatedStates, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + VeloUT::extrapStates( const MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const { - std::array<ExtrapolatedStates, UTInfo::TotalLayers> eStatesArray; + std::array<ExtrapolatedStates, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> eStatesArray; // -- Used for the calculation of the size of the search windows - constexpr const std::array<float, UTInfo::TotalLayers> normFact{0.95f, 1.0f, 1.36f, 1.41f}; + constexpr const std::array<float, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> normFact{0.95f, 1.0f, + 1.36f, 1.41f}; for ( std::size_t t = 0; t < filteredStates.size; t += simd::size ) { @@ -482,7 +486,7 @@ namespace LHCb::Pr { const simd::float_v invTheta = min( 500.0f, 1.0f * rsqrt( tx * tx + ty * ty ) ); const simd::float_v minMom = max( m_minPT.value() * invTheta, m_minMomentum.value() ); - for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { const simd::float_v xTol = abs( 1.0f / ( m_distToMomentum * minMom ) ) * normFact[layerIndex]; const simd::float_v yTol = m_yTol.value() + m_yTolSlope.value() * xTol; @@ -511,17 +515,18 @@ namespace LHCb::Pr { //============================================================================= // -- find the sectors //============================================================================= - std::array<Boundaries, UTInfo::TotalLayers> - VeloUT::findAllSectors( const std::array<ExtrapolatedStates, UTInfo::TotalLayers>& eStatesArray, - MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const { + std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> VeloUT::findAllSectors( + const std::array<ExtrapolatedStates, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& eStatesArray, + MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const { - std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; - int contSize = filteredStates.size; - filteredStates.size = 0; + std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> compBoundsArray; + int contSize = filteredStates.size; + filteredStates.size = 0; - std::array<simd::int_v, UTInfo::TotalLayers> posArray; - std::array<simd::int_v, maxNumSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors - std::array<int, UTInfo::TotalLayers> maxColsRows; + std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray; + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + helperArray; // 4 layers x maximum 9 sectors + std::array<int, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> maxColsRows; // -- This now works with up to 9 sectors for ( int t = 0; t < contSize; t += simd::size ) { @@ -529,7 +534,7 @@ namespace LHCb::Pr { simd::int_v nLayers{0}; - for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { const simd::int_v regionBoundary1 = ( 2 * geom.layers[layerIndex].nColsPerSide + 3 ); const simd::int_v regionBoundary2 = ( 2 * geom.layers[layerIndex].nColsPerSide - 5 ); @@ -598,7 +603,7 @@ namespace LHCb::Pr { // -- We 
need at least three layers const simd::mask_v compressMask = ( nLayers > 2 ) && loopMask; - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<simd::int_v>( index, iSector, compressMask, @@ -621,9 +626,10 @@ namespace LHCb::Pr { //============================================================================= // Find the hits //============================================================================= - bool VeloUT::getHitsScalar( const LHCb::Pr::UT::HitHandler& hh, const MiniStatesArray& filteredStates, - const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, - LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { + bool VeloUT::getHitsScalar( + const LHCb::Pr::UT::HitHandler& hh, const MiniStatesArray& filteredStates, + const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, + LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { const simd::float_v yTolSlope{m_yTolSlope.value()}; @@ -642,7 +648,7 @@ namespace LHCb::Pr { const simd::float_v ty{tyState}; const simd::float_v tx{txState}; - for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; @@ -961,7 +967,7 @@ namespace LHCb::Pr { // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... 
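// -- Aside: the recurring static_cast<int>( UTInfo::DetectorNumbers::... ) in the hunks above and
// -- below is what the enum-to-enum-class migration forces: a scoped enumerator no longer converts
// -- implicitly to int, so every use as an array extent or loop bound needs an explicit cast.
// -- Minimal self-contained sketch with a simplified stand-in for UTInfo (the value 4 follows the
// -- "4 layers" comment above; the name DetectorNumbers is reused here only for illustration):
#include <array>
namespace sketch {
  enum class DetectorNumbers { TotalLayers = 4 };
  // std::array<float, DetectorNumbers::TotalLayers> zs; // ill-formed: no implicit conversion to size_t
  std::array<float, static_cast<int>( DetectorNumbers::TotalLayers )> zs{}; // what the patch does instead
  inline void reset() {
    for ( int layer = 0; layer < static_cast<int>( DetectorNumbers::TotalLayers ); ++layer ) zs[layer] = 0.f;
  }
} // namespace sketch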
- for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { int trackIndex2 = 0; for ( std::size_t t2 = 0; t2 < simd::size; ++t2 ) { diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 64259d9bce5..21d8c1633d0 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -256,12 +256,12 @@ namespace LHCb::Pr { MiniStatesArray getStates( const Velo::Tracks& inputTracks, Upstream::Tracks& outputTracks ) const; - std::array<ExtrapolatedStates, UTInfo::TotalLayers> extrapStates( const MiniStatesArray& filteredStates, - const UTDAQ::GeomCache& geom ) const; + std::array<ExtrapolatedStates, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + extrapStates( const MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const; - std::array<Boundaries, UTInfo::TotalLayers> - findAllSectors( const std::array<ExtrapolatedStates, UTInfo::TotalLayers>& eStatesArray, - MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const; + std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> findAllSectors( + const std::array<ExtrapolatedStates, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& eStatesArray, + MiniStatesArray& filteredStates, const UTDAQ::GeomCache& geom ) const; bool getHitsScalar( const LHCb::Pr::UT::HitHandler& hh, const MiniStatesArray& filteredStates, const std::array<Boundaries, 4>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, -- GitLab From 46f7b7bbd72c1810f7d192df662e6983a33f8b41 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 4 Jun 2020 18:13:06 +0200 Subject: [PATCH 046/111] remove unused hits input for converter --- .../src/TrackCompactVertexToV1Vertex.cpp | 8 ++-- Tr/TrackUtils/src/TracksVPConverter.cpp | 38 +++++++------------ 2 files changed, 18 insertions(+), 28 deletions(-) diff --git a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp index 247f5424789..5bdf32e6440 100644 --- a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp +++ b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp @@ -95,23 +95,23 @@ namespace LHCb::Converters::TrackCompactVertex { : public Gaudi::Functional::Transformer<std::vector<LHCb::RecVertex>( std::vector<LHCb::TrackKernel::TrackCompactVertex<2, double>, LHCb::Allocators::EventLocal<LHCb::TrackKernel::TrackCompactVertex<2, double>>> const&, - const VertexTrackType&, const Pr::Velo::Hits&, const std::vector<LHCb::Track>& )> { + const VertexTrackType&, const std::vector<LHCb::Track>& )> { using base_class = Gaudi::Functional::Transformer<std::vector<LHCb::RecVertex>( std::vector<LHCb::TrackKernel::TrackCompactVertex<2, double>, LHCb::Allocators::EventLocal<LHCb::TrackKernel::TrackCompactVertex<2, double>>> const&, - const VertexTrackType&, const Pr::Velo::Hits&, const std::vector<LHCb::Track>& )>; + const VertexTrackType&, const std::vector<LHCb::Track>& )>; using KeyValue = typename base_class::KeyValue; VectorOf2TrackPrFittedCompactVertexToVectorOfRecVertex( const std::string& name, ISvcLocator* pSvcLocator ) : base_class( name, pSvcLocator, - {KeyValue{"InputVertices", ""}, KeyValue{"TracksInVertices", ""}, KeyValue{"VeloHits", ""}, + {KeyValue{"InputVertices", ""}, KeyValue{"TracksInVertices", ""}, KeyValue{"ConvertedTracks", ""}}, KeyValue{"OutputVertices", ""} ) {} std::vector<LHCb::RecVertex> operator()( const 
std::vector<LHCb::TrackKernel::TrackCompactVertex<2, double>, LHCb::Allocators::EventLocal<LHCb::TrackKernel::TrackCompactVertex<2, double>>>& vertices, - const VertexTrackType& tracks_zip, const Pr::Velo::Hits& velo_hits, + const VertexTrackType& tracks_zip, const std::vector<LHCb::Track>& conv_tracks ) const override { std::vector<LHCb::RecVertex> converted_vertices; const auto& tracks = tracks_zip.template get<LHCb::Pr::Fitted::Forward::Tracks>(); diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index b58dc4888b3..f41e8bc8d66 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -81,23 +81,18 @@ namespace { } } // namespace -template <typename T> -class TracksVPConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const T&, const LHCb::Pr::Velo::Tracks& )> { - - using base_class_t = - Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( const T&, const LHCb::Pr::Velo::Tracks& )>; +class TracksVPConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( const LHCb::Pr::Velo::Tracks& )> { Gaudi::Property<float> m_ptVelo{this, "ptVelo", 400 * Gaudi::Units::MeV, "Default pT for Velo tracks"}; public: TracksVPConverter( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class_t( name, pSvcLocator, - std::array{typename base_class_t::KeyValue{"HitsLocation", "Raw/VP/Hits"}, - typename base_class_t::KeyValue{"TracksLocation", "Rec/Track/Velo"}}, - typename base_class_t::KeyValue{"OutputTracksLocation", "Rec/Track/v2/Velo"} ) {} + : Transformer( name, pSvcLocator, + KeyValue{"TracksLocation", "Rec/Track/Velo"}, + KeyValue{"OutputTracksLocation", "Rec/Track/v2/Velo"} ) {} + - std::vector<LHCb::Event::v2::Track> operator()( const T& hits, const Tracks& tracks ) const override { + std::vector<LHCb::Event::v2::Track> operator()( const Tracks& tracks ) const override { std::vector<LHCb::Event::v2::Track> out; out.reserve( tracks.size() ); @@ -126,24 +121,22 @@ private: mutable Gaudi::Accumulators::SummingCounter<> m_nbTracksCounter{this, "Nb of Produced Tracks"}; }; -template <typename T> class TracksVPMergerConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const T&, const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Tracks& )> { + const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Tracks& )> { using base_class_t = Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const T&, const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Tracks& )>; + const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Tracks& )>; Gaudi::Property<float> m_ptVelo{this, "ptVelo", 400 * Gaudi::Units::MeV, "Default pT for Velo tracks"}; public: TracksVPMergerConverter( const std::string& name, ISvcLocator* pSvcLocator ) : base_class_t( name, pSvcLocator, - std::array{typename base_class_t::KeyValue{"HitsLocation", "Raw/VP/Hits"}, - typename base_class_t::KeyValue{"TracksForwardLocation", ""}, - typename base_class_t::KeyValue{"TracksBackwardLocation", ""}}, - typename base_class_t::KeyValue{"OutputTracksLocation", ""} ) {} + std::array{base_class_t::KeyValue{"TracksForwardLocation", ""}, + base_class_t::KeyValue{"TracksBackwardLocation", ""}}, + base_class_t::KeyValue{"OutputTracksLocation", ""} ) {} - std::vector<LHCb::Event::v2::Track> operator()( const T& hits, const Tracks& fwd_tracks, + std::vector<LHCb::Event::v2::Track> operator()( const Tracks& fwd_tracks, const Tracks& bwd_tracks ) 
const override { std::vector<LHCb::Event::v2::Track> out; out.reserve( fwd_tracks.size() + bwd_tracks.size() ); @@ -186,8 +179,5 @@ private: mutable Gaudi::Accumulators::SummingCounter<> m_nbTracksCounter{this, "Nb of Produced Tracks"}; }; -DECLARE_COMPONENT_WITH_ID( TracksVPConverter<LHCb::Pr::Velo::Hits>, "TracksVPConverter" ) -DECLARE_COMPONENT_WITH_ID( TracksVPConverter<std::vector<LHCb::VPLightCluster>>, "TracksVPConverter_Clusters" ) -DECLARE_COMPONENT_WITH_ID( TracksVPMergerConverter<LHCb::Pr::Velo::Hits>, "TracksVPMergerConverter" ) -DECLARE_COMPONENT_WITH_ID( TracksVPMergerConverter<std::vector<LHCb::VPLightCluster>>, - "TracksVPMergerConverter_Clusters" ) +DECLARE_COMPONENT_WITH_ID( TracksVPConverter, "TracksVPConverter" ) +DECLARE_COMPONENT_WITH_ID( TracksVPMergerConverter, "TracksVPMergerConverter" ) -- GitLab From 4e1e7ed948c8b64c1b68535daf258e02aa9e3bf7 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Thu, 4 Jun 2020 16:13:54 +0000 Subject: [PATCH 047/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8664649 --- Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp | 11 +++++------ Tr/TrackUtils/src/TracksVPConverter.cpp | 12 +++++------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp index 5bdf32e6440..923ce82f2a7 100644 --- a/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp +++ b/Pr/PrConverters/src/TrackCompactVertexToV1Vertex.cpp @@ -104,15 +104,14 @@ namespace LHCb::Converters::TrackCompactVertex { using KeyValue = typename base_class::KeyValue; VectorOf2TrackPrFittedCompactVertexToVectorOfRecVertex( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class( name, pSvcLocator, - {KeyValue{"InputVertices", ""}, KeyValue{"TracksInVertices", ""}, - KeyValue{"ConvertedTracks", ""}}, - KeyValue{"OutputVertices", ""} ) {} + : base_class( + name, pSvcLocator, + {KeyValue{"InputVertices", ""}, KeyValue{"TracksInVertices", ""}, KeyValue{"ConvertedTracks", ""}}, + KeyValue{"OutputVertices", ""} ) {} std::vector<LHCb::RecVertex> operator()( const std::vector<LHCb::TrackKernel::TrackCompactVertex<2, double>, LHCb::Allocators::EventLocal<LHCb::TrackKernel::TrackCompactVertex<2, double>>>& vertices, - const VertexTrackType& tracks_zip, - const std::vector<LHCb::Track>& conv_tracks ) const override { + const VertexTrackType& tracks_zip, const std::vector<LHCb::Track>& conv_tracks ) const override { std::vector<LHCb::RecVertex> converted_vertices; const auto& tracks = tracks_zip.template get<LHCb::Pr::Fitted::Forward::Tracks>(); for ( const auto& vertex : vertices ) { diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index f41e8bc8d66..d3fd567c8e9 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -81,16 +81,15 @@ namespace { } } // namespace -class TracksVPConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( const LHCb::Pr::Velo::Tracks& )> { +class TracksVPConverter + : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( const LHCb::Pr::Velo::Tracks& )> { Gaudi::Property<float> m_ptVelo{this, "ptVelo", 400 * Gaudi::Units::MeV, "Default pT for Velo tracks"}; public: TracksVPConverter( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer( name, pSvcLocator, - KeyValue{"TracksLocation", "Rec/Track/Velo"}, - KeyValue{"OutputTracksLocation", 
"Rec/Track/v2/Velo"} ) {} - + : Transformer( name, pSvcLocator, KeyValue{"TracksLocation", "Rec/Track/Velo"}, + KeyValue{"OutputTracksLocation", "Rec/Track/v2/Velo"} ) {} std::vector<LHCb::Event::v2::Track> operator()( const Tracks& tracks ) const override { std::vector<LHCb::Event::v2::Track> out; @@ -136,8 +135,7 @@ public: base_class_t::KeyValue{"TracksBackwardLocation", ""}}, base_class_t::KeyValue{"OutputTracksLocation", ""} ) {} - std::vector<LHCb::Event::v2::Track> operator()( const Tracks& fwd_tracks, - const Tracks& bwd_tracks ) const override { + std::vector<LHCb::Event::v2::Track> operator()( const Tracks& fwd_tracks, const Tracks& bwd_tracks ) const override { std::vector<LHCb::Event::v2::Track> out; out.reserve( fwd_tracks.size() + bwd_tracks.size() ); -- GitLab From 9332cc0c649d3118b6231bc4795ea89c6578abea Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 4 Jun 2020 22:28:39 +0200 Subject: [PATCH 048/111] adapt UTInfo --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 26 ++++++++++++------------ Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 6 +++--- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 8 ++++---- Pr/PrKernel/PrKernel/PrMutUTHits.h | 4 ++-- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ++-- Tr/TrackUtils/src/TracksFTConverter.cpp | 15 -------------- 6 files changed, 24 insertions(+), 39 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 246b02f84ce..c9e1284b468 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -35,7 +35,7 @@ namespace LHCb::Pr { namespace { // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) - void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& helper, + void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors *static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& helper, const int start ) { for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { @@ -45,7 +45,7 @@ namespace LHCb::Pr { } // remove duplicated sectors - simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * UTInfo::TotalLayers>& out, int start, + simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& out, int start, size_t len ) { simd::int_v pos = start + 1; simd::int_v oldv = out[start]; @@ -134,14 +134,14 @@ namespace LHCb::Pr { ///======================================================================= // find all sections ///======================================================================= - std::array<Boundaries, UTInfo::TotalLayers> PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, + std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, MiniStates& filteredStates ) const { - std::array<Boundaries, UTInfo::TotalLayers> compBoundsArray; + std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> compBoundsArray; filteredStates.size = 0; - std::array<simd::int_v, UTInfo::TotalLayers> posArray; - std::array<simd::int_v, maxSectors * UTInfo::TotalLayers> helperArray; // 4 layers x maximum 9 sectors - std::array<int, UTInfo::TotalLayers> maxColsRows; + std::array<simd::int_v, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> posArray; + 
std::array<simd::int_v, maxSectors * static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> helperArray; // 4 layers x maximum 9 sectors + std::array<int, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> maxColsRows; //--- This now works with up to 9 sectors const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); @@ -165,7 +165,7 @@ namespace LHCb::Pr { const F bendParam = p_utParam.value() * -1 * signedReCur * qoverp; - for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++layerIndex ) { const F zLayer = m_geomcache.layers[layerIndex].z; const F yPredLay = stateY + ( zLayer - stateZ ) * stateTy; @@ -215,7 +215,7 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * UTInfo::Sectors - 1; + helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * static_cast<int>(UTInfo::DetectorNumbers::Sectors) - 1; counter++; } } @@ -229,7 +229,7 @@ namespace LHCb::Pr { //-- we need at least three layers const simd::mask_v compressMask = ( nLayers > 2 ) && loopMask; - for ( int iLayer = 0; iLayer < UTInfo::TotalLayers; ++iLayer ) { + for ( int iLayer = 0; iLayer < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++iLayer ) { int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<I>( index, iSector, compressMask, @@ -263,7 +263,7 @@ namespace LHCb::Pr { //========================================================================= LHCb::Pr::UT::Mut::Hits PrAddUTHitsTool::returnUTHits( MiniStates& filteredStates, - const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, std::size_t t ) const { LHCb::Pr::UT::Mut::Hits UTHits; @@ -359,7 +359,7 @@ namespace LHCb::Pr { // Select the hits in a certain window //========================================================================= bool PrAddUTHitsTool::selectHits( MiniStates& filteredStates, - const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { // -- Define the parameter that describes the bending @@ -383,7 +383,7 @@ namespace LHCb::Pr { std::size_t nSize = 0; std::size_t nLayers = 0; const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); - for ( int layerIndex = 0; layerIndex < UTInfo::TotalLayers; ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++layerIndex ) { if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; // -- Define the tolerance parameters diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index bbbd55f2407..711c69b0d32 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -152,12 +152,12 @@ namespace LHCb::Pr { ServiceHandle<ILHCbMagnetSvc> m_magFieldSvc{this, "MagneticField", "MagneticFieldSvc"}; - std::array<LHCb::Pr::Boundaries, UTInfo::TotalLayers> findAllSectors( Tracks& tracks, + 
std::array<LHCb::Pr::Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> findAllSectors( Tracks& tracks, MiniStates& filteredStates ) const; LHCb::Pr::UT::Mut::Hits returnUTHits( MiniStates& filteredStates, - const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, std::size_t t ) const; - bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, UTInfo::TotalLayers>& compBoundsArray, + bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const; void calculateChi2( float& chi2, const float& bestChi2, float& finalDist, const float& p, LHCb::Pr::UT::Mut::Hits& goodUT ) const; diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index e22eb6cc150..650ea757bbf 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -99,10 +99,10 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; - for ( int iStation = 1; iStation <= UTInfo::Stations; ++iStation ) { - for ( int iLayer = 1; iLayer <= UTInfo::Layers; ++iLayer ) { - for ( int iRegion = 1; iRegion <= UTInfo::Regions; ++iRegion ) { - for ( int iSector = 1; iSector <= UTInfo::Sectors; ++iSector ) { + for ( int iStation = 1; iStation <= static_cast<int>( UTInfo::DetectorNumbers::Stations); ++iStation ) { + for ( int iLayer = 1; iLayer <= static_cast<int>(UTInfo::DetectorNumbers::Layers); ++iLayer ) { + for ( int iRegion = 1; iRegion <= static_cast<int>(UTInfo::DetectorNumbers::Regions); ++iRegion ){ + for ( int iSector = 1; iSector <= static_cast<int>(UTInfo::DetectorNumbers::Sectors); ++iSector ) { for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { bool used = std::any_of( usedUTHits.begin(), usedUTHits.end(), [utid = uthit.chanID().channelID()]( const auto& id ) { return utid == id; } ); diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index caec665cd7e..2ad1af42495 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -56,8 +56,8 @@ namespace LHCb::Pr::UT { template <typename T> T planeCode( int t ) const { T id = channelID<T>( t ); - T station = ( id & UTInfo::StationMask ) >> UTInfo::StationBits; - T layer = ( id & UTInfo::LayerMask ) >> UTInfo::LayerBits; + T station = ( id & static_cast<int>(UTInfo::MasksBits::StationMask) ) >> static_cast<int>(UTInfo::MasksBits::StationBits); + T layer = ( id & static_cast<int>(UTInfo::MasksBits::LayerMask) ) >> static_cast<int>(UTInfo::MasksBits::LayerBits); return 2 * ( station - 1 ) + ( layer - 1 ); } diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 5160571a193..b71b3ba3def 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -573,7 +573,7 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + UTInfo::EffectiveSectorsPerColumn * realSC; + simd::int_v sectorIndex = realSR + static_cast<int>(UTInfo::SectorNumbers::EffectiveSectorsPerColumn) * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number 
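// -- Aside: the hunk below only reflows the expression that packs (layer, region, sector) into one
// -- flat helperArray entry, so duplicate sectors can be removed by the sort/unique helpers shown
// -- earlier. A scalar sketch of that packing, with illustrative constants taken from the comments in
// -- this file ("layerIndex * 3 + region", "* 98"); the trailing -1 mirrors the original expression:
namespace sketch {
  constexpr int Regions             = 3;  // assumption: stands in for UTInfo::DetectorNumbers::Regions
  constexpr int MaxSectorsPerRegion = 98; // assumption: stands in for UTInfo::SectorNumbers::MaxSectorsPerRegion
  constexpr int packSector( int sect, int layerIndex, int region ) {
    return sect + ( layerIndex * Regions + region ) * MaxSectorsPerRegion - 1;
  }
  static_assert( packSector( 1, 0, 0 ) == 0, "illustrative check of the formula" );
} // namespace sketch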
@@ -587,7 +587,7 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 helperArray[maxNumSectors * layerIndex + counter] = - sect + ( layerIndex * UTInfo::Regions + region ) * UTInfo::MaxSectorsPerRegion - 1; + sect + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions) + region ) * static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion) - 1; counter++; } } diff --git a/Tr/TrackUtils/src/TracksFTConverter.cpp b/Tr/TrackUtils/src/TracksFTConverter.cpp index f53b89399cc..7fad8b3c752 100644 --- a/Tr/TrackUtils/src/TracksFTConverter.cpp +++ b/Tr/TrackUtils/src/TracksFTConverter.cpp @@ -101,21 +101,6 @@ public: newTrack.addToStates( state ); // Add LHCbIds - - int n_vphits = tracksFT.nVPHits<I>( t ).cast(); - int n_fthits = tracksFT.nFTHits<I>( t ).cast(); - int n_uthits = tracksFT.nUTHits<I>( t ).cast(); - - // info()<<"FT converter ........vp "<< n_vphits <<"...ft "<< n_fthits <<"..ut "<< n_uthits<<endmsg; - /* - for ( int i = n_vphits; i < n_vphits + n_fthits + n_uthits; i++ ) { - auto lhcbid = tracksFT.lhcbID<I>( t, i ).cast(); - //if(i <n_vphits) info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID(LHCb::VPChannelID( lhcbid ))<< - " "<< tracksFT.lhcbID<I>( t, i ) << endmsg; - //else info()<<i <<" FTconverter ...lhcbid ........ "<< LHCb::LHCbID( lhcbid )<< " "<< tracksFT.lhcbID<I>( t, - i ) << endmsg; newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); - } - */ newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); -- GitLab From e8402c801a8517b4d39f0ef68de663a4d105f028 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Thu, 4 Jun 2020 20:29:30 +0000 Subject: [PATCH 049/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8667986 --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 48 +++++++++++++----------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 15 +++++--- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 8 ++-- Pr/PrKernel/PrKernel/PrMutUTHits.h | 6 ++- Pr/PrVeloUT/src/PrVeloUT.cpp | 10 +++-- 5 files changed, 51 insertions(+), 36 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index c9e1284b468..5f91f74523c 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -35,8 +35,10 @@ namespace LHCb::Pr { namespace { // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) - void bubbleSortSIMD( const int maxColsMaxRows, std::array<simd::int_v, maxSectors *static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& helper, - const int start ) { + void bubbleSortSIMD( + const int maxColsMaxRows, + std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& helper, + const int start ) { for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { swap( helper[start + j] > helper[start + j + 1], helper[start + j], helper[start + j + 1] ); @@ -45,8 +47,9 @@ namespace LHCb::Pr { } // remove duplicated sectors - simd::int_v makeUniqueSIMD( std::array<simd::int_v, maxSectors * static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& out, int start, - size_t len ) { + simd::int_v + makeUniqueSIMD( std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& out, + int start, size_t len ) { simd::int_v pos = 
start + 1; simd::int_v oldv = out[start]; for ( size_t j = start + 1; j < start + len; ++j ) { @@ -134,14 +137,15 @@ namespace LHCb::Pr { ///======================================================================= // find all sections ///======================================================================= - std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, - MiniStates& filteredStates ) const { + std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + PrAddUTHitsTool::findAllSectors( LHCb::Pr::Long::Tracks& tracks, MiniStates& filteredStates ) const { - std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> compBoundsArray; + std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> compBoundsArray; filteredStates.size = 0; - std::array<simd::int_v, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> posArray; - std::array<simd::int_v, maxSectors * static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> helperArray; // 4 layers x maximum 9 sectors - std::array<int, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> maxColsRows; + std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray; + std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + helperArray; // 4 layers x maximum 9 sectors + std::array<int, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> maxColsRows; //--- This now works with up to 9 sectors const float signedReCur = m_magFieldSvc->signedRelativeCurrent(); @@ -165,7 +169,7 @@ namespace LHCb::Pr { const F bendParam = p_utParam.value() * -1 * signedReCur * qoverp; - for ( int layerIndex = 0; layerIndex < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { const F zLayer = m_geomcache.layers[layerIndex].z; const F yPredLay = stateY + ( zLayer - stateZ ) * stateTy; @@ -215,7 +219,8 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * static_cast<int>(UTInfo::DetectorNumbers::Sectors) - 1; + helperArray[maxSectors * layerIndex + counter] = + sect + ( layerIndex * 3 + region ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ) - 1; counter++; } } @@ -229,7 +234,7 @@ namespace LHCb::Pr { //-- we need at least three layers const simd::mask_v compressMask = ( nLayers > 2 ) && loopMask; - for ( int iLayer = 0; iLayer < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++iLayer ) { + for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<I>( index, iSector, compressMask, @@ -261,10 +266,10 @@ namespace LHCb::Pr { //========================================================================= // Return the TT hits //========================================================================= - LHCb::Pr::UT::Mut::Hits - PrAddUTHitsTool::returnUTHits( MiniStates& filteredStates, - const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, - std::size_t t ) const { + 
LHCb::Pr::UT::Mut::Hits PrAddUTHitsTool::returnUTHits( + MiniStates& filteredStates, + const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, + std::size_t t ) const { LHCb::Pr::UT::Mut::Hits UTHits; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "--- Entering returnUTHits ---" << endmsg; @@ -358,9 +363,10 @@ namespace LHCb::Pr { //========================================================================= // Select the hits in a certain window //========================================================================= - bool PrAddUTHitsTool::selectHits( MiniStates& filteredStates, - const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, - LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { + bool PrAddUTHitsTool::selectHits( + MiniStates& filteredStates, + const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, + LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const { // -- Define the parameter that describes the bending // -- in principle the call m_magFieldSvc->signedRelativeCurrent() is not needed for every track... @@ -383,7 +389,7 @@ namespace LHCb::Pr { std::size_t nSize = 0; std::size_t nLayers = 0; const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); - for ( int layerIndex = 0; layerIndex < static_cast<int>(UTInfo::DetectorNumbers::TotalLayers); ++layerIndex ) { + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; // -- Define the tolerance parameters diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index 711c69b0d32..f695c33a5a7 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -152,12 +152,15 @@ namespace LHCb::Pr { ServiceHandle<ILHCbMagnetSvc> m_magFieldSvc{this, "MagneticField", "MagneticFieldSvc"}; - std::array<LHCb::Pr::Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)> findAllSectors( Tracks& tracks, - MiniStates& filteredStates ) const; - LHCb::Pr::UT::Mut::Hits returnUTHits( MiniStates& filteredStates, - const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, - std::size_t t ) const; - bool selectHits( MiniStates& filteredStates, const std::array<Boundaries, static_cast<int>(UTInfo::DetectorNumbers::TotalLayers)>& compBoundsArray, + std::array<LHCb::Pr::Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + findAllSectors( Tracks& tracks, MiniStates& filteredStates ) const; + LHCb::Pr::UT::Mut::Hits returnUTHits( + MiniStates& filteredStates, + const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, + std::size_t t ) const; + bool + selectHits( MiniStates& filteredStates, + const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, std::size_t t ) const; void calculateChi2( float& chi2, const float& bestChi2, float& finalDist, const float& p, LHCb::Pr::UT::Mut::Hits& goodUT ) const; diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index 650ea757bbf..b74d06e0ab9 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -99,10 +99,10 @@ UT::HitHandler 
PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; - for ( int iStation = 1; iStation <= static_cast<int>( UTInfo::DetectorNumbers::Stations); ++iStation ) { - for ( int iLayer = 1; iLayer <= static_cast<int>(UTInfo::DetectorNumbers::Layers); ++iLayer ) { - for ( int iRegion = 1; iRegion <= static_cast<int>(UTInfo::DetectorNumbers::Regions); ++iRegion ){ - for ( int iSector = 1; iSector <= static_cast<int>(UTInfo::DetectorNumbers::Sectors); ++iSector ) { + for ( int iStation = 1; iStation <= static_cast<int>( UTInfo::DetectorNumbers::Stations ); ++iStation ) { + for ( int iLayer = 1; iLayer <= static_cast<int>( UTInfo::DetectorNumbers::Layers ); ++iLayer ) { + for ( int iRegion = 1; iRegion <= static_cast<int>( UTInfo::DetectorNumbers::Regions ); ++iRegion ) { + for ( int iSector = 1; iSector <= static_cast<int>( UTInfo::DetectorNumbers::Sectors ); ++iSector ) { for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { bool used = std::any_of( usedUTHits.begin(), usedUTHits.end(), [utid = uthit.chanID().channelID()]( const auto& id ) { return utid == id; } ); diff --git a/Pr/PrKernel/PrKernel/PrMutUTHits.h b/Pr/PrKernel/PrKernel/PrMutUTHits.h index 2ad1af42495..a4033daed67 100644 --- a/Pr/PrKernel/PrKernel/PrMutUTHits.h +++ b/Pr/PrKernel/PrKernel/PrMutUTHits.h @@ -56,8 +56,10 @@ namespace LHCb::Pr::UT { template <typename T> T planeCode( int t ) const { T id = channelID<T>( t ); - T station = ( id & static_cast<int>(UTInfo::MasksBits::StationMask) ) >> static_cast<int>(UTInfo::MasksBits::StationBits); - T layer = ( id & static_cast<int>(UTInfo::MasksBits::LayerMask) ) >> static_cast<int>(UTInfo::MasksBits::LayerBits); + T station = ( id & static_cast<int>( UTInfo::MasksBits::StationMask ) ) >> + static_cast<int>( UTInfo::MasksBits::StationBits ); + T layer = ( id & static_cast<int>( UTInfo::MasksBits::LayerMask ) ) >> + static_cast<int>( UTInfo::MasksBits::LayerBits ); return 2 * ( station - 1 ) + ( layer - 1 ); } diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index b71b3ba3def..ff32d1a4e1c 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -572,8 +572,9 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { - simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + static_cast<int>(UTInfo::SectorNumbers::EffectiveSectorsPerColumn) * realSC; + simd::int_v realSR = min( subrowmax, subrowmin + sr ); + simd::int_v sectorIndex = + realSR + static_cast<int>( UTInfo::SectorNumbers::EffectiveSectorsPerColumn ) * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number @@ -587,7 +588,10 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 helperArray[maxNumSectors * layerIndex + counter] = - sect + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions) + region ) * static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion) - 1; + sect + + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions ) + region ) * + static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion ) - + 1; counter++; } } -- GitLab From 853be27f091824b43e5e9dd6660fc4da63fa0bb1 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 4 Jun 2020 23:57:22 +0200 Subject: [PATCH 050/111] adapt VPChanID to LHCbID --- 
Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 7f95738d1e3..ece074efb28 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -857,7 +857,7 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); - const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); + const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); tracksBackward.compressstore_lhcbID( i, h, backwards, lhcbid ); } @@ -878,7 +878,7 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); - const auto lhcbid = hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); + const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } -- GitLab From e2a52406642672079f4d2c6a6a565eda6a4c42b2 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Thu, 4 Jun 2020 21:58:04 +0000 Subject: [PATCH 051/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8668435 --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index ece074efb28..3d0a6153572 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -857,7 +857,8 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); - const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); + const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + + hits.maskgather_ChannelId<I>( hit_index, backwards, 0 ); tracksBackward.compressstore_lhcbID( i, h, backwards, lhcbid ); } @@ -878,7 +879,8 @@ namespace LHCb::Pr::Velo { for ( int h = 0; h < max_hits; h++ ) { tracksForward.compressstore_vp_index( i, h, forwards, tracks.hit<I>( t, h ) ); auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); - const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); + const auto lhcbid = ( LHCbID::channelIDtype::VP << LHCbID::detectorTypeBits ) + + hits.maskgather_ChannelId<I>( hit_index, forwards, 0 ); tracksForward.compressstore_lhcbID( i, h, forwards, lhcbid ); } -- GitLab From 82fff362a995a7f55658de220dca234f58285201 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 8 Jun 2020 18:31:51 +0200 Subject: [PATCH 052/111] update PrResidualUTHits to use Pr::Tracks --- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 105 ++++++++++------------- Pr/PrKernel/PrKernel/PrUTHitHandler.h | 23 +++++ 2 files changed, 69 insertions(+), 59 deletions(-) diff --git 
a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index b74d06e0ab9..0ff72cdaa0b 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -12,6 +12,8 @@ #include "Event/ODIN.h" #include "Event/Track.h" #include "Event/Track_v2.h" +#include "Event/PrLongTracks.h" +#include "Event/PrUpstreamTracks.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -27,6 +29,7 @@ #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" +#include "boost/dynamic_bitset.hpp" #include <memory> //----------------------------------------------------------------------------- @@ -37,86 +40,70 @@ // //----------------------------------------------------------------------------- -class PrResidualUTHits : public Gaudi::Functional::Transformer<UT::HitHandler( - const std::vector<LHCb::Event::v2::Track>&, const UT::HitHandler& )> { - - using Tracks = std::vector<LHCb::Event::v2::Track>; +template<typename T> +class PrResidualUTHits : public Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( + const T&, const LHCb::Pr::UT::HitHandler& )> { public: - StatusCode initialize() override; + //StatusCode initialize() override; + using base_class_t = + Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )>; + + //PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); - PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); + LHCb::Pr::UT::HitHandler operator()( const T&, const LHCb::Pr::UT::HitHandler& ) const override; - UT::HitHandler operator()( const Tracks&, const UT::HitHandler& ) const override; + PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) + : base_class_t( name, pSvcLocator, std::array{typename base_class_t::KeyValue{"TracksLocation", ""}, typename base_class_t::KeyValue{"UTHitsLocation", ""}}, + typename base_class_t::KeyValue{"UTHitsOutput", ""} ) {} -private: - DeUTDetector* m_utDet = nullptr; }; // Declaration of the Algorithm Factory -DECLARE_COMPONENT_WITH_ID( PrResidualUTHits, "PrResidualUTHits" ) - -//============================================================================= -// Standard constructor, initializes variables -//============================================================================= -PrResidualUTHits::PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer( name, pSvcLocator, {KeyValue{"TracksLocation", ""}, KeyValue{"UTHitsLocation", ""}}, - // KeyValue{"GeomCache", "AlgorithmSpecific-" + name + "-UTGeomCache"}}, - KeyValue{"UTHitsOutput", ""} ) {} +DECLARE_COMPONENT_WITH_ID( PrResidualUTHits<LHCb::Pr::Long::Tracks>, "PrResidualUTHits" ) +DECLARE_COMPONENT_WITH_ID( PrResidualUTHits<LHCb::Pr::Upstream::Tracks>, "PrResidualUTHits_Upstream" ) -// initializes //============================================================================= -StatusCode PrResidualUTHits::initialize() { - StatusCode sc = GaudiAlgorithm::initialize(); - if ( sc.isFailure() ) return sc; - m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); - debug() << "Number of UT layers " << m_utDet->layers().size() << endmsg; - return StatusCode::SUCCESS; -} // Main execution //============================================================================= -// UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::HitHandler& uthithandler, const -// UTGeomCache& cache ) const { -UT::HitHandler 
PrResidualUTHits::operator()( const Tracks& tracks, const UT::HitHandler& uthithandler ) const { - UT::HitHandler tmp{}; +template<typename T> +LHCb::Pr::UT::HitHandler PrResidualUTHits<T>::operator()( const T& tracks, const LHCb::Pr::UT::HitHandler& uthithandler ) const { + LHCb::Pr::UT::HitHandler tmp{}; - if ( tracks.empty() ) { +/* + if ( tracks.size()==0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; - return uthithandler; + debug() << "Track container '" << inputLocation<T>() << "' is empty" << endmsg; + return &uthithandler; } +*/ + + using scalar = SIMDWrapper::scalar::types; + using sI = scalar::int_v; - std::vector<long unsigned int> usedUTHits{}; - usedUTHits.reserve( uthithandler.nbHits() ); + //mark used UT hits + const unsigned int nhits = uthithandler.nHits(); + boost::dynamic_bitset<> used{nhits, false}; - // info() <<"total UT Hits " << uthithandler.nbHits() <<endmsg; - for ( auto& track : tracks ) { - for ( auto& id : track.lhcbIDs() ) { - if ( !( id.isUT() ) ) continue; - usedUTHits.emplace_back( id.utID().channelID() ); + for ( int t = 0; t < tracks.size(); t++ ) { + const int nuthits = tracks.template nUTHits<sI>( t ).cast(); + for ( int idx = 0; idx < nuthits; idx++ ) { + const int index = tracks.template ut_index<sI>( t, idx ).cast(); + if ( index >= 0 ) used[index] = true; } } - // info() <<"used UT Hits" << usedUTHits.size() <<endmsg; - - for ( int iStation = 1; iStation <= static_cast<int>( UTInfo::DetectorNumbers::Stations ); ++iStation ) { - for ( int iLayer = 1; iLayer <= static_cast<int>( UTInfo::DetectorNumbers::Layers ); ++iLayer ) { - for ( int iRegion = 1; iRegion <= static_cast<int>( UTInfo::DetectorNumbers::Regions ); ++iRegion ) { - for ( int iSector = 1; iSector <= static_cast<int>( UTInfo::DetectorNumbers::Sectors ); ++iSector ) { - for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { - bool used = std::any_of( usedUTHits.begin(), usedUTHits.end(), - [utid = uthit.chanID().channelID()]( const auto& id ) { return utid == id; } ); - - if ( used ) continue; - const unsigned int fullChanIdx = UT::HitHandler::HitsInUT::idx( iStation, iLayer, iRegion, iSector ); - const auto* aSector = m_utDet->getSector( uthit.chanID() ); - tmp.AddHit( aSector, fullChanIdx, uthit.strip(), uthit.fracStrip(), uthit.chanID(), uthit.size(), - uthit.highThreshold() ); - } - } - } + + const auto & allhits = uthithandler.hits(); + const int fullChanIdx = static_cast<int>( UTInfo::DetectorNumbers::Layers ) *static_cast<int>( UTInfo::DetectorNumbers::Stations ) *static_cast<int>( UTInfo::DetectorNumbers::Regions ) *static_cast<int>( UTInfo::DetectorNumbers::Sectors ); + + for( auto fullchan =0; fullchan < fullChanIdx; fullchan++ ){ + const auto indexs = uthithandler.indices( fullchan ); + + for( int idx = indexs.first; idx!= indexs.second; idx++ ){ + if ( used[idx] ) continue; + tmp.copyHit(fullchan, idx, allhits); } } - // info() <<"residual UT Hits" << tmp.nbHits() <<endmsg; return tmp; } diff --git a/Pr/PrKernel/PrKernel/PrUTHitHandler.h b/Pr/PrKernel/PrKernel/PrUTHitHandler.h index d81d1d146b6..d9c88e035e0 100644 --- a/Pr/PrKernel/PrKernel/PrUTHitHandler.h +++ b/Pr/PrKernel/PrKernel/PrUTHitHandler.h @@ -123,6 +123,29 @@ namespace LHCb::Pr::UT { // -- Don't increase the number of hits } + void copyHit( unsigned int fullChanIdx, int at, const LHCb::Pr::UT::Hits& allhits){ + auto& indices = m_indices[fullChanIdx]; + if ( &indices != last_indices ) { + assert( indices.first 
== indices.second ); + indices = {m_index, m_index}; + last_indices = &indices; + } + using F = SIMDWrapper::scalar::types::float_v; + using I = SIMDWrapper::scalar::types::int_v; + + m_hits.store_channelID<I>( m_index, allhits.channelID<I>( at) ); + m_hits.store_weight<F>( m_index, allhits.weight<F>( at ) ); + m_hits.store_xAtYEq0<F>( m_index, allhits.xAtYEq0<F> ( at ) ); + m_hits.store_yBegin<F>( m_index, allhits.yBegin<F> ( at ) ); + m_hits.store_yEnd<F>( m_index, allhits.yEnd<F>( at) ); + m_hits.store_zAtYEq0<F>( m_index, allhits.zAtYEq0<F>( at) ); + m_hits.store_cos<F>( m_index, allhits.cos<F>( at ) ); + m_hits.store_dxDy<F>( m_index, allhits.dxDy<F>(at) ); + + m_index++; + + ++( indices.second ); + } const std::pair<int, int> indices( const int fullChanIdx ) const { return m_indices[fullChanIdx]; } const LHCb::Pr::UT::Hits& hits() const { return m_hits; } -- GitLab From 0a1167bf476100d4ba6ee0d34d32ae28a0bf7241 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Mon, 8 Jun 2020 16:32:43 +0000 Subject: [PATCH 053/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8711481 --- Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 59 +++++++++++++----------- Pr/PrKernel/PrKernel/PrUTHitHandler.h | 16 +++---- 2 files changed, 39 insertions(+), 36 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index 0ff72cdaa0b..c4a38f7b779 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -10,10 +10,10 @@ \*****************************************************************************/ // Include files #include "Event/ODIN.h" -#include "Event/Track.h" -#include "Event/Track_v2.h" #include "Event/PrLongTracks.h" #include "Event/PrUpstreamTracks.h" +#include "Event/Track.h" +#include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -40,23 +40,24 @@ // //----------------------------------------------------------------------------- -template<typename T> -class PrResidualUTHits : public Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( - const T&, const LHCb::Pr::UT::HitHandler& )> { +template <typename T> +class PrResidualUTHits + : public Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )> { public: - //StatusCode initialize() override; + // StatusCode initialize() override; using base_class_t = Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )>; - //PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); + // PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); LHCb::Pr::UT::HitHandler operator()( const T&, const LHCb::Pr::UT::HitHandler& ) const override; PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class_t( name, pSvcLocator, std::array{typename base_class_t::KeyValue{"TracksLocation", ""}, typename base_class_t::KeyValue{"UTHitsLocation", ""}}, - typename base_class_t::KeyValue{"UTHitsOutput", ""} ) {} - + : base_class_t( name, pSvcLocator, + std::array{typename base_class_t::KeyValue{"TracksLocation", ""}, + typename base_class_t::KeyValue{"UTHitsLocation", ""}}, + typename base_class_t::KeyValue{"UTHitsOutput", ""} ) {} }; // Declaration of the Algorithm Factory @@ -66,43 +67,45 @@ DECLARE_COMPONENT_WITH_ID( PrResidualUTHits<LHCb::Pr::Upstream::Tracks>, "PrResi 
//============================================================================= // Main execution //============================================================================= -template<typename T> -LHCb::Pr::UT::HitHandler PrResidualUTHits<T>::operator()( const T& tracks, const LHCb::Pr::UT::HitHandler& uthithandler ) const { +template <typename T> +LHCb::Pr::UT::HitHandler PrResidualUTHits<T>::operator()( const T& tracks, + const LHCb::Pr::UT::HitHandler& uthithandler ) const { LHCb::Pr::UT::HitHandler tmp{}; -/* - if ( tracks.size()==0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<T>() << "' is empty" << endmsg; - return &uthithandler; - } -*/ + /* + if ( tracks.size()==0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Track container '" << inputLocation<T>() << "' is empty" << endmsg; + return &uthithandler; + } + */ using scalar = SIMDWrapper::scalar::types; using sI = scalar::int_v; - //mark used UT hits - const unsigned int nhits = uthithandler.nHits(); + // mark used UT hits + const unsigned int nhits = uthithandler.nHits(); boost::dynamic_bitset<> used{nhits, false}; for ( int t = 0; t < tracks.size(); t++ ) { const int nuthits = tracks.template nUTHits<sI>( t ).cast(); for ( int idx = 0; idx < nuthits; idx++ ) { const int index = tracks.template ut_index<sI>( t, idx ).cast(); - if ( index >= 0 ) used[index] = true; + if ( index >= 0 ) used[index] = true; } } + const auto& allhits = uthithandler.hits(); + const int fullChanIdx = + static_cast<int>( UTInfo::DetectorNumbers::Layers ) * static_cast<int>( UTInfo::DetectorNumbers::Stations ) * + static_cast<int>( UTInfo::DetectorNumbers::Regions ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ); - const auto & allhits = uthithandler.hits(); - const int fullChanIdx = static_cast<int>( UTInfo::DetectorNumbers::Layers ) *static_cast<int>( UTInfo::DetectorNumbers::Stations ) *static_cast<int>( UTInfo::DetectorNumbers::Regions ) *static_cast<int>( UTInfo::DetectorNumbers::Sectors ); - - for( auto fullchan =0; fullchan < fullChanIdx; fullchan++ ){ + for ( auto fullchan = 0; fullchan < fullChanIdx; fullchan++ ) { const auto indexs = uthithandler.indices( fullchan ); - for( int idx = indexs.first; idx!= indexs.second; idx++ ){ + for ( int idx = indexs.first; idx != indexs.second; idx++ ) { if ( used[idx] ) continue; - tmp.copyHit(fullchan, idx, allhits); + tmp.copyHit( fullchan, idx, allhits ); } } return tmp; diff --git a/Pr/PrKernel/PrKernel/PrUTHitHandler.h b/Pr/PrKernel/PrKernel/PrUTHitHandler.h index d9c88e035e0..408c0458fc9 100644 --- a/Pr/PrKernel/PrKernel/PrUTHitHandler.h +++ b/Pr/PrKernel/PrKernel/PrUTHitHandler.h @@ -123,24 +123,24 @@ namespace LHCb::Pr::UT { // -- Don't increase the number of hits } - void copyHit( unsigned int fullChanIdx, int at, const LHCb::Pr::UT::Hits& allhits){ + void copyHit( unsigned int fullChanIdx, int at, const LHCb::Pr::UT::Hits& allhits ) { auto& indices = m_indices[fullChanIdx]; if ( &indices != last_indices ) { assert( indices.first == indices.second ); - indices = {m_index, m_index}; + indices = {m_index, m_index}; last_indices = &indices; } using F = SIMDWrapper::scalar::types::float_v; using I = SIMDWrapper::scalar::types::int_v; - m_hits.store_channelID<I>( m_index, allhits.channelID<I>( at) ); + m_hits.store_channelID<I>( m_index, allhits.channelID<I>( at ) ); m_hits.store_weight<F>( m_index, allhits.weight<F>( at ) ); - m_hits.store_xAtYEq0<F>( m_index, allhits.xAtYEq0<F> ( at ) ); - m_hits.store_yBegin<F>( m_index, 
allhits.yBegin<F> ( at ) ); - m_hits.store_yEnd<F>( m_index, allhits.yEnd<F>( at) ); - m_hits.store_zAtYEq0<F>( m_index, allhits.zAtYEq0<F>( at) ); + m_hits.store_xAtYEq0<F>( m_index, allhits.xAtYEq0<F>( at ) ); + m_hits.store_yBegin<F>( m_index, allhits.yBegin<F>( at ) ); + m_hits.store_yEnd<F>( m_index, allhits.yEnd<F>( at ) ); + m_hits.store_zAtYEq0<F>( m_index, allhits.zAtYEq0<F>( at ) ); m_hits.store_cos<F>( m_index, allhits.cos<F>( at ) ); - m_hits.store_dxDy<F>( m_index, allhits.dxDy<F>(at) ); + m_hits.store_dxDy<F>( m_index, allhits.dxDy<F>( at ) ); m_index++; -- GitLab From 0bb270ab38fd3e14975ca7a4daa956d010a2104c Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 9 Jun 2020 14:38:34 +0200 Subject: [PATCH 054/111] cherry pick the wrongly merge with master --- Pr/PrVeloUT/src/PrVeloUT.cpp | 291 +++++++++++++++-------------------- Pr/PrVeloUT/src/PrVeloUT.h | 174 +++++---------------- 2 files changed, 161 insertions(+), 304 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 617ff72efb5..ff32d1a4e1c 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -75,11 +75,14 @@ namespace LHCb::Pr { const float zMidUT, const simd::float_v qpxz2p, const int t, simd::mask_v& goodFitMask ) { - const simd::float_v x = protoTracks.xState<simd::float_v>( t ); - const simd::float_v y = protoTracks.yState<simd::float_v>( t ); - const simd::float_v z = protoTracks.zState<simd::float_v>( t ); - const simd::float_v tx = protoTracks.txState<simd::float_v>( t ); - const simd::float_v ty = protoTracks.tyState<simd::float_v>( t ); + const Vec3<simd::float_v> pos = protoTracks.pos<simd::float_v>( t ); + const Vec3<simd::float_v> dir = protoTracks.dir<simd::float_v>( t ); + + const simd::float_v x = pos.x; + const simd::float_v y = pos.y; + const simd::float_v z = pos.z; + const simd::float_v tx = dir.x; + const simd::float_v ty = dir.y; const simd::float_v zKink = magFieldParams[0] - ty * ty * magFieldParams[1] - ty * ty * ty * ty * magFieldParams[2]; const simd::float_v xMidField = x + tx * ( zKink - z ); @@ -179,9 +182,9 @@ namespace LHCb::Pr { // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) void bubbleSortSIMD( - const int maxColsMaxRows, - std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& helper, - const int start ) { + const int maxColsMaxRows, + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& helper, + const int start ) { for ( int i = 0; i < maxColsMaxRows - 1; i++ ) { for ( int j = 0; j < maxColsMaxRows - i - 1; j++ ) { swap( helper[start + j] > helper[start + j + 1], helper[start + j], helper[start + j + 1] ); @@ -192,9 +195,9 @@ namespace LHCb::Pr { // -- not sure that is the smartest solution // -- but I could not come up with anything better // -- inspired by: https://lemire.me/blog/2017/04/10/removing-duplicates-from-lists-quickly/ - simd::int_v - makeUniqueSIMD( std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& out, - int start, size_t len ) { + simd::int_v makeUniqueSIMD( + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& out, + int start, size_t len ) { simd::int_v pos = start + 1; simd::int_v oldv = out[start]; for ( size_t j = start + 1; j < start + len; ++j ) { @@ -346,87 +349,61 @@ namespace LHCb::Pr { // -- We cannot put 
all found hits in an array, as otherwise the stack overflows // -- so we just do the whole thing in batches - for ( std::size_t t = 0; t < filteredStates.size; t += batchSize ) { - - for ( std::size_t m = 0; m < batchSize; ++m ) { - for ( auto& it : hitsInLayers[m].layerIndices ) it = -1; + const std::size_t filteredStatesSize = filteredStates.size; + + for ( std::size_t t = 0; t < filteredStatesSize; t += batchSize ) { + + // -- This is scalar, as the hits are found in a scalar way + filteredStates.size = 0; + for ( std::size_t t2 = 0; t2 < batchSize && t2 + t < filteredStatesSize; ++t2 ) { + for ( auto& it : hitsInLayers[filteredStates.size].layerIndices ) it = -1; + hitsInLayers[filteredStates.size].size = 0; + const bool foundHits = + getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[filteredStates.size], t + t2 ); + filteredStates.copyBack<scalar>( t + t2, foundHits ); } pTracks.size = 0; + for ( std::size_t tEff = 0; tEff < filteredStates.size; tEff++ ) { - for ( std::size_t t2 = 0; t2 < batchSize && t2 + t < filteredStates.size; t2++ ) { - - std::size_t tEff = t + t2; - hitsInLayers[t2].size = 0; - - if ( !getHitsScalar( hh, filteredStates, compBoundsArray, hitsInLayers[t2], tEff ) ) continue; - - // -- this is a temporary solution to gradually adapt the algo - scalar::float_v x = filteredStates.x<scalar::float_v>( tEff ); - scalar::float_v y = filteredStates.y<scalar::float_v>( tEff ); - scalar::float_v z = filteredStates.z<scalar::float_v>( tEff ); - scalar::float_v tx = filteredStates.tx<scalar::float_v>( tEff ); - scalar::float_v ty = filteredStates.ty<scalar::float_v>( tEff ); - - MiniState trState; - trState.x = x.cast(); - trState.y = y.cast(); - trState.z = z.cast(); - trState.tx = tx.cast(); - trState.ty = ty.cast(); - - TrackHelper helper( trState, c_zKink, c_sigmaVeloSlope, m_maxPseudoChi2 ); - - if ( !formClusters<true>( hitsInLayers[t2], helper ) ) { formClusters<false>( hitsInLayers[t2], helper ); } - if ( helper.bestIndices[0] == -1 ) continue; - - scalar::float_v covx = filteredStates.covx<scalar::float_v>( tEff ); - scalar::float_v covy = filteredStates.covy<scalar::float_v>( tEff ); - scalar::float_v covz = filteredStates.covz<scalar::float_v>( tEff ); - scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); + Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); int trackIndex = pTracks.size; - // -- manual compressstore to keep everything in sync and fill the registers in the last function - pTracks.store_xState<scalar::float_v>( trackIndex, x ); - pTracks.store_yState<scalar::float_v>( trackIndex, y ); - pTracks.store_zState<scalar::float_v>( trackIndex, z ); - pTracks.store_txState<scalar::float_v>( trackIndex, tx ); - pTracks.store_tyState<scalar::float_v>( trackIndex, ty ); - pTracks.store_covx<scalar::float_v>( trackIndex, covx ); - pTracks.store_covy<scalar::float_v>( trackIndex, covy ); - pTracks.store_covz<scalar::float_v>( trackIndex, covz ); - pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); - pTracks.store_hitContIndex<scalar::int_v>( trackIndex, t2 ); + pTracks.fillHelperParams<scalar>( pos, dir, c_zKink, c_sigmaVeloSlope ); + pTracks.store_pos<scalar::float_v>( trackIndex, pos ); + pTracks.store_dir<scalar::float_v>( trackIndex, dir ); + pTracks.store_chi2TT<scalar::float_v>( trackIndex, m_maxPseudoChi2.value() ); + + pTracks.store_hitIndex<scalar::int_v>( trackIndex, 0, -1 ); + if ( !formClusters<true>( 
hitsInLayers[tEff], pTracks, trackIndex ) ) { + formClusters<false>( hitsInLayers[tEff], pTracks, trackIndex ); + } + if ( pTracks.hitIndex<scalar::int_v>( trackIndex, 0 ).cast() == -1 ) continue; - // -- another temporary thing: Put the clusters in an array - // -- order is: - pTracks.store_xTT<scalar::float_v>( trackIndex, helper.bestParams[2] ); - pTracks.store_xSlopeTT<scalar::float_v>( trackIndex, helper.bestParams[3] ); - pTracks.store_qp<scalar::float_v>( trackIndex, helper.bestParams[0] ); - pTracks.store_chi2TT<scalar::float_v>( trackIndex, helper.bestParams[1] ); + scalar::int_v ancestorIndex = filteredStates.index<scalar::int_v>( tEff ); + pTracks.store_index<scalar::int_v>( trackIndex, ancestorIndex ); + pTracks.store_hitContIndex<scalar::int_v>( trackIndex, tEff ); - int nHits = 0; // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 - for ( auto hitIndex : helper.bestIndices ) { - - scalar::float_v weight = ( hitIndex == -1 ) ? 0.0f : hitsInLayers[t2].weights[hitIndex]; - pTracks.store_weight<scalar::float_v>( trackIndex, nHits, weight ); - hitIndex = std::max( 0, hitIndex ); // this avoids accessing the '-1' element of an array - - pTracks.store_x<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].xs[hitIndex] ); - pTracks.store_z<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].zs[hitIndex] ); - pTracks.store_sin<scalar::float_v>( trackIndex, nHits, hitsInLayers[t2].sins[hitIndex] ); - - LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[t2].channelIDs[hitIndex] ) ); - pTracks.store_id<scalar::int_v>( trackIndex, nHits, id.lhcbID() ); // not sure if correct - nHits++; + for ( int i = 0; i < 4; ++i ) { + const int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); + pTracks.store_x<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].xs[hitI] ); + pTracks.store_z<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].zs[hitI] ); + pTracks.store_sin<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].sins[hitI] ); + + scalar::float_v weight = ( hitI == -1 ) ? 
0.0f : hitsInLayers[tEff].weights[hitI]; + pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); + + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[tEff].channelIDs[hitI] ) ); + pTracks.store_id<scalar::int_v>( trackIndex, i, id.lhcbID() ); // not sure if correct + pTracks.store_hitIndex<scalar::int_v>( trackIndex, i, + hitsInLayers[tEff].indexs[hitI] ); // not sure if correct } - pTracks.size++; } - - prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, bdlTable ); + prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } m_tracksCounter += outputTracks.size(); @@ -462,14 +439,9 @@ namespace LHCb::Pr { simd::mask_v csMask = loopMask && !mask && ( !passTracks || !passHoleMask ); int index = filteredStates.size; - filteredStates.compressstore_x<simd::float_v>( index, csMask, pos.x ); - filteredStates.compressstore_y<simd::float_v>( index, csMask, pos.y ); - filteredStates.compressstore_z<simd::float_v>( index, csMask, pos.z ); - filteredStates.compressstore_tx<simd::float_v>( index, csMask, dir.x ); - filteredStates.compressstore_ty<simd::float_v>( index, csMask, dir.y ); - filteredStates.compressstore_covx<simd::float_v>( index, csMask, covX.x ); - filteredStates.compressstore_covy<simd::float_v>( index, csMask, covX.y ); - filteredStates.compressstore_covz<simd::float_v>( index, csMask, covX.z ); + filteredStates.compressstore_pos<simd::float_v>( index, csMask, pos ); + filteredStates.compressstore_dir<simd::float_v>( index, csMask, dir ); + filteredStates.compressstore_cov<simd::float_v>( index, csMask, covX ); filteredStates.compressstore_index<simd::int_v>( index, csMask, simd::indices( t ) ); filteredStates.size += simd::popcount( csMask ); @@ -483,7 +455,7 @@ namespace LHCb::Pr { outputTracks.compressstore_stateDir<simd::float_v>( i, outMask, dir ); outputTracks.compressstore_stateCov<simd::float_v>( i, outMask, covX ); outputTracks.compressstore_stateQoP<simd::float_v>( i, outMask, 0.f ); // no momentum - outputTracks.compressstore_nHits<simd::int_v>( i, outMask, 0 ); // no hits + outputTracks.compressstore_nUTHits<simd::int_v>( i, outMask, 0 ); // no hits outputTracks.size() += simd::popcount( outMask ); } @@ -552,7 +524,7 @@ namespace LHCb::Pr { filteredStates.size = 0; std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray; - std::array<simd::int_v, maxSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> + std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> helperArray; // 4 layers x maximum 9 sectors std::array<int, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> maxColsRows; @@ -585,8 +557,8 @@ namespace LHCb::Pr { // -- Determine the maximum number of rows and columns we have to take into account // -- maximum 3 - const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); - const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 3 ); + const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, maxNumCols ); + const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, maxNumRows ); maxColsRows[layerIndex] = maxCols * maxRows; @@ -600,8 +572,9 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { - simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + 28 * realSC; + simd::int_v realSR = min( subrowmax, subrowmin + sr ); + simd::int_v sectorIndex = + realSR + static_cast<int>( 
UTInfo::SectorNumbers::EffectiveSectorsPerColumn ) * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number @@ -614,15 +587,19 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 - helperArray[maxSectors * layerIndex + counter] = sect + ( layerIndex * 3 + region ) * 98 - 1; + helperArray[maxNumSectors * layerIndex + counter] = + sect + + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions ) + region ) * + static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion ) - + 1; counter++; } } // -- This is sorting - bubbleSortSIMD( maxCols * maxRows, helperArray, maxSectors * layerIndex ); + bubbleSortSIMD( maxCols * maxRows, helperArray, maxNumSectors * layerIndex ); // -- This is uniquifying - posArray[layerIndex] = makeUniqueSIMD( helperArray, maxSectors * layerIndex, maxCols * maxRows ); + posArray[layerIndex] = makeUniqueSIMD( helperArray, maxNumSectors * layerIndex, maxCols * maxRows ); // -- count the number of layers which are 'valid' nLayers += select( mask, simd::int_v{1}, simd::int_v{0} ); } @@ -634,38 +611,18 @@ namespace LHCb::Pr { int index = compBoundsArray[iLayer].size; for ( int iSector = 0; iSector < maxColsRows[iLayer]; ++iSector ) { compBoundsArray[iLayer].compressstore_sect<simd::int_v>( index, iSector, compressMask, - helperArray[maxSectors * iLayer + iSector] ); + helperArray[maxNumSectors * iLayer + iSector] ); } simd::float_v xTol = eStatesArray[iLayer].xTol<simd::float_v>( t ); compBoundsArray[iLayer].compressstore_xTol<simd::float_v>( index, compressMask, xTol ); compBoundsArray[iLayer].compressstore_nPos<simd::int_v>( index, compressMask, - posArray[iLayer] - maxSectors * iLayer ); + posArray[iLayer] - maxNumSectors * iLayer ); compBoundsArray[iLayer].size += simd::popcount( compressMask ); } // -- Now need to compress the filtered states, such that they are // -- in sync with the sectors - simd::float_v x = filteredStates.x<simd::float_v>( t ); - simd::float_v y = filteredStates.y<simd::float_v>( t ); - simd::float_v z = filteredStates.z<simd::float_v>( t ); - simd::float_v tx = filteredStates.tx<simd::float_v>( t ); - simd::float_v ty = filteredStates.ty<simd::float_v>( t ); - simd::float_v covx = filteredStates.covx<simd::float_v>( t ); - simd::float_v covy = filteredStates.covy<simd::float_v>( t ); - simd::float_v covz = filteredStates.covz<simd::float_v>( t ); - simd::int_v trackIndex = filteredStates.index<simd::int_v>( t ); - - auto index = filteredStates.size; - filteredStates.compressstore_x<simd::float_v>( index, compressMask, x ); - filteredStates.compressstore_y<simd::float_v>( index, compressMask, y ); - filteredStates.compressstore_z<simd::float_v>( index, compressMask, z ); - filteredStates.compressstore_tx<simd::float_v>( index, compressMask, tx ); - filteredStates.compressstore_ty<simd::float_v>( index, compressMask, ty ); - filteredStates.compressstore_covx<simd::float_v>( index, compressMask, covx ); - filteredStates.compressstore_covy<simd::float_v>( index, compressMask, covy ); - filteredStates.compressstore_covz<simd::float_v>( index, compressMask, covz ); - filteredStates.compressstore_index<simd::int_v>( index, compressMask, trackIndex ); - filteredStates.size += simd::popcount( compressMask ); + filteredStates.copyBack<simd>( t, compressMask ); } return compBoundsArray; @@ -706,7 +663,7 @@ namespace LHCb::Pr { const simd::float_v 
tolProto{m_yTol.value()}; const simd::float_v xTol{xTolS}; - std::array<int, maxSectors + 1> sectors{0}; + std::array<int, maxNumSectors + 1> sectors{0}; for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(); } @@ -773,6 +730,7 @@ namespace LHCb::Pr { myHits.cos<simd::float_v>( i ) * -1.0f * myHits.dxDy<simd::float_v>( i ) ); mutHits.compressstore_weight( index, mask, myHits.weight<simd::float_v>( i ) ); mutHits.compressstore_channelID( index, mask, myHits.channelID<simd::int_v>( i ) ); + mutHits.compressstore_index( index, mask, simd::indices( i ) ); // fill the index in the original hit container mutHits.size += simd::popcount( mask ); } } @@ -880,17 +838,18 @@ namespace LHCb::Pr { template <typename BdlTable> void VeloUT::prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const { + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + const BdlTable& bdlTable ) const { for ( std::size_t t = 0; t < protoTracks.size; t += simd::size ) { //== Handle states. copy Velo one, add TT. const simd::float_v zOrigin = - select( protoTracks.tyState<simd::float_v>( t ) > 0.001f, - protoTracks.zState<simd::float_v>( t ) - - protoTracks.yState<simd::float_v>( t ) / protoTracks.tyState<simd::float_v>( t ), - protoTracks.zState<simd::float_v>( t ) - - protoTracks.xState<simd::float_v>( t ) / protoTracks.txState<simd::float_v>( t ) ); + select( protoTracks.dir<simd::float_v>( t ).y > 0.001f, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).y / protoTracks.dir<simd::float_v>( t ).y, + protoTracks.pos<simd::float_v>( t ).z - + protoTracks.pos<simd::float_v>( t ).x / protoTracks.dir<simd::float_v>( t ).x ); auto loopMask = simd::loop_mask( t, protoTracks.size ); // -- this is to filter tracks where the fit had a too large chi2 @@ -902,7 +861,7 @@ namespace LHCb::Pr { // -- FIXME: these rely on the internal details of PrTableForFunction!!! // and should at least be put back in there, and used from here // to make sure everything _stays_ consistent... - auto var = std::array{protoTracks.tyState<simd::float_v>( t ), zOrigin, protoTracks.zState<simd::float_v>( t )}; + auto var = std::array{protoTracks.dir<simd::float_v>( t ).y, zOrigin, protoTracks.pos<simd::float_v>( t ).z}; simd::int_v index1 = min( max( simd::int_v{( var[0] + 0.3f ) / 0.6f * 30}, 0 ), 30 ); simd::int_v index2 = min( max( simd::int_v{( var[1] + 250 ) / 500 * 10}, 0 ), 10 ); @@ -938,17 +897,16 @@ namespace LHCb::Pr { // -- order is: x, tx, y, chi2 std::array<simd::float_v, 4> finalParams = { protoTracks.xTT<simd::float_v>( t ), protoTracks.xSlopeTT<simd::float_v>( t ), - protoTracks.yState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * ( m_zMidUT - protoTracks.zState<simd::float_v>( t ) ), + protoTracks.pos<simd::float_v>( t ).y + + protoTracks.dir<simd::float_v>( t ).y * ( m_zMidUT - protoTracks.pos<simd::float_v>( t ).z ), protoTracks.chi2TT<simd::float_v>( t )}; const simd::float_v qpxz2p = -1.0f / bdl * 3.3356f / Gaudi::Units::GeV; simd::mask_v fitMask = simd::mask_true(); - simd::float_v qp = m_finalFit - ? fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) - : protoTracks.qp<simd::float_v>( t ) * - rsqrt( 1.0f + protoTracks.tyState<simd::float_v>( t ) * - protoTracks.tyState<simd::float_v>( t ) ); // is this correct? + simd::float_v qp = m_finalFit ? 
fastfitterSIMD( finalParams, protoTracks, m_zMidUT, qpxz2p, t, fitMask ) + : protoTracks.qp<simd::float_v>( t ) * + rsqrt( 1.0f + protoTracks.dir<simd::float_v>( t ).y * + protoTracks.dir<simd::float_v>( t ).y ); // is this correct? qp = select( fitMask, qp, protoTracks.qp<simd::float_v>( t ) ); const simd::float_v qop = select( abs( bdl ) < 1.e-8f, simd::float_v{1000.0f}, qp * qpxz2p ); @@ -957,8 +915,8 @@ namespace LHCb::Pr { // -- Beware of the momentum resolution! const simd::float_v p = abs( 1.0f / qop ); const simd::float_v pt = - p * sqrt( protoTracks.txState<simd::float_v>( t ) * protoTracks.txState<simd::float_v>( t ) + - protoTracks.tyState<simd::float_v>( t ) * protoTracks.tyState<simd::float_v>( t ) ); + p * sqrt( protoTracks.dir<simd::float_v>( t ).x * protoTracks.dir<simd::float_v>( t ).x + + protoTracks.dir<simd::float_v>( t ).y * protoTracks.dir<simd::float_v>( t ).y ); const simd::mask_v pPTMask = ( p > m_minMomentumFinal.value() && pt > m_minPTFinal.value() ); const simd::float_v xUT = finalParams[0]; @@ -997,38 +955,22 @@ namespace LHCb::Pr { simd::mask_v validTrackMask = !fiducialMask && pPTMask && loopMask && mvaMask; - const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); - auto pos = protoTracks.pos<simd::float_v>( t ); - auto dir = protoTracks.dir<simd::float_v>( t ); - auto covX = protoTracks.cov<simd::float_v>( t ); + // ========================================================================================== - int trackIndex = outputTracks.size(); - outputTracks.compressstore_trackVP<simd::int_v>( trackIndex, validTrackMask, ancestor ); - outputTracks.compressstore_statePos<simd::float_v>( trackIndex, validTrackMask, pos ); - outputTracks.compressstore_stateDir<simd::float_v>( trackIndex, validTrackMask, dir ); - outputTracks.compressstore_stateCov<simd::float_v>( trackIndex, validTrackMask, covX ); + const simd::int_v ancestor = protoTracks.index<simd::int_v>( t ); + const int trackIndex = outputTracks.size(); + outputTracks.copyVeloInformation<simd>( inputTracks, ancestor, validTrackMask ); outputTracks.compressstore_stateQoP<simd::float_v>( trackIndex, validTrackMask, qop ); + outputTracks.compressstore_nUTHits<simd::int_v>( trackIndex, validTrackMask, 0 ); - // outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, simd::int_v{0} ); - // a simple helper class that facilitates changing from simd to scalar for the slope - TxStorage txArray; - txArray.store_txUT<simd::float_v>( 0, txUT ); - - simd::int_v nHits{0}; - - for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { - simd::mask_v emptyHitMask = ( protoTracks.weight<simd::float_v>( t, iLayer ) > 0.0001f ); - simd::int_v hit = protoTracks.id<simd::int_v>( t, iLayer ); + float txArray[simd::size]; + txUT.store( txArray ); - // simd::int_v nHits = outputTracks.nHits<simd::int_v>( trackIndex ); - outputTracks.compressstore_hit<simd::int_v>( trackIndex, iLayer, validTrackMask, hit ); - nHits += select( emptyHitMask, simd::int_v{1}, simd::int_v{0} ); - outputTracks.compressstore_nHits<simd::int_v>( trackIndex, validTrackMask, nHits ); - } + // TxStorage txArray; + // txArray.store_txUT<simd::float_v>( 0, txUT ); // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... 
- for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { int trackIndex2 = 0; @@ -1037,9 +979,18 @@ namespace LHCb::Pr { const std::size_t tscalar = t + t2; - const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); - const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); - const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const bool goodHit = ( protoTracks.weight<scalar::float_v>( tscalar, iLayer ).cast() > 0.0001f ); + const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); + const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); + + if ( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); + + // -- + + const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); + const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); + // const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); + const float txUTS = txArray[t2]; int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); @@ -1056,19 +1007,19 @@ namespace LHCb::Pr { if ( xohit - xextrap < -m_overlapTol ) continue; if ( xohit - xextrap > m_overlapTol ) break; + int nUTHits = outputTracks.nUTHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); + if ( nUTHits >= LHCb::Pr::Upstream::Tracks::max_uthits ) + continue; // get this number from PrUpstreamTracks!!! LHCb::LHCbID oid( LHCb::UTChannelID( hitsInLayers[hitContIndex].channelIDs[index2] ) ); - - int nHits = outputTracks.nHits<scalar::int_v>( trackIndex + trackIndex2 ).cast(); - if ( nHits > 30 ) continue; - outputTracks.compressstore_hit<scalar::int_v>( trackIndex + trackIndex2, nHits, true, oid.lhcbID() ); - outputTracks.compressstore_nHits<scalar::int_v>( trackIndex + trackIndex2, true, nHits + 1 ); + outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), + hitsInLayers[hitContIndex].indexs[index2] ); // only one overlap hit // break; } trackIndex2++; } } - outputTracks.size() += simd::popcount( validTrackMask ); + // outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information } } } // namespace LHCb::Pr diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 7fc296b1505..21d8c1633d0 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -63,148 +63,53 @@ namespace LHCb::Pr { - constexpr static int batchSize = align_size( 48 ); - constexpr static int maxSectors = 9; // if needed, algo can be templated with this + constexpr static int batchSize = align_size( 48 ); + constexpr static int maxNumCols = 3; // if needed, algo can be templated with this + constexpr static int maxNumRows = 3; // if needed, algo can be templated with this + constexpr static int maxNumSectors = maxNumCols * maxNumRows; // if needed, algo can be templated with this using simd = SIMDWrapper::avx2::types; using scalar = SIMDWrapper::scalar::types; - struct MiniState final { - float x, y, z, tx, ty; - }; - struct MiniStatesArray final { - constexpr static int max_tracks = align_size( 1024 ); - std::array<float, max_tracks> xs; - std::array<float, max_tracks> ys; - std::array<float, max_tracks> zs; - std::array<float, max_tracks> txs; - std::array<float, max_tracks> tys; - std::array<int, max_tracks> indexs; - - std::array<float, max_tracks> covxs; - std::array<float, max_tracks> covys; - std::array<float, max_tracks> covzs; - - std::size_t size{0}; - - SOA_ACCESSOR( x, 
xs.data() ) - SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( z, zs.data() ) - SOA_ACCESSOR( tx, txs.data() ) - SOA_ACCESSOR( ty, tys.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) - SOA_ACCESSOR( index, indexs.data() ) - VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) - VEC3_SOA_ACCESSOR( pos, xs.data(), ys.data(), zs.data() ) - VEC3_XY_SOA_ACCESSOR( dir, txs.data(), tys.data(), 1.0f ) - }; - - struct ExtrapolatedStates final { - - constexpr static int max_tracks = align_size( 1024 ); - - std::array<float, max_tracks> xLayers; - std::array<float, max_tracks> yLayers; - std::array<float, max_tracks> xTols; - std::array<float, max_tracks> txs; - - std::size_t size{0}; - SOA_ACCESSOR( xLayer, xLayers.data() ) - SOA_ACCESSOR( yLayer, yLayers.data() ) - SOA_ACCESSOR( xTol, xTols.data() ) - SOA_ACCESSOR( tx, txs.data() ) - }; - - struct Boundaries final { - - constexpr static int max_tracks = align_size( 1024 ); - - std::array<int, 9 * max_tracks> sects; - std::array<float, max_tracks> xTols; - std::array<int, max_tracks> nPoss; - - std::size_t size{0}; - SOA_ACCESSOR_VAR( sect, &( sects[pos * max_tracks] ), int pos ) - SOA_ACCESSOR( xTol, xTols.data() ) - SOA_ACCESSOR( nPos, nPoss.data() ) - }; - - struct ProtoTracks final { - - // -- this is for the hits - // -- this does _not_ include overlap hits, so only 4 per track - std::array<float, 4 * batchSize> xs; - std::array<float, 4 * batchSize> zs; - std::array<float, 4 * batchSize> weightss{}; // this needs to be zero-initialized - std::array<float, 4 * batchSize> sins; - std::array<int, 4 * batchSize> ids; - - // -- this is the output of the fit - std::array<float, batchSize> qps; - std::array<float, batchSize> chi2TTs; - std::array<float, batchSize> xTTs; - std::array<float, batchSize> xSlopeTTs; - std::array<float, batchSize> ys; - - // -- and this the original state (in the Velo) - std::array<float, batchSize> xStates; - std::array<float, batchSize> yStates; - std::array<float, batchSize> zStates; - std::array<float, batchSize> txStates; - std::array<float, batchSize> tyStates; - std::array<int, batchSize> indexs; - - std::array<float, batchSize> covxs; - std::array<float, batchSize> covys; - std::array<float, batchSize> covzs; - - // -- and this and index to find the hit containers - std::array<int, batchSize> hitContIndexs; - - std::size_t size{0}; - SOA_ACCESSOR_VAR( x, &( xs[pos * batchSize] ), int pos ) - SOA_ACCESSOR_VAR( z, &( zs[pos * batchSize] ), int pos ) - SOA_ACCESSOR_VAR( weight, &( weightss[pos * batchSize] ), int pos ) - SOA_ACCESSOR_VAR( sin, &( sins[pos * batchSize] ), int pos ) - SOA_ACCESSOR_VAR( id, &( ids[pos * batchSize] ), int pos ) - - SOA_ACCESSOR( qp, qps.data() ) - SOA_ACCESSOR( chi2TT, chi2TTs.data() ) - SOA_ACCESSOR( xTT, xTTs.data() ) - SOA_ACCESSOR( xSlopeTT, xSlopeTTs.data() ) - SOA_ACCESSOR( y, ys.data() ) - - SOA_ACCESSOR( xState, xStates.data() ) - SOA_ACCESSOR( yState, yStates.data() ) - SOA_ACCESSOR( zState, zStates.data() ) - SOA_ACCESSOR( txState, txStates.data() ) - SOA_ACCESSOR( tyState, tyStates.data() ) - SOA_ACCESSOR( covx, covxs.data() ) - SOA_ACCESSOR( covy, covys.data() ) - SOA_ACCESSOR( covz, covzs.data() ) + constexpr static int max_tracks = align_size( 1024 ); + std::array<float, 3 * max_tracks> poss; + std::array<float, 2 * max_tracks> dirs; + std::array<float, 3 * max_tracks> covs; + std::array<int, max_tracks> indexs; + std::size_t size{0}; + + SOA_ACCESSOR( x, &( poss[0] ) ) + SOA_ACCESSOR( y, &( 
poss[max_tracks] ) ) + SOA_ACCESSOR( z, &( poss[2 * max_tracks] ) ) + SOA_ACCESSOR( tx, &( dirs[0] ) ) + SOA_ACCESSOR( ty, &( dirs[max_tracks] ) ) + SOA_ACCESSOR( covx, &( covs[0] ) ) + SOA_ACCESSOR( covy, &( covs[max_tracks] ) ) + SOA_ACCESSOR( covz, &( covs[2 * max_tracks] ) ) SOA_ACCESSOR( index, indexs.data() ) - SOA_ACCESSOR( hitContIndex, hitContIndexs.data() ) - VEC3_SOA_ACCESSOR( cov, covxs.data(), covys.data(), covzs.data() ) - VEC3_SOA_ACCESSOR( pos, xStates.data(), yStates.data(), zStates.data() ) - VEC3_XY_SOA_ACCESSOR( dir, txStates.data(), tyStates.data(), 1.0f ) - }; + VEC3_SOA_ACCESSOR( pos, (float*)&( poss[0] ), (float*)&( poss[max_tracks] ), (float*)&( poss[2 * max_tracks] ) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&( dirs[0] ), (float*)&( dirs[max_tracks] ), 1.0f ) + VEC3_SOA_ACCESSOR( cov, (float*)&( covs[0] ), (float*)&( covs[max_tracks] ), (float*)&( covs[2 * max_tracks] ) ) - struct TxStorage final { - std::array<float, simd::size> txUTs; - SOA_ACCESSOR( txUT, txUTs.data() ) - }; + // -- Copy back the entries, but with a filtering mask + template <typename dType> + void copyBack( std::size_t at, typename dType::mask_v mask ) { - struct TrackHelper final { - TrackHelper( const MiniState& miniState, const float zKink, const float sigmaVeloSlope, const float maxPseudoChi2 ) - : state( miniState ), bestParams{{0.0f, maxPseudoChi2, 0.0f, 0.0f}} { - xMidField = state.x + state.tx * ( zKink - state.z ); - const float a = sigmaVeloSlope * ( zKink - state.z ); - wb = 1.0f / ( a * a ); - invKinkVeloDist = 1.0f / ( zKink - state.z ); + using F = typename dType::float_v; + using I = typename dType::int_v; + + F( &poss[at] ).compressstore( mask, &poss[size] ); + F( &poss[at + max_tracks] ).compressstore( mask, &poss[size + max_tracks] ); + F( &poss[at + 2 * max_tracks] ).compressstore( mask, &poss[size + 2 * max_tracks] ); + F( &dirs[at] ).compressstore( mask, &dirs[size] ); + F( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] ); + F( &covs[at + max_tracks] ).compressstore( mask, &covs[size] ); + F( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] ); + F( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] ); + I( &indexs[at] ).compressstore( mask, &indexs[size] ); + size += dType::popcount( mask ); } }; @@ -373,7 +278,8 @@ namespace LHCb::Pr { template <typename BdlTable> void prepareOutputTrackSIMD( const ProtoTracks& protoTracks, const std::array<LHCb::Pr::UT::Mut::Hits, batchSize>& hitsInLayers, - Upstream::Tracks& outputTracks, const BdlTable& bdlTable ) const; + Upstream::Tracks& outputTracks, const Velo::Tracks& inputTracks, + const BdlTable& bdlTable ) const; DeUTDetector* m_utDet = nullptr; -- GitLab From 2d8523a304427e16241a2bf1be185660c7abaafd Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Thu, 30 Apr 2020 20:35:18 +0200 Subject: [PATCH 055/111] First commit of PrMatchNN to use the PrTrack classes. 
non functional --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 15 +++++++++------ Pr/PrAlgorithms/src/PrMatchNN.h | 8 +++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index bc3302bce97..92042cf3665 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,7 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", LHCb::TrackLocation::Velo}, KeyValue{"SeedInput", LHCb::TrackLocation::Seed}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,18 +48,18 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN::Track>& velos, - const std::vector<PrMatchNN::Track>& seeds ) const { +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); - if ( velos.empty() ) { + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; return matches; } - if ( seeds.empty() ) { + if ( seeds.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; return matches; @@ -76,7 +76,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - + + /* for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -176,6 +177,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } // end loop match cands } // end loop velo tracks + */ + m_tracksCount += matches.size(); return matches; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index cb7581998fa..50071ecad47 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -14,6 +14,9 @@ // Include files // from Gaudi #include "Event/Track_v2.h" +#include "Event/PrVeloTracks.h" +#include "Event/PrSeedTracks.h" + #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -36,8 +39,7 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const std::vector<LHCb::Event::v2::Track>&, const std::vector<LHCb::Event::v2::Track>& )> { +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { using Track = LHCb::Event::v2::Track; public: @@ -48,7 +50,7 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const std::vector<Track>&, const std::vector<Track>& ) const override; + std::vector<Track> operator()( const 
LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * -- GitLab From 0e971fb8ae67750eb63d54d508e43e7f10c89fd4 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 12 May 2020 22:15:11 +0200 Subject: [PATCH 056/111] update matching --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 149 ++++++++++++++++++++++-------- Pr/PrAlgorithms/src/PrMatchNN.h | 89 ++++++++++++++++-- 2 files changed, 193 insertions(+), 45 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 92042cf3665..e4d25025cf9 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,8 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"VeloHitsLocation", "Raw/VP/Hits"}, + KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,11 +49,14 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; @@ -65,9 +69,50 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + for ( int v = 0; v != velos.size(); v++ ) { + + auto mlpCounterBuf = m_tracksMLP.buffer(); + auto chi2CounterBuf = m_tracksChi2.buffer(); + + const int EndVelo = 1; + auto velo_pos = velos.statePos<F>( v, EndVelo ); + auto velo_dir = velos.stateDir<F>( v, EndVelo ); + + const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); + + const int EndT3 = 3; + for ( int s = 0; s != seeds.size(); s++ ) { + auto seed_pos = seeds.statePos<F>( s, EndT3 ); + auto seed_dir = seeds.stateDir<F>( s, EndT3 ); + + const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + + const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); + + if ( chi2 < m_maxChi2 ) { + + const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); + mlpCounterBuf += mlp; + chi2CounterBuf += chi2; + if ( mlp > m_minNN ) { + auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); + + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( match ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + } + } + } + } + + /* std::vector<MatchCandidate> cands; cands.reserve( seeds.size() ); + + + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // -- make pairs of Velo 
track and state @@ -76,8 +121,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - - /* + + for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -182,34 +227,37 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track m_tracksCount += matches.size(); return matches; } + //============================================================================= // -float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const { - const float tx2 = vState.tx() * vState.tx(); - const float ty2 = vState.ty() * vState.ty(); +float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { + + const float tx2 = vState_dir.x.cast() * vState_dir.x.cast(); + const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); - const float dSlope = vState.tx() - sState.tx(); + const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); if ( std::abs( dSlope ) > 1.5 ) return 99.; - const float dSlopeY = vState.ty() - sState.ty(); + const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); if ( std::abs( dSlopeY ) > 0.15 ) return 99.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + - m_zMagParams[3] * std::abs( sState.x() ) + m_zMagParams[4] * vState.tx() * vState.tx(); + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + + m_zMagParams[4] * vState_dir.x.cast() * vState_dir.x.cast(); const float dxTol2 = m_dxTol * m_dxTol; const float dxTolSlope2 = m_dxTolSlope * m_dxTolSlope; - const float xV = vState.x() + ( zForX - vState.z() ) * vState.tx(); + const float xV = vState_pos.x.cast() + ( zForX - vState_pos.z.cast() ) * vState_dir.x.cast(); // -- This is the function that calculates the 'bending' in y-direction // -- The parametrisation can be derived with the MatchFitParams package - const float yV = vState.y() + ( m_zMatchY - vState.z() ) * vState.ty() + - vState.ty() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); + const float yV = vState_pos.y.cast() + ( m_zMatchY - vState_pos.z.cast() ) * vState_dir.y.cast() + + vState_dir.y.cast() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); - const float xS = sState.x() + ( zForX - sState.z() ) * sState.tx(); - const float yS = sState.y() + ( m_zMatchY - sState.z() ) * sState.ty(); + const float xS = sState_pos.x.cast() + ( zForX - sState_pos.z.cast() ) * sState_dir.x.cast(); + const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; if ( std::abs( distX ) > 400 ) return 99.; @@ -237,40 +285,67 @@ float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sSt return chi2; } -PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMatchNN::Track& seed ) const { +PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { auto output = Track{}; - output.addToAncestors( velo ); - output.addToAncestors( seed ); + + // output.addToAncestors( velo ); + // output.addToAncestors( seed ); + 
//== Adjust flags output.setType( Track::Type::Long ); output.setHistory( Track::History::PrMatch ); output.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); + //== copy LHCbIDs - output.addToLhcbIDs( velo.lhcbIDs(), LHCb::Tag::Sorted ); - output.addToLhcbIDs( seed.lhcbIDs(), LHCb::Tag::Sorted ); + int nSeedHits = seeds.nHits<I>( s ).cast(); + std::vector<LHCb::LHCbID> seedlhcbIDs; + seedlhcbIDs.reserve( nSeedHits ); + + for ( int i = 0; i < nSeedHits; ++i ) { seedlhcbIDs.emplace_back( seeds.hit<I>( s, i ).cast() ); } + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Sorted ); + + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Unordered ); + output.addToLhcbIDs( velos.lhcbIDs( v, veloHits ), LHCb::Tag::Unordered ); + //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; newstates.reserve( 6 ); - if ( velo.hasStateAt( LHCb::State::Location::ClosestToBeam ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::ClosestToBeam ) ); - if ( velo.hasStateAt( LHCb::State::Location::FirstMeasurement ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::FirstMeasurement ) ); - if ( velo.hasStateAt( LHCb::State::Location::EndVelo ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::EndVelo ) ); - newstates.push_back( seed.closestState( StateParameters::ZBegT ) ); - newstates.push_back( seed.closestState( StateParameters::ZMidT ) ); + auto state_beam = getVeloState( velos, v, 0 ); + state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_endvelo = getVeloState( velos, v, 1 ); + state_endvelo.setLocation( LHCb::State::Location::EndVelo ); + + auto state_firstmeas = getVeloState( velos, v, 2 ); + state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_beam ); + newstates.push_back( state_endvelo ); + newstates.push_back( state_firstmeas ); + + auto state_begT = getSeedState( seeds, s, 0 ); + state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_midT = getSeedState( seeds, s, 1 ); + state_midT.setLocation( LHCb::State::Location::EndVelo ); + + auto state_endT = getSeedState( seeds, s, 2 ); + state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_begT ); + + newstates.push_back( state_midT ); // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } - newstates.push_back( seed.closestState( StateParameters::ZEndT ) ); + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; + + newstates.push_back( state_endT ); // make sure we don't include same state twice if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } //== estimate q/p - double qOverP, sigmaQOverP; - bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); - const LHCb::State& vState = velo.closestState( 0. 
); - const LHCb::State& sState = seed.closestState( m_zMatchY ); - StatusCode sc = m_fastMomentumTool->calculate( &vState, &sState, qOverP, sigmaQOverP, cubicFit ); + double qOverP, sigmaQOverP; + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + + StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { Warning( "momentum determination failed!", sc ).ignore(); // assume the Velo/T station standalone reco do something reasonable @@ -282,7 +357,9 @@ PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMat st.setQOverP( qOverP ); } } + //== add copied states to output track output.addToStates( newstates, LHCb::Tag::Unordered ); + return output; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 50071ecad47..922921cc620 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -13,9 +13,9 @@ // Include files // from Gaudi -#include "Event/Track_v2.h" -#include "Event/PrVeloTracks.h" #include "Event/PrSeedTracks.h" +#include "Event/PrVeloTracks.h" +#include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" @@ -39,7 +39,76 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { +namespace { + using dType = SIMDWrapper::scalar::types; + using I = dType::int_v; + using F = dType::float_v; + + using SeedTracks = LHCb::Pr::Seeding::Tracks; + using VeloTracks = LHCb::Pr::Velo::Tracks; + using Hits = LHCb::Pr::Velo::Hits; + + LHCb::State getVeloState( VeloTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + Vec3<F> covX = tracks.stateCovX<F>( t, index ); + Vec3<F> covY = tracks.stateCovY<F>( t, index ); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( 0. 
); + + c( 0, 0 ) = covX.x.cast(); + c( 2, 0 ) = covX.y.cast(); + c( 2, 2 ) = covX.z.cast(); + c( 1, 1 ) = covY.x.cast(); + c( 3, 1 ) = covY.y.cast(); + c( 3, 3 ) = covY.z.cast(); + c( 4, 4 ) = 1.f; + + state.setState( s ); + + state.setCovariance( c ); + + return state; + } + LHCb::State getSeedState( SeedTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + auto const qop = tracks.QoP<F>( t ).cast(); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( qop ); + + state.setState( s ); + + return state; + } + +} // namespace + +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, const LHCb::Pr::Seeding::Tracks& )> { + using Track = LHCb::Event::v2::Track; public: @@ -50,7 +119,8 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; + std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, + const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * @@ -79,12 +149,13 @@ public: }; private: - /// calculate matching chi^2 - float getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const; + // calculate matching chi^2 + float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; - /// merge velo and seed segment to output track - Track makeTrack( const Track& velo, const Track& seed ) const; + // merge velo and seed segment to output track + Track makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) const; Gaudi::Property<std::vector<double>> m_zMagParams{ this, "ZMagnetParams", {5287.6, -7.98878, 317.683, 0.0119379, -1418.42}}; -- GitLab From df718f3c747b50fedbe5a905188d8a475c47641e Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 20 May 2020 15:27:55 +0200 Subject: [PATCH 057/111] working PrMatchNN with SOA containers --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 185 +++++++----------------------- Pr/PrAlgorithms/src/PrMatchNN.h | 1 + 2 files changed, 43 insertions(+), 143 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index e4d25025cf9..5722d4c8198 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -53,7 +53,7 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; - matches.reserve( 200 ); + matches.reserve( velos.size() * 1.5 ); std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; @@ -63,12 +63,22 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + if ( veloHits.size() == 0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Hit container '" << inputLocation<1>() << "' is empty" << endmsg; + return matches; + } + if ( seeds.size() == 0 ) { if ( 
UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; + debug() << "Track container '" << inputLocation<2>() << "' is empty" << endmsg; return matches; } + seedMLPPairs seedMLP; + + seedMLP.reserve( seeds.size() ); + for ( int v = 0; v != velos.size(); v++ ) { auto mlpCounterBuf = m_tracksMLP.buffer(); @@ -80,12 +90,14 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); - const int EndT3 = 3; + const int EndT3 = 2; + for ( int s = 0; s != seeds.size(); s++ ) { auto seed_pos = seeds.statePos<F>( s, EndT3 ); auto seed_dir = seeds.stateDir<F>( s, EndT3 ); const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + if ( posYApproxS > posYApproxV + m_fastYTol ) continue; const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); @@ -94,143 +106,34 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); mlpCounterBuf += mlp; chi2CounterBuf += chi2; - if ( mlp > m_minNN ) { - auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); - - if ( m_addUTHitsTool.isEnabled() ) { - StatusCode sc = m_addUTHitsTool->addUTHits( match ); - if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); - } - } - } - } - } - - /* - std::vector<MatchCandidate> cands; - cands.reserve( seeds.size() ); - - - - - std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; - - // -- make pairs of Velo track and state - // -- TrackStatePair is std::pair<const Track*, const LHCb::State*> - // -- TrackStatePairs is std::vector<TrackStatePair> - // -- typedef in header file - TrackStatePairs veloPairs; - veloPairs.reserve( velos.size() ); - - - for ( auto const& vTr : velos ) { - if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; - if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; - const LHCb::State* vState = vTr.stateAt( LHCb::State::Location::EndVelo ); - assert( vState != nullptr ); - veloPairs.emplace_back( &vTr, vState ); - } - - // -- sort according to approx y position - // -- We don't know deltaSlope, so we just extrapolate linearly - std::sort( veloPairs.begin(), veloPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( 0.0 - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( 0.0 - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - - // -- make pairs of Seed track and state - TrackStatePairs seedPairs; - seedPairs.reserve( seeds.size() ); - - for ( auto const& sTr : seeds ) { - if ( sTr.checkFlag( Track::Flag::Invalid ) ) continue; - const LHCb::State& sState = sTr.closestState( m_zMatchY ); - seedPairs.emplace_back( &sTr, &sState ); - } - - // -- sort according to approx y position - std::sort( seedPairs.begin(), seedPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( m_zMatchY - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( m_zMatchY - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - auto mlpCounterBuf = m_tracksMLP.buffer(); - auto chi2CounterBuf = m_tracksChi2.buffer(); - for ( auto const& vP : veloPairs ) { - cands.clear(); - - const float posYApproxV = vP.second->y() + ( 
m_zMatchY - vP.second->z() ) * vP.second->ty(); - // -- The TrackStatePairs are sorted according to the approximate extrapolated y position - // -- We can use a binary search to find the starting point from where we need to calculate the chi2 - // -- The tolerance should be large enough such that it is essentially losseless, but speeds things up - // significantly. - auto it = std::lower_bound( - seedPairs.begin(), seedPairs.end(), m_fastYTol, [&]( const TrackStatePair& sP, const float tol ) { - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - return posYApproxS < posYApproxV - tol; - } ); - - // -- The loop to calculate the chi2 between Velo and Seed track - for ( ; it < seedPairs.end(); ++it ) { - TrackStatePair sP = *it; - - // -- Stop the loop at the upper end of the tolerance interval - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - if ( posYApproxS > posYApproxV + m_fastYTol ) break; - - const float chi2 = getChi2Match( *vP.second, *sP.second, mLPReaderInput ); - - if ( m_matchDebugTool.isEnabled() ) { - std::vector<float> v( std::begin( mLPReaderInput ), std::end( mLPReaderInput ) ); - /// TODO: This needs to be updated with Track_v2 (PrMCTools/src/PrDebugMatchTool.{h,cpp} and - /// PrKernel/PrKernel/IPrDebugMatchTool.h) - // m_matchDebugTool->fillTuple( *vP.first, *sP.first, v ); - } - - if ( chi2 < m_maxChi2 ) { - const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); - mlpCounterBuf += mlp; - chi2CounterBuf += chi2; - if ( mlp > m_minNN ) cands.emplace_back( vP.first, sP.first, mlp ); + if ( mlp > m_minNN ) { seedMLP.emplace_back( s, mlp ); } } } - std::sort( cands.begin(), cands.end(), - []( const MatchCandidate& lhs, const MatchCandidate& rhs ) { return lhs.dist() > rhs.dist(); } ); + std::sort( seedMLP.begin(), seedMLP.end(), [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { + return sP1.second > sP2.second; + } ); - // convert unused match candidates to tracks - for ( const MatchCandidate& cand : cands ) { + for ( unsigned int sm = 0; sm != seedMLP.size(); sm++ ) { - if ( cands[0].dist() - cand.dist() > m_maxdDist ) break; + if ( seedMLP[0].second - seedMLP[sm].second > m_maxdDist ) break; - const Track* vTr = cand.vTr(); - const Track* sTr = cand.sTr(); - - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) { - debug() << " Candidate" - << " Seed chi2 " << cand.dist() << endmsg; - } - - auto& match = matches.emplace_back( makeTrack( *vTr, *sTr ) ); + auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, seedMLP[sm].first ) ); if ( m_addUTHitsTool.isEnabled() ) { StatusCode sc = m_addUTHitsTool->addUTHits( match ); if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); } - } // end loop match cands - } // end loop velo tracks - - */ + } + seedMLP.clear(); + } m_tracksCount += matches.size(); return matches; } //============================================================================= -// - float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { @@ -238,10 +141,10 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); - if ( std::abs( dSlope ) > 1.5 ) return 99.; + if ( std::abs( dSlope ) > 1.5 ) return 9999.; const float dSlopeY = vState_dir.y.cast() - 
sState_dir.y.cast(); - if ( std::abs( dSlopeY ) > 0.15 ) return 99.; + if ( std::abs( dSlopeY ) > 0.15 ) return 9999.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + @@ -260,17 +163,16 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; - if ( std::abs( distX ) > 400 ) return 99.; + if ( std::abs( distX ) > 400 ) return 9999.; const float distY = yS - yV; - if ( std::abs( distX ) > 250 ) return 99.; + if ( std::abs( distX ) > 250 ) return 9999.; const float teta2 = tx2 + ty2; const float tolX = dxTol2 + dSlope * dSlope * dxTolSlope2; const float tolY = m_dyTol * m_dyTol + teta2 * m_dyTolSlope * m_dyTolSlope; - float chi2 = distX * distX / tolX + distY * distY / tolY; + float chi2 = ( tolX != 0 and tolY != 0 ? distX * distX / tolX + distY * distY / tolY : 9999. ); - // chi2 += dslY * dslY / sState.errTy2() / 16.; chi2 += dSlopeY * dSlopeY * 10000 * 0.0625; if ( m_maxChi2 < chi2 ) return chi2; @@ -285,6 +187,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di return chi2; } +//============================================================================= PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { auto output = Track{}; @@ -310,40 +213,35 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; - newstates.reserve( 6 ); + newstates.reserve( 5 ); auto state_beam = getVeloState( velos, v, 0 ); state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_beam ); auto state_endvelo = getVeloState( velos, v, 1 ); state_endvelo.setLocation( LHCb::State::Location::EndVelo ); - - auto state_firstmeas = getVeloState( velos, v, 2 ); - state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_beam ); newstates.push_back( state_endvelo ); - newstates.push_back( state_firstmeas ); auto state_begT = getSeedState( seeds, s, 0 ); state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_begT ); auto state_midT = getSeedState( seeds, s, 1 ); state_midT.setLocation( LHCb::State::Location::EndVelo ); + newstates.push_back( state_midT ); + + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) + newstates.pop_back(); // make sure we don't include same state twice auto state_endT = getSeedState( seeds, s, 2 ); state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_begT ); - - newstates.push_back( state_midT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; - newstates.push_back( state_endT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. 
) + newstates.pop_back(); // make sure we don't include same state twice //== estimate q/p double qOverP, sigmaQOverP; - // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { @@ -363,3 +261,4 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int return output; } +//============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 922921cc620..180983d4e74 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -187,6 +187,7 @@ private: typedef std::pair<const Track*, const LHCb::State*> TrackStatePair; typedef std::vector<TrackStatePair> TrackStatePairs; + typedef std::vector<std::pair<unsigned int, float>> seedMLPPairs; }; #endif // PRMATCH_H -- GitLab From 40d47e6d288e3f3a2ba4b4461a0ec310cc935d01 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 9 Jun 2020 21:41:58 +0200 Subject: [PATCH 058/111] fix out of bound index --- Pr/PrVeloUT/src/PrVeloUT.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index ff32d1a4e1c..9a5ab52125f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -388,14 +388,16 @@ namespace LHCb::Pr { // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 for ( int i = 0; i < 4; ++i ) { - const int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); + int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); + scalar::float_v weight = ( hitI == -1 ) ? 0.0f : hitsInLayers[tEff].weights[hitI]; + pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); + + hitI = std::max(0, hitI);// prevent index out of bound + pTracks.store_x<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].xs[hitI] ); pTracks.store_z<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].zs[hitI] ); pTracks.store_sin<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].sins[hitI] ); - - scalar::float_v weight = ( hitI == -1 ) ? 0.0f : hitsInLayers[tEff].weights[hitI]; - pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); - + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[tEff].channelIDs[hitI] ) ); pTracks.store_id<scalar::int_v>( trackIndex, i, id.lhcbID() ); // not sure if correct pTracks.store_hitIndex<scalar::int_v>( trackIndex, i, -- GitLab From 8746790d61fff5a178f9f462efb20afa80d712a9 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 9 Jun 2020 21:44:03 +0200 Subject: [PATCH 059/111] fix formatting --- Pr/PrVeloUT/src/PrVeloUT.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 9a5ab52125f..358997fe0d1 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -388,16 +388,16 @@ namespace LHCb::Pr { // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 for ( int i = 0; i < 4; ++i ) { - int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); + int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); scalar::float_v weight = ( hitI == -1 ) ? 
0.0f : hitsInLayers[tEff].weights[hitI]; pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); - - hitI = std::max(0, hitI);// prevent index out of bound - + + hitI = std::max( 0, hitI ); // prevent index out of bound + pTracks.store_x<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].xs[hitI] ); pTracks.store_z<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].zs[hitI] ); pTracks.store_sin<scalar::float_v>( trackIndex, i, hitsInLayers[tEff].sins[hitI] ); - + LHCb::LHCbID id( LHCb::UTChannelID( hitsInLayers[tEff].channelIDs[hitI] ) ); pTracks.store_id<scalar::int_v>( trackIndex, i, id.lhcbID() ); // not sure if correct pTracks.store_hitIndex<scalar::int_v>( trackIndex, i, -- GitLab From a71a7561eec94a764468da47773563863ee899be Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 10 Jun 2020 16:07:47 +0200 Subject: [PATCH 060/111] another out-of-bound-index --- Pr/PrVeloUT/src/PrVeloUT.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 358997fe0d1..2401d3e696f 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -558,9 +558,10 @@ namespace LHCb::Pr { const simd::mask_v gathermask = loopMask && mask; // -- Determine the maximum number of rows and columns we have to take into account - // -- maximum 3 - const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, maxNumCols ); - const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, maxNumRows ); + // -- maximum 3, minimum 0 + // -- The 'clamp' is needed to prevent large negative values from 'hmax' when gathermask has no true entries + const int maxCols = std::clamp( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 0, 3 ); + const int maxRows = std::clamp( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 0, 3 ); maxColsRows[layerIndex] = maxCols * maxRows; -- GitLab From b9f6929d9c094d6ecc553e0f6eaf53fb66d37c05 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Wed, 10 Jun 2020 16:09:24 +0200 Subject: [PATCH 061/111] another out-of-bound-index --- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 2401d3e696f..7ddd874d660 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -560,8 +560,8 @@ namespace LHCb::Pr { // -- Determine the maximum number of rows and columns we have to take into account // -- maximum 3, minimum 0 // -- The 'clamp' is needed to prevent large negative values from 'hmax' when gathermask has no true entries - const int maxCols = std::clamp( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 0, 3 ); - const int maxRows = std::clamp( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 0, 3 ); + const int maxCols = std::clamp( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 0, maxNumCols ); + const int maxRows = std::clamp( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 0, maxNumRows ); maxColsRows[layerIndex] = maxCols * maxRows; -- GitLab From 3f2a6fa079ecfe2cf50cf847e5c6c9f4ad7a8a0f Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Thu, 11 Jun 2020 06:56:42 +0000 Subject: [PATCH 062/111] Apply suggestion to Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp 
index 5f91f74523c..b7528e85314 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -220,7 +220,7 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 helperArray[maxSectors * layerIndex + counter] = - sect + ( layerIndex * 3 + region ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ) - 1; + sect + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions ) + region ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ) - 1; counter++; } } -- GitLab From ad0fcbe3614bf9385e5c911a33f259878f7d284c Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Thu, 11 Jun 2020 06:56:50 +0000 Subject: [PATCH 063/111] Apply suggestion to Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index b7528e85314..105bdd8ff3c 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -205,7 +205,7 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + 28 * realSC; + simd::int_v sectorIndex = realSR + static_cast<int>( UTInfo::SectorNumbers::EffectiveSectorsPerColumn ) * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number -- GitLab From 3dd1d95d95037245cd6bbe1954448bd3ceaba741 Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Thu, 11 Jun 2020 06:56:56 +0000 Subject: [PATCH 064/111] Apply suggestion to Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 105bdd8ff3c..e92bf8ed6a5 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -192,7 +192,7 @@ namespace LHCb::Pr { // -- Determine the maximum number of rows and columns we have to take into account // -- maximum 3 const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); - const int maxRows = std::min( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 3 ); + const int maxRows = std::clamp( ( subrowmax - subrowmin ).hmax( gathermask ) + 1, 0, 3 ); maxColsRows[layerIndex] = maxCols * maxRows; -- GitLab From 586771db638b6de70b1463dd27fc2b1d97e0747e Mon Sep 17 00:00:00 2001 From: Peilian Li <peilian.li@cern.ch> Date: Thu, 11 Jun 2020 06:57:00 +0000 Subject: [PATCH 065/111] Apply suggestion to Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index e92bf8ed6a5..7dc85048a1a 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -191,7 +191,7 @@ namespace LHCb::Pr { // -- Determine the maximum number of rows and columns we have to take into account // -- maximum 3 - const int maxCols = std::min( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 3 ); + const int maxCols = std::clamp( ( subcolmax - subcolmin ).hmax( gathermask ) + 1, 0, 3 ); const int maxRows = std::clamp( ( subrowmax - 
subrowmin ).hmax( gathermask ) + 1, 0, 3 ); maxColsRows[layerIndex] = maxCols * maxRows; -- GitLab From 64f19272f893a816efdecbb8c8ccb7dc6abbcc71 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 11 Jun 2020 09:55:27 +0200 Subject: [PATCH 066/111] Apply Suggestion --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 7dc85048a1a..a9f1513a175 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -62,8 +62,8 @@ namespace LHCb::Pr { return pos; } void ProjSortSIMD( LHCb::Pr::UT::Mut::Hits& muthits ) { - for ( int i = 0; i < int( muthits.size ) - 1; i++ ) { - for ( int j = 0; j < int( muthits.size ) - i - 1; j++ ) { + for ( int i = 0; i < static_cast<int>( muthits.size ) - 1; i++ ) { + for ( int j = 0; j < static_cast<int>( muthits.size ) - i - 1; j++ ) { if ( muthits.projections[j] > muthits.projections[j + 1] ) { std::swap( muthits.projections[j], muthits.projections[j + 1] ); std::swap( muthits.indexs[j], muthits.indexs[j + 1] ); -- GitLab From 00d9af9670ec8aabf7ed22bc958330db5cf4973d Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Thu, 11 Jun 2020 07:55:52 +0000 Subject: [PATCH 067/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8755601 --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index a9f1513a175..56d375be5c1 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -204,8 +204,9 @@ namespace LHCb::Pr { for ( int sr = 0; sr < maxRows; sr++ ) { - simd::int_v realSR = min( subrowmax, subrowmin + sr ); - simd::int_v sectorIndex = realSR + static_cast<int>( UTInfo::SectorNumbers::EffectiveSectorsPerColumn ) * realSC; + simd::int_v realSR = min( subrowmax, subrowmin + sr ); + simd::int_v sectorIndex = + realSR + static_cast<int>( UTInfo::SectorNumbers::EffectiveSectorsPerColumn ) * realSC; // -- only gather when we are not outside the acceptance // -- if we are outside, fill 1 which is the lowest possible sector number @@ -220,7 +221,10 @@ namespace LHCb::Pr { // -- ID is: sectorIndex (from LUT) + (layerIndex * 3 + region - 1 ) * 98 // -- The regions are already calculated with a -1 helperArray[maxSectors * layerIndex + counter] = - sect + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions ) + region ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ) - 1; + sect + + ( layerIndex * static_cast<int>( UTInfo::DetectorNumbers::Regions ) + region ) * + static_cast<int>( UTInfo::DetectorNumbers::Sectors ) - + 1; counter++; } } -- GitLab From 6e212ddb08235d1076fb271ab1c616d8fb0c6251 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 16 Jun 2020 14:36:24 +0200 Subject: [PATCH 068/111] apply suggestions and fix FPEs --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 45 ++++++------------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 23 +++++----- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 19 +++++--- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ++ .../src/SciFiTrackForwarding.cpp | 3 +- 5 files changed, 44 insertions(+), 50 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 56d375be5c1..a9384cdefd0 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ 
b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -61,22 +61,6 @@ namespace LHCb::Pr { } return pos; } - void ProjSortSIMD( LHCb::Pr::UT::Mut::Hits& muthits ) { - for ( int i = 0; i < static_cast<int>( muthits.size ) - 1; i++ ) { - for ( int j = 0; j < static_cast<int>( muthits.size ) - i - 1; j++ ) { - if ( muthits.projections[j] > muthits.projections[j + 1] ) { - std::swap( muthits.projections[j], muthits.projections[j + 1] ); - std::swap( muthits.indexs[j], muthits.indexs[j + 1] ); - std::swap( muthits.channelIDs[j], muthits.channelIDs[j + 1] ); - std::swap( muthits.xs[j], muthits.xs[j + 1] ); - std::swap( muthits.zs[j], muthits.zs[j + 1] ); - std::swap( muthits.coss[j], muthits.coss[j + 1] ); - std::swap( muthits.sins[j], muthits.sins[j + 1] ); - std::swap( muthits.weights[j], muthits.weights[j + 1] ); - } - } - } - } } // namespace using ROOT::Math::CholeskyDecomp; @@ -253,11 +237,8 @@ namespace LHCb::Pr { // -- in sync with the sectors int stateidx = filteredStates.size; - filteredStates.compressstore_x<F>( stateidx, compressMask, pos.x ); - filteredStates.compressstore_y<F>( stateidx, compressMask, pos.y ); - filteredStates.compressstore_z<F>( stateidx, compressMask, pos.z ); - filteredStates.compressstore_tx<F>( stateidx, compressMask, dir.x ); - filteredStates.compressstore_ty<F>( stateidx, compressMask, dir.y ); + filteredStates.compressstore_pos<F>( stateidx, compressMask, pos ); + filteredStates.compressstore_dir<F>( stateidx, compressMask, dir ); filteredStates.compressstore_qop<F>( stateidx, compressMask, qoverp ); filteredStates.compressstore_p<F>( stateidx, compressMask, p ); filteredStates.compressstore_index<I>( stateidx, compressMask, simd::indices( t ) ); @@ -293,11 +274,14 @@ namespace LHCb::Pr { float bestChi2 = p_maxChi2Tol.value() + p_maxChi2Slope.value() / ( p - p_maxChi2POffset.value() ); // sort of hits in increasing projection - ProjSortSIMD( hitsInLayers ); + std::vector<int> hitIdx; + hitIdx.reserve( int(hitsInLayers.size) ); + for( int i = 0; i < int(hitsInLayers.size); i++ ) hitIdx.emplace_back( i ); + std::sort(hitIdx.begin(), hitIdx.end(), [&hitsInLayers](const int i, const int j) { return hitsInLayers.projections[i] < hitsInLayers.projections[j]; }); // -- Loop over all hits and make "groups" of hits to form a candidate - for ( auto itBeg = 0; itBeg + 2 < int( hitsInLayers.size ); ++itBeg ) { - + for ( auto itB = 0; itB + 2 < int( hitsInLayers.size ); ++itB ) { + const int itBeg = hitIdx[itB]; const float firstProj = hitsInLayers.projections[itBeg]; LHCb::Pr::UT::Mut::Hits goodUT; @@ -313,13 +297,12 @@ namespace LHCb::Pr { sqrt( p_minAxProj.value() * p_minAxProj.value() * ( 1 - firstProj * firstProj * m_invMajAxProj2 ) ); } // TODO -- This means that there would be less than 3 hits, which does not work, so we can skip this right away - if ( ( hitsInLayers.projections[itBeg + 2] ) > maxProj ) continue; + if ( ( hitsInLayers.projections[hitIdx[itB + 2]] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group - for ( auto itEnd = itBeg; itEnd < int( hitsInLayers.size ); itEnd++ ) { - + for ( auto itE = itB; itE < int( hitsInLayers.size ); itE++ ) { + const int itEnd = hitIdx[itE]; if ( hitsInLayers.projections[itEnd] > maxProj ) break; - if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] = 1; // -- Count number of fired planes ++nbPlane; @@ -384,7 +367,7 @@ namespace LHCb::Pr { const float stateTy = filteredStates.ty<sF>( t 
).cast(); const float p = filteredStates.p<sF>( t ).cast(); const float qop = filteredStates.qop<sF>( t ).cast(); - const float bendParam = p_utParam.value() * -1 * signedReCur * qop; + const float bendParam = p_utParam.value() * -1.0 * signedReCur * qop; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "State z: " << stateZ << " x " << stateX << " y " << stateY << " tx " << stateTx << " ty " << stateTy @@ -393,6 +376,7 @@ namespace LHCb::Pr { std::size_t nSize = 0; std::size_t nLayers = 0; const LHCb::Pr::UT::Hits& myHits = m_HitHandler.get()->hits(); + for ( int layerIndex = 0; layerIndex < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++layerIndex ) { if ( ( layerIndex == 2 && nLayers == 0 ) || ( layerIndex == 3 && nLayers < 2 ) ) return false; @@ -462,7 +446,6 @@ namespace LHCb::Pr { float dist = 0; chi2 = 1.e20; - const float xTol = p_xTol.value() + p_xTolSlope.value() / p; const float fixedWeight = 9. / ( xTol * xTol ); @@ -552,7 +535,7 @@ namespace LHCb::Pr { chi2 += w * dist * dist; } - chi2 /= nDoF; + if( nDoF != 0 ) chi2 /= nDoF; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) && worstDiff > 0. ) { info() << format( " chi2 %10.2f nDoF%2d wors %8.2f proj %6.2f offset %8.3f slope %10.6f offsetY %10.6f", chi2, diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index f695c33a5a7..3f4bec9824c 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -65,25 +65,24 @@ namespace LHCb::Pr { constexpr static int maxSectors = 9; struct MiniStates final { - std::array<float, max_tracks> xs; - std::array<float, max_tracks> ys; - std::array<float, max_tracks> zs; - std::array<float, max_tracks> txs; - std::array<float, max_tracks> tys; + std::array<float, 3 * max_tracks> poss; + std::array<float, 2 * max_tracks> dirs; std::array<float, max_tracks> qops; std::array<float, max_tracks> ps; std::array<int, max_tracks> indexs; std::size_t size{0}; - SOA_ACCESSOR( x, xs.data() ) - SOA_ACCESSOR( y, ys.data() ) - SOA_ACCESSOR( z, zs.data() ) - SOA_ACCESSOR( tx, txs.data() ) - SOA_ACCESSOR( ty, tys.data() ) + SOA_ACCESSOR( x, &( poss[0] ) ) + SOA_ACCESSOR( y, &( poss[max_tracks] ) ) + SOA_ACCESSOR( z, &( poss[2*max_tracks] ) ) + SOA_ACCESSOR( tx, &( dirs[0] ) ) + SOA_ACCESSOR( ty, &( dirs[max_tracks] ) ) SOA_ACCESSOR( qop, qops.data() ) SOA_ACCESSOR( p, ps.data() ) SOA_ACCESSOR( index, indexs.data() ) + VEC3_SOA_ACCESSOR( pos, (float*)&( poss[0] ), (float*)&( poss[max_tracks] ), (float*)&( poss[2 * max_tracks] ) ) + VEC3_XY_SOA_ACCESSOR( dir, (float*)&( dirs[0] ), (float*)&( dirs[max_tracks] ), 1.0f ) }; struct Boundaries final { @@ -135,8 +134,8 @@ namespace LHCb::Pr { Gaudi::Property<float> p_utParam{this, "UTParam", 29.}; Gaudi::Property<float> p_zUTProj{this, "ZUTProj", 2500. 
* Gaudi::Units::mm}; Gaudi::Property<float> p_maxChi2Tol{this, "MaxChi2Tol", 2.0}; - Gaudi::Property<float> p_maxChi2Slope{this, "MaxChi2Slope", 25000}; - Gaudi::Property<float> p_maxChi2POffset{this, "MaxChi2POffset", 100}; + Gaudi::Property<float> p_maxChi2Slope{this, "MaxChi2Slope", 25000.0}; + Gaudi::Property<float> p_maxChi2POffset{this, "MaxChi2POffset", 100.0}; Gaudi::Property<float> p_yTolSlope{this, "YTolSlope", 20000.}; Gaudi::Property<float> p_xTol{this, "XTol", 1.0}; Gaudi::Property<float> p_xTolSlope{this, "XTolSlope", 30000.0}; diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index ed898072339..ea2fd13e450 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -232,9 +232,10 @@ namespace { using namespace SciFiHits; template <typename T> - std::tuple<LHCb::Pr::Velo::Tracks const*, LHCb::Pr::Upstream::Tracks const*> get_ancestors( T const& input_tracks ) { + std::tuple<LHCb::Pr::Velo::Tracks const*, LHCb::Pr::Upstream::Tracks const*, LHCb::Pr::Seeding::Tracks const*> get_ancestors( T const& input_tracks ) { LHCb::Pr::Velo::Tracks const* velo_ancestors = nullptr; LHCb::Pr::Upstream::Tracks const* upstream_ancestors = nullptr; + LHCb::Pr::Seeding::Tracks const* seed_ancestors = nullptr; if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { velo_ancestors = input_tracks.getVeloAncestors(); upstream_ancestors = &input_tracks; @@ -242,7 +243,7 @@ namespace { velo_ancestors = &input_tracks; upstream_ancestors = nullptr; } - return {velo_ancestors, upstream_ancestors}; + return {velo_ancestors, upstream_ancestors, seed_ancestors}; } template <typename Range, typename Projection, typename Comparison, typename Value> @@ -783,8 +784,8 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::operator()( PrSciFiHits const& prSc // info()<<"......DEBUGS Forward BEGIN" <<endmsg; if ( UNLIKELY( input_tracks.size() == 0 ) ) { - auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); - return {velo_ancestors, upstream_ancestors}; + auto [velo_ancestors, upstream_ancestors, seed_ancestors] = get_ancestors( input_tracks ); + return {velo_ancestors, upstream_ancestors, seed_ancestors}; } //============================================================ @@ -2205,8 +2206,8 @@ template <typename Container> LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& trackCandidates, std::vector<std::vector<LHCb::LHCbID>> ids, T const& input_tracks ) const { - auto [velo_ancestors, upstream_ancestors] = get_ancestors( input_tracks ); - LHCb::Pr::Long::Tracks result( velo_ancestors, upstream_ancestors ); + auto [velo_ancestors, upstream_ancestors, seed_ancestors] = get_ancestors( input_tracks ); + LHCb::Pr::Long::Tracks result( velo_ancestors, upstream_ancestors, seed_ancestors ); for ( auto&& [cand, id] : Gaudi::Functional::details::zip::range( trackCandidates, ids ) ) { int const currentsize = result.size(); @@ -2250,6 +2251,7 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& result.store_ut_index<I>( currentsize, 0, -1 ); } + result.store_trackSeed<I>( currentsize, -1 ); //== hits indices, max_fthits=15, not sure if this number is reasonable. 
assert( id.size() <= LHCb::Pr::Long::Tracks::max_fthits && "Container cannot store more than 15 SciFi hits per track" ); @@ -2295,6 +2297,11 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& // -- < Debug -------- } // next candidate + // padding results to avoid FPEs + result.store_stateQoP<simd::float_v>(result.size(), simd::float_v (1.f) ); + result.store_vStatePos<simd::float_v>(result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_vStateDir<simd::float_v>(result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + // add UT hits into the tracks if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { if ( m_addUTHitsTool.isEnabled() ) { diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 7ddd874d660..19249007d7b 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -369,6 +369,7 @@ namespace LHCb::Pr { Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); + int trackIndex = pTracks.size; pTracks.fillHelperParams<scalar>( pos, dir, c_zKink, c_sigmaVeloSlope ); pTracks.store_pos<scalar::float_v>( trackIndex, pos ); @@ -405,6 +406,9 @@ namespace LHCb::Pr { } pTracks.size++; } + //padding to avoid FPEs + pTracks.store_pos<simd::float_v>( pTracks.size, Vec3<simd::float_v>(1.f, 1.f, 1.f ) ); + pTracks.store_dir<simd::float_v>( pTracks.size, Vec3<simd::float_v>(1.f, 1.f, 1.f )); prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index a8b617cddbc..310d42750bc 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -543,7 +543,7 @@ DECLARE_COMPONENT( SciFiTrackForwarding ) TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrackForwardingHits const& hithandler, TracksUT const& tracks, GeomCache const& cache ) const { - TracksFT Output{tracks.getVeloAncestors(), &tracks, Zipping::generateZipIdentifier(), LHCb::getMemResource( evtCtx )}; + TracksFT Output{tracks.getVeloAncestors(), &tracks, nullptr, Zipping::generateZipIdentifier(), LHCb::getMemResource( evtCtx )}; mydebug( "LayerZPos", cache.LayerZPos ); @@ -663,6 +663,7 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac Output.compressstore_trackVP<sI>( i, mask, tracks.trackVP<sI>( uttrack + tr ) ); Output.compressstore_trackUT<sI>( i, mask, uttrack + tr ); + Output.compressstore_trackSeed<sI>( i, mask, -1); float const qop = 1.f / bestcandidate.PQ; Output.compressstore_stateQoP<sF>( i, mask, qop ); -- GitLab From 80117fc076b07d3e8d1eaf0aa1ccc876e8abaa43 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 16 Jun 2020 12:37:05 +0000 Subject: [PATCH 069/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8827191 --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 16 +++++++++------- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 8 ++++---- Pr/PrAlgorithms/src/PrForwardTracking.cpp | 11 ++++++----- Pr/PrVeloUT/src/PrVeloUT.cpp | 7 +++---- .../src/SciFiTrackForwarding.cpp | 5 +++-- 5 files changed, 25 insertions(+), 22 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index a9384cdefd0..46833980188 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ 
b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -275,13 +275,15 @@ namespace LHCb::Pr { // sort of hits in increasing projection std::vector<int> hitIdx; - hitIdx.reserve( int(hitsInLayers.size) ); - for( int i = 0; i < int(hitsInLayers.size); i++ ) hitIdx.emplace_back( i ); - std::sort(hitIdx.begin(), hitIdx.end(), [&hitsInLayers](const int i, const int j) { return hitsInLayers.projections[i] < hitsInLayers.projections[j]; }); + hitIdx.reserve( int( hitsInLayers.size ) ); + for ( int i = 0; i < int( hitsInLayers.size ); i++ ) hitIdx.emplace_back( i ); + std::sort( hitIdx.begin(), hitIdx.end(), [&hitsInLayers]( const int i, const int j ) { + return hitsInLayers.projections[i] < hitsInLayers.projections[j]; + } ); // -- Loop over all hits and make "groups" of hits to form a candidate for ( auto itB = 0; itB + 2 < int( hitsInLayers.size ); ++itB ) { - const int itBeg = hitIdx[itB]; + const int itBeg = hitIdx[itB]; const float firstProj = hitsInLayers.projections[itBeg]; LHCb::Pr::UT::Mut::Hits goodUT; @@ -444,8 +446,8 @@ namespace LHCb::Pr { // -- Fit a straight line to the points and calculate the chi2 of the hits with respect to the fitted track - float dist = 0; - chi2 = 1.e20; + float dist = 0; + chi2 = 1.e20; const float xTol = p_xTol.value() + p_xTolSlope.value() / p; const float fixedWeight = 9. / ( xTol * xTol ); @@ -535,7 +537,7 @@ namespace LHCb::Pr { chi2 += w * dist * dist; } - if( nDoF != 0 ) chi2 /= nDoF; + if ( nDoF != 0 ) chi2 /= nDoF; if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) && worstDiff > 0. ) { info() << format( " chi2 %10.2f nDoF%2d wors %8.2f proj %6.2f offset %8.3f slope %10.6f offsetY %10.6f", chi2, diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index 3f4bec9824c..42c8d9dc244 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -67,15 +67,15 @@ namespace LHCb::Pr { struct MiniStates final { std::array<float, 3 * max_tracks> poss; std::array<float, 2 * max_tracks> dirs; - std::array<float, max_tracks> qops; - std::array<float, max_tracks> ps; - std::array<int, max_tracks> indexs; + std::array<float, max_tracks> qops; + std::array<float, max_tracks> ps; + std::array<int, max_tracks> indexs; std::size_t size{0}; SOA_ACCESSOR( x, &( poss[0] ) ) SOA_ACCESSOR( y, &( poss[max_tracks] ) ) - SOA_ACCESSOR( z, &( poss[2*max_tracks] ) ) + SOA_ACCESSOR( z, &( poss[2 * max_tracks] ) ) SOA_ACCESSOR( tx, &( dirs[0] ) ) SOA_ACCESSOR( ty, &( dirs[max_tracks] ) ) SOA_ACCESSOR( qop, qops.data() ) diff --git a/Pr/PrAlgorithms/src/PrForwardTracking.cpp b/Pr/PrAlgorithms/src/PrForwardTracking.cpp index ea2fd13e450..cb109d3ebd8 100644 --- a/Pr/PrAlgorithms/src/PrForwardTracking.cpp +++ b/Pr/PrAlgorithms/src/PrForwardTracking.cpp @@ -232,10 +232,11 @@ namespace { using namespace SciFiHits; template <typename T> - std::tuple<LHCb::Pr::Velo::Tracks const*, LHCb::Pr::Upstream::Tracks const*, LHCb::Pr::Seeding::Tracks const*> get_ancestors( T const& input_tracks ) { + std::tuple<LHCb::Pr::Velo::Tracks const*, LHCb::Pr::Upstream::Tracks const*, LHCb::Pr::Seeding::Tracks const*> + get_ancestors( T const& input_tracks ) { LHCb::Pr::Velo::Tracks const* velo_ancestors = nullptr; LHCb::Pr::Upstream::Tracks const* upstream_ancestors = nullptr; - LHCb::Pr::Seeding::Tracks const* seed_ancestors = nullptr; + LHCb::Pr::Seeding::Tracks const* seed_ancestors = nullptr; if constexpr ( std::is_same_v<T, LHCb::Pr::Upstream::Tracks> ) { velo_ancestors = input_tracks.getVeloAncestors(); upstream_ancestors = &input_tracks; @@ 
-2298,9 +2299,9 @@ LHCb::Pr::Long::Tracks PrForwardTracking<T>::makeLHCbTracks( Container const& } // next candidate // padding results to avoid FPEs - result.store_stateQoP<simd::float_v>(result.size(), simd::float_v (1.f) ); - result.store_vStatePos<simd::float_v>(result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); - result.store_vStateDir<simd::float_v>(result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_stateQoP<simd::float_v>( result.size(), simd::float_v( 1.f ) ); + result.store_vStatePos<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_vStateDir<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); // add UT hits into the tracks if constexpr ( std::is_same_v<T, LHCb::Pr::Velo::Tracks> ) { diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 19249007d7b..ae45cb54f73 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -369,7 +369,6 @@ namespace LHCb::Pr { Vec3<scalar::float_v> pos = filteredStates.pos<scalar::float_v>( tEff ); Vec3<scalar::float_v> dir = filteredStates.dir<scalar::float_v>( tEff ); - int trackIndex = pTracks.size; pTracks.fillHelperParams<scalar>( pos, dir, c_zKink, c_sigmaVeloSlope ); pTracks.store_pos<scalar::float_v>( trackIndex, pos ); @@ -406,9 +405,9 @@ namespace LHCb::Pr { } pTracks.size++; } - //padding to avoid FPEs - pTracks.store_pos<simd::float_v>( pTracks.size, Vec3<simd::float_v>(1.f, 1.f, 1.f ) ); - pTracks.store_dir<simd::float_v>( pTracks.size, Vec3<simd::float_v>(1.f, 1.f, 1.f )); + // padding to avoid FPEs + pTracks.store_pos<simd::float_v>( pTracks.size, Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + pTracks.store_dir<simd::float_v>( pTracks.size, Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } diff --git a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 310d42750bc..093d9f2bb97 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -543,7 +543,8 @@ DECLARE_COMPONENT( SciFiTrackForwarding ) TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrackForwardingHits const& hithandler, TracksUT const& tracks, GeomCache const& cache ) const { - TracksFT Output{tracks.getVeloAncestors(), &tracks, nullptr, Zipping::generateZipIdentifier(), LHCb::getMemResource( evtCtx )}; + TracksFT Output{tracks.getVeloAncestors(), &tracks, nullptr, Zipping::generateZipIdentifier(), + LHCb::getMemResource( evtCtx )}; mydebug( "LayerZPos", cache.LayerZPos ); @@ -663,7 +664,7 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac Output.compressstore_trackVP<sI>( i, mask, tracks.trackVP<sI>( uttrack + tr ) ); Output.compressstore_trackUT<sI>( i, mask, uttrack + tr ); - Output.compressstore_trackSeed<sI>( i, mask, -1); + Output.compressstore_trackSeed<sI>( i, mask, -1 ); float const qop = 1.f / bestcandidate.PQ; Output.compressstore_stateQoP<sF>( i, mask, qop ); -- GitLab From b5bd9f93d92d4ebc6a1b1e832ebf7bc567fedc18 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Tue, 16 Jun 2020 16:00:29 +0200 Subject: [PATCH 070/111] port checks to other branch --- Pr/PrVeloUT/src/PrVeloUT.cpp | 12 ++++++++++++ Pr/PrVeloUT/src/PrVeloUT.h | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 
ae45cb54f73..7d9b16a6e0e 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -641,6 +641,13 @@ namespace LHCb::Pr { const std::array<Boundaries, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )>& compBoundsArray, LHCb::Pr::UT::Mut::Hits& hitsInLayers, const std::size_t t ) const { + // -- This is for some sanity checks later + constexpr const int maxSectorsPerRegion = static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion ); + constexpr const int maxLayer = static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); + constexpr const int maxRegion = static_cast<int>( UTInfo::DetectorNumbers::Regions ); + constexpr const int maxSectorNumber = + maxSectorsPerRegion + ( ( maxLayer - 1 ) * maxRegion + ( maxRegion - 1 ) ) * maxSectorsPerRegion; + const simd::float_v yTolSlope{m_yTolSlope.value()}; const float xState = filteredStates.x<scalar::float_v>( t ).cast(); @@ -666,6 +673,8 @@ namespace LHCb::Pr { const int nPos = compBoundsArray[layerIndex].nPos<scalar::int_v>( t ).cast(); const simd::float_v yTol = m_yTol.value() + m_yTolSlope.value() * xTolS; + assert( nPos < maxNumSectors && "nPos out of bound" ); + const simd::float_v tolProto{m_yTol.value()}; const simd::float_v xTol{xTolS}; @@ -674,6 +683,9 @@ namespace LHCb::Pr { for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(); } for ( int j = 0; j < nPos; j++ ) { + + assert( ( sectors[j] > -1 ) && ( sectors[j] < maxSectorNumber ) && "sector number out of bound" ); + // -- let's try to make it branchless const std::pair<int, int>& temp = hh.indices( sectors[j] ); const std::pair<int, int>& temp2 = hh.indices( sectors[j + 1] ); diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 21d8c1633d0..8ba40976b27 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -133,9 +133,9 @@ namespace LHCb::Pr { constexpr static int max_tracks = align_size( 1024 ); - std::array<int, 9 * max_tracks> sects; + std::array<int, 9 * max_tracks> sects{}; std::array<float, max_tracks> xTols; - std::array<int, max_tracks> nPoss; + std::array<int, max_tracks> nPoss{}; std::size_t size{0}; SOA_ACCESSOR_VAR( sect, &( sects[pos * max_tracks] ), int pos ) -- GitLab From 05a1d72aaefe8494da82b84920ebdb89ae4f1722 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 17 Jun 2020 09:40:43 +0200 Subject: [PATCH 071/111] fix FPEs in VeloTracking Algorithm --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 16 ++++++++++++++++ Pr/PrPixel/src/VeloKalmanHelpers.h | 3 +-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 3d0a6153572..3c3b117f1c9 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -310,6 +310,9 @@ namespace LHCb::Pr::Velo { } // Loop over sensors // Pre-compute phi + //padding + Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 100.f, 100.f, 100.f) ); + for ( int i = 0; i < n_hits; i += simd::size ) { auto pos = Pout.pos<F>( i ); Pout.store_phi( i, pos.phi() ); @@ -368,6 +371,8 @@ namespace LHCb::Pr::Velo { Pout.size() = n_hits; hits.size() += n_hits; + //padding + Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 1.e9f, 1.e9f, 1.e9f) ); } // =========================================================================== @@ -511,6 +516,10 @@ namespace LHCb::Pr::Velo { tracks->size() += simd::popcount( bestH0 ); } // h1 + //padding to 
avoid FPEs + tracks->store_p0<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f )); + tracks->store_p1<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f )); + tracks->store_p2<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f )); P1->size() = new_P1size; } @@ -824,6 +833,10 @@ namespace LHCb::Pr::Velo { using simd = SIMDWrapper::avx256::types; using I = simd::int_v; using F = simd::float_v; + //padding to avoid FPEs + tracks.store_p0<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e9f, 1.e9f, 1.e9f ) ); + tracks.store_p1<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f ) ); + tracks.store_p2<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f ) ); for ( int t = 0; t < tracks.size(); t += simd::size ) { auto loop_mask = simd::loop_mask( t, tracks.size() ); @@ -886,6 +899,9 @@ namespace LHCb::Pr::Velo { tracksForward.size() += simd::popcount( forwards ); } + //padding to avoid FPEs + tracksForward.store_stateDir(tracksForward.size(), 0, Vec3<F>(100.f, 100.f, 100.f) ); + tracksBackward.store_stateDir(tracksBackward.size(), 0, Vec3<F>(100.f, 100.f, 100.f) ); // Fit forwards for ( int t = 0; t < tracksForward.size(); t += simd::size ) { diff --git a/Pr/PrPixel/src/VeloKalmanHelpers.h b/Pr/PrPixel/src/VeloKalmanHelpers.h index 7d8d63475e2..0dfac7f94d6 100644 --- a/Pr/PrPixel/src/VeloKalmanHelpers.h +++ b/Pr/PrPixel/src/VeloKalmanHelpers.h @@ -201,7 +201,6 @@ inline F filterWithMomentum( const M mask, const F z, F& x, F& tx, F& covXX, F& const F predcovXX = covXX + 2.f * dz_t_covXTx + dz * dz_t_covTxTx + eXX; const F predcovXTx = covXTx + dz_t_covTxTx + eXTx; - // compute the gain matrix const F R = 1.0f / ( winv + predcovXX ); const F Kx = predcovXX * R; @@ -251,7 +250,7 @@ fitBackwardWithMomentum( const M track_mask, const LHCb::Pr::Velo::Tracks& track I nHits = tracks.maskgather_nHits<I, I>( idxVP, track_mask, 0 ); int maxHits = nHits.hmax( track_mask ); I idxHit0 = tracks.maskgather_vp_index<I, I>( idxVP, track_mask, 0, 0 ); - Vec3<F> dir = tracks.maskgather_stateDir<F, I>( idxVP, track_mask, 0.f, state_id ); + Vec3<F> dir = tracks.maskgather_stateDir<F, I>( idxVP, track_mask, 100.f, state_id ); Vec3<F> pos = hits.maskgather_pos<F, I>( idxHit0, track_mask, 0.f ); FittedState<F> s = FittedState<F>( pos, dir, 100.f, 0.f, 0.0001f, 100.f, 0.f, 0.0001f ); -- GitLab From 2a00dcfedeb7fda6e51601279f3f8fc3c2823870 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Wed, 17 Jun 2020 07:41:26 +0000 Subject: [PATCH 072/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/8838250 --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 3c3b117f1c9..0c8894a47e4 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -310,8 +310,8 @@ namespace LHCb::Pr::Velo { } // Loop over sensors // Pre-compute phi - //padding - Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 100.f, 100.f, 100.f) ); + // padding + Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 100.f, 100.f, 100.f ) ); for ( int i = 0; i < n_hits; i += simd::size ) { auto pos = Pout.pos<F>( i ); @@ -371,8 +371,8 @@ namespace LHCb::Pr::Velo { Pout.size() = n_hits; hits.size() += n_hits; - //padding - Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 
1.e9f, 1.e9f, 1.e9f) ); + // padding + Pout.store_pos<simd::float_v>( n_hits, Vec3<simd::float_v>( 1.e9f, 1.e9f, 1.e9f ) ); } // =========================================================================== @@ -516,10 +516,10 @@ namespace LHCb::Pr::Velo { tracks->size() += simd::popcount( bestH0 ); } // h1 - //padding to avoid FPEs - tracks->store_p0<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f )); - tracks->store_p1<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f )); - tracks->store_p2<simd::float_v>(tracks->size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f )); + // padding to avoid FPEs + tracks->store_p0<simd::float_v>( tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f ) ); + tracks->store_p1<simd::float_v>( tracks->size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f ) ); + tracks->store_p2<simd::float_v>( tracks->size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f ) ); P1->size() = new_P1size; } @@ -833,10 +833,10 @@ namespace LHCb::Pr::Velo { using simd = SIMDWrapper::avx256::types; using I = simd::int_v; using F = simd::float_v; - //padding to avoid FPEs - tracks.store_p0<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e9f, 1.e9f, 1.e9f ) ); - tracks.store_p1<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f ) ); - tracks.store_p2<simd::float_v>(tracks.size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f ) ); + // padding to avoid FPEs + tracks.store_p0<simd::float_v>( tracks.size(), Vec3<simd::float_v>( 1.e9f, 1.e9f, 1.e9f ) ); + tracks.store_p1<simd::float_v>( tracks.size(), Vec3<simd::float_v>( 1.e8f, 1.e8f, 1.e8f ) ); + tracks.store_p2<simd::float_v>( tracks.size(), Vec3<simd::float_v>( 1.e7f, 1.e7f, 1.e7f ) ); for ( int t = 0; t < tracks.size(); t += simd::size ) { auto loop_mask = simd::loop_mask( t, tracks.size() ); @@ -899,9 +899,9 @@ namespace LHCb::Pr::Velo { tracksForward.size() += simd::popcount( forwards ); } - //padding to avoid FPEs - tracksForward.store_stateDir(tracksForward.size(), 0, Vec3<F>(100.f, 100.f, 100.f) ); - tracksBackward.store_stateDir(tracksBackward.size(), 0, Vec3<F>(100.f, 100.f, 100.f) ); + // padding to avoid FPEs + tracksForward.store_stateDir( tracksForward.size(), 0, Vec3<F>( 100.f, 100.f, 100.f ) ); + tracksBackward.store_stateDir( tracksBackward.size(), 0, Vec3<F>( 100.f, 100.f, 100.f ) ); // Fit forwards for ( int t = 0; t < tracksForward.size(); t += simd::size ) { -- GitLab From 63cbbe2897536ede708af63c626eee4aedbb7824 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 17 Jun 2020 13:52:00 +0200 Subject: [PATCH 073/111] disable AddUTTools for test --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index bc3302bce97..3a30264c6c5 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -168,11 +168,12 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } auto& match = matches.emplace_back( makeTrack( *vTr, *sTr ) ); - +/* if ( m_addUTHitsTool.isEnabled() ) { StatusCode sc = m_addUTHitsTool->addUTHits( match ); if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); } +*/ } // end loop match cands } // end loop velo tracks -- GitLab From 396a84241655c5f40b150c59d32e725486e77c97 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Wed, 17 Jun 2020 11:53:14 +0000 Subject: [PATCH 074/111] Fixed formatting patch generated by 
https://gitlab.cern.ch/lhcb/Rec/-/jobs/8843750 --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 3a30264c6c5..5f5f02e744a 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -168,12 +168,12 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } auto& match = matches.emplace_back( makeTrack( *vTr, *sTr ) ); -/* - if ( m_addUTHitsTool.isEnabled() ) { - StatusCode sc = m_addUTHitsTool->addUTHits( match ); - if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); - } -*/ + /* + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( match ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + */ } // end loop match cands } // end loop velo tracks -- GitLab From b04cf7ada0d31fc3cddbcdcdcb0f3377a8214afd Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 17 Jun 2020 15:08:29 +0200 Subject: [PATCH 075/111] fix conflicts --- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ---- Tr/TrackUtils/src/TracksUTConverter.cpp | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 5d52419b539..8c1ee769a3d 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -673,11 +673,7 @@ namespace LHCb::Pr { const int nPos = compBoundsArray[layerIndex].nPos<scalar::int_v>( t ).cast(); const simd::float_v yTol = m_yTol.value() + m_yTolSlope.value() * xTolS; -<<<<<<< HEAD assert( nPos < maxNumSectors && "nPos out of bound" ); -======= - assert( nPos < maxSectors && "nPos out of bound" ); ->>>>>>> master const simd::float_v tolProto{m_yTol.value()}; const simd::float_v xTol{xTolS}; diff --git a/Tr/TrackUtils/src/TracksUTConverter.cpp b/Tr/TrackUtils/src/TracksUTConverter.cpp index d94d1f34204..caa184ab118 100644 --- a/Tr/TrackUtils/src/TracksUTConverter.cpp +++ b/Tr/TrackUtils/src/TracksUTConverter.cpp @@ -65,7 +65,7 @@ public: for ( auto& state : newTrack.states() ) state.setQOverP( tracksUT.stateQoP<F>( t ).cast() ); // Add LHCbIds - I n_hits = tracksUT.template nHits<I>( t ); + int n_hits = tracksUT.template nHits<I>( t ).cast(); for ( int i = 0; i < n_hits; i++ ) { int lhcbid = tracksUT.lhcbID<I>( t, i ).cast(); newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); -- GitLab From 5b530f330beb01608155224cf4fbefa2ea71a5b5 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 18 Jun 2020 16:20:19 +0200 Subject: [PATCH 076/111] require no more than 8 UT hits --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index 46833980188..e4005e0338c 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -95,7 +95,7 @@ namespace LHCb::Pr { if ( ( myUTHits.size < 3 ) ) continue; assert( myUTHits.size <= LHCb::Pr::Upstream::Tracks::max_uthits && - "Container cannot store more than 16 UT hits per track" ); + "Container cannot store more than 8 UT hits per track" ); int itr = filteredStates.index<sI>( t ).cast(); const int nVPHits = tracks.nVPHits<sI>( itr ).cast(); @@ -544,8 +544,8 @@ namespace LHCb::Pr { nDoF, worstDiff, goodUT.projections[worst], offset, slope, offsetY ) << endmsg; } - // -- Remove last point (outlier) if bad fit... 
- if ( worstDiff > 0. && bestChi2 < chi2 && nHits > 3 ) { + // -- Remove last point (outlier) if bad fit...or if nHits>8. + if ( worstDiff > 0. && ( ( bestChi2 < chi2 && nHits > 3 ) || ( bestChi2 > chi2 && nHits > 8 ) ) ) { const double w = goodUT.weights[worst]; const double dz = goodUT.zs[worst] - p_zUTProj; -- GitLab From b1304b4a7ad25c4296b328a16452bef5c82a5825 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Fri, 19 Jun 2020 10:52:20 +0200 Subject: [PATCH 077/111] fix build error --- Phys/SelAlgorithms/src/InstantiateFunctors.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Phys/SelAlgorithms/src/InstantiateFunctors.cpp b/Phys/SelAlgorithms/src/InstantiateFunctors.cpp index ca2e7319914..70cd827bcdc 100644 --- a/Phys/SelAlgorithms/src/InstantiateFunctors.cpp +++ b/Phys/SelAlgorithms/src/InstantiateFunctors.cpp @@ -120,9 +120,9 @@ DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<Pr::Selection<LHCb::Event::v2::Tr DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Velo::Tracks>, "InstantiateFunctors__PrVeloTracks" ) DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Iterable::Scalar::Velo::Tracks>, "InstantiateFunctors__PrVeloTracks__Unwrapped" ) -DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Long::Tracks>, "InstantiateFunctors__PrForwardTracks" ) +DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Long::Tracks>, "InstantiateFunctors__PrLongTracks" ) DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Iterable::Scalar::Forward::Tracks>, - "InstantiateFunctors__PrForwardTracks__Unwrapped" ) + "InstantiateFunctors__PrLongTracks__Unwrapped" ) DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Fitted::Forward::Tracks>, "InstantiateFunctors__PrFittedForwardTracks" ) DECLARE_COMPONENT_WITH_ID( InstantiateFunctors<LHCb::Pr::Iterable::Scalar::Fitted::Forward::Tracks>, -- GitLab From 49792f85ea2a42a4fb5e569f7dd18296af9b7a8e Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Fri, 19 Jun 2020 14:53:35 +0200 Subject: [PATCH 078/111] fix duplicate hits, add functionality that searches for hits in layers which were originally not found --- Pr/PrVeloUT/src/PrVeloUT.cpp | 80 ++++++++++++++++++++++++++++-------- 1 file changed, 62 insertions(+), 18 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 8c1ee769a3d..814dd90a620 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/***************************************************************************** \ * (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * * * * This software is distributed under the terms of the GNU General Public * @@ -178,6 +178,34 @@ namespace LHCb::Pr { return ( 38000.0f / minMom + 0.25f ) * ( 1.0f + ty * ty * 0.8f ); } */ + // -------------------------------------------------------------------- + // -- Helper function to calculate the planeCode: 0 - 1 - 2 - 3 + // -------------------------------------------------------------------- + int planeCode( unsigned int id ) { + const int station = ( (unsigned int)id & static_cast<unsigned int>( UTInfo::MasksBits::StationMask ) ) >> + static_cast<int>( UTInfo::MasksBits::StationBits ); + const int layer = ( (unsigned int)id & static_cast<unsigned int>( UTInfo::MasksBits::LayerMask ) ) >> + static_cast<int>( UTInfo::MasksBits::LayerBits ); + return 2 * ( station - 1 ) + ( layer - 1 ); + } + + // 
-------------------------------------------------------------------- + // -- Helper function to find duplicates in hits in the output + // -------------------------------------------------------------------- + [[maybe_unused]] bool findDuplicates( const Upstream::Tracks& outputTracks ) { + for ( int t = 0; t < outputTracks.size(); ++t ) { + const int nHits = outputTracks.nUTHits<scalar::int_v>( t ).cast(); + std::vector<int> IDs; + for ( int h = 0; h < nHits; h++ ) { + const int id = outputTracks.lhcbID<scalar::int_v>( t, h ).cast(); + IDs.push_back( id ); + } + std::sort( IDs.begin(), IDs.end() ); + if ( std::adjacent_find( IDs.begin(), IDs.end() ) != IDs.end() ) return false; + } + return true; + } + // -------------------------------------------------------------------- // -- bubble sort is slow, but we never have more than 9 elements (horizontally) // -- and can act on 8 elements at once vertically (with AVX) @@ -387,7 +415,9 @@ namespace LHCb::Pr { // -- this runs over all 4 layers, even if no hit was found // -- but it fills a weight of 0 - for ( int i = 0; i < 4; ++i ) { + // -- Note: These are not "physical" layers, as the hits are ordered such that only + // -- the last one can be not filled. + for ( int i = 0; i < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++i ) { int hitI = pTracks.hitIndex<scalar::int_v>( trackIndex, i ).cast(); scalar::float_v weight = ( hitI == -1 ) ? 0.0f : hitsInLayers[tEff].weights[hitI]; pTracks.store_weight<scalar::float_v>( trackIndex, i, weight ); @@ -411,6 +441,9 @@ namespace LHCb::Pr { prepareOutputTrackSIMD( pTracks, hitsInLayers, outputTracks, inputTracks, bdlTable ); } + // -- The algorithm should not store duplicated hits... + assert( findDuplicates( outputTracks ) && "Hit duplicates found" ); + m_tracksCounter += outputTracks.size(); if ( m_doTiming ) m_timerTool->stop( m_veloUTTime ); return outputTracks; @@ -645,7 +678,7 @@ namespace LHCb::Pr { constexpr const int maxSectorsPerRegion = static_cast<int>( UTInfo::SectorNumbers::MaxSectorsPerRegion ); constexpr const int maxLayer = static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); constexpr const int maxRegion = static_cast<int>( UTInfo::DetectorNumbers::Regions ); - constexpr const int maxSectorNumber = + [[maybe_unused]] constexpr const int maxSectorNumber = maxSectorsPerRegion + ( ( maxLayer - 1 ) * maxRegion + ( maxRegion - 1 ) ) * maxSectorsPerRegion; const simd::float_v yTolSlope{m_yTolSlope.value()}; @@ -687,9 +720,6 @@ namespace LHCb::Pr { assert( ( sectors[j] > -1 ) && ( sectors[j] < maxSectorNumber ) && "sector number out of bound" ); // -- let's try to make it branchless - - assert( ( sectors[j] > -1 ) && ( sectors[j] < 1176 ) && "sector number out of bound" ); - const std::pair<int, int>& temp = hh.indices( sectors[j] ); const std::pair<int, int>& temp2 = hh.indices( sectors[j + 1] ); const int firstIndex = temp.first; @@ -986,12 +1016,15 @@ namespace LHCb::Pr { float txArray[simd::size]; txUT.store( txArray ); + float xArray[simd::size]; + xUT.store( xArray ); - // TxStorage txArray; - // txArray.store_txUT<simd::float_v>( 0, txUT ); + // -- This is needed to find the planeCode of the layer with the missing hit + float sumLayArray[simd::size] = {}; // -- from here on, go over each track individually to find and add the overlap hits // -- this is not particularly elegant... + // -- As before, these are "pseudo layers", i.e. 
it is not guaranteed that if i > j, z[i] > z[j] for ( int iLayer = 0; iLayer < static_cast<int>( UTInfo::DetectorNumbers::TotalLayers ); ++iLayer ) { int trackIndex2 = 0; @@ -1004,20 +1037,32 @@ namespace LHCb::Pr { const int hitIdx = protoTracks.hitIndex<scalar::int_v>( tscalar, iLayer ).cast(); const int id = protoTracks.id<scalar::int_v>( tscalar, iLayer ).cast(); + // -- Only add the hit, if it is not in an empty layer (that sounds like a tautology, + // -- but given that one always has 4 hits, even if only 3 make sense, it is needed) + // -- Only the last pseudo-layer can be an empty layer if ( goodHit ) outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, id, hitIdx ); - // -- + // ----------------------------------------------------------------------------------- + // -- The idea of the following code is: In layers where we have found a hit, we search for + // -- overlap hits. + // -- In layers where no hit was found initially, we use the better parametrization of the final + // -- track fit to pick up hits that were lost in the initial search + // ----------------------------------------------------------------------------------- + const float zhit = goodHit ? protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast() : m_zMidUT; + const float xhit = goodHit ? protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast() : xArray[t2]; + const int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); + + // -- The total sum of all plane codes is: 0 + 1 + 2 + 3 = 6 + // -- We can therefore get the plane code of the last pseudo-layer + // -- as: 6 - sumOfAllOtherPlaneCodes + const int pC = goodHit ? planeCode( id ) : 6 - sumLayArray[t2]; + sumLayArray[t2] += pC; - const float zhit = protoTracks.z<scalar::float_v>( tscalar, iLayer ).cast(); - const float xhit = protoTracks.x<scalar::float_v>( tscalar, iLayer ).cast(); - // const float txUTS = txArray.txUT<scalar::float_v>( t2 ).cast(); const float txUTS = txArray[t2]; - int hitContIndex = protoTracks.hitContIndex<scalar::int_v>( tscalar ).cast(); - - const int begin = hitsInLayers[hitContIndex].layerIndices[iLayer]; + const int begin = hitsInLayers[hitContIndex].layerIndices[pC]; const int end = - ( iLayer == 3 ) ? hitsInLayers[hitContIndex].size : hitsInLayers[hitContIndex].layerIndices[iLayer + 1]; + ( pC == 3 ) ? 
hitsInLayers[hitContIndex].size : hitsInLayers[hitContIndex].layerIndices[pC + 1]; for ( int index2 = begin; index2 < end; ++index2 ) { const float zohit = hitsInLayers[hitContIndex].zs[index2]; @@ -1035,12 +1080,11 @@ namespace LHCb::Pr { outputTracks.addUTIndexAndLHCbID( trackIndex + trackIndex2, oid.lhcbID(), hitsInLayers[hitContIndex].indexs[index2] ); // only one overlap hit - // break; + break; // this should ensure there are never more than 8 hits on the track } trackIndex2++; } } - // outputTracks.size() += simd::popcount( validTrackMask ); this is done when filling the Velo information } } } // namespace LHCb::Pr -- GitLab From 53091cb3e07b425be5c2ba023fe43727764e5697 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 22 Jun 2020 17:57:47 +0200 Subject: [PATCH 079/111] fix brunel test error --- Pr/PrAlgorithms/src/PrAddUTHitsTool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h index 42c8d9dc244..0c8c6a60853 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.h +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.h @@ -127,7 +127,7 @@ namespace LHCb::Pr { /// information about the different layers LHCb::UTDAQ::GeomCache m_geomcache; - DataObjectReadHandle<LHCb::Pr::UT::HitHandler> m_HitHandler{this, "UTHitsLocation", "PrUTHitHandler"}; + DataObjectReadHandle<LHCb::Pr::UT::HitHandler> m_HitHandler{this, "UTHitsLocation", "UT/PrUTHits"}; Gaudi::Property<float> p_zUTField{this, "ZUTField", 1740. * Gaudi::Units::mm}; Gaudi::Property<float> p_zMSPoint{this, "ZMSPoint", 400. * Gaudi::Units::mm}; -- GitLab From 3b94bfa8fa31ee70dcd48439a10cbd27e0f8605d Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 23 Jun 2020 21:06:56 +0200 Subject: [PATCH 080/111] PrMatchNN with PrLongTracks output, converter to convert to v2 track --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 223 +++++++++++---------- Pr/PrAlgorithms/src/PrMatchNN.h | 42 ++-- Tr/TrackUtils/src/TracksMatchConverter.cpp | 94 +++++++++ 3 files changed, 232 insertions(+), 127 deletions(-) create mode 100644 Tr/TrackUtils/src/TracksMatchConverter.cpp diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 5722d4c8198..32410b27582 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,9 +29,8 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"VeloHitsLocation", "Raw/VP/Hits"}, - KeyValue{"SeedInput", "Rec/Track/Seed"}}, - KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, + KeyValue{"MatchOutput", "Rec/Track/Match"} ) {} //============================================================================= // Initialization @@ -49,33 +48,26 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, - const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds ) const { - std::vector<Track> matches; - matches.reserve( velos.size() * 1.5 ); - +LHCb::Pr::Long::Tracks 
PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds ) const { std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; - if ( velos.size() == 0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; - return matches; - } + LHCb::Pr::Long::Tracks noneresult( nullptr, nullptr, nullptr ); - if ( veloHits.size() == 0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Hit container '" << inputLocation<1>() << "' is empty" << endmsg; - return matches; - } + if ( velos.size() == 0 || seeds.size() == 0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) { + debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << seeds.size() + << endmsg; + debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << velos.size() + << endmsg; + } - if ( seeds.size() == 0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<2>() << "' is empty" << endmsg; - return matches; + LHCb::Pr::Long::Tracks noneresult( nullptr, nullptr, nullptr ); + return noneresult; } - seedMLPPairs seedMLP; + seedMLPPairs seedMLP; + MatchCandidates matches; seedMLP.reserve( seeds.size() ); @@ -111,26 +103,27 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track } } - std::sort( seedMLP.begin(), seedMLP.end(), [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { - return sP1.second > sP2.second; - } ); + std::sort( seedMLP.begin(), seedMLP.end(), + [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { return sP1.second > sP2.second; } ); for ( unsigned int sm = 0; sm != seedMLP.size(); sm++ ) { if ( seedMLP[0].second - seedMLP[sm].second > m_maxdDist ) break; - - auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, seedMLP[sm].first ) ); - - if ( m_addUTHitsTool.isEnabled() ) { - StatusCode sc = m_addUTHitsTool->addUTHits( match ); - if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); - } + matches.emplace_back( v, seedMLP[sm].first ); } + seedMLP.clear(); } - m_tracksCount += matches.size(); - return matches; + auto outputTracks = makeTracks( velos, seeds, matches ); + + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( outputTracks ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + + m_tracksCount += outputTracks.size(); + return outputTracks; } //============================================================================= @@ -188,77 +181,97 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di } //============================================================================= -PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { - auto output = Track{}; - - // output.addToAncestors( velo ); - // output.addToAncestors( seed ); - - //== Adjust flags - output.setType( Track::Type::Long ); - output.setHistory( Track::History::PrMatch ); - output.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); - - //== copy LHCbIDs - int nSeedHits = seeds.nHits<I>( s ).cast(); - std::vector<LHCb::LHCbID> seedlhcbIDs; - seedlhcbIDs.reserve( nSeedHits ); - - for ( int i = 0; i < nSeedHits; ++i ) { seedlhcbIDs.emplace_back( seeds.hit<I>( s, i ).cast() ); } - output.addToLhcbIDs( 
seedlhcbIDs, LHCb::Tag::Sorted ); - - output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Unordered ); - output.addToLhcbIDs( velos.lhcbIDs( v, veloHits ), LHCb::Tag::Unordered ); - - //== copy Velo and T states at the usual pattern reco positions - std::vector<LHCb::State> newstates; - newstates.reserve( 5 ); - auto state_beam = getVeloState( velos, v, 0 ); - state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); - newstates.push_back( state_beam ); - - auto state_endvelo = getVeloState( velos, v, 1 ); - state_endvelo.setLocation( LHCb::State::Location::EndVelo ); - newstates.push_back( state_endvelo ); - - auto state_begT = getSeedState( seeds, s, 0 ); - state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); - newstates.push_back( state_begT ); - - auto state_midT = getSeedState( seeds, s, 1 ); - state_midT.setLocation( LHCb::State::Location::EndVelo ); - newstates.push_back( state_midT ); - - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) - newstates.pop_back(); // make sure we don't include same state twice - - auto state_endT = getSeedState( seeds, s, 2 ); - state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_endT ); - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) - newstates.pop_back(); // make sure we don't include same state twice - - //== estimate q/p - double qOverP, sigmaQOverP; - // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); - - StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); - if ( sc.isFailure() ) { - Warning( "momentum determination failed!", sc ).ignore(); - // assume the Velo/T station standalone reco do something reasonable - } else { - // adjust q/p and its uncertainty - sigmaQOverP = sigmaQOverP * sigmaQOverP; - for ( auto& st : newstates ) { - st.covariance()( 4, 4 ) = sigmaQOverP; - st.setQOverP( qOverP ); +LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds, MatchCandidates matches ) const { + + LHCb::Pr::Long::Tracks result( &velos, nullptr, &seeds ); + + for ( const auto match : matches ) { + int const currentsize = result.size(); + + result.store_trackVP<I>( currentsize, match.vTr() ); + result.store_trackUT<I>( currentsize, -1 ); + result.store_trackSeed<I>( currentsize, match.sTr() ); + + //== copy LHCbIDs + const int nSeedHits = seeds.nHits<I>( match.sTr() ).cast(); + const int nVeloHits = velos.nHits<I>( match.vTr() ).cast(); + + result.store_nFTHits<I>( currentsize, nSeedHits ); + result.store_nVPHits<I>( currentsize, nVeloHits ); + result.store_nUTHits<I>( currentsize, 0 ); + + for ( auto idx{0}; idx < nVeloHits; ++idx ) { + result.store_vp_index<I>( currentsize, idx, velos.vp_index<I>( match.vTr(), idx ) ); + result.store_lhcbID<I>( currentsize, idx, velos.lhcbID<I>( match.vTr(), idx ) ); } - } - //== add copied states to output track - output.addToStates( newstates, LHCb::Tag::Unordered ); + for ( auto idx{0}; idx < nSeedHits; ++idx ) { + result.store_ft_index<I>( currentsize, idx, seeds.ft_index<I>( match.sTr(), nVeloHits ) ); + result.store_lhcbID<I>( currentsize, nVeloHits + idx, seeds.hit<I>( match.sTr(), idx ) ); + } + result.store_ut_index<I>( currentsize, 0, -1 ); + + //== get Velo and T states at the usual pattern reco positions + auto state_beam = getVeloState( velos, match.vTr(), 0 ); + state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto 
state_endvelo = getVeloState( velos, match.vTr(), 1 ); + state_endvelo.setLocation( LHCb::State::Location::EndVelo ); + + // from Seeding order of States + // StateParameters::ZBegT, StateParameters::ZMidT, StateParameters::ZEndT + auto state_begT = getSeedState( seeds, match.sTr(), 0 ); + state_begT.setLocation( LHCb::State::Location::AtT ); + + auto state_midT = getSeedState( seeds, match.sTr(), 1 ); + state_midT.setLocation( LHCb::State::Location::AtT ); + + auto state_endT = getSeedState( seeds, match.sTr(), 2 ); + state_endT.setLocation( LHCb::State::Location::AtT ); + + //== estimate q/p + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); //what to do about this? + double qOverP, sigmaQOverP; + StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); + + if ( sc.isFailure() ) { + Warning( "momentum determination failed!", sc ).ignore(); + // assume the Velo/T station standalone reco do something reasonable + qOverP = -9999.; // what is good nonsense value + } else { + // adjust q/p and its uncertainty + sigmaQOverP = sigmaQOverP * sigmaQOverP; + + state_beam.covariance()( 4, 4 ) = sigmaQOverP; + state_beam.setQOverP( qOverP ); + + state_begT.covariance()( 4, 4 ) = sigmaQOverP; + state_begT.setQOverP( qOverP ); + } + + result.store_stateQoP<F>( currentsize, qOverP ); + + //== store Velo and T states at the usual pattern reco positions + auto velopos = Vec3<F>( state_endvelo.x(), state_endvelo.y(), state_endvelo.z() ); + auto velodir = Vec3<F>( state_endvelo.tx(), state_endvelo.ty(), 1.f ); + result.store_vStatePos<F>( currentsize, velopos ); + result.store_vStateDir<F>( currentsize, velodir ); + + auto pos = Vec3<F>( state_midT.x(), state_midT.y(), state_midT.z() ); + auto dir = Vec3<F>( state_midT.tx(), state_midT.ty(), 1.f ); + result.store_statePos<F>( currentsize, pos ); + result.store_stateDir<F>( currentsize, dir ); + + result.size() += 1; + + if ( UNLIKELY( result.size() == LHCb::Pr::Long::Tracks::max_tracks ) ) { + // FIXME: find a better way to define size of container + ++m_maxTracksErr; + break; // FIXME: do something smarter than this + } + } - return output; + return result; } //============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 180983d4e74..9d80e646d39 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -13,6 +13,7 @@ // Include files // from Gaudi +#include "Event/PrLongTracks.h" #include "Event/PrSeedTracks.h" #include "Event/PrVeloTracks.h" #include "Event/Track_v2.h" @@ -46,7 +47,6 @@ namespace { using SeedTracks = LHCb::Pr::Seeding::Tracks; using VeloTracks = LHCb::Pr::Velo::Tracks; - using Hits = LHCb::Pr::Velo::Hits; LHCb::State getVeloState( VeloTracks const& tracks, int t, int index ) { @@ -106,8 +106,8 @@ namespace { } // namespace -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, const LHCb::Pr::Seeding::Tracks& )> { +class PrMatchNN : public Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( const LHCb::Pr::Velo::Tracks&, + const LHCb::Pr::Seeding::Tracks& )> { using Track = LHCb::Event::v2::Track; @@ -119,43 +119,40 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, - const LHCb::Pr::Seeding::Tracks& ) const override; + 
LHCb::Pr::Long::Tracks operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * * Match candidate for PrMatcNNh algorithm * - * @author Manuel Schiller - * @date 2012-01-31 - * code cleanups - * - * @author Olivier Callot - * @date 2007-02-07 + * @author Sevda Esen, Michel De Cian + * @date 2015-02-07 * initial implementation + * @date 2020-06-23 + * implemented SOA version */ class MatchCandidate { public: - MatchCandidate( const Track* vTr, const Track* sTr, float dist ) : m_vTr( vTr ), m_sTr( sTr ), m_dist( dist ) {} + MatchCandidate( int v, int s ) : m_vTr( v ), m_sTr( s ) {} - const Track* vTr() const { return m_vTr; } - const Track* sTr() const { return m_sTr; } - float dist() const { return m_dist; } + int vTr() const { return m_vTr; } + int sTr() const { return m_sTr; } private: - const Track* m_vTr = nullptr; - const Track* m_sTr = nullptr; - float m_dist{0}; + int m_vTr = 0; + int m_sTr = 0; }; private: + typedef std::vector<MatchCandidate> MatchCandidates; + // calculate matching chi^2 float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; // merge velo and seed segment to output track - Track makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds, int s ) const; + LHCb::Pr::Long::Tracks makeTracks( const LHCb::Pr::Velo::Tracks& velos, const LHCb::Pr::Seeding::Tracks& seeds, + MatchCandidates matches ) const; Gaudi::Property<std::vector<double>> m_zMagParams{ this, "ZMagnetParams", {5287.6, -7.98878, 317.683, 0.0119379, -1418.42}}; @@ -177,6 +174,9 @@ private: std::unique_ptr<IClassifierReader> m_MLPReader; + using ErrorCounter = Gaudi::Accumulators::MsgCounter<MSG::ERROR>; + mutable ErrorCounter m_maxTracksErr{this, "Number of tracks reached maximum!"}; + mutable Gaudi::Accumulators::SummingCounter<unsigned int> m_tracksCount{this, "#MatchingTracks"}; mutable Gaudi::Accumulators::SummingCounter<float> m_tracksMLP{this, "TracksMLP"}; mutable Gaudi::Accumulators::SummingCounter<float> m_tracksChi2{this, "#MatchingChi2"}; @@ -185,8 +185,6 @@ private: ToolHandle<IPrDebugMatchTool> m_matchDebugTool{this, "MatchDebugToolName", ""}; ToolHandle<ITrackMomentumEstimate> m_fastMomentumTool{this, "FastMomentumToolName", "FastMomentumEstimate"}; - typedef std::pair<const Track*, const LHCb::State*> TrackStatePair; - typedef std::vector<TrackStatePair> TrackStatePairs; typedef std::vector<std::pair<unsigned int, float>> seedMLPPairs; }; diff --git a/Tr/TrackUtils/src/TracksMatchConverter.cpp b/Tr/TrackUtils/src/TracksMatchConverter.cpp new file mode 100644 index 00000000000..a832aa141ac --- /dev/null +++ b/Tr/TrackUtils/src/TracksMatchConverter.cpp @@ -0,0 +1,94 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ + +#include <vector> + +// Gaudi +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/StdArrayAsProperty.h" + +// LHCb +#include "Event/StateParameters.h" +#include "Event/Track.h" +#include "Kernel/FTChannelID.h" +#include "Kernel/LHCbID.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/VPChannelID.h" + +#include "Event/PrLongTracks.h" +#include "Event/PrVeloTracks.h" + +/** + * Converter between TracksFT SoA PoD and vector<Track_v2> + * + * @author Arthur Hennequin (CERN, LIP6) + */ + +class TracksMatchConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const std::vector<LHCb::Event::v2::Track>&, const std::vector<LHCb::Event::v2::Track>&, + const LHCb::Pr::Long::Tracks& )> { + using Track = LHCb::Event::v2::Track; + using Tracks = LHCb::Pr::Long::Tracks; + // From PrGeometryTool in PrAlgorithms + +public: + TracksMatchConverter( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, + {KeyValue{"TracksSeedLocation", "Rec/Track/Seed"}, + KeyValue{"TracksVeloLocation", "Rec/Track/Velo"}, + KeyValue{"TracksMatchLocation", "Rec/Track/MatchSOA"}}, + KeyValue{"OutputTracksLocation", "Rec/Track/Match"} ) {} + + Gaudi::Property<std::array<float, 5>> m_covarianceValues{this, "covarianceValues", {4.0, 400.0, 4.e-6, 1.e-4, 0.1}}; + + std::vector<Track> operator()( const std::vector<Track>& tracksSeed, const std::vector<Track>& tracksVelo, + const Tracks& tracksMatch ) const override { + std::vector<Track> out; + out.reserve( tracksMatch.size() ); + m_nbTracksCounter += tracksMatch.size(); + + using dType = SIMDWrapper::scalar::types; + using I = dType::int_v; + using F = dType::float_v; + + for ( int t = 0; t < tracksMatch.size(); t++ ) { + auto& trackSeed = tracksSeed[tracksMatch.trackSeed<I>( t ).cast()]; + auto& trackVelo = tracksVelo[tracksMatch.trackVP<I>( t ).cast()]; + auto& newTrack = out.emplace_back( trackVelo ); + newTrack.addToAncestors( trackSeed ); + newTrack.addToAncestors( trackVelo ); + + for ( auto& state : trackSeed.states() ) { newTrack.addToStates( state ); } + + // set q/p in all of the existing states + auto const qop = tracksMatch.stateQoP<F>( t ).cast(); + auto const errQop2 = m_covarianceValues[4] * qop * qop; + + for ( auto& state : newTrack.states() ) { + state.setQOverP( qop ); + state.setErrQOverP2( errQop2 ); + } + + newTrack.setLhcbIDs( tracksMatch.lhcbIDs( t ), LHCb::Tag::Unordered ); + + newTrack.setType( Track::Type::Long ); + newTrack.setHistory( Track::History::PrMatch ); + newTrack.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); + } + + return out; + }; + +private: + mutable Gaudi::Accumulators::SummingCounter<> m_nbTracksCounter{this, "Nb of Produced Tracks"}; +}; + +DECLARE_COMPONENT( TracksMatchConverter ) -- GitLab From 911c121e014a81f75e088e6130c872760e6356a1 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 23 Jun 2020 22:38:22 +0200 Subject: [PATCH 081/111] use std::optional for default values --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 22 ++++++++++++---------- Pr/PrAlgorithms/src/PrMatchNN.h | 4 ++-- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 32410b27582..0f1eaba7cf3 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -13,6 +13,7 @@ // local #include "PrMatchNN.h" #include "Event/StateParameters.h" +#include <optional> 
//----------------------------------------------------------------------------- // Implementation file for class : PrMatchNN // @@ -91,7 +92,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& v const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); if ( posYApproxS > posYApproxV + m_fastYTol ) continue; - const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); + const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ).value_or( 9999.0 ); if ( chi2 < m_maxChi2 ) { @@ -127,17 +128,18 @@ LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& v } //============================================================================= -float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, - const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { +std::optional<float> PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, + const Vec3<F> sState_pos, const Vec3<F> sState_dir, + std::array<float, 6>& mLPReaderInput ) const { const float tx2 = vState_dir.x.cast() * vState_dir.x.cast(); const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); - if ( std::abs( dSlope ) > 1.5 ) return 9999.; + if ( std::abs( dSlope ) > 1.5 ) return std::nullopt; const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); - if ( std::abs( dSlopeY ) > 0.15 ) return 9999.; + if ( std::abs( dSlopeY ) > 0.15 ) return std::nullopt; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + @@ -156,9 +158,9 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; - if ( std::abs( distX ) > 400 ) return 9999.; + if ( std::abs( distX ) > 400 ) return std::nullopt; const float distY = yS - yV; - if ( std::abs( distX ) > 250 ) return 9999.; + if ( std::abs( distX ) > 250 ) return std::nullopt; const float teta2 = tx2 + ty2; const float tolX = dxTol2 + dSlope * dSlope * dxTolSlope2; @@ -168,7 +170,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di chi2 += dSlopeY * dSlopeY * 10000 * 0.0625; - if ( m_maxChi2 < chi2 ) return chi2; + if ( m_maxChi2 < chi2 ) return std::nullopt; mLPReaderInput[0] = chi2; mLPReaderInput[1] = teta2; @@ -177,7 +179,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di mLPReaderInput[4] = std::abs( dSlope ); mLPReaderInput[5] = std::abs( dSlopeY ); - return chi2; + return std::optional{chi2}; } //============================================================================= @@ -238,7 +240,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& v if ( sc.isFailure() ) { Warning( "momentum determination failed!", sc ).ignore(); // assume the Velo/T station standalone reco do something reasonable - qOverP = -9999.; // what is good nonsense value + qOverP = -9999.; // what is a good nonsense value } else { // adjust q/p and its uncertainty sigmaQOverP = sigmaQOverP * sigmaQOverP; diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 9d80e646d39..26f439b5f99 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ 
b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -147,8 +147,8 @@ private: typedef std::vector<MatchCandidate> MatchCandidates; // calculate matching chi^2 - float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, - const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; + std::optional<float> getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; // merge velo and seed segment to output track LHCb::Pr::Long::Tracks makeTracks( const LHCb::Pr::Velo::Tracks& velos, const LHCb::Pr::Seeding::Tracks& seeds, -- GitLab From 84e41e7948c838343bbd1a0ee0f3475fc066571e Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 23 Jun 2020 22:47:06 +0200 Subject: [PATCH 082/111] revert comment in PrMatchNN --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 5f5f02e744a..d269f4f4057 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -168,12 +168,12 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } auto& match = matches.emplace_back( makeTrack( *vTr, *sTr ) ); - /* + if ( m_addUTHitsTool.isEnabled() ) { StatusCode sc = m_addUTHitsTool->addUTHits( match ); if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); } - */ + } // end loop match cands } // end loop velo tracks -- GitLab From 5053e9f5404435513fd9ced7828898c2b130717e Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Thu, 30 Apr 2020 20:35:18 +0200 Subject: [PATCH 083/111] First commit of PrMatchNN to use the PrTrack classes. 
non functional --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 15 +++++++++------ Pr/PrAlgorithms/src/PrMatchNN.h | 8 +++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 5f5f02e744a..31c74f471ec 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,7 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", LHCb::TrackLocation::Velo}, KeyValue{"SeedInput", LHCb::TrackLocation::Seed}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,18 +48,18 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN::Track>& velos, - const std::vector<PrMatchNN::Track>& seeds ) const { +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); - if ( velos.empty() ) { + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; return matches; } - if ( seeds.empty() ) { + if ( seeds.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; return matches; @@ -76,7 +76,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - + + /* for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -177,6 +178,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const std::vector<PrMatchNN } // end loop match cands } // end loop velo tracks + */ + m_tracksCount += matches.size(); return matches; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index cb7581998fa..50071ecad47 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -14,6 +14,9 @@ // Include files // from Gaudi #include "Event/Track_v2.h" +#include "Event/PrVeloTracks.h" +#include "Event/PrSeedTracks.h" + #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -36,8 +39,7 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const std::vector<LHCb::Event::v2::Track>&, const std::vector<LHCb::Event::v2::Track>& )> { +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { using Track = LHCb::Event::v2::Track; public: @@ -48,7 +50,7 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const std::vector<Track>&, const std::vector<Track>& ) const override; + std::vector<Track> operator()( const 
LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * -- GitLab From 1ca5a3cff9b437aaf0db80e07256ea7c1414f544 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 12 May 2020 22:15:11 +0200 Subject: [PATCH 084/111] update matching --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 149 ++++++++++++++++++++++-------- Pr/PrAlgorithms/src/PrMatchNN.h | 89 ++++++++++++++++-- 2 files changed, 193 insertions(+), 45 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 31c74f471ec..99a1cbc4c2c 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,7 +29,8 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"VeloHitsLocation", "Raw/VP/Hits"}, + KeyValue{"SeedInput", "Rec/Track/Seed"}}, KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} //============================================================================= @@ -48,11 +49,14 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, +std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; matches.reserve( 200 ); + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; @@ -65,9 +69,50 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + for ( int v = 0; v != velos.size(); v++ ) { + + auto mlpCounterBuf = m_tracksMLP.buffer(); + auto chi2CounterBuf = m_tracksChi2.buffer(); + + const int EndVelo = 1; + auto velo_pos = velos.statePos<F>( v, EndVelo ); + auto velo_dir = velos.stateDir<F>( v, EndVelo ); + + const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); + + const int EndT3 = 3; + for ( int s = 0; s != seeds.size(); s++ ) { + auto seed_pos = seeds.statePos<F>( s, EndT3 ); + auto seed_dir = seeds.stateDir<F>( s, EndT3 ); + + const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + + const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); + + if ( chi2 < m_maxChi2 ) { + + const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); + mlpCounterBuf += mlp; + chi2CounterBuf += chi2; + if ( mlp > m_minNN ) { + auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); + + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( match ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + } + } + } + } + + /* std::vector<MatchCandidate> cands; cands.reserve( seeds.size() ); + + + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; // -- make pairs of Velo 
track and state @@ -76,8 +121,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track // -- typedef in header file TrackStatePairs veloPairs; veloPairs.reserve( velos.size() ); - - /* + + for ( auto const& vTr : velos ) { if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; @@ -183,34 +228,37 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track m_tracksCount += matches.size(); return matches; } + //============================================================================= // -float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const { - const float tx2 = vState.tx() * vState.tx(); - const float ty2 = vState.ty() * vState.ty(); +float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { + + const float tx2 = vState_dir.x.cast() * vState_dir.x.cast(); + const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); - const float dSlope = vState.tx() - sState.tx(); + const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); if ( std::abs( dSlope ) > 1.5 ) return 99.; - const float dSlopeY = vState.ty() - sState.ty(); + const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); if ( std::abs( dSlopeY ) > 0.15 ) return 99.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + - m_zMagParams[3] * std::abs( sState.x() ) + m_zMagParams[4] * vState.tx() * vState.tx(); + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + + m_zMagParams[4] * vState_dir.x.cast() * vState_dir.x.cast(); const float dxTol2 = m_dxTol * m_dxTol; const float dxTolSlope2 = m_dxTolSlope * m_dxTolSlope; - const float xV = vState.x() + ( zForX - vState.z() ) * vState.tx(); + const float xV = vState_pos.x.cast() + ( zForX - vState_pos.z.cast() ) * vState_dir.x.cast(); // -- This is the function that calculates the 'bending' in y-direction // -- The parametrisation can be derived with the MatchFitParams package - const float yV = vState.y() + ( m_zMatchY - vState.z() ) * vState.ty() + - vState.ty() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); + const float yV = vState_pos.y.cast() + ( m_zMatchY - vState_pos.z.cast() ) * vState_dir.y.cast() + + vState_dir.y.cast() * ( m_bendYParams[0] * dSlope * dSlope + m_bendYParams[1] * dSlopeY * dSlopeY ); - const float xS = sState.x() + ( zForX - sState.z() ) * sState.tx(); - const float yS = sState.y() + ( m_zMatchY - sState.z() ) * sState.ty(); + const float xS = sState_pos.x.cast() + ( zForX - sState_pos.z.cast() ) * sState_dir.x.cast(); + const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; if ( std::abs( distX ) > 400 ) return 99.; @@ -238,40 +286,67 @@ float PrMatchNN::getChi2Match( const LHCb::State& vState, const LHCb::State& sSt return chi2; } -PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMatchNN::Track& seed ) const { +PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { auto output = Track{}; - output.addToAncestors( velo ); - output.addToAncestors( seed ); + + // output.addToAncestors( velo ); + // output.addToAncestors( seed ); + 
//== Adjust flags output.setType( Track::Type::Long ); output.setHistory( Track::History::PrMatch ); output.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); + //== copy LHCbIDs - output.addToLhcbIDs( velo.lhcbIDs(), LHCb::Tag::Sorted ); - output.addToLhcbIDs( seed.lhcbIDs(), LHCb::Tag::Sorted ); + int nSeedHits = seeds.nHits<I>( s ).cast(); + std::vector<LHCb::LHCbID> seedlhcbIDs; + seedlhcbIDs.reserve( nSeedHits ); + + for ( int i = 0; i < nSeedHits; ++i ) { seedlhcbIDs.emplace_back( seeds.hit<I>( s, i ).cast() ); } + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Sorted ); + + output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Unordered ); + output.addToLhcbIDs( velos.lhcbIDs( v, veloHits ), LHCb::Tag::Unordered ); + //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; newstates.reserve( 6 ); - if ( velo.hasStateAt( LHCb::State::Location::ClosestToBeam ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::ClosestToBeam ) ); - if ( velo.hasStateAt( LHCb::State::Location::FirstMeasurement ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::FirstMeasurement ) ); - if ( velo.hasStateAt( LHCb::State::Location::EndVelo ) ) - newstates.push_back( *velo.stateAt( LHCb::State::Location::EndVelo ) ); - newstates.push_back( seed.closestState( StateParameters::ZBegT ) ); - newstates.push_back( seed.closestState( StateParameters::ZMidT ) ); + auto state_beam = getVeloState( velos, v, 0 ); + state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_endvelo = getVeloState( velos, v, 1 ); + state_endvelo.setLocation( LHCb::State::Location::EndVelo ); + + auto state_firstmeas = getVeloState( velos, v, 2 ); + state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_beam ); + newstates.push_back( state_endvelo ); + newstates.push_back( state_firstmeas ); + + auto state_begT = getSeedState( seeds, s, 0 ); + state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_midT = getSeedState( seeds, s, 1 ); + state_midT.setLocation( LHCb::State::Location::EndVelo ); + + auto state_endT = getSeedState( seeds, s, 2 ); + state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); + newstates.push_back( state_begT ); + + newstates.push_back( state_midT ); // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } - newstates.push_back( seed.closestState( StateParameters::ZEndT ) ); + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; + + newstates.push_back( state_endT ); // make sure we don't include same state twice if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } //== estimate q/p - double qOverP, sigmaQOverP; - bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); - const LHCb::State& vState = velo.closestState( 0. 
); - const LHCb::State& sState = seed.closestState( m_zMatchY ); - StatusCode sc = m_fastMomentumTool->calculate( &vState, &sState, qOverP, sigmaQOverP, cubicFit ); + double qOverP, sigmaQOverP; + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + + StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { Warning( "momentum determination failed!", sc ).ignore(); // assume the Velo/T station standalone reco do something reasonable @@ -283,7 +358,9 @@ PrMatchNN::Track PrMatchNN::makeTrack( const PrMatchNN::Track& velo, const PrMat st.setQOverP( qOverP ); } } + //== add copied states to output track output.addToStates( newstates, LHCb::Tag::Unordered ); + return output; } diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 50071ecad47..922921cc620 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -13,9 +13,9 @@ // Include files // from Gaudi -#include "Event/Track_v2.h" -#include "Event/PrVeloTracks.h" #include "Event/PrSeedTracks.h" +#include "Event/PrVeloTracks.h" +#include "Event/Track_v2.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" @@ -39,7 +39,76 @@ * @date 2007-02-07 */ -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>(const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& )> { +namespace { + using dType = SIMDWrapper::scalar::types; + using I = dType::int_v; + using F = dType::float_v; + + using SeedTracks = LHCb::Pr::Seeding::Tracks; + using VeloTracks = LHCb::Pr::Velo::Tracks; + using Hits = LHCb::Pr::Velo::Hits; + + LHCb::State getVeloState( VeloTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + Vec3<F> covX = tracks.stateCovX<F>( t, index ); + Vec3<F> covY = tracks.stateCovY<F>( t, index ); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( 0. 
); + + c( 0, 0 ) = covX.x.cast(); + c( 2, 0 ) = covX.y.cast(); + c( 2, 2 ) = covX.z.cast(); + c( 1, 1 ) = covY.x.cast(); + c( 3, 1 ) = covY.y.cast(); + c( 3, 3 ) = covY.z.cast(); + c( 4, 4 ) = 1.f; + + state.setState( s ); + + state.setCovariance( c ); + + return state; + } + LHCb::State getSeedState( SeedTracks const& tracks, int t, int index ) { + + LHCb::State state; + LHCb::StateVector s; + Gaudi::TrackSymMatrix c; + + // Add state closest to beam + Vec3<F> pos = tracks.statePos<F>( t, index ); + Vec3<F> dir = tracks.stateDir<F>( t, index ); + auto const qop = tracks.QoP<F>( t ).cast(); + + s.setX( pos.x.cast() ); + s.setY( pos.y.cast() ); + s.setZ( pos.z.cast() ); + s.setTx( dir.x.cast() ); + s.setTy( dir.y.cast() ); + s.setQOverP( qop ); + + state.setState( s ); + + return state; + } + +} // namespace + +class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, const LHCb::Pr::Seeding::Tracks& )> { + using Track = LHCb::Event::v2::Track; public: @@ -50,7 +119,8 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; + std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, + const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate PrMatchNN.h * @@ -79,12 +149,13 @@ public: }; private: - /// calculate matching chi^2 - float getChi2Match( const LHCb::State& vState, const LHCb::State& sState, - std::array<float, 6>& mLPReaderInput ) const; + // calculate matching chi^2 + float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; - /// merge velo and seed segment to output track - Track makeTrack( const Track& velo, const Track& seed ) const; + // merge velo and seed segment to output track + Track makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, + const LHCb::Pr::Seeding::Tracks& seeds, int s ) const; Gaudi::Property<std::vector<double>> m_zMagParams{ this, "ZMagnetParams", {5287.6, -7.98878, 317.683, 0.0119379, -1418.42}}; -- GitLab From 5325b96feaf53d8aa3e861dcc9a2297604d70a41 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 20 May 2020 15:27:55 +0200 Subject: [PATCH 085/111] working PrMatchNN with SOA containers --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 168 +++++++----------------------- Pr/PrAlgorithms/src/PrMatchNN.h | 1 + 2 files changed, 39 insertions(+), 130 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 99a1cbc4c2c..b38bf362607 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -53,7 +53,7 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds ) const { std::vector<Track> matches; - matches.reserve( 200 ); + matches.reserve( velos.size() * 1.5 ); std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; @@ -63,12 +63,22 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } + if ( veloHits.size() == 0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Hit container '" << inputLocation<1>() << "' is empty" << endmsg; + return matches; + } + if ( seeds.size() == 0 ) { if ( 
UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<1>() << "' is empty" << endmsg; + debug() << "Track container '" << inputLocation<2>() << "' is empty" << endmsg; return matches; } + seedMLPPairs seedMLP; + + seedMLP.reserve( seeds.size() ); + for ( int v = 0; v != velos.size(); v++ ) { auto mlpCounterBuf = m_tracksMLP.buffer(); @@ -80,12 +90,14 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float posYApproxV = velo_pos.y.cast() + ( m_zMatchY - velo_pos.z.cast() ) * velo_dir.y.cast(); - const int EndT3 = 3; + const int EndT3 = 2; + for ( int s = 0; s != seeds.size(); s++ ) { auto seed_pos = seeds.statePos<F>( s, EndT3 ); auto seed_dir = seeds.stateDir<F>( s, EndT3 ); const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); + if ( posYApproxS > posYApproxV + m_fastYTol ) continue; const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); @@ -94,116 +106,18 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); mlpCounterBuf += mlp; chi2CounterBuf += chi2; - if ( mlp > m_minNN ) { - auto& match = matches.emplace_back( makeTrack( velos, v, veloHits, seeds, s ) ); - - if ( m_addUTHitsTool.isEnabled() ) { - StatusCode sc = m_addUTHitsTool->addUTHits( match ); - if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); - } - } - } - } - } - - /* - std::vector<MatchCandidate> cands; - cands.reserve( seeds.size() ); - - - - std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; - - // -- make pairs of Velo track and state - // -- TrackStatePair is std::pair<const Track*, const LHCb::State*> - // -- TrackStatePairs is std::vector<TrackStatePair> - // -- typedef in header file - TrackStatePairs veloPairs; - veloPairs.reserve( velos.size() ); - - - for ( auto const& vTr : velos ) { - if ( vTr.checkFlag( Track::Flag::Invalid ) ) continue; - if ( vTr.checkFlag( Track::Flag::Backward ) ) continue; - const LHCb::State* vState = vTr.stateAt( LHCb::State::Location::EndVelo ); - assert( vState != nullptr ); - veloPairs.emplace_back( &vTr, vState ); - } - - // -- sort according to approx y position - // -- We don't know deltaSlope, so we just extrapolate linearly - std::sort( veloPairs.begin(), veloPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( 0.0 - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( 0.0 - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - - // -- make pairs of Seed track and state - TrackStatePairs seedPairs; - seedPairs.reserve( seeds.size() ); - - for ( auto const& sTr : seeds ) { - if ( sTr.checkFlag( Track::Flag::Invalid ) ) continue; - const LHCb::State& sState = sTr.closestState( m_zMatchY ); - seedPairs.emplace_back( &sTr, &sState ); - } - - // -- sort according to approx y position - std::sort( seedPairs.begin(), seedPairs.end(), [&]( const TrackStatePair& sP1, const TrackStatePair& sP2 ) { - const float posA = sP1.second->y() + ( m_zMatchY - sP1.second->z() ) * sP1.second->ty(); - const float posB = sP2.second->y() + ( m_zMatchY - sP2.second->z() ) * sP2.second->ty(); - return posA < posB; - } ); - - auto mlpCounterBuf = m_tracksMLP.buffer(); - auto chi2CounterBuf = m_tracksChi2.buffer(); - for ( auto const& vP : veloPairs ) { - cands.clear(); - - const float posYApproxV = vP.second->y() + ( 
m_zMatchY - vP.second->z() ) * vP.second->ty(); - // -- The TrackStatePairs are sorted according to the approximate extrapolated y position - // -- We can use a binary search to find the starting point from where we need to calculate the chi2 - // -- The tolerance should be large enough such that it is essentially losseless, but speeds things up - // significantly. - auto it = std::lower_bound( - seedPairs.begin(), seedPairs.end(), m_fastYTol, [&]( const TrackStatePair& sP, const float tol ) { - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - return posYApproxS < posYApproxV - tol; - } ); - - // -- The loop to calculate the chi2 between Velo and Seed track - for ( ; it < seedPairs.end(); ++it ) { - TrackStatePair sP = *it; - - // -- Stop the loop at the upper end of the tolerance interval - const float posYApproxS = sP.second->y() + ( m_zMatchY - sP.second->z() ) * sP.second->ty(); - if ( posYApproxS > posYApproxV + m_fastYTol ) break; - - const float chi2 = getChi2Match( *vP.second, *sP.second, mLPReaderInput ); - - if ( m_matchDebugTool.isEnabled() ) { - std::vector<float> v( std::begin( mLPReaderInput ), std::end( mLPReaderInput ) ); - /// TODO: This needs to be updated with Track_v2 (PrMCTools/src/PrDebugMatchTool.{h,cpp} and - /// PrKernel/PrKernel/IPrDebugMatchTool.h) - // m_matchDebugTool->fillTuple( *vP.first, *sP.first, v ); - } - - if ( chi2 < m_maxChi2 ) { - const float mlp = m_MLPReader->GetMvaValue( mLPReaderInput ); - mlpCounterBuf += mlp; - chi2CounterBuf += chi2; - if ( mlp > m_minNN ) cands.emplace_back( vP.first, sP.first, mlp ); + if ( mlp > m_minNN ) { seedMLP.emplace_back( s, mlp ); } } } - std::sort( cands.begin(), cands.end(), - []( const MatchCandidate& lhs, const MatchCandidate& rhs ) { return lhs.dist() > rhs.dist(); } ); + std::sort( seedMLP.begin(), seedMLP.end(), [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { + return sP1.second > sP2.second; + } ); - // convert unused match candidates to tracks - for ( const MatchCandidate& cand : cands ) { + for ( unsigned int sm = 0; sm != seedMLP.size(); sm++ ) { - if ( cands[0].dist() - cand.dist() > m_maxdDist ) break; + if ( seedMLP[0].second - seedMLP[sm].second > m_maxdDist ) break; const Track* vTr = cand.vTr(); const Track* sTr = cand.sTr(); @@ -230,8 +144,6 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track } //============================================================================= -// - float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { @@ -239,10 +151,10 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); - if ( std::abs( dSlope ) > 1.5 ) return 99.; + if ( std::abs( dSlope ) > 1.5 ) return 9999.; const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); - if ( std::abs( dSlopeY ) > 0.15 ) return 99.; + if ( std::abs( dSlopeY ) > 0.15 ) return 9999.; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + @@ -261,17 +173,16 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float 
distX = xS - xV; - if ( std::abs( distX ) > 400 ) return 99.; + if ( std::abs( distX ) > 400 ) return 9999.; const float distY = yS - yV; - if ( std::abs( distX ) > 250 ) return 99.; + if ( std::abs( distX ) > 250 ) return 9999.; const float teta2 = tx2 + ty2; const float tolX = dxTol2 + dSlope * dSlope * dxTolSlope2; const float tolY = m_dyTol * m_dyTol + teta2 * m_dyTolSlope * m_dyTolSlope; - float chi2 = distX * distX / tolX + distY * distY / tolY; + float chi2 = ( tolX != 0 and tolY != 0 ? distX * distX / tolX + distY * distY / tolY : 9999. ); - // chi2 += dslY * dslY / sState.errTy2() / 16.; chi2 += dSlopeY * dSlopeY * 10000 * 0.0625; if ( m_maxChi2 < chi2 ) return chi2; @@ -286,6 +197,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di return chi2; } +//============================================================================= PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { auto output = Track{}; @@ -311,40 +223,35 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int //== copy Velo and T states at the usual pattern reco positions std::vector<LHCb::State> newstates; - newstates.reserve( 6 ); + newstates.reserve( 5 ); auto state_beam = getVeloState( velos, v, 0 ); state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_beam ); auto state_endvelo = getVeloState( velos, v, 1 ); state_endvelo.setLocation( LHCb::State::Location::EndVelo ); - - auto state_firstmeas = getVeloState( velos, v, 2 ); - state_firstmeas.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_beam ); newstates.push_back( state_endvelo ); - newstates.push_back( state_firstmeas ); auto state_begT = getSeedState( seeds, s, 0 ); state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); + newstates.push_back( state_begT ); auto state_midT = getSeedState( seeds, s, 1 ); state_midT.setLocation( LHCb::State::Location::EndVelo ); + newstates.push_back( state_midT ); + + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) + newstates.pop_back(); // make sure we don't include same state twice auto state_endT = getSeedState( seeds, s, 2 ); state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_begT ); - - newstates.push_back( state_midT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); }; - newstates.push_back( state_endT ); - // make sure we don't include same state twice - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) { newstates.pop_back(); } + if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. 
) + newstates.pop_back(); // make sure we don't include same state twice //== estimate q/p double qOverP, sigmaQOverP; - // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); if ( sc.isFailure() ) { @@ -364,3 +271,4 @@ PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int return output; } +//============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 922921cc620..180983d4e74 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -187,6 +187,7 @@ private: typedef std::pair<const Track*, const LHCb::State*> TrackStatePair; typedef std::vector<TrackStatePair> TrackStatePairs; + typedef std::vector<std::pair<unsigned int, float>> seedMLPPairs; }; #endif // PRMATCH_H -- GitLab From c765f074b55823f26305b7bcde33168a6efeb8d6 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 12 May 2020 22:15:11 +0200 Subject: [PATCH 086/111] update matching --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index b38bf362607..18477944e6d 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -57,6 +57,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + if ( velos.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; -- GitLab From 355ba27f6e7f8c55f016279444e2fd22fe413f06 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 23 Jun 2020 21:06:56 +0200 Subject: [PATCH 087/111] PrMatchNN with PrLongTracks output, converter to convert to v2 track --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 208 ++++++++++++--------- Pr/PrAlgorithms/src/PrMatchNN.h | 42 ++--- Tr/TrackUtils/src/TracksMatchConverter.cpp | 94 ++++++++++ 3 files changed, 229 insertions(+), 115 deletions(-) create mode 100644 Tr/TrackUtils/src/TracksMatchConverter.cpp diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 18477944e6d..6b5824ded7a 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -29,9 +29,8 @@ DECLARE_COMPONENT( PrMatchNN ) //============================================================================= PrMatchNN::PrMatchNN( const std::string& name, ISvcLocator* pSvcLocator ) : Transformer( name, pSvcLocator, - {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"VeloHitsLocation", "Raw/VP/Hits"}, - KeyValue{"SeedInput", "Rec/Track/Seed"}}, - KeyValue{"MatchOutput", LHCb::TrackLocation::Match} ) {} + {KeyValue{"VeloInput", "Rec/Track/Velo"}, KeyValue{"SeedInput", "Rec/Track/Seed"}}, + KeyValue{"MatchOutput", "Rec/Track/Match"} ) {} //============================================================================= // Initialization @@ -49,12 +48,8 @@ StatusCode PrMatchNN::initialize() { //============================================================================= // Main execution //============================================================================= -std::vector<PrMatchNN::Track> 
PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, - const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds ) const { - std::vector<Track> matches; - matches.reserve( velos.size() * 1.5 ); - +LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds ) const { std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; std::array<float, 6> mLPReaderInput = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; @@ -65,19 +60,20 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track return matches; } - if ( veloHits.size() == 0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Hit container '" << inputLocation<1>() << "' is empty" << endmsg; - return matches; - } + if ( velos.size() == 0 || seeds.size() == 0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) { + debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << seeds.size() + << endmsg; + debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << velos.size() + << endmsg; + } - if ( seeds.size() == 0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<2>() << "' is empty" << endmsg; - return matches; + LHCb::Pr::Long::Tracks noneresult( nullptr, nullptr, nullptr ); + return noneresult; } - seedMLPPairs seedMLP; + seedMLPPairs seedMLP; + MatchCandidates matches; seedMLP.reserve( seeds.size() ); @@ -113,9 +109,8 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track } } - std::sort( seedMLP.begin(), seedMLP.end(), [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { - return sP1.second > sP2.second; - } ); + std::sort( seedMLP.begin(), seedMLP.end(), + [&]( std::pair<int, float> sP1, std::pair<int, float> sP2 ) { return sP1.second > sP2.second; } ); for ( unsigned int sm = 0; sm != seedMLP.size(); sm++ ) { @@ -141,8 +136,15 @@ std::vector<PrMatchNN::Track> PrMatchNN::operator()( const LHCb::Pr::Velo::Track */ - m_tracksCount += matches.size(); - return matches; + auto outputTracks = makeTracks( velos, seeds, matches ); + + if ( m_addUTHitsTool.isEnabled() ) { + StatusCode sc = m_addUTHitsTool->addUTHits( outputTracks ); + if ( sc.isFailure() ) Warning( "adding UT clusters failed!", sc ).ignore(); + } + + m_tracksCount += outputTracks.size(); + return outputTracks; } //============================================================================= @@ -200,77 +202,97 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di } //============================================================================= -PrMatchNN::Track PrMatchNN::makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds, int s ) const { - auto output = Track{}; - - // output.addToAncestors( velo ); - // output.addToAncestors( seed ); - - //== Adjust flags - output.setType( Track::Type::Long ); - output.setHistory( Track::History::PrMatch ); - output.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); - - //== copy LHCbIDs - int nSeedHits = seeds.nHits<I>( s ).cast(); - std::vector<LHCb::LHCbID> seedlhcbIDs; - seedlhcbIDs.reserve( nSeedHits ); - - for ( int i = 0; i < nSeedHits; ++i ) { seedlhcbIDs.emplace_back( seeds.hit<I>( s, i ).cast() ); } - output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Sorted ); - - output.addToLhcbIDs( seedlhcbIDs, LHCb::Tag::Unordered ); - output.addToLhcbIDs( 
velos.lhcbIDs( v, veloHits ), LHCb::Tag::Unordered ); - - //== copy Velo and T states at the usual pattern reco positions - std::vector<LHCb::State> newstates; - newstates.reserve( 5 ); - auto state_beam = getVeloState( velos, v, 0 ); - state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); - newstates.push_back( state_beam ); - - auto state_endvelo = getVeloState( velos, v, 1 ); - state_endvelo.setLocation( LHCb::State::Location::EndVelo ); - newstates.push_back( state_endvelo ); - - auto state_begT = getSeedState( seeds, s, 0 ); - state_begT.setLocation( LHCb::State::Location::ClosestToBeam ); - newstates.push_back( state_begT ); - - auto state_midT = getSeedState( seeds, s, 1 ); - state_midT.setLocation( LHCb::State::Location::EndVelo ); - newstates.push_back( state_midT ); - - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) - newstates.pop_back(); // make sure we don't include same state twice - - auto state_endT = getSeedState( seeds, s, 2 ); - state_endT.setLocation( LHCb::State::Location::FirstMeasurement ); - newstates.push_back( state_endT ); - if ( std::abs( newstates[newstates.size() - 2].z() - newstates.back().z() ) < 300. ) - newstates.pop_back(); // make sure we don't include same state twice - - //== estimate q/p - double qOverP, sigmaQOverP; - // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); - - StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); - if ( sc.isFailure() ) { - Warning( "momentum determination failed!", sc ).ignore(); - // assume the Velo/T station standalone reco do something reasonable - } else { - // adjust q/p and its uncertainty - sigmaQOverP = sigmaQOverP * sigmaQOverP; - for ( auto& st : newstates ) { - st.covariance()( 4, 4 ) = sigmaQOverP; - st.setQOverP( qOverP ); +LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& velos, + const LHCb::Pr::Seeding::Tracks& seeds, MatchCandidates matches ) const { + + LHCb::Pr::Long::Tracks result( &velos, nullptr, &seeds ); + + for ( const auto match : matches ) { + int const currentsize = result.size(); + + result.store_trackVP<I>( currentsize, match.vTr() ); + result.store_trackUT<I>( currentsize, -1 ); + result.store_trackSeed<I>( currentsize, match.sTr() ); + + //== copy LHCbIDs + const int nSeedHits = seeds.nHits<I>( match.sTr() ).cast(); + const int nVeloHits = velos.nHits<I>( match.vTr() ).cast(); + + result.store_nFTHits<I>( currentsize, nSeedHits ); + result.store_nVPHits<I>( currentsize, nVeloHits ); + result.store_nUTHits<I>( currentsize, 0 ); + + for ( auto idx{0}; idx < nVeloHits; ++idx ) { + result.store_vp_index<I>( currentsize, idx, velos.vp_index<I>( match.vTr(), idx ) ); + result.store_lhcbID<I>( currentsize, idx, velos.lhcbID<I>( match.vTr(), idx ) ); } - } - //== add copied states to output track - output.addToStates( newstates, LHCb::Tag::Unordered ); + for ( auto idx{0}; idx < nSeedHits; ++idx ) { + result.store_ft_index<I>( currentsize, idx, seeds.ft_index<I>( match.sTr(), nVeloHits ) ); + result.store_lhcbID<I>( currentsize, nVeloHits + idx, seeds.hit<I>( match.sTr(), idx ) ); + } + result.store_ut_index<I>( currentsize, 0, -1 ); + + //== get Velo and T states at the usual pattern reco positions + auto state_beam = getVeloState( velos, match.vTr(), 0 ); + state_beam.setLocation( LHCb::State::Location::ClosestToBeam ); + + auto state_endvelo = getVeloState( velos, match.vTr(), 1 ); + state_endvelo.setLocation( LHCb::State::Location::EndVelo ); + + // 
from Seeding order of States + // StateParameters::ZBegT, StateParameters::ZMidT, StateParameters::ZEndT + auto state_begT = getSeedState( seeds, match.sTr(), 0 ); + state_begT.setLocation( LHCb::State::Location::AtT ); + + auto state_midT = getSeedState( seeds, match.sTr(), 1 ); + state_midT.setLocation( LHCb::State::Location::AtT ); + + auto state_endT = getSeedState( seeds, match.sTr(), 2 ); + state_endT.setLocation( LHCb::State::Location::AtT ); + + //== estimate q/p + // bool const cubicFit = seed.checkHistory( Track::History::PrSeeding ); //what to do about this? + double qOverP, sigmaQOverP; + StatusCode sc = m_fastMomentumTool->calculate( &state_beam, &state_endT, qOverP, sigmaQOverP, true ); + + if ( sc.isFailure() ) { + Warning( "momentum determination failed!", sc ).ignore(); + // assume the Velo/T station standalone reco do something reasonable + qOverP = -9999.; // what is good nonsense value + } else { + // adjust q/p and its uncertainty + sigmaQOverP = sigmaQOverP * sigmaQOverP; + + state_beam.covariance()( 4, 4 ) = sigmaQOverP; + state_beam.setQOverP( qOverP ); + + state_begT.covariance()( 4, 4 ) = sigmaQOverP; + state_begT.setQOverP( qOverP ); + } + + result.store_stateQoP<F>( currentsize, qOverP ); + + //== store Velo and T states at the usual pattern reco positions + auto velopos = Vec3<F>( state_endvelo.x(), state_endvelo.y(), state_endvelo.z() ); + auto velodir = Vec3<F>( state_endvelo.tx(), state_endvelo.ty(), 1.f ); + result.store_vStatePos<F>( currentsize, velopos ); + result.store_vStateDir<F>( currentsize, velodir ); + + auto pos = Vec3<F>( state_midT.x(), state_midT.y(), state_midT.z() ); + auto dir = Vec3<F>( state_midT.tx(), state_midT.ty(), 1.f ); + result.store_statePos<F>( currentsize, pos ); + result.store_stateDir<F>( currentsize, dir ); + + result.size() += 1; + + if ( UNLIKELY( result.size() == LHCb::Pr::Long::Tracks::max_tracks ) ) { + // FIXME: find a better way to define size of container + ++m_maxTracksErr; + break; // FIXME: do something smarter than this + } + } - return output; + return result; } //============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 180983d4e74..9d80e646d39 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -13,6 +13,7 @@ // Include files // from Gaudi +#include "Event/PrLongTracks.h" #include "Event/PrSeedTracks.h" #include "Event/PrVeloTracks.h" #include "Event/Track_v2.h" @@ -46,7 +47,6 @@ namespace { using SeedTracks = LHCb::Pr::Seeding::Tracks; using VeloTracks = LHCb::Pr::Velo::Tracks; - using Hits = LHCb::Pr::Velo::Hits; LHCb::State getVeloState( VeloTracks const& tracks, int t, int index ) { @@ -106,8 +106,8 @@ namespace { } // namespace -class PrMatchNN : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( - const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, const LHCb::Pr::Seeding::Tracks& )> { +class PrMatchNN : public Gaudi::Functional::Transformer<LHCb::Pr::Long::Tracks( const LHCb::Pr::Velo::Tracks&, + const LHCb::Pr::Seeding::Tracks& )> { using Track = LHCb::Event::v2::Track; @@ -119,43 +119,40 @@ public: StatusCode initialize() override; // main method - std::vector<Track> operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Velo::Hits&, - const LHCb::Pr::Seeding::Tracks& ) const override; + LHCb::Pr::Long::Tracks operator()( const LHCb::Pr::Velo::Tracks&, const LHCb::Pr::Seeding::Tracks& ) const override; /** @class MatchCandidate 
PrMatchNN.h * * Match candidate for PrMatcNNh algorithm * - * @author Manuel Schiller - * @date 2012-01-31 - * code cleanups - * - * @author Olivier Callot - * @date 2007-02-07 + * @author Sevda Esen, Michel De Cian + * @date 2015-02-07 * initial implementation + * @date 2020-06-23 + * implemented SOA version */ class MatchCandidate { public: - MatchCandidate( const Track* vTr, const Track* sTr, float dist ) : m_vTr( vTr ), m_sTr( sTr ), m_dist( dist ) {} + MatchCandidate( int v, int s ) : m_vTr( v ), m_sTr( s ) {} - const Track* vTr() const { return m_vTr; } - const Track* sTr() const { return m_sTr; } - float dist() const { return m_dist; } + int vTr() const { return m_vTr; } + int sTr() const { return m_sTr; } private: - const Track* m_vTr = nullptr; - const Track* m_sTr = nullptr; - float m_dist{0}; + int m_vTr = 0; + int m_sTr = 0; }; private: + typedef std::vector<MatchCandidate> MatchCandidates; + // calculate matching chi^2 float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; // merge velo and seed segment to output track - Track makeTrack( const LHCb::Pr::Velo::Tracks& velos, int v, const LHCb::Pr::Velo::Hits& veloHits, - const LHCb::Pr::Seeding::Tracks& seeds, int s ) const; + LHCb::Pr::Long::Tracks makeTracks( const LHCb::Pr::Velo::Tracks& velos, const LHCb::Pr::Seeding::Tracks& seeds, + MatchCandidates matches ) const; Gaudi::Property<std::vector<double>> m_zMagParams{ this, "ZMagnetParams", {5287.6, -7.98878, 317.683, 0.0119379, -1418.42}}; @@ -177,6 +174,9 @@ private: std::unique_ptr<IClassifierReader> m_MLPReader; + using ErrorCounter = Gaudi::Accumulators::MsgCounter<MSG::ERROR>; + mutable ErrorCounter m_maxTracksErr{this, "Number of tracks reached maximum!"}; + mutable Gaudi::Accumulators::SummingCounter<unsigned int> m_tracksCount{this, "#MatchingTracks"}; mutable Gaudi::Accumulators::SummingCounter<float> m_tracksMLP{this, "TracksMLP"}; mutable Gaudi::Accumulators::SummingCounter<float> m_tracksChi2{this, "#MatchingChi2"}; @@ -185,8 +185,6 @@ private: ToolHandle<IPrDebugMatchTool> m_matchDebugTool{this, "MatchDebugToolName", ""}; ToolHandle<ITrackMomentumEstimate> m_fastMomentumTool{this, "FastMomentumToolName", "FastMomentumEstimate"}; - typedef std::pair<const Track*, const LHCb::State*> TrackStatePair; - typedef std::vector<TrackStatePair> TrackStatePairs; typedef std::vector<std::pair<unsigned int, float>> seedMLPPairs; }; diff --git a/Tr/TrackUtils/src/TracksMatchConverter.cpp b/Tr/TrackUtils/src/TracksMatchConverter.cpp new file mode 100644 index 00000000000..a832aa141ac --- /dev/null +++ b/Tr/TrackUtils/src/TracksMatchConverter.cpp @@ -0,0 +1,94 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ + +#include <vector> + +// Gaudi +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/StdArrayAsProperty.h" + +// LHCb +#include "Event/StateParameters.h" +#include "Event/Track.h" +#include "Kernel/FTChannelID.h" +#include "Kernel/LHCbID.h" +#include "Kernel/UTChannelID.h" +#include "Kernel/VPChannelID.h" + +#include "Event/PrLongTracks.h" +#include "Event/PrVeloTracks.h" + +/** + * Converter between TracksFT SoA PoD and vector<Track_v2> + * + * @author Arthur Hennequin (CERN, LIP6) + */ + +class TracksMatchConverter : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const std::vector<LHCb::Event::v2::Track>&, const std::vector<LHCb::Event::v2::Track>&, + const LHCb::Pr::Long::Tracks& )> { + using Track = LHCb::Event::v2::Track; + using Tracks = LHCb::Pr::Long::Tracks; + // From PrGeometryTool in PrAlgorithms + +public: + TracksMatchConverter( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, + {KeyValue{"TracksSeedLocation", "Rec/Track/Seed"}, + KeyValue{"TracksVeloLocation", "Rec/Track/Velo"}, + KeyValue{"TracksMatchLocation", "Rec/Track/MatchSOA"}}, + KeyValue{"OutputTracksLocation", "Rec/Track/Match"} ) {} + + Gaudi::Property<std::array<float, 5>> m_covarianceValues{this, "covarianceValues", {4.0, 400.0, 4.e-6, 1.e-4, 0.1}}; + + std::vector<Track> operator()( const std::vector<Track>& tracksSeed, const std::vector<Track>& tracksVelo, + const Tracks& tracksMatch ) const override { + std::vector<Track> out; + out.reserve( tracksMatch.size() ); + m_nbTracksCounter += tracksMatch.size(); + + using dType = SIMDWrapper::scalar::types; + using I = dType::int_v; + using F = dType::float_v; + + for ( int t = 0; t < tracksMatch.size(); t++ ) { + auto& trackSeed = tracksSeed[tracksMatch.trackSeed<I>( t ).cast()]; + auto& trackVelo = tracksVelo[tracksMatch.trackVP<I>( t ).cast()]; + auto& newTrack = out.emplace_back( trackVelo ); + newTrack.addToAncestors( trackSeed ); + newTrack.addToAncestors( trackVelo ); + + for ( auto& state : trackSeed.states() ) { newTrack.addToStates( state ); } + + // set q/p in all of the existing states + auto const qop = tracksMatch.stateQoP<F>( t ).cast(); + auto const errQop2 = m_covarianceValues[4] * qop * qop; + + for ( auto& state : newTrack.states() ) { + state.setQOverP( qop ); + state.setErrQOverP2( errQop2 ); + } + + newTrack.setLhcbIDs( tracksMatch.lhcbIDs( t ), LHCb::Tag::Unordered ); + + newTrack.setType( Track::Type::Long ); + newTrack.setHistory( Track::History::PrMatch ); + newTrack.setPatRecStatus( Track::PatRecStatus::PatRecIDs ); + } + + return out; + }; + +private: + mutable Gaudi::Accumulators::SummingCounter<> m_nbTracksCounter{this, "Nb of Produced Tracks"}; +}; + +DECLARE_COMPONENT( TracksMatchConverter ) -- GitLab From 0946465b0ad7bad6deccf545605eb4aed79f1cc3 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Tue, 23 Jun 2020 22:38:22 +0200 Subject: [PATCH 088/111] use std::optional for default values --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 22 ++++++++++++---------- Pr/PrAlgorithms/src/PrMatchNN.h | 4 ++-- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 6b5824ded7a..5323506eaee 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -13,6 +13,7 @@ // local #include "PrMatchNN.h" #include "Event/StateParameters.h" +#include <optional> 
//----------------------------------------------------------------------------- // Implementation file for class : PrMatchNN // @@ -97,7 +98,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& v const float posYApproxS = seed_pos.y.cast() + ( m_zMatchY - seed_pos.z.cast() ) * seed_dir.y.cast(); if ( posYApproxS > posYApproxV + m_fastYTol ) continue; - const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ); + const float chi2 = getChi2Match( velo_pos, velo_dir, seed_pos, seed_dir, mLPReaderInput ).value_or( 9999.0 ); if ( chi2 < m_maxChi2 ) { @@ -148,17 +149,18 @@ LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& v } //============================================================================= -float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, - const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const { +std::optional<float> PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, + const Vec3<F> sState_pos, const Vec3<F> sState_dir, + std::array<float, 6>& mLPReaderInput ) const { const float tx2 = vState_dir.x.cast() * vState_dir.x.cast(); const float ty2 = vState_dir.y.cast() * vState_dir.y.cast(); const float dSlope = vState_dir.x.cast() - sState_dir.x.cast(); - if ( std::abs( dSlope ) > 1.5 ) return 9999.; + if ( std::abs( dSlope ) > 1.5 ) return std::nullopt; const float dSlopeY = vState_dir.y.cast() - sState_dir.y.cast(); - if ( std::abs( dSlopeY ) > 0.15 ) return 9999.; + if ( std::abs( dSlopeY ) > 0.15 ) return std::nullopt; const float zForX = m_zMagParams[0] + m_zMagParams[1] * std::abs( dSlope ) + m_zMagParams[2] * dSlope * dSlope + m_zMagParams[3] * std::abs( sState_pos.x.cast() ) + @@ -177,9 +179,9 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di const float yS = sState_pos.y.cast() + ( m_zMatchY - sState_pos.z.cast() ) * sState_dir.y.cast(); const float distX = xS - xV; - if ( std::abs( distX ) > 400 ) return 9999.; + if ( std::abs( distX ) > 400 ) return std::nullopt; const float distY = yS - yV; - if ( std::abs( distX ) > 250 ) return 9999.; + if ( std::abs( distX ) > 250 ) return std::nullopt; const float teta2 = tx2 + ty2; const float tolX = dxTol2 + dSlope * dSlope * dxTolSlope2; @@ -189,7 +191,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di chi2 += dSlopeY * dSlopeY * 10000 * 0.0625; - if ( m_maxChi2 < chi2 ) return chi2; + if ( m_maxChi2 < chi2 ) return std::nullopt; mLPReaderInput[0] = chi2; mLPReaderInput[1] = teta2; @@ -198,7 +200,7 @@ float PrMatchNN::getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_di mLPReaderInput[4] = std::abs( dSlope ); mLPReaderInput[5] = std::abs( dSlopeY ); - return chi2; + return std::optional{chi2}; } //============================================================================= @@ -259,7 +261,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& v if ( sc.isFailure() ) { Warning( "momentum determination failed!", sc ).ignore(); // assume the Velo/T station standalone reco do something reasonable - qOverP = -9999.; // what is good nonsense value + qOverP = -9999.; // what is a good nonsense value } else { // adjust q/p and its uncertainty sigmaQOverP = sigmaQOverP * sigmaQOverP; diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 9d80e646d39..26f439b5f99 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ 
b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -147,8 +147,8 @@ private: typedef std::vector<MatchCandidate> MatchCandidates; // calculate matching chi^2 - float getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, - const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; + std::optional<float> getChi2Match( const Vec3<F> vState_pos, const Vec3<F> vState_dir, const Vec3<F> sState_pos, + const Vec3<F> sState_dir, std::array<float, 6>& mLPReaderInput ) const; // merge velo and seed segment to output track LHCb::Pr::Long::Tracks makeTracks( const LHCb::Pr::Velo::Tracks& velos, const LHCb::Pr::Seeding::Tracks& seeds, -- GitLab From 5d55da8156da34c386100d40851975693389bb2d Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 24 Jun 2020 12:36:36 +0200 Subject: [PATCH 089/111] updated RecoUpgradeSequence to take correct velo and seed track locations, added seed and match converters to the sequence --- .../python/TrackSys/RecoUpgradeTracking.py | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py index b2cc5eca6a1..1c15b730565 100755 --- a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py +++ b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py @@ -343,18 +343,30 @@ def RecoSeeding(output_tracks="Rec/Track/Seed"): prHybridSeeding.OutputName = output_tracks return [prHybridSeeding] +def RecoConvertSeeding(input_tracks="Rec/Track/Seed", output_tracks="Rec/Track/PrSeedingTracks"): + from Configurables import LHCb__Converters__Track__PrSeeding__fromTrackv2PrSeedingTracks as SeedConverter + seedConverter= SeedConverter("SeedConverter") + seedConverter.OutputTracks = output_tracks + seedConverter.InputTracks = input_tracks + return [seedConverter] + # Set Matching -def RecoMatch(output_tracks="Rec/Track/Match", - input_seed="Rec/Track/Seed", - input_velo="Rec/Track/Velo"): +def RecoMatch(output_tracks="Rec/Track/MatchSOA", + input_seed="Rec/Track/PrSeedingTracks"): from Configurables import PrMatchNN + from Configurables import VeloClusterTrackingSIMD as VeloTracking prMatch = PrMatchNN("PrMatchNNBest") prMatch.MatchOutput = output_tracks - prMatch.VeloInput = input_velo + prMatch.VeloInput = VeloTracking("VeloClusterTracking").TracksLocation prMatch.SeedInput = input_seed return [prMatch] +def RecoConvertMatch(output_tracks="Rec/Track/Match"): + from Configurables import TracksMatchConverter + tracksMatchConverter= TracksMatchConverter("TracksMatchConverter") + tracksMatchConverter.OutputTracksLocation = output_tracks + return [tracksMatchConverter] # Set Downstream def RecoDownstream(output_tracks="Rec/Track/Downstream", @@ -538,13 +550,12 @@ def RecoBestTrackingStage(tracklists=[], if "Seeding" in trackTypes: algs += RecoSeeding(output_tracks=defTracks["Seeding"]["Location"]) defTracks["Seeding"]["BestUsed"] = True + algs += RecoConvertSeeding() if "Match" in trackTypes: - algs += RecoMatch( - output_tracks=defTracks["Match"]["Location"], - input_seed=defTracks["Seeding"]["Location"], - input_velo=defTracks["Velo"]["Location"]) + algs += RecoMatch(output_tracks="Rec/Track/MatchSOA") defTracks["Match"]["BestUsed"] = True + algs += RecoConvertMatch(output_tracks=defTracks["Match"]["Location"]) if "Downstream" in trackTypes: algs += RecoDownstream( @@ -598,8 +609,8 @@ def RecoBestTrackCreator(defTracks={}, ) trconverter = FromV2TrackV1Track(tracktype + "Converter") trconverter.InputTracksName = 
defTracks[tracktype]["Location"] - trconverter.OutputTracksName = defTracksConverted[tracktype][ - "Location"] + + trconverter.OutputTracksName = defTracksConverted[tracktype]["Location"] #insert in the sequence the converter for the tracks listed in UpgrateTracksToConvert seq_converters.Members += [trconverter] if "Velo" in tracksToConvert: -- GitLab From 57d4200530c41a23c87b7994a87b95e5ea722d09 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 24 Jun 2020 12:40:08 +0200 Subject: [PATCH 090/111] lb-format RecoUpgradeTracking --- .../python/TrackSys/RecoUpgradeTracking.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py index 1c15b730565..6b561305269 100755 --- a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py +++ b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py @@ -343,9 +343,11 @@ def RecoSeeding(output_tracks="Rec/Track/Seed"): prHybridSeeding.OutputName = output_tracks return [prHybridSeeding] -def RecoConvertSeeding(input_tracks="Rec/Track/Seed", output_tracks="Rec/Track/PrSeedingTracks"): + +def RecoConvertSeeding(input_tracks="Rec/Track/Seed", + output_tracks="Rec/Track/PrSeedingTracks"): from Configurables import LHCb__Converters__Track__PrSeeding__fromTrackv2PrSeedingTracks as SeedConverter - seedConverter= SeedConverter("SeedConverter") + seedConverter = SeedConverter("SeedConverter") seedConverter.OutputTracks = output_tracks seedConverter.InputTracks = input_tracks return [seedConverter] @@ -355,19 +357,21 @@ def RecoConvertSeeding(input_tracks="Rec/Track/Seed", output_tracks="Rec/Track/P def RecoMatch(output_tracks="Rec/Track/MatchSOA", input_seed="Rec/Track/PrSeedingTracks"): from Configurables import PrMatchNN - from Configurables import VeloClusterTrackingSIMD as VeloTracking + from Configurables import VeloClusterTrackingSIMD as VeloTracking prMatch = PrMatchNN("PrMatchNNBest") prMatch.MatchOutput = output_tracks - prMatch.VeloInput = VeloTracking("VeloClusterTracking").TracksLocation + prMatch.VeloInput = VeloTracking("VeloClusterTracking").TracksLocation prMatch.SeedInput = input_seed return [prMatch] + def RecoConvertMatch(output_tracks="Rec/Track/Match"): from Configurables import TracksMatchConverter - tracksMatchConverter= TracksMatchConverter("TracksMatchConverter") + tracksMatchConverter = TracksMatchConverter("TracksMatchConverter") tracksMatchConverter.OutputTracksLocation = output_tracks return [tracksMatchConverter] + # Set Downstream def RecoDownstream(output_tracks="Rec/Track/Downstream", input_seed="Rec/Track/Seed"): @@ -610,7 +614,8 @@ def RecoBestTrackCreator(defTracks={}, trconverter = FromV2TrackV1Track(tracktype + "Converter") trconverter.InputTracksName = defTracks[tracktype]["Location"] - trconverter.OutputTracksName = defTracksConverted[tracktype]["Location"] + trconverter.OutputTracksName = defTracksConverted[tracktype][ + "Location"] #insert in the sequence the converter for the tracks listed in UpgrateTracksToConvert seq_converters.Members += [trconverter] if "Velo" in tracksToConvert: -- GitLab From ee25d8904e296c7f879a198a1aabff8269fa6faa Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Fri, 26 Jun 2020 17:07:40 +0200 Subject: [PATCH 091/111] remove redundant lines --- Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp | 1 - Tr/TrackUtils/src/TracksUTConverter.cpp | 6 +----- Tr/TrackUtils/src/TracksVPConverter.cpp | 2 -- 3 files changed, 1 insertion(+), 8 deletions(-) 
diff --git a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp index 0c8894a47e4..b0e53d6f1f4 100644 --- a/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp +++ b/Pr/PrPixel/src/VeloClusterTrackingSIMD.cpp @@ -866,7 +866,6 @@ namespace LHCb::Pr::Velo { tracksBackward.compressstore_nHits( i, backwards, n_hits ); tracksBackward.compressstore_stateDir( i, 0, backwards, dir ); - for ( int h = 0; h < max_hits; h++ ) { tracksBackward.compressstore_vp_index( i, h, backwards, tracks.hit<I>( t, h ) ); auto hit_index = select( h < n_hits, tracks.hit<I>( t, h ), 0 ); diff --git a/Tr/TrackUtils/src/TracksUTConverter.cpp b/Tr/TrackUtils/src/TracksUTConverter.cpp index caa184ab118..2abfdc499ba 100644 --- a/Tr/TrackUtils/src/TracksUTConverter.cpp +++ b/Tr/TrackUtils/src/TracksUTConverter.cpp @@ -65,11 +65,7 @@ public: for ( auto& state : newTrack.states() ) state.setQOverP( tracksUT.stateQoP<F>( t ).cast() ); // Add LHCbIds - int n_hits = tracksUT.template nHits<I>( t ).cast(); - for ( int i = 0; i < n_hits; i++ ) { - int lhcbid = tracksUT.lhcbID<I>( t, i ).cast(); - newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); - } + newTrack.setLhcbIDs( tracksUT.lhcbIDs( t ), LHCb::Tag::Unordered ); // As we don't need the state in the UT, it is not added in PrVeloUT // and can't be added here. diff --git a/Tr/TrackUtils/src/TracksVPConverter.cpp b/Tr/TrackUtils/src/TracksVPConverter.cpp index d3fd567c8e9..d20a1c23abb 100644 --- a/Tr/TrackUtils/src/TracksVPConverter.cpp +++ b/Tr/TrackUtils/src/TracksVPConverter.cpp @@ -19,7 +19,6 @@ #include "Event/Track.h" #include "Kernel/VPConstants.h" -#include "Event/PrVeloHits.h" #include "Event/PrVeloTracks.h" #include "Event/VPLightCluster.h" @@ -31,7 +30,6 @@ namespace { using Track = LHCb::Event::v2::Track; using Tracks = LHCb::Pr::Velo::Tracks; - using Hits = LHCb::Pr::Velo::Hits; using dType = SIMDWrapper::scalar::types; using I = dType::int_v; -- GitLab From 847820103fba3b498a2b0339a61aa682bfa5cffd Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 29 Jun 2020 19:03:09 +0200 Subject: [PATCH 092/111] Add PrResidualSeeding --- Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp | 98 ++++++++++++++++++ Pr/PrAlgorithms/src/PrResidualSeeding.cpp | 89 +++++++++++++++++ Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 110 +++++++++++---------- 3 files changed, 246 insertions(+), 51 deletions(-) create mode 100644 Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp create mode 100644 Pr/PrAlgorithms/src/PrResidualSeeding.cpp diff --git a/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp new file mode 100644 index 00000000000..91ab696e46a --- /dev/null +++ b/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp @@ -0,0 +1,98 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. 
* +\*****************************************************************************/ +// Include files +#include "Event/ODIN.h" +#include "Event/PrLongTracks.h" +#include "Event/PrUpstreamTracks.h" +#include "Event/Track.h" +#include "Event/Track_v2.h" +#include "Gaudi/Accumulators.h" +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/IRegistry.h" +#include "PrKernel/PrHit.h" +#include "PrKernel/PrUTHitHandler.h" +#include "UTDAQ/UTInfo.h" +#include "UTDet/DeUTDetector.h" +#include <Vc/Vc> +#include <vector> + +#include "boost/container/small_vector.hpp" +#include "boost/container/static_vector.hpp" +#include "boost/dynamic_bitset.hpp" +#include <memory> + +//----------------------------------------------------------------------------- +// class : PrResidualPrUTHits +// Store residual PrUTHits after other Algorithms, e.g. PrMatchNN/PrForward used +// +// 2020-04-21 : Peilian Li +// +//----------------------------------------------------------------------------- + +template <typename T> +class PrResidualPrUTHits + : public Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )> { + +public: + using base_class_t = + Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )>; + + LHCb::Pr::UT::HitHandler operator()( const T&, const LHCb::Pr::UT::HitHandler& ) const override; + + PrResidualPrUTHits( const std::string& name, ISvcLocator* pSvcLocator ) + : base_class_t( name, pSvcLocator, + std::array{typename base_class_t::KeyValue{"TracksLocation", ""}, + typename base_class_t::KeyValue{"PrUTHitsLocation", ""}}, + typename base_class_t::KeyValue{"PrUTHitsOutput", ""} ) {} +}; + +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID( PrResidualPrUTHits<LHCb::Pr::Long::Tracks>, "PrResidualPrUTHits" ) +DECLARE_COMPONENT_WITH_ID( PrResidualPrUTHits<LHCb::Pr::Upstream::Tracks>, "PrResidualPrUTHits_Upstream" ) + +//============================================================================= +// Main execution +//============================================================================= +template <typename T> +LHCb::Pr::UT::HitHandler PrResidualPrUTHits<T>::operator()( const T& tracks, + const LHCb::Pr::UT::HitHandler& uthithandler ) const { + LHCb::Pr::UT::HitHandler tmp{}; + + using scalar = SIMDWrapper::scalar::types; + using sI = scalar::int_v; + + // mark used UT hits + const unsigned int nhits = uthithandler.nHits(); + boost::dynamic_bitset<> used{nhits, false}; + + for ( int t = 0; t < tracks.size(); t++ ) { + const int nuthits = tracks.template nUTHits<sI>( t ).cast(); + for ( int idx = 0; idx < nuthits; idx++ ) { + const int index = tracks.template ut_index<sI>( t, idx ).cast(); + if ( index >= 0 ) used[index] = true; + } + } + + const auto& allhits = uthithandler.hits(); + const int fullChanIdx = + static_cast<int>( UTInfo::DetectorNumbers::Layers ) * static_cast<int>( UTInfo::DetectorNumbers::Stations ) * + static_cast<int>( UTInfo::DetectorNumbers::Regions ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ); + + for ( auto fullchan = 0; fullchan < fullChanIdx; fullchan++ ) { + const auto indexs = uthithandler.indices( fullchan ); + + for ( int idx = indexs.first; idx != indexs.second; idx++ ) { + if ( used[idx] ) continue; + tmp.copyHit( fullchan, idx, allhits ); + } + } + return tmp; +} diff --git a/Pr/PrAlgorithms/src/PrResidualSeeding.cpp b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp new file mode 100644 index 00000000000..38725b32783 --- /dev/null +++ 
b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp @@ -0,0 +1,89 @@ +/*****************************************************************************\ +* (c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration * +* * +* This software is distributed under the terms of the GNU General Public * +* Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". * +* * +* In applying this licence, CERN does not waive the privileges and immunities * +* granted to it by virtue of its status as an Intergovernmental Organization * +* or submit itself to any jurisdiction. * +\*****************************************************************************/ +// Include files +#include "Event/ODIN.h" +#include "Event/Track.h" +#include "Event/Track_v2.h" +#include "Event/PrLongTracks.h" +#include "Gaudi/Accumulators.h" +#include "GaudiAlg/Transformer.h" +#include "GaudiKernel/IRegistry.h" +#include "PrKernel/PrFTHitHandler.h" +#include "PrKernel/PrFTInfo.h" +#include "PrKernel/PrFTZoneHandler.h" +#include "PrKernel/PrHit.h" +#include <Vc/Vc> +#include <vector> + +#include "boost/container/small_vector.hpp" +#include "boost/container/static_vector.hpp" +#include "boost/dynamic_bitset.hpp" +#include <memory> + +//----------------------------------------------------------------------------- +// class : PrResidualSeeding +// Store residual Seeding tracks after other Algorithms, e.g. PrMatchNN used +// +// 2020-04-20: Peilian Li +// +//----------------------------------------------------------------------------- + +class PrResidualSeeding : public Gaudi::Functional::Transformer<std::vector<LHCb::Event::v2::Track>( + const LHCb::Pr::Long::Tracks&, const std::vector<LHCb::Event::v2::Track>& )> { + + using Track = LHCb::Event::v2::Track; + using Tracks = std::vector<LHCb::Event::v2::Track>; + +public: + PrResidualSeeding( const std::string& name, ISvcLocator* pSvcLocator ); + + Tracks operator()( const LHCb::Pr::Long::Tracks&, const Tracks& ) const override; +}; + +// Declaration of the Algorithm Factory +DECLARE_COMPONENT_WITH_ID( PrResidualSeeding, "PrResidualSeeding" ) + +//============================================================================= +// Standard constructor, initializes variables +//============================================================================= +PrResidualSeeding::PrResidualSeeding( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, {KeyValue{"MatchTracksLocation", ""}, KeyValue{"SeedTrackLocation", ""}}, + KeyValue{"SeedTrackOutput", ""} ) {} + +//============================================================================= +// Main execution +//============================================================================= +std::vector<LHCb::Event::v2::Track> PrResidualSeeding::operator()( const LHCb::Pr::Long::Tracks& matchtracks, + const Tracks& seedtracks ) const { + Tracks tmptracks{}; + tmptracks.reserve( seedtracks.size() ); + + if ( seedtracks.empty() ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Seed Track container '" << inputLocation<Tracks>() << "' is empty" << endmsg; + return tmptracks; + } + + boost::dynamic_bitset<> used{seedtracks.size(), false}; + + for ( int t = 0; t < matchtracks.size(); t ++ ) { + const auto trackseed = matchtracks.trackSeed<SIMDWrapper::scalar::types::int_v>( t ).cast(); + used[trackseed] = true; + } + int itrack = -1; + for ( auto& track : seedtracks ) { + itrack++; + if ( used[itrack] ) continue; + tmptracks.push_back( track ); + } + + return tmptracks; +} diff --git 
a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index c4a38f7b779..0c5b3f6e8b4 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -10,10 +10,9 @@ \*****************************************************************************/ // Include files #include "Event/ODIN.h" -#include "Event/PrLongTracks.h" -#include "Event/PrUpstreamTracks.h" #include "Event/Track.h" #include "Event/Track_v2.h" +#include "Event/PrLongTracks.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -22,6 +21,10 @@ #include "PrKernel/UTHit.h" #include "PrKernel/UTHitHandler.h" #include "PrKernel/UTHitInfo.h" +#include "DetDesc/Condition.h" +#include "DetDesc/ConditionAccessorHolder.h" +#include "DetDesc/GenericConditionAccessorHolder.h" +#include "DetDesc/IConditionDerivationMgr.h" #include "UTDAQ/UTInfo.h" #include "UTDet/DeUTDetector.h" #include <Vc/Vc> @@ -29,7 +32,6 @@ #include "boost/container/small_vector.hpp" #include "boost/container/static_vector.hpp" -#include "boost/dynamic_bitset.hpp" #include <memory> //----------------------------------------------------------------------------- @@ -40,72 +42,78 @@ // //----------------------------------------------------------------------------- -template <typename T> -class PrResidualUTHits - : public Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )> { +class PrResidualUTHits : public Gaudi::Functional::Transformer<UT::HitHandler( + const LHCb::Pr::Long::Tracks&, const UT::HitHandler&)> { + + using Tracks = LHCb::Pr::Long::Tracks; public: - // StatusCode initialize() override; - using base_class_t = - Gaudi::Functional::Transformer<LHCb::Pr::UT::HitHandler( const T&, const LHCb::Pr::UT::HitHandler& )>; + StatusCode initialize() override; - // PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); + PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ); - LHCb::Pr::UT::HitHandler operator()( const T&, const LHCb::Pr::UT::HitHandler& ) const override; + UT::HitHandler operator()( const Tracks&, const UT::HitHandler& ) const override; + +private: + DeUTDetector* m_utDet = nullptr; - PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class_t( name, pSvcLocator, - std::array{typename base_class_t::KeyValue{"TracksLocation", ""}, - typename base_class_t::KeyValue{"UTHitsLocation", ""}}, - typename base_class_t::KeyValue{"UTHitsOutput", ""} ) {} }; // Declaration of the Algorithm Factory -DECLARE_COMPONENT_WITH_ID( PrResidualUTHits<LHCb::Pr::Long::Tracks>, "PrResidualUTHits" ) -DECLARE_COMPONENT_WITH_ID( PrResidualUTHits<LHCb::Pr::Upstream::Tracks>, "PrResidualUTHits_Upstream" ) +DECLARE_COMPONENT_WITH_ID( PrResidualUTHits, "PrResidualUTHits" ) //============================================================================= +// Standard constructor, initializes variables +//============================================================================= +PrResidualUTHits::PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLocator ) + : Transformer( name, pSvcLocator, {KeyValue{"TracksLocation", ""}, KeyValue{"UTHitsLocation", ""}}, + KeyValue{"UTHitsOutput", ""} ) {} + +// initialisation +StatusCode PrResidualUTHits::initialize() { + return GaudiAlgorithm::initialize().andThen( [&] { + m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); + }); +} // Main execution 
//============================================================================= -template <typename T> -LHCb::Pr::UT::HitHandler PrResidualUTHits<T>::operator()( const T& tracks, - const LHCb::Pr::UT::HitHandler& uthithandler ) const { - LHCb::Pr::UT::HitHandler tmp{}; - - /* - if ( tracks.size()==0 ) { - if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<T>() << "' is empty" << endmsg; - return &uthithandler; - } - */ - - using scalar = SIMDWrapper::scalar::types; - using sI = scalar::int_v; +UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::HitHandler& uthithandler ) const { - // mark used UT hits - const unsigned int nhits = uthithandler.nHits(); - boost::dynamic_bitset<> used{nhits, false}; + UT::HitHandler tmp{}; - for ( int t = 0; t < tracks.size(); t++ ) { - const int nuthits = tracks.template nUTHits<sI>( t ).cast(); - for ( int idx = 0; idx < nuthits; idx++ ) { - const int index = tracks.template ut_index<sI>( t, idx ).cast(); - if ( index >= 0 ) used[index] = true; - } + if ( tracks.size()==0 ) { + if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) + debug() << "Track container '" << inputLocation<Tracks>() << "' is empty" << endmsg; + return uthithandler; } - const auto& allhits = uthithandler.hits(); - const int fullChanIdx = - static_cast<int>( UTInfo::DetectorNumbers::Layers ) * static_cast<int>( UTInfo::DetectorNumbers::Stations ) * - static_cast<int>( UTInfo::DetectorNumbers::Regions ) * static_cast<int>( UTInfo::DetectorNumbers::Sectors ); + std::vector<long unsigned int> usedUTHits{}; + usedUTHits.reserve( uthithandler.nbHits() ); - for ( auto fullchan = 0; fullchan < fullChanIdx; fullchan++ ) { - const auto indexs = uthithandler.indices( fullchan ); + for ( int t = 0; t < tracks.size(); t++ ){ + const auto ids = tracks.lhcbIDs( t ); + for ( auto id : ids ) { + if ( !( id.isUT() ) ) continue; + usedUTHits.emplace_back( id.utID().channelID() ); + } + } - for ( int idx = indexs.first; idx != indexs.second; idx++ ) { - if ( used[idx] ) continue; - tmp.copyHit( fullchan, idx, allhits ); + for ( int iStation = 1; iStation <= static_cast<int>( UTInfo::DetectorNumbers::Stations ); ++iStation ) { + for ( int iLayer = 1; iLayer <= static_cast<int>( UTInfo::DetectorNumbers::Layers ); ++iLayer ) { + for ( int iRegion = 1; iRegion <= static_cast<int>( UTInfo::DetectorNumbers::Regions ); ++iRegion ) { + for ( int iSector = 1; iSector <= static_cast<int>( UTInfo::DetectorNumbers::Sectors ); ++iSector ) { + for ( auto& uthit : uthithandler.hits( iStation, iLayer, iRegion, iSector ) ) { + bool used = std::any_of( usedUTHits.begin(), usedUTHits.end(), + [utid = uthit.chanID().channelID()]( const auto& id ) { return utid == id; } ); + + if ( used ) continue; + const unsigned int fullChanIdx = UT::HitHandler::HitsInUT::idx( iStation, iLayer, iRegion, iSector ); + const auto* aSector = m_utDet->getSector( uthit.chanID() ); + tmp.AddHit( aSector, fullChanIdx, uthit.strip(), uthit.fracStrip(), uthit.chanID(), uthit.size(), + uthit.highThreshold() ); + } + } + } } } return tmp; -- GitLab From 17a1597bf7b51fefcec26fc9cf33a0c5a4a55704 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 30 Jun 2020 23:28:02 +0200 Subject: [PATCH 093/111] fix test errors --- Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp | 3 +-- Pr/PrVeloUT/src/PrVeloUT.h | 2 +- Tr/TrackUtils/src/TracksFTConverter.cpp | 2 +- Tr/TrackUtils/src/TracksUTConverter.cpp | 6 +++++- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git 
a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp index 64aea9606d0..cb5d012d7d7 100644 --- a/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp +++ b/Pr/PrConverters/src/fromPrFittedTrackTrackv2.cpp @@ -74,7 +74,6 @@ namespace { for ( int t = 0; t < fitted_tracks.size(); t++ ) { auto forward_track_index = fitted_tracks.trackFT<I>( t ).cast(); auto& newTrack = out.emplace_back(); - // set track flags newTrack.setType( LHCb::Event::v2::Track::Type::Long ); newTrack.setHistory( LHCb::Event::v2::Track::History::PrForward ); @@ -108,7 +107,7 @@ namespace { fitted_tracks.chi2nDof<I>( t ).cast()} ); // If we rely on pointers internally stored in the classes we can take it from fitted tracks - auto lhcbids = fitted_tracks.lhcbIDs( t ); + auto lhcbids = forward_tracks.lhcbIDs( forward_track_index ); newTrack.addToLhcbIDs( lhcbids, LHCb::Tag::Unordered_tag{} ); } return out; diff --git a/Pr/PrVeloUT/src/PrVeloUT.h b/Pr/PrVeloUT/src/PrVeloUT.h index 8ba40976b27..dde1f59f2e2 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.h +++ b/Pr/PrVeloUT/src/PrVeloUT.h @@ -105,7 +105,7 @@ namespace LHCb::Pr { F( &poss[at + 2 * max_tracks] ).compressstore( mask, &poss[size + 2 * max_tracks] ); F( &dirs[at] ).compressstore( mask, &dirs[size] ); F( &dirs[at + max_tracks] ).compressstore( mask, &dirs[size + max_tracks] ); - F( &covs[at + max_tracks] ).compressstore( mask, &covs[size] ); + F( &covs[at] ).compressstore( mask, &covs[size] ); F( &covs[at + max_tracks] ).compressstore( mask, &covs[size + max_tracks] ); F( &covs[at + 2 * max_tracks] ).compressstore( mask, &covs[size + 2 * max_tracks] ); I( &indexs[at] ).compressstore( mask, &indexs[size] ); diff --git a/Tr/TrackUtils/src/TracksFTConverter.cpp b/Tr/TrackUtils/src/TracksFTConverter.cpp index 7fad8b3c752..52dda994a7e 100644 --- a/Tr/TrackUtils/src/TracksFTConverter.cpp +++ b/Tr/TrackUtils/src/TracksFTConverter.cpp @@ -101,7 +101,7 @@ public: newTrack.addToStates( state ); // Add LHCbIds - newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); newTrack.setHistory( Track::History::PrForward ); diff --git a/Tr/TrackUtils/src/TracksUTConverter.cpp b/Tr/TrackUtils/src/TracksUTConverter.cpp index 2abfdc499ba..caa184ab118 100644 --- a/Tr/TrackUtils/src/TracksUTConverter.cpp +++ b/Tr/TrackUtils/src/TracksUTConverter.cpp @@ -65,7 +65,11 @@ public: for ( auto& state : newTrack.states() ) state.setQOverP( tracksUT.stateQoP<F>( t ).cast() ); // Add LHCbIds - newTrack.setLhcbIDs( tracksUT.lhcbIDs( t ), LHCb::Tag::Unordered ); + int n_hits = tracksUT.template nHits<I>( t ).cast(); + for ( int i = 0; i < n_hits; i++ ) { + int lhcbid = tracksUT.lhcbID<I>( t, i ).cast(); + newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); + } // As we don't need the state in the UT, it is not added in PrVeloUT // and can't be added here. 
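For illustration only (not part of the applied diff): the PrVeloUT.h change above corrects the component offset in the covariance copy. The SoA convention assumed there stores component c of track slot t at covs[t + c * max_tracks], so a compressed move from slot 'at' to slot 'size' must keep the same component offset on both the read and the write side. The unrolled statements in that file are equivalent to this sketch (loop form and names shown only as an illustration of the layout):

    // copy the three stored covariance components of one track slot,
    // preserving the per-component stride of max_tracks
    for ( int c = 0; c < 3; ++c )
      F( &covs[at + c * max_tracks] ).compressstore( mask, &covs[size + c * max_tracks] );
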
-- GitLab From 18ddaeff96625597f7c997154ecd8c4b2ac21f1b Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 30 Jun 2020 23:13:31 +0000 Subject: [PATCH 094/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/9038792 --- Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp | 2 +- Pr/PrAlgorithms/src/PrResidualSeeding.cpp | 12 +++++------ Pr/PrAlgorithms/src/PrResidualUTHits.cpp | 25 ++++++++++------------ Tr/TrackUtils/src/TracksFTConverter.cpp | 2 +- 4 files changed, 19 insertions(+), 22 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp index 91ab696e46a..f77483c4162 100644 --- a/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualPrUTHits.cpp @@ -63,7 +63,7 @@ DECLARE_COMPONENT_WITH_ID( PrResidualPrUTHits<LHCb::Pr::Upstream::Tracks>, "PrRe //============================================================================= template <typename T> LHCb::Pr::UT::HitHandler PrResidualPrUTHits<T>::operator()( const T& tracks, - const LHCb::Pr::UT::HitHandler& uthithandler ) const { + const LHCb::Pr::UT::HitHandler& uthithandler ) const { LHCb::Pr::UT::HitHandler tmp{}; using scalar = SIMDWrapper::scalar::types; diff --git a/Pr/PrAlgorithms/src/PrResidualSeeding.cpp b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp index 38725b32783..f8ebd3ca5bf 100644 --- a/Pr/PrAlgorithms/src/PrResidualSeeding.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp @@ -10,9 +10,9 @@ \*****************************************************************************/ // Include files #include "Event/ODIN.h" +#include "Event/PrLongTracks.h" #include "Event/Track.h" #include "Event/Track_v2.h" -#include "Event/PrLongTracks.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -62,7 +62,7 @@ PrResidualSeeding::PrResidualSeeding( const std::string& name, ISvcLocator* pSvc // Main execution //============================================================================= std::vector<LHCb::Event::v2::Track> PrResidualSeeding::operator()( const LHCb::Pr::Long::Tracks& matchtracks, - const Tracks& seedtracks ) const { + const Tracks& seedtracks ) const { Tracks tmptracks{}; tmptracks.reserve( seedtracks.size() ); @@ -74,13 +74,13 @@ std::vector<LHCb::Event::v2::Track> PrResidualSeeding::operator()( const LHCb::P boost::dynamic_bitset<> used{seedtracks.size(), false}; - for ( int t = 0; t < matchtracks.size(); t ++ ) { - const auto trackseed = matchtracks.trackSeed<SIMDWrapper::scalar::types::int_v>( t ).cast(); - used[trackseed] = true; + for ( int t = 0; t < matchtracks.size(); t++ ) { + const auto trackseed = matchtracks.trackSeed<SIMDWrapper::scalar::types::int_v>( t ).cast(); + used[trackseed] = true; } int itrack = -1; for ( auto& track : seedtracks ) { - itrack++; + itrack++; if ( used[itrack] ) continue; tmptracks.push_back( track ); } diff --git a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp index 0c5b3f6e8b4..6c018ae9220 100644 --- a/Pr/PrAlgorithms/src/PrResidualUTHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualUTHits.cpp @@ -9,10 +9,14 @@ * or submit itself to any jurisdiction. 
* \*****************************************************************************/ // Include files +#include "DetDesc/Condition.h" +#include "DetDesc/ConditionAccessorHolder.h" +#include "DetDesc/GenericConditionAccessorHolder.h" +#include "DetDesc/IConditionDerivationMgr.h" #include "Event/ODIN.h" +#include "Event/PrLongTracks.h" #include "Event/Track.h" #include "Event/Track_v2.h" -#include "Event/PrLongTracks.h" #include "Gaudi/Accumulators.h" #include "GaudiAlg/Transformer.h" #include "GaudiKernel/IRegistry.h" @@ -21,10 +25,6 @@ #include "PrKernel/UTHit.h" #include "PrKernel/UTHitHandler.h" #include "PrKernel/UTHitInfo.h" -#include "DetDesc/Condition.h" -#include "DetDesc/ConditionAccessorHolder.h" -#include "DetDesc/GenericConditionAccessorHolder.h" -#include "DetDesc/IConditionDerivationMgr.h" #include "UTDAQ/UTInfo.h" #include "UTDet/DeUTDetector.h" #include <Vc/Vc> @@ -42,8 +42,8 @@ // //----------------------------------------------------------------------------- -class PrResidualUTHits : public Gaudi::Functional::Transformer<UT::HitHandler( - const LHCb::Pr::Long::Tracks&, const UT::HitHandler&)> { +class PrResidualUTHits + : public Gaudi::Functional::Transformer<UT::HitHandler( const LHCb::Pr::Long::Tracks&, const UT::HitHandler& )> { using Tracks = LHCb::Pr::Long::Tracks; @@ -56,7 +56,6 @@ public: private: DeUTDetector* m_utDet = nullptr; - }; // Declaration of the Algorithm Factory @@ -69,11 +68,9 @@ PrResidualUTHits::PrResidualUTHits( const std::string& name, ISvcLocator* pSvcLo : Transformer( name, pSvcLocator, {KeyValue{"TracksLocation", ""}, KeyValue{"UTHitsLocation", ""}}, KeyValue{"UTHitsOutput", ""} ) {} -// initialisation +// initialisation StatusCode PrResidualUTHits::initialize() { - return GaudiAlgorithm::initialize().andThen( [&] { - m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); - }); + return GaudiAlgorithm::initialize().andThen( [&] { m_utDet = getDet<DeUTDetector>( DeUTDetLocation::UT ); } ); } // Main execution //============================================================================= @@ -81,7 +78,7 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit UT::HitHandler tmp{}; - if ( tracks.size()==0 ) { + if ( tracks.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) debug() << "Track container '" << inputLocation<Tracks>() << "' is empty" << endmsg; return uthithandler; @@ -90,7 +87,7 @@ UT::HitHandler PrResidualUTHits::operator()( const Tracks& tracks, const UT::Hit std::vector<long unsigned int> usedUTHits{}; usedUTHits.reserve( uthithandler.nbHits() ); - for ( int t = 0; t < tracks.size(); t++ ){ + for ( int t = 0; t < tracks.size(); t++ ) { const auto ids = tracks.lhcbIDs( t ); for ( auto id : ids ) { if ( !( id.isUT() ) ) continue; diff --git a/Tr/TrackUtils/src/TracksFTConverter.cpp b/Tr/TrackUtils/src/TracksFTConverter.cpp index 52dda994a7e..7fad8b3c752 100644 --- a/Tr/TrackUtils/src/TracksFTConverter.cpp +++ b/Tr/TrackUtils/src/TracksFTConverter.cpp @@ -101,7 +101,7 @@ public: newTrack.addToStates( state ); // Add LHCbIds - newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); + newTrack.setLhcbIDs( tracksFT.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); newTrack.setHistory( Track::History::PrForward ); -- GitLab From 84d8e8a8bba628b31b8370eab6594fa0bbee0ee9 Mon Sep 17 00:00:00 2001 From: sesen <sevda.esen@cern.ch> Date: Wed, 1 Jul 2020 14:35:36 +0200 Subject: [PATCH 095/111] add padding to PrMatchNN --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 9 ++++++++- 
Pr/PrAlgorithms/src/PrMatchNN.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 0f1eaba7cf3..1f41b4d2137 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -209,7 +209,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& v } for ( auto idx{0}; idx < nSeedHits; ++idx ) { - result.store_ft_index<I>( currentsize, idx, seeds.ft_index<I>( match.sTr(), nVeloHits ) ); + result.store_ft_index<I>( currentsize, idx, seeds.ft_index<I>( match.sTr(), idx ) ); result.store_lhcbID<I>( currentsize, nVeloHits + idx, seeds.hit<I>( match.sTr(), idx ) ); } result.store_ut_index<I>( currentsize, 0, -1 ); @@ -274,6 +274,13 @@ LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& v } } + // padding results to avoid FPEs + result.store_stateQoP<simd::float_v>( result.size(), simd::float_v( 1.f ) ); + result.store_vStatePos<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_vStateDir<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_statePos<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + result.store_stateDir<simd::float_v>( result.size(), Vec3<simd::float_v>( 1.f, 1.f, 1.f ) ); + return result; } //============================================================================= diff --git a/Pr/PrAlgorithms/src/PrMatchNN.h b/Pr/PrAlgorithms/src/PrMatchNN.h index 26f439b5f99..45373daf065 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.h +++ b/Pr/PrAlgorithms/src/PrMatchNN.h @@ -41,6 +41,7 @@ */ namespace { + using simd = SIMDWrapper::avx256::types; using dType = SIMDWrapper::scalar::types; using I = dType::int_v; using F = dType::float_v; -- GitLab From 3e39c776217e2d00533ebb2821f07da8a2cc80ce Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 1 Jul 2020 15:23:55 +0200 Subject: [PATCH 096/111] fix FPEs in Brunel test --- Pr/PrAlgorithms/src/PrMatchNN.cpp | 2 +- Pr/PrMCTools/src/PrUTCounter.cpp | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 1f41b4d2137..60ac8ed2248 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -59,7 +59,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::operator()( const LHCb::Pr::Velo::Tracks& v if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) { debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << seeds.size() << endmsg; - debug() << "Track container '" << inputLocation<LHCb::Pr::Seeding::Tracks>() << "' has size " << velos.size() + debug() << "Track container '" << inputLocation<LHCb::Pr::Velo::Tracks>() << "' has size " << velos.size() << endmsg; } diff --git a/Pr/PrMCTools/src/PrUTCounter.cpp b/Pr/PrMCTools/src/PrUTCounter.cpp index 94cfdb7653f..53a3d4561b2 100644 --- a/Pr/PrMCTools/src/PrUTCounter.cpp +++ b/Pr/PrMCTools/src/PrUTCounter.cpp @@ -164,7 +164,6 @@ void PrUTCounter::countAndPlot( const IHistoTool* htool, const ITrackExtrapolato for ( std::vector<LHCb::LHCbID>::const_iterator itId = ids.begin(); ids.end() != itId; ++itId ) { if ( ( *itId ).isUT() ) { ttIds.push_back( *itId ); } } - std::vector<bool> shallIPlotTheHistograms( flags.size(), false ); for ( unsigned int kk = 0; flags.size() > kk; ++kk ) { @@ -379,11 +378,13 @@ void PrUTCounter::printStatistics( MsgStream& info, std::string location ) { double bad = m_nbGhostHit 
/ m_nbGhost; info << format( "%6.0f ghost, %5.2f UT per track", m_nbGhost, bad ) << endmsg; } - if ( m_triggerNumbers ) + if ( m_triggerNumbers ){ + double gosttrig = 0; + if(m_totTrackTrigger !=0 ) gosttrig = 100. * m_totGhostTrigger / m_totTrackTrigger; info << "**** " << strigger - << format( "%7d tracks including %7d ghosts [%4.1f %%] ****", m_totTrackTrigger, m_totGhostTrigger, - 100. * m_totGhostTrigger / m_totTrackTrigger ) + << format( "%7d tracks including %7d ghosts [%4.1f %%] ****", m_totTrackTrigger, m_totGhostTrigger, gosttrig ) << endmsg; + } for ( unsigned int kk = 0; m_name.size() > kk; ++kk ) { if ( 0.5 > m_nbTrack[kk] ) continue; -- GitLab From e5d66136886068aebebc31be19df9e6bb11350bd Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Wed, 1 Jul 2020 13:24:49 +0000 Subject: [PATCH 097/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/9050519 --- Pr/PrMCTools/src/PrUTCounter.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Pr/PrMCTools/src/PrUTCounter.cpp b/Pr/PrMCTools/src/PrUTCounter.cpp index 53a3d4561b2..1c373371372 100644 --- a/Pr/PrMCTools/src/PrUTCounter.cpp +++ b/Pr/PrMCTools/src/PrUTCounter.cpp @@ -378,11 +378,12 @@ void PrUTCounter::printStatistics( MsgStream& info, std::string location ) { double bad = m_nbGhostHit / m_nbGhost; info << format( "%6.0f ghost, %5.2f UT per track", m_nbGhost, bad ) << endmsg; } - if ( m_triggerNumbers ){ + if ( m_triggerNumbers ) { double gosttrig = 0; - if(m_totTrackTrigger !=0 ) gosttrig = 100. * m_totGhostTrigger / m_totTrackTrigger; + if ( m_totTrackTrigger != 0 ) gosttrig = 100. * m_totGhostTrigger / m_totTrackTrigger; info << "**** " << strigger - << format( "%7d tracks including %7d ghosts [%4.1f %%] ****", m_totTrackTrigger, m_totGhostTrigger, gosttrig ) + << format( "%7d tracks including %7d ghosts [%4.1f %%] ****", m_totTrackTrigger, m_totGhostTrigger, + gosttrig ) << endmsg; } -- GitLab From 934be12dd3f33eaa638651b9aacc1f1bffd8dc59 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Wed, 1 Jul 2020 18:22:45 +0200 Subject: [PATCH 098/111] make consistent input name --- Pr/PrAlgorithms/src/PrResidualSeeding.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrResidualSeeding.cpp b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp index f8ebd3ca5bf..a6dad011ba4 100644 --- a/Pr/PrAlgorithms/src/PrResidualSeeding.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSeeding.cpp @@ -55,8 +55,8 @@ DECLARE_COMPONENT_WITH_ID( PrResidualSeeding, "PrResidualSeeding" ) // Standard constructor, initializes variables //============================================================================= PrResidualSeeding::PrResidualSeeding( const std::string& name, ISvcLocator* pSvcLocator ) - : Transformer( name, pSvcLocator, {KeyValue{"MatchTracksLocation", ""}, KeyValue{"SeedTrackLocation", ""}}, - KeyValue{"SeedTrackOutput", ""} ) {} + : Transformer( name, pSvcLocator, {KeyValue{"MatchTracksLocation", ""}, KeyValue{"SeedTracksLocation", ""}}, + KeyValue{"SeedTracksOutput", ""} ) {} //============================================================================= // Main execution -- GitLab From 7e8633b327a67ebb309f9aba808d04919603990d Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 2 Jul 2020 20:41:33 +0200 Subject: [PATCH 099/111] fill in vStatePos and vStateDir --- Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp index 093d9f2bb97..73598101a90 100644 --- a/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp +++ b/Pr/SciFiTrackForwarding/src/SciFiTrackForwarding.cpp @@ -668,6 +668,8 @@ TracksFT SciFiTrackForwarding::operator()( EventContext const& evtCtx, SciFiTrac float const qop = 1.f / bestcandidate.PQ; Output.compressstore_stateQoP<sF>( i, mask, qop ); + Output.compressstore_vStatePos<sF>( i, mask, scalar_endv_pos ); + Output.compressstore_vStateDir<sF>( i, mask, scalar_endv_dir ); // store Velo hit indices const int vphits = tracks.nVPHits<sI>( uttrack + tr ).cast(); -- GitLab From 0dda92a01dc521e7189ef010ab7e3cc45f552590 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Fri, 3 Jul 2020 08:57:34 +0200 Subject: [PATCH 100/111] keep lhcbID consistent in all tracks --- Phys/FunctorCache/CMakeLists.txt | 3 ++- Pr/PrAlgorithms/src/PrMatchNN.cpp | 2 +- Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Phys/FunctorCache/CMakeLists.txt b/Phys/FunctorCache/CMakeLists.txt index a9bf9500d4f..5248fe51b43 100644 --- a/Phys/FunctorCache/CMakeLists.txt +++ b/Phys/FunctorCache/CMakeLists.txt @@ -36,6 +36,7 @@ if(conf_deps) list(INSERT conf_deps 0 DEPENDS) endif() +set(THOR_BUILD_TEST_FUNCTOR_CACHE ON) # Only actually build a functor cache if it is explicitly requested if(THOR_BUILD_TEST_FUNCTOR_CACHE) # Disable LoKi-specific hacks in LoKiFunctorsCachePostActionOpts.py @@ -55,4 +56,4 @@ if(THOR_BUILD_TEST_FUNCTOR_CACHE) # Restore the old value set(LOKI_FUNCTORS_CACHE_POST_ACTION_OPTS ${LOKI_FUNCTORS_CACHE_POST_ACTION_OPTS_TMP}) -endif(THOR_BUILD_TEST_FUNCTOR_CACHE) \ No newline at end of file +endif(THOR_BUILD_TEST_FUNCTOR_CACHE) diff --git a/Pr/PrAlgorithms/src/PrMatchNN.cpp b/Pr/PrAlgorithms/src/PrMatchNN.cpp index 60ac8ed2248..13ff4e2e1fc 100644 --- a/Pr/PrAlgorithms/src/PrMatchNN.cpp +++ b/Pr/PrAlgorithms/src/PrMatchNN.cpp @@ -210,7 +210,7 @@ LHCb::Pr::Long::Tracks PrMatchNN::makeTracks( const LHCb::Pr::Velo::Tracks& v for ( auto idx{0}; idx < nSeedHits; ++idx ) { result.store_ft_index<I>( currentsize, idx, seeds.ft_index<I>( match.sTr(), idx ) ); - result.store_lhcbID<I>( currentsize, nVeloHits + idx, seeds.hit<I>( match.sTr(), idx ) ); + result.store_lhcbID<I>( currentsize, nVeloHits + idx, seeds.lhcbID<I>( match.sTr(), idx ) ); } result.store_ut_index<I>( currentsize, 0, -1 ); diff --git a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp index 866a05d375b..fbb0816d419 100644 --- a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp +++ b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp @@ -67,7 +67,7 @@ namespace LHCb::Converters::Track::PrSeeding { << LHCb::Pr::Seeding::Tracks::max_hits << "No more hits will be added" << endmsg; break; } - outputTracks.store_hit<I>( t, i, id.lhcbID() ); + outputTracks.store_lhcbID<I>( t, i, id.lhcbID() ); i++; } -- GitLab From 9397bad95efa1f571b45f1d784e7c1438b5bb7eb Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Mon, 6 Jul 2020 20:48:01 +0200 Subject: [PATCH 101/111] make sure sector numbers stay within bound --- Pr/PrVeloUT/src/PrVeloUT.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 814dd90a620..9ad71519a2b 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -561,9 +561,9 
@@ namespace LHCb::Pr { int contSize = filteredStates.size; filteredStates.size = 0; - std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray; + std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray{}; std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> - helperArray; // 4 layers x maximum 9 sectors + helperArray{}; // 4 layers x maximum 9 sectors std::array<int, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> maxColsRows; // -- This now works with up to 9 sectors @@ -713,7 +713,7 @@ namespace LHCb::Pr { std::array<int, maxNumSectors + 1> sectors{0}; - for ( int i = 0; i < nPos; ++i ) { sectors[i] = compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(); } + for ( int i = 0; i < nPos; ++i ) { sectors[i] = min(maxSectorNumber, max(compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(),0)); } for ( int j = 0; j < nPos; j++ ) { -- GitLab From 8f6e90018aaf191bb0c6fad7da2723e8a8d72f9c Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Mon, 6 Jul 2020 20:51:29 +0200 Subject: [PATCH 102/111] make sure sector numbers stay within bound --- Pr/PrVeloUT/src/PrVeloUT.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 9ad71519a2b..0168a8a23a8 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -563,7 +563,7 @@ namespace LHCb::Pr { std::array<simd::int_v, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> posArray{}; std::array<simd::int_v, maxNumSectors* static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> - helperArray{}; // 4 layers x maximum 9 sectors + helperArray{}; // 4 layers x maximum 9 sectors std::array<int, static_cast<int>( UTInfo::DetectorNumbers::TotalLayers )> maxColsRows; // -- This now works with up to 9 sectors @@ -713,7 +713,9 @@ namespace LHCb::Pr { std::array<int, maxNumSectors + 1> sectors{0}; - for ( int i = 0; i < nPos; ++i ) { sectors[i] = min(maxSectorNumber, max(compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(),0)); } + for ( int i = 0; i < nPos; ++i ) { + sectors[i] = min( maxSectorNumber, max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); + } for ( int j = 0; j < nPos; j++ ) { -- GitLab From d6c96e6546321d0fd2d35587ef270f1bc6f07b27 Mon Sep 17 00:00:00 2001 From: Michel De Cian <michel.de.cian@cern.ch> Date: Mon, 6 Jul 2020 20:55:06 +0200 Subject: [PATCH 103/111] forgot -1 --- Pr/PrVeloUT/src/PrVeloUT.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index 0168a8a23a8..db51f76678c 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -714,7 +714,8 @@ namespace LHCb::Pr { std::array<int, maxNumSectors + 1> sectors{0}; for ( int i = 0; i < nPos; ++i ) { - sectors[i] = min( maxSectorNumber, max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); + sectors[i] = + min( maxSectorNumber - 1, max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); } for ( int j = 0; j < nPos; j++ ) { -- GitLab From c5b9321e6e8a99e5de6044684fc02997231b1f0b Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 6 Jul 2020 21:03:03 +0200 Subject: [PATCH 104/111] avoid duplicates check in v2 track --- Tr/TrackUtils/src/TracksMatchConverter.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff 
--git a/Tr/TrackUtils/src/TracksMatchConverter.cpp b/Tr/TrackUtils/src/TracksMatchConverter.cpp index a832aa141ac..5dc3743cfa2 100644 --- a/Tr/TrackUtils/src/TracksMatchConverter.cpp +++ b/Tr/TrackUtils/src/TracksMatchConverter.cpp @@ -77,7 +77,11 @@ public: state.setErrQOverP2( errQop2 ); } - newTrack.setLhcbIDs( tracksMatch.lhcbIDs( t ), LHCb::Tag::Unordered ); + int n_hits = tracksMatch.template nHits<I>( t ).cast(); + for ( int i = 0; i < n_hits; i++ ) { + int lhcbid = tracksMatch.lhcbID<I>( t, i ).cast(); + newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); + } newTrack.setType( Track::Type::Long ); newTrack.setHistory( Track::History::PrMatch ); -- GitLab From feee7ef96ed08a0ae177491b452195a375f3d5be Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Mon, 6 Jul 2020 21:33:45 +0200 Subject: [PATCH 105/111] min to std::min --- Pr/PrVeloUT/src/PrVeloUT.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index db51f76678c..eb7a7d7f28d 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -715,7 +715,7 @@ namespace LHCb::Pr { for ( int i = 0; i < nPos; ++i ) { sectors[i] = - min( maxSectorNumber - 1, max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); + std::min( maxSectorNumber - 1, std::max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); } for ( int j = 0; j < nPos; j++ ) { -- GitLab From a8428b6fdbf79e5591763e0715ac8c5cbc516d90 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Mon, 6 Jul 2020 19:38:23 +0000 Subject: [PATCH 106/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/9122854 --- Pr/PrVeloUT/src/PrVeloUT.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Pr/PrVeloUT/src/PrVeloUT.cpp b/Pr/PrVeloUT/src/PrVeloUT.cpp index eb7a7d7f28d..de634a2ef02 100644 --- a/Pr/PrVeloUT/src/PrVeloUT.cpp +++ b/Pr/PrVeloUT/src/PrVeloUT.cpp @@ -714,8 +714,8 @@ namespace LHCb::Pr { std::array<int, maxNumSectors + 1> sectors{0}; for ( int i = 0; i < nPos; ++i ) { - sectors[i] = - std::min( maxSectorNumber - 1, std::max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); + sectors[i] = std::min( maxSectorNumber - 1, + std::max( compBoundsArray[layerIndex].sect<scalar::int_v>( t, i ).cast(), 0 ) ); } for ( int j = 0; j < nPos; j++ ) { -- GitLab From 711a52565f8ba7230b7fb5400fe56414547dc7a4 Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 7 Jul 2020 17:26:14 +0200 Subject: [PATCH 107/111] remove duplicates of UT hits and fill in ft_index for seedtracks --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 10 ++++++--- Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp | 6 +++--- .../src/fromTrackv2PrSeedingTrack.cpp | 21 ++++++++++++------- .../python/TrackSys/RecoUpgradeTracking.py | 3 ++- Tr/TrackUtils/src/TracksMatchConverter.cpp | 7 ++----- 5 files changed, 28 insertions(+), 19 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index e4005e0338c..a554fa0a8a0 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -280,9 +280,13 @@ namespace LHCb::Pr { std::sort( hitIdx.begin(), hitIdx.end(), [&hitsInLayers]( const int i, const int j ) { return hitsInLayers.projections[i] < hitsInLayers.projections[j]; } ); + // remove duplicates if there is + hitIdx.erase( std::unique( hitIdx.begin(), hitIdx.end(), [&hitsInLayers]( const int i, const int 
j ) { + return hitsInLayers.channelIDs[i] == hitsInLayers.channelIDs[j]; + } ), hitIdx.end() ); // -- Loop over all hits and make "groups" of hits to form a candidate - for ( auto itB = 0; itB + 2 < int( hitsInLayers.size ); ++itB ) { + for ( auto itB = 0; itB + 2 < hitIdx.size(); ++itB ) { const int itBeg = hitIdx[itB]; const float firstProj = hitsInLayers.projections[itBeg]; @@ -302,7 +306,7 @@ namespace LHCb::Pr { if ( ( hitsInLayers.projections[hitIdx[itB + 2]] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group - for ( auto itE = itB; itE < int( hitsInLayers.size ); itE++ ) { + for ( auto itE = itB; itE < hitIdx.size() ; itE++ ) { const int itEnd = hitIdx[itE]; if ( hitsInLayers.projections[itEnd] > maxProj ) break; if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { @@ -394,7 +398,7 @@ namespace LHCb::Pr { const std::pair<int, int>& temp = m_HitHandler.get()->indices( sectors[j] ); const std::pair<int, int>& temp2 = m_HitHandler.get()->indices( sectors[j + 1] ); const int firstIndex = temp.first; - const int shift = ( temp2.first == temp.second ); + const int shift = ( temp2.first == temp.second || ( temp.first == temp2.first && temp.second == temp2.second ) ); const int lastIndex = ( shift == 1 ) ? temp2.second : temp.second; j += shift; diff --git a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp index f083cdafb98..0b20ab00e74 100644 --- a/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp +++ b/Pr/PrAlgorithms/src/PrResidualSciFiHits.cpp @@ -84,7 +84,7 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi if ( tracks.size() == 0 ) { if ( UNLIKELY( msgLevel( MSG::DEBUG ) ) ) - debug() << "Track container '" << inputLocation<0>() << "' is empty" << endmsg; + debug() << "Track container '" << inputLocation<Tracks>() << "' is empty" << endmsg; return fthits; } @@ -96,7 +96,7 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi const int nfthits = tracks.nFTHits<I>( t ).cast(); for ( int id = 0; id != nfthits; id++ ) { auto idx = tracks.ft_index<I>( t, id ).cast(); - if ( idx != 0 ) used[idx] = true; + if ( idx >= 0 ) used[idx] = true; } } constexpr auto xu = PrFTInfo::xZonesUpper; @@ -110,7 +110,7 @@ PrSciFiHits PrResidualSciFiHits::operator()( const Tracks& tracks, const PrSciFi zoneIndexes[hitzones[0]] = hitvec.size(); int j = 1; - for ( long unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop whole SciFiHits container + for ( unsigned int i = 0; i != fthits._IDs.size(); i++ ) { // loop whole SciFiHits container if ( used[i] ) continue; hitvec.emplace_back( fthits._x[i] ); diff --git a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp index fbb0816d419..82dceee0b1e 100644 --- a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp +++ b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp @@ -20,6 +20,8 @@ #include "Event/StateParameters.h" #include "Event/Track.h" #include "SOAExtensions/ZipUtils.h" +#include "PrKernel/PrSciFiHits.h" +#include "PrKernel/PrFTInfo.h" // Gaudi #include "GaudiKernel/StdArrayAsProperty.h" @@ -38,18 +40,19 @@ namespace { namespace LHCb::Converters::Track::PrSeeding { class fromTrackv2PrSeedingTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Seeding::Tracks( - const EventContext& evtCtx, const std::vector<Event::v2::Track>& )> { + const EventContext& evtCtx, const std::vector<Event::v2::Track>&, const 
SciFiHits::PrSciFiHits& )> { public: using base_class = Gaudi::Functional::Transformer<LHCb::Pr::Seeding::Tracks( - const EventContext& evtCtx, const std::vector<Event::v2::Track>& )>; + const EventContext& evtCtx, const std::vector<Event::v2::Track>&, const SciFiHits::PrSciFiHits& )>; using KeyValue = typename base_class::KeyValue; fromTrackv2PrSeedingTracks( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class( name, pSvcLocator, {KeyValue{"InputTracks", ""}}, KeyValue{"OutputTracks", ""} ) {} + : base_class( name, pSvcLocator, {KeyValue{"InputTracks", ""}, KeyValue{"InputSciFiHits", PrFTInfo::SciFiHitsLocation} }, KeyValue{"OutputTracks", ""} ) {} - LHCb::Pr::Seeding::Tracks operator()( const EventContext& evtCtx, - const std::vector<Event::v2::Track>& inputTracks ) const override { + LHCb::Pr::Seeding::Tracks operator()( const EventContext& evtCtx, + const std::vector<Event::v2::Track>& inputTracks, + const SciFiHits::PrSciFiHits& fthits ) const override { LHCb::Pr::Seeding::Tracks outputTracks{Zipping::generateZipIdentifier(), LHCb::getMemResource( evtCtx )}; @@ -57,19 +60,23 @@ namespace LHCb::Converters::Track::PrSeeding { const LHCb::Event::v2::Track& inTrack = inputTracks[t]; outputTracks.store_QoP<F>( t, inTrack.charge() / inTrack.p() ); - outputTracks.store_nHits<I>( t, (int)inTrack.nLHCbIDs() ); // -- copy LHCbIDs int i = 0; for ( auto id : inTrack.lhcbIDs() ) { if ( i == LHCb::Pr::Seeding::Tracks::max_hits ) { base_class::error() << "Reached maximum number of hits in LHCb::Pr::Seeding::Tracks " - << LHCb::Pr::Seeding::Tracks::max_hits << "No more hits will be added" << endmsg; + << LHCb::Pr::Seeding::Tracks::max_hits << " No more hits will be added" << endmsg; break; } outputTracks.store_lhcbID<I>( t, i, id.lhcbID() ); + for( unsigned int ihit = 0; ihit != fthits._IDs.size(); ihit++ ){ + if(id.lhcbID() == (LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ) + outputTracks.store_ft_index<I>( t, i, ihit ); + } i++; } + outputTracks.store_nHits<I>( t, i ); // -- copy states i = 0; diff --git a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py index 6b561305269..9cc2af443e0 100755 --- a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py +++ b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py @@ -346,10 +346,11 @@ def RecoSeeding(output_tracks="Rec/Track/Seed"): def RecoConvertSeeding(input_tracks="Rec/Track/Seed", output_tracks="Rec/Track/PrSeedingTracks"): - from Configurables import LHCb__Converters__Track__PrSeeding__fromTrackv2PrSeedingTracks as SeedConverter + from Configurables import LHCb__Converters__Track__PrSeeding__fromTrackv2PrSeedingTracks as SeedConverter, PrStoreSciFiHits seedConverter = SeedConverter("SeedConverter") seedConverter.OutputTracks = output_tracks seedConverter.InputTracks = input_tracks + seedConverter.InputSciFiHits = PrStoreSciFiHits().Output return [seedConverter] diff --git a/Tr/TrackUtils/src/TracksMatchConverter.cpp b/Tr/TrackUtils/src/TracksMatchConverter.cpp index 5dc3743cfa2..01aa7f6572f 100644 --- a/Tr/TrackUtils/src/TracksMatchConverter.cpp +++ b/Tr/TrackUtils/src/TracksMatchConverter.cpp @@ -77,11 +77,8 @@ public: state.setErrQOverP2( errQop2 ); } - int n_hits = tracksMatch.template nHits<I>( t ).cast(); - for ( int i = 0; i < n_hits; i++ ) { - int lhcbid = tracksMatch.lhcbID<I>( t, i ).cast(); - newTrack.addToLhcbIDs( LHCb::LHCbID( lhcbid ) ); - } + ///add lhcbIDs + newTrack.setLhcbIDs( tracksMatch.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); 
newTrack.setHistory( Track::History::PrMatch ); -- GitLab From 55695d5956fda68d02b4c408e436936edd85df51 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 7 Jul 2020 15:27:19 +0000 Subject: [PATCH 108/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/9142256 --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 15 ++++++++----- .../src/fromTrackv2PrSeedingTrack.cpp | 22 ++++++++++--------- .../python/TrackSys/RecoUpgradeTracking.py | 2 +- Tr/TrackUtils/src/TracksMatchConverter.cpp | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index a554fa0a8a0..a82fde35fe2 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -281,9 +281,11 @@ namespace LHCb::Pr { return hitsInLayers.projections[i] < hitsInLayers.projections[j]; } ); // remove duplicates if there is - hitIdx.erase( std::unique( hitIdx.begin(), hitIdx.end(), [&hitsInLayers]( const int i, const int j ) { - return hitsInLayers.channelIDs[i] == hitsInLayers.channelIDs[j]; - } ), hitIdx.end() ); + hitIdx.erase( std::unique( hitIdx.begin(), hitIdx.end(), + [&hitsInLayers]( const int i, const int j ) { + return hitsInLayers.channelIDs[i] == hitsInLayers.channelIDs[j]; + } ), + hitIdx.end() ); // -- Loop over all hits and make "groups" of hits to form a candidate for ( auto itB = 0; itB + 2 < hitIdx.size(); ++itB ) { @@ -306,7 +308,7 @@ namespace LHCb::Pr { if ( ( hitsInLayers.projections[hitIdx[itB + 2]] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group - for ( auto itE = itB; itE < hitIdx.size() ; itE++ ) { + for ( auto itE = itB; itE < hitIdx.size(); itE++ ) { const int itEnd = hitIdx[itE]; if ( hitsInLayers.projections[itEnd] > maxProj ) break; if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { @@ -398,8 +400,9 @@ namespace LHCb::Pr { const std::pair<int, int>& temp = m_HitHandler.get()->indices( sectors[j] ); const std::pair<int, int>& temp2 = m_HitHandler.get()->indices( sectors[j + 1] ); const int firstIndex = temp.first; - const int shift = ( temp2.first == temp.second || ( temp.first == temp2.first && temp.second == temp2.second ) ); - const int lastIndex = ( shift == 1 ) ? temp2.second : temp.second; + const int shift = + ( temp2.first == temp.second || ( temp.first == temp2.first && temp.second == temp2.second ) ); + const int lastIndex = ( shift == 1 ) ? 
temp2.second : temp.second; j += shift; for ( int i = firstIndex; i < lastIndex; i += simd::size ) { diff --git a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp index 82dceee0b1e..0a4a0d5f056 100644 --- a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp +++ b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp @@ -19,9 +19,9 @@ #include "Event/PrSeedTracks.h" #include "Event/StateParameters.h" #include "Event/Track.h" -#include "SOAExtensions/ZipUtils.h" -#include "PrKernel/PrSciFiHits.h" #include "PrKernel/PrFTInfo.h" +#include "PrKernel/PrSciFiHits.h" +#include "SOAExtensions/ZipUtils.h" // Gaudi #include "GaudiKernel/StdArrayAsProperty.h" @@ -39,8 +39,9 @@ namespace { } // namespace namespace LHCb::Converters::Track::PrSeeding { - class fromTrackv2PrSeedingTracks : public Gaudi::Functional::Transformer<LHCb::Pr::Seeding::Tracks( - const EventContext& evtCtx, const std::vector<Event::v2::Track>&, const SciFiHits::PrSciFiHits& )> { + class fromTrackv2PrSeedingTracks + : public Gaudi::Functional::Transformer<LHCb::Pr::Seeding::Tracks( + const EventContext& evtCtx, const std::vector<Event::v2::Track>&, const SciFiHits::PrSciFiHits& )> { public: using base_class = Gaudi::Functional::Transformer<LHCb::Pr::Seeding::Tracks( @@ -48,11 +49,12 @@ namespace LHCb::Converters::Track::PrSeeding { using KeyValue = typename base_class::KeyValue; fromTrackv2PrSeedingTracks( const std::string& name, ISvcLocator* pSvcLocator ) - : base_class( name, pSvcLocator, {KeyValue{"InputTracks", ""}, KeyValue{"InputSciFiHits", PrFTInfo::SciFiHitsLocation} }, KeyValue{"OutputTracks", ""} ) {} + : base_class( name, pSvcLocator, + {KeyValue{"InputTracks", ""}, KeyValue{"InputSciFiHits", PrFTInfo::SciFiHitsLocation}}, + KeyValue{"OutputTracks", ""} ) {} - LHCb::Pr::Seeding::Tracks operator()( const EventContext& evtCtx, - const std::vector<Event::v2::Track>& inputTracks, - const SciFiHits::PrSciFiHits& fthits ) const override { + LHCb::Pr::Seeding::Tracks operator()( const EventContext& evtCtx, const std::vector<Event::v2::Track>& inputTracks, + const SciFiHits::PrSciFiHits& fthits ) const override { LHCb::Pr::Seeding::Tracks outputTracks{Zipping::generateZipIdentifier(), LHCb::getMemResource( evtCtx )}; @@ -70,8 +72,8 @@ namespace LHCb::Converters::Track::PrSeeding { break; } outputTracks.store_lhcbID<I>( t, i, id.lhcbID() ); - for( unsigned int ihit = 0; ihit != fthits._IDs.size(); ihit++ ){ - if(id.lhcbID() == (LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ) + for ( unsigned int ihit = 0; ihit != fthits._IDs.size(); ihit++ ) { + if ( id.lhcbID() == ( LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ) outputTracks.store_ft_index<I>( t, i, ihit ); } i++; diff --git a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py index 9cc2af443e0..15b8ba943e5 100755 --- a/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py +++ b/Tf/TrackSys/python/TrackSys/RecoUpgradeTracking.py @@ -350,7 +350,7 @@ def RecoConvertSeeding(input_tracks="Rec/Track/Seed", seedConverter = SeedConverter("SeedConverter") seedConverter.OutputTracks = output_tracks seedConverter.InputTracks = input_tracks - seedConverter.InputSciFiHits = PrStoreSciFiHits().Output + seedConverter.InputSciFiHits = PrStoreSciFiHits().Output return [seedConverter] diff --git a/Tr/TrackUtils/src/TracksMatchConverter.cpp b/Tr/TrackUtils/src/TracksMatchConverter.cpp index 01aa7f6572f..f24c44f797f 100644 --- a/Tr/TrackUtils/src/TracksMatchConverter.cpp +++ 
b/Tr/TrackUtils/src/TracksMatchConverter.cpp @@ -77,7 +77,7 @@ public: state.setErrQOverP2( errQop2 ); } - ///add lhcbIDs + /// add lhcbIDs newTrack.setLhcbIDs( tracksMatch.lhcbIDs( t ), LHCb::Tag::Unordered ); newTrack.setType( Track::Type::Long ); -- GitLab From fc0b014216141c32ccf34ac3051320c9552fd84b Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Tue, 7 Jul 2020 22:52:16 +0200 Subject: [PATCH 109/111] make the converter a little bit faster --- Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp | 4 ++-- Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp index a82fde35fe2..049d8a4839d 100644 --- a/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp +++ b/Pr/PrAlgorithms/src/PrAddUTHitsTool.cpp @@ -288,7 +288,7 @@ namespace LHCb::Pr { hitIdx.end() ); // -- Loop over all hits and make "groups" of hits to form a candidate - for ( auto itB = 0; itB + 2 < hitIdx.size(); ++itB ) { + for ( auto itB = 0; itB + 2 < int( hitIdx.size() ); ++itB ) { const int itBeg = hitIdx[itB]; const float firstProj = hitsInLayers.projections[itBeg]; @@ -308,7 +308,7 @@ namespace LHCb::Pr { if ( ( hitsInLayers.projections[hitIdx[itB + 2]] ) > maxProj ) continue; // -- Make "group" of hits which are within a certain distance to the first hit of the group - for ( auto itE = itB; itE < hitIdx.size(); itE++ ) { + for ( auto itE = itB; itE < int( hitIdx.size() ); itE++ ) { const int itEnd = hitIdx[itE]; if ( hitsInLayers.projections[itEnd] > maxProj ) break; if ( 0 == firedPlanes[hitsInLayers.planeCode<sI>( itEnd ).cast()] ) { diff --git a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp index 0a4a0d5f056..8f106df8cef 100644 --- a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp +++ b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp @@ -73,8 +73,10 @@ namespace LHCb::Converters::Track::PrSeeding { } outputTracks.store_lhcbID<I>( t, i, id.lhcbID() ); for ( unsigned int ihit = 0; ihit != fthits._IDs.size(); ihit++ ) { - if ( id.lhcbID() == ( LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ) + if ( id.lhcbID() == ( LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ){ outputTracks.store_ft_index<I>( t, i, ihit ); + break; + } } i++; } -- GitLab From 9bbd948eaa6d042b5366dc99c45e8f9a9b05f1f7 Mon Sep 17 00:00:00 2001 From: Gitlab CI <noreply@cern.ch> Date: Tue, 7 Jul 2020 20:53:05 +0000 Subject: [PATCH 110/111] Fixed formatting patch generated by https://gitlab.cern.ch/lhcb/Rec/-/jobs/9146071 --- Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp index 8f106df8cef..a91a527632d 100644 --- a/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp +++ b/Pr/PrConverters/src/fromTrackv2PrSeedingTrack.cpp @@ -73,7 +73,7 @@ namespace LHCb::Converters::Track::PrSeeding { } outputTracks.store_lhcbID<I>( t, i, id.lhcbID() ); for ( unsigned int ihit = 0; ihit != fthits._IDs.size(); ihit++ ) { - if ( id.lhcbID() == ( LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ){ + if ( id.lhcbID() == ( LHCb::LHCbID( fthits.ID( ihit ) ) ).lhcbID() ) { outputTracks.store_ft_index<I>( t, i, ihit ); break; } -- GitLab From 76a33e0dce61036b3ab9ef875b0b99bbe2de0b9e Mon Sep 17 00:00:00 2001 From: Peilian LI <peilian.li@cern.ch> Date: Thu, 9 Jul 2020 10:49:49 +0200 Subject: [PATCH 111/111] fix FPEs in the 
Brunel test --- Pr/PrMCTools/src/PrCheatedSciFiTracking.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Pr/PrMCTools/src/PrCheatedSciFiTracking.cpp b/Pr/PrMCTools/src/PrCheatedSciFiTracking.cpp index 858c04e38e0..9b5d140d51b 100644 --- a/Pr/PrMCTools/src/PrCheatedSciFiTracking.cpp +++ b/Pr/PrMCTools/src/PrCheatedSciFiTracking.cpp @@ -150,6 +150,8 @@ std::vector<Track> PrCheatedSciFiTracking::operator()( const PrFTHitHandler<PrHi tState.setState( 100, 50, z, 0.1, 0.1, qOverP ); // tState.setCovariance( m_geoTool->covariance( qOverP ) ); tmp.addToStates( tState ); + tmp.addToStates( tState ); + tmp.addToStates( tState ); result.emplace_back( tmp ); } return result; -- GitLab
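
A note on the "padding results to avoid FPEs" change introduced in PATCH 095/111: vectorised consumers of the track containers may load a full SIMD vector starting at the last valid entry, so the producer stores benign values one vector width past result.size(). The following is a minimal standalone sketch of that pattern only, using a hypothetical fixed-capacity struct-of-arrays and a plain scalar padding loop; it is not the actual LHCb::Pr track class API or SIMDWrapper interface.

    #include <array>
    #include <cassert>
    #include <cstddef>

    // Hypothetical fixed-capacity struct-of-arrays container (illustration only).
    struct PaddedTracks {
      static constexpr std::size_t capacity = 1024;
      std::array<float, capacity>  qop{};   // one state quantity per track
      std::size_t                  n = 0;   // number of valid tracks

      void push_back( float q ) {
        assert( n < capacity );
        qop[n++] = q;
      }

      // Store benign values just past the last valid entry so that a vector
      // load of width 'simd_width' starting at the last valid track never
      // reads uninitialised memory, which could otherwise raise an FPE.
      void pad( std::size_t simd_width ) {
        for ( std::size_t i = n; i < n + simd_width && i < capacity; ++i ) qop[i] = 1.f;
      }
    };

    int main() {
      PaddedTracks tracks;
      tracks.push_back( 0.5f );
      tracks.push_back( -0.25f );
      tracks.pad( 8 ); // e.g. an AVX-256 float vector width
    }

The dummy value 1.f mirrors the simd::float_v( 1.f ) stores in the patch; any finite, non-zero value that keeps downstream arithmetic well-defined would serve the same purpose.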