Commit 62c31710 authored by Marian Stahl

add HyperMatch to Phys/GenericVertexFinder

parent c1a1d892
Phys/GenericVertexFinder/CMakeLists.txt
@@ -27,6 +27,7 @@ gaudi_add_module(GenericVertexFinder
     src/GenericVertexFinder.cpp
     src/GenericVertexFinderAlg.cpp
     src/GenericVertexFinderTwo.cpp
+    src/HyperMatch.cpp
     src/JetVertexAlg.cpp
     src/ParticleTrajProvider.cpp
     src/TrackGenericVertexFinderAlg.cpp
...

src/HyperMatch.cpp (new file)
#include "HyperMatch.h"
// from
// https://gitlab.cern.ch/lhcb/Phys/-/blob/071bfe3f2bbf2c4033f82f8a79e626415651c6ea/Phys/GenericVertexFinder/src/ParticleTrajProvider.cpp
namespace {
LHCb::State compositeParticleToState( const LHCb::Particle& particle ) {
// This replaces the function "Particle2State::particle2State"
// which is just wrong if the result is to be used in a vertex
// fit. See also ParticleVertexFitter.
LHCb::State state;
const auto& p4 = particle.momentum();
const auto& px = p4.Px();
const auto& py = p4.Py();
const auto& pz = p4.Pz();
const double tx = px / pz;
const double ty = py / pz;
double p = p4.P();
state.setTx( tx );
state.setTy( ty );
const int Q = particle.charge();
state.setQOverP( Q / p );
const Gaudi::XYZPoint& pos = particle.endVertex()->position();
state.setX( pos.x() );
state.setY( pos.y() );
state.setZ( pos.z() );
// For the computation of the Jacobian it is important to understand the following.
// x' = x + (z' - z) * tx
// --> dx/dz = - tx ;
//
// Notation:
// x = (x,y)
// tx = (tx,ty,q/p)
// J_{5,6} = ( Jxpos Jxmom )
// ( Jtxpos Jtxmom )
ROOT::Math::SMatrix<double, 2, 3> Jxpos;
Jxpos( 0, 0 ) = Jxpos( 1, 1 ) = 1;
Jxpos( 0, 2 ) = -tx;
Jxpos( 1, 2 ) = -ty;
ROOT::Math::SMatrix<double, 3, 3> Jtxmom;
Jtxmom( 0, 0 ) = 1 / pz; // dtx/dpx
Jtxmom( 1, 1 ) = 1 / pz; // dty/dpy
Jtxmom( 0, 2 ) = -tx / pz; // dtx/dpz
Jtxmom( 1, 2 ) = -ty / pz; // dty/dpz
const double dqopdp = -Q / ( p * p );
Jtxmom( 2, 0 ) = dqopdp * px / p; // dqop/dpx
Jtxmom( 2, 1 ) = dqopdp * py / p; // dqop/dpy
Jtxmom( 2, 2 ) = dqopdp * pz / p; // dqop/dpz
// perhaps it would be more efficient to fill one jacobian and one
// covariance and then use similarity once, but I don't think
// so...
const auto& momposcov = particle.posMomCovMatrix(); // it has the wrong name: it is mompos not posmom
const Gaudi::Matrix2x3 xtxcov =
Jxpos * ROOT::Math::Transpose( momposcov.Sub<Gaudi::Matrix3x3>( 0, 0 ) ) * ROOT::Math::Transpose( Jtxmom );
const Gaudi::SymMatrix2x2 xcov = ROOT::Math::Similarity( Jxpos, particle.posCovMatrix() );
const Gaudi::SymMatrix3x3 txcov =
ROOT::Math::Similarity( Jtxmom, particle.momCovMatrix().Sub<Gaudi::SymMatrix3x3>( 0, 0 ) );
auto& cov = state.covariance();
for ( int i = 0; i < 2; ++i )
for ( int j = 0; j <= i; ++j ) cov( i, j ) = xcov( i, j );
for ( int i = 0; i < 2; ++i )
for ( int j = 0; j < 3; ++j ) cov( i, j + 2 ) = xtxcov( i, j );
for ( int i = 0; i < 3; ++i )
for ( int j = 0; j <= i; ++j ) cov( i + 2, j + 2 ) = txcov( i, j );
return state;
}
template <typename STATEA, typename STATEB>
auto vertexChi2( const STATEA& stateA, const STATEB& stateB ) {
// first compute the cross product of the directions. we'll need this in any case
const auto txA = stateA.tx();
const auto tyA = stateA.ty();
const auto txB = stateB.tx();
const auto tyB = stateB.ty();
auto nx = tyA - tyB; // y1 * z2 - y2 * z1
auto ny = txB - txA; // - x1 * z2 + x2 * z1
auto nz = txA * tyB - tyA * txB; // x1 * y2 - x2 * y1
// auto n2 = nx*nx + ny*ny + nz*nz ;
// compute doca. we don't divide by the normalization to save time. we call it 'ndoca'
auto dx = stateA.x() - stateB.x();
auto dy = stateA.y() - stateB.y();
auto dz = stateA.z() - stateB.z();
auto ndoca = dx * nx + dy * ny + dz * nz;
// figure out what floating point type to use for the covariance matrix manipulations
using float_t = std::decay_t<decltype( stateA.covariance()( 0, 0 ) )>;
using Vector4 = ROOT::Math::SVector<float_t, 4>;
using SymMatrix4x4 = ROOT::Math::SMatrix<float_t, 4, 4, ROOT::Math::MatRepSym<float_t, 4>>;
// the hard part: compute the jacobians :-)
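// jacA(i) = d(ndoca)/d(xA), d(ndoca)/d(yA), d(ndoca)/d(txA), d(ndoca)/d(tyA); jacB analogously for state B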
Vector4 jacA, jacB;
jacA( 0 ) = nx;
jacA( 1 ) = ny;
jacA( 2 ) = -dy + dz * tyB;
jacA( 3 ) = dx - dz * txB;
jacB( 0 ) = -nx;
jacB( 1 ) = -ny;
jacB( 2 ) = dy - dz * tyA;
jacB( 3 ) = -dx + dz * txA;
// compute the variance on ndoca
decltype( auto ) covA = stateA.covariance();
decltype( auto ) covB = stateB.covariance();
using ROOT::Math::Similarity;
auto const varndoca = Similarity( jacA, covA.template Sub<SymMatrix4x4>( 0, 0 ) ) +
Similarity( jacB, covB.template Sub<SymMatrix4x4>( 0, 0 ) );
// return the chi2
return ndoca * ndoca / varndoca;
}
template <typename StateVector>
auto doca( const StateVector& stateA, const StateVector& stateB ) {
// first compute the cross product of the directions.
const auto txA = stateA.tx();
const auto tyA = stateA.ty();
const auto txB = stateB.tx();
const auto tyB = stateB.ty();
auto nx = tyA - tyB; // y1 * z2 - y2 * z1
auto ny = txB - txA; // - x1 * z2 + x2 * z1
auto nz = txA * tyB - tyA * txB; // x1 * y2 - x2 * y1
auto n = std::sqrt( nx * nx + ny * ny + nz * nz );
// compute the doca
auto dx = stateA.x() - stateB.x();
auto dy = stateA.y() - stateB.y();
auto dz = stateA.z() - stateB.z();
auto ndoca = dx * nx + dy * ny + dz * nz;
return fabs( ndoca / n );
}
} // namespace
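For reference, doca() above is the textbook distance of closest approach between two straight lines with direction vectors dA = (txA, tyA, 1) and dB = (txB, tyB, 1): doca = |(rA - rB) . (dA x dB)| / |dA x dB|. The following minimal, self-contained sketch shows the same geometry in isolation; MiniState is a hypothetical stand-in for an LHCb state vector and the numbers are purely illustrative.

#include <cmath>
#include <cstdio>

struct MiniState { double x, y, z, tx, ty; }; // hypothetical stand-in for an LHCb state vector

double miniDoca( const MiniState& a, const MiniState& b ) {
  // n = dA x dB with dA = (a.tx, a.ty, 1), dB = (b.tx, b.ty, 1)
  const double nx = a.ty - b.ty;
  const double ny = b.tx - a.tx;
  const double nz = a.tx * b.ty - a.ty * b.tx;
  // project the separation of the two reference points onto n and normalise
  const double dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
  return std::fabs( dx * nx + dy * ny + dz * nz ) / std::sqrt( nx * nx + ny * ny + nz * nz );
}

int main() {
  // two lines crossing in the x-z projection but separated by 0.5 mm in y -> doca = 0.5 mm
  MiniState a{0., 0., 0., 0.01, 0.};
  MiniState b{1., 0.5, 0., -0.01, 0.};
  std::printf( "doca = %.3f mm\n", miniDoca( a, b ) );
  return 0;
}

vertexChi2() uses the same unnormalised numerator ("ndoca") and, instead of dividing by |dA x dB|, propagates the two state covariances onto it to form a chi2.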
//=============================================================================
// Standard constructor, initializes variables
//=============================================================================
HyperMatch::HyperMatch( const std::string& name, ISvcLocator* pSvcLocator ) : GaudiAlgorithm( name, pSvcLocator ) {
declareProperty( "InputVeloTracks", m_velo_loc = LHCb::TrackLocation::Velo );
declareProperty( "InputSeeds", m_seed_loc );
declareProperty( "OutputTracks", m_output_loc = "Rec/Track/HyperMatchedVelo" );
declareProperty( "ExtrapolatorName", m_extrapolator_name = "TrackMasterExtrapolator" );
declareProperty( "TrackFitterName", m_fitter_name = "FastVeloFitLHCbIDs" );
declareProperty( "VeloHyperonIP", m_ip_cut = 5. * Gaudi::Units::mm );
declareProperty( "MatchChi2", m_chi2_cut = 9999.f );
}
//=============================================================================
// Initialization
//=============================================================================
StatusCode HyperMatch::initialize() {
StatusCode sc = GaudiAlgorithm::initialize(); // must be executed first
if ( sc.isFailure() ) return sc; // error printed already by GaudiAlgorithm
m_extrapolator = tool<ITrackExtrapolator>( m_extrapolator_name, this );
if ( !m_extrapolator ) {
error() << "Failed to create the track extrapolator." << endmsg;
return StatusCode::FAILURE;
}
m_velo_track_refitter = tool<ITrackFitter>( m_fitter_name, this );
if ( !m_velo_track_refitter ) {
error() << "Failed to create the VELO track fitter." << endmsg;
return StatusCode::FAILURE;
}
return sc;
}
StatusCode HyperMatch::execute() {
StatusCode sc = StatusCode::SUCCESS;
// get the Velo tracks and copy their pointers; the tracks will be manipulated and the selected ones cloned into the output
const LHCb::Tracks* velo_tracks = get<LHCb::Tracks>( m_velo_loc );
debug() << "Got " << velo_tracks->size() << " Velo tracks" << endmsg;
std::vector<LHCb::Track*> copied_velo_tracks;
copied_velo_tracks.reserve( velo_tracks->size() );
copied_velo_tracks.insert( copied_velo_tracks.end(), velo_tracks->begin(), velo_tracks->end() );
// get the downstream part of the decay chain
LHCb::Particle::Range seed_particles = get<LHCb::Particle::Range>( m_seed_loc );
debug() << "Got " << seed_particles.size() << " Seeds" << endmsg;
LHCb::Tracks* best_velo_tracks;
if ( exist<LHCb::Tracks>( m_output_loc ) )
best_velo_tracks = get<LHCb::Tracks>( m_output_loc );
else {
best_velo_tracks = new LHCb::Tracks();
put( best_velo_tracks, m_output_loc );
}
best_velo_tracks->reserve( seed_particles.size() );
for ( auto seed : seed_particles ) {
// output track
LHCb::Track* best_velo_track{nullptr};
double best_chi2 = m_chi2_cut, best_ip = -1., best_doca = -1., best_vchi2 = -1.;
for ( auto velo_track : copied_velo_tracks ) {
// check the rough IP of the Velo track w.r.t. the seed's decay vertex
// N.B. the pattern recognition doesn't set LHCb::State::LastMeasurement, but the state at LHCb::State::EndVelo
// is effectively at the last measurement rather than at the nominal EndVelo position z = 770 mm
LHCb::State* last_velo_state{nullptr};
if ( velo_track->hasStateAt( LHCb::State::EndVelo ) )
last_velo_state = velo_track->stateAt( LHCb::State::EndVelo );
else
last_velo_state = &velo_track->closestState( 770 * Gaudi::Units::mm );
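// rough IP: magnitude of the component of (state position - seed decay vertex) transverse to the track direction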
const auto distance = last_velo_state->position() - seed->endVertex()->position();
const auto ip = ( distance - last_velo_state->slopes() * distance.Dot( last_velo_state->slopes() ) /
last_velo_state->slopes().Mag2() )
.r();
verbose() << "IP " << ip << endmsg;
if ( ip > m_ip_cut ) continue;
// the seed is charged, so we need to properly transport it from its decay vertex to the last measurement of
// the Velo track
LHCb::State seed_as_state = compositeParticleToState( *seed );
verbose() << "Seed state at endvtx" << endmsg;
if ( msgLevel( MSG::VERBOSE ) ) seed_as_state.fillStream( std::cout );
// extrapolate seed state to last measurement in Velo
if ( m_extrapolator->propagate( seed_as_state, last_velo_state->z(),
/*TrackMatrix (transport update matrix)*/ nullptr,
seed->particleID() ) == StatusCode::FAILURE )
continue;
// fit velo track with momentum estimate from seed state at EndVelo to get a better estimate of the covariance
// matrix
for ( auto state : velo_track->states() ) {
state->setQOverP( seed_as_state.qOverP() );
state->setErrQOverP2( seed_as_state.errQOverP2() );
}
if ( m_velo_track_refitter->fit( *velo_track ) == StatusCode::FAILURE ) continue;
// after fitting, take the state at the last measurement
last_velo_state = velo_track->stateAt( LHCb::State::LastMeasurement );
if ( !last_velo_state ) continue;
debug() << "Seed state at end velo" << endmsg;
if ( msgLevel( MSG::DEBUG ) ) seed_as_state.fillStream( std::cout );
debug() << "Velo state after fitting (LastMeasurement)" << endmsg;
if ( msgLevel( MSG::DEBUG ) ) last_velo_state->fillStream( std::cout );
// get a chi^2 for deciding which velo track fits best
const auto match_doca = doca( *last_velo_state, seed_as_state );
const auto vx_chi2 = vertexChi2( *last_velo_state, seed_as_state );
const auto cov_seed = seed_as_state.covariance();
const auto cov_velo = last_velo_state->covariance();
Gaudi::Matrix5x5 tmp_cov = cov_seed + cov_velo;
Gaudi::Matrix4x4 cov = tmp_cov.Sub<Gaudi::Matrix4x4>( 0, 0 );
bool isc = cov.InvertFast();
if ( !isc ) continue;
Gaudi::Vector4 dv{seed_as_state.x() - last_velo_state->x(), seed_as_state.y() - last_velo_state->y(),
seed_as_state.tx() - last_velo_state->tx(), seed_as_state.ty() - last_velo_state->ty()};
const auto match_chi2 = ROOT::Math::Similarity( dv, cov );
if ( match_chi2 < best_chi2 ) {
best_chi2 = match_chi2;
best_velo_track = velo_track;
for ( auto state : best_velo_track->states() ) {
state->setQOverP( seed_as_state.qOverP() );
state->setErrQOverP2( seed_as_state.errQOverP2() );
}
const auto new_distance = last_velo_state->position() - seed_as_state.position();
best_ip = ( new_distance - last_velo_state->slopes() * new_distance.Dot( last_velo_state->slopes() ) /
last_velo_state->slopes().Mag2() )
.r();
best_doca = match_doca;
best_vchi2 = vx_chi2;
}
}
verbose() << "Done looping Velo tracks." << endmsg;
if ( best_velo_track ) {
if ( msgLevel( MSG::VERBOSE ) ) best_velo_track->fillStream( std::cout );
debug() << "KEY " << seed->key() << " IP " << best_ip << " DOCA " << best_doca << " VCHI2 " << best_vchi2
<< " SCHI2 " << best_chi2 << endmsg;
auto ei = best_velo_track->extraInfo();
ei.insert( 360, static_cast<double>( seed->key() ) );
ei.insert( 361, best_ip );
ei.insert( 362, best_doca );
ei.insert( 363, best_vchi2 );
ei.insert( 364, best_chi2 );
best_velo_track->setExtraInfo( ei );
best_velo_tracks->insert( best_velo_track->clone() );
} else { // fallback so that both the downstream candidate and the decay chain including the matched Velo track
// can still be written to a tuple later
debug() << "Found no matching track."
<< " KEY " << seed->key() << endmsg;
best_velo_track = copied_velo_tracks.front();
auto ei = best_velo_track->extraInfo();
ei.insert( 360, static_cast<double>( seed->key() ) );
ei.insert( 361, -1. );
ei.insert( 362, -1. );
ei.insert( 363, -1. );
ei.insert( 364, -1. );
best_velo_track->setExtraInfo( ei );
best_velo_tracks->insert( best_velo_track->clone() );
}
}
verbose() << "Done looping Seed tracks." << endmsg;
return sc;
}
DECLARE_COMPONENT( HyperMatch )
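The match_chi2 used above to rank Velo tracks is the Mahalanobis distance of the (x, y, tx, ty) residual between the extrapolated seed state and the refitted Velo state, weighted by the inverse of the sum of the two state covariances restricted to that 4x4 block. A minimal sketch with plain ROOT SMatrix/SVector classes and made-up numbers, assuming only ROOT is available:

#include <Math/SMatrix.h>
#include <Math/SVector.h>
#include <cstdio>

int main() {
  using Vector4   = ROOT::Math::SVector<double, 4>;
  using Matrix4x4 = ROOT::Math::SMatrix<double, 4, 4>;

  // residual in (x, y, tx, ty) between two states compared at the same z (illustrative values)
  Vector4 dv( 0.10, -0.05, 1.0e-3, 5.0e-4 );

  // sum of the two state covariances, restricted to the (x, y, tx, ty) block; diagonal here for simplicity
  Matrix4x4 cov;
  cov( 0, 0 ) = cov( 1, 1 ) = 0.01;   // position variances [mm^2]
  cov( 2, 2 ) = cov( 3, 3 ) = 1.0e-6; // slope variances

  if ( !cov.InvertFast() ) return 1;                      // cov now holds the weight matrix
  const double chi2 = ROOT::Math::Similarity( dv, cov );  // dv^T * cov^-1 * dv
  std::printf( "match chi2 = %g\n", chi2 );
  return 0;
}

In the algorithm this chi2 is compared against the current best value (initialised from the MatchChi2 property), while doca() and vertexChi2() are only stored as extraInfo for later selection.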

src/HyperMatch.h (new file)
#ifndef HyperMatch_H
#define HyperMatch_H
// from Gaudi
#include "GaudiAlg/GaudiAlgorithm.h"
// from LHCb
#include "Event/Particle.h"
#include "Event/State.h"
#include "Event/Track.h"
#include "VeloDet/DeVelo.h"
// for master branch: #include "LHCbMath/StateVertexUtils.h"
// from Rec
#include "TrackInterfaces/ITrackExtrapolator.h"
#include "TrackInterfaces/ITrackFitter.h"
/**
@class HyperMatch HyperMatch.h
@brief Match a Velo stub of a charged particle decaying before the TT to its reconstructed downstream decay products.
In a python options file, use e.g.:
@code
from Configurables import HyperMatch
xi_match = HyperMatch("XiReco")
xi_match.InputSeeds = "..."  # TES location of the downstream-reconstructed seed particles
xi_match.OutputTracks = "Rec/Track/HyperMatchedVelo"
@endcode
then add the algorithm upstream of the point in your decay chain where you'd like to carry out a decay tree fit.
@date 2022-07-01
@author Marian Stahl, Laurent Dufour
**/
class HyperMatch : public GaudiAlgorithm {
public:
// Standard constructor
HyperMatch( const std::string& name, ISvcLocator* pSvcLocator );
StatusCode initialize() override;
StatusCode execute() override;
private:
// Tools
// ToolHandle<ITrackExtrapolator> m_extrapolator{this, "TrackMasterExtrapolator", "TrackMasterExtrapolator"};
const ITrackFitter* m_velo_track_refitter{nullptr};
const ITrackExtrapolator* m_extrapolator{nullptr};
// members
double m_ip_cut;
double m_chi2_cut;
std::string m_velo_loc;
std::string m_seed_loc;
std::string m_output_loc;
std::string m_fitter_name;
std::string m_extrapolator_name;
};
#endif // HyperMatch_H