Commit 05e72b6e authored by Eduardo Rodrigues

Merge branch 'mamartin-dv-pyconf' into 'master'

Proof-of-concept for how to use PyConf to configure DaVinci

See merge request !502
parents a3a10499 da3dde34
2 merge requests: !1103 (Draft: Add AnalysisHelpers to DaVinci Stack), !502 (Proof-of-concept for how to use PyConf to configure DaVinci)
Pipeline #2344696 passed
Showing 1806 additions and 1 deletion
###############################################################################
# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
# Definitions of, and defaults for, the DaVinci application options.
auditors:
text: '"""List of auditors to run. Possible common choices include "NameAuditor", "MemoryAuditor", "ChronoAuditor". See Gaudi manual for a full list. Default = []."""'
value: []
buffer_events:
text: '"""Number of events to pre-fetch if use_iosvc=True. Default = 20000 is reasonable for most machines; it might need to be increased for more modern/powerful machines."""'
value: 20000
callgrind_profile:
text: '"""Enable callgrind profiling. Default = False."""'
value: False
conddb_tag:
text: '"""Tag for the CondDB."""'
value: ''
control_flow_file:
text: '"""Control flow file name (.gv extension since default output format for the DOT language). Default = '' for no file generation."""'
value: ''
data_flow_file:
text: '"""Data flow file name (.gv extension since default output format for the DOT language). Default = '' for no file generation."""'
value: ''
data_type:
text: '"""Data type, can be ["Upgrade"]."""'
value: 'Upgrade'
dddb_tag:
text: '""" Data type, can be ["Upgrade"] Forwarded to PhysConf, AnalysisConf, DstConf and LHCbApp"""'
value: 'dddb-20171126'
dqflags_tag:
text: '"""Tag for DQFLAGS. Default as set in DDDBConf for DataType """'
value: ''
evt_max:
text: '"""Number of events to analyse. Default = -1 to run over all events."""'
value: -1
histo_file:
text: '"""Name of output histogram file. Default = ''."""'
value: ''
ignore_dq_flags:
text: '"""If False, process only events with good DQ. Default = False."""'
value: False
input_files:
text: '"""Input data. Default = []. """'
value: []
input_type:
text: '"""Type of input files, e.g. "DST", "DIGI", "RDST", "MDST", "XDST" or "LDST". Default = DST."""'
value: 'DST'
msg_svc_format:
text: '"""MessageSvc output format.Default = "% F%35W%S %7W%R%T %0W%M"."""'
value: '% F%35W%S %7W%R%T %0W%M'
msg_svc_time_format:
text: '"""MessageSvc time format. Default = "%Y-%m-%d %H:%M:%S UTC"."""'
value: '%Y-%m-%d %H:%M:%S UTC'
ntuple_file:
text: '"""Name of output ntuple file. Default = ''."""'
value: ''
output_level:
text: '"""Set the output level used in the job. Default = INFO=3."""'
value: 3
print_freq:
text: '"""Frequency at which to print event numbers. Default = 1000."""'
value: 1000
python_logging_level:
text: '"""Python logger level. Default = logging.WARNING=30."""'
value: 30
skip_events:
text: '"""Number of events to skip at the beginning. Default = 0."""'
value: 0
simulation:
text: '"""Boolean to specify simulated samples. Default = False."""'
value: False
user_algorithms:
text: '"""List of user algorithms to run. Default = []."""'
value: []
use_iosvc:
text: '"""Use an alternative, faster, IIOSvc implementation for MDFs. Default = False."""'
value: False
@@ -12,6 +12,6 @@
from __future__ import absolute_import
from .ConfigurationUpgrade import data, mc, main
from .config import options, run_davinci, dv_node, DVSelection
__all__ = ('data', 'mc', 'main')
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""Selection and combiner wrappers.
Algorithms that inherit from DVCommonBase, like FilterDesktop and
CombineParticles, are not functional and do not expose input/output
DataHandles. They also do some funky internal location mangling to save
additional objects next to the Particle objects they create. The wrappers here
try to work around these traits to make the algorithms behave like any other
functional algorithms.
"""
from __future__ import absolute_import, division, print_function
import string
from Configurables import (
LoKi__Hybrid__DictTransform_TMVATransform_ as TMVAtransform,
LoKi__Hybrid__DictOfFunctors as DictOfFunctors,
LoKi__Hybrid__DictValue as DictValue,
)
#from RecoConf.hlt1_tracking import EmptyFilter
from PyConf.Algorithms import (CombineParticles, FilterDesktop,
DaVinci__N3BodyDecays as N3BodyDecays,
DaVinci__N4BodyDecays as N4BodyDecays)
__all__ = [
#'EmptyFilter', 'ParticleFilter', 'ParticleCombiner',
'ParticleFilter',
'ParticleCombiner',
'ParticleFilterWithPVs',
'ParticleCombinerWithPVs',
'require_all',
'N3BodyCombiner',
'N3BodyCombinerWithPVs',
'N4BodyCombiner',
'N4BodyCombinerWithPVs',
'NeutralParticleCombiner',
'NeutralParticleCombinerWithPVs'
]
def require_all(*cuts):
"""Return a cut string requiring all arguments.
Example:
>>> require_all('PT > {pt_min}', 'DLLK < {dllk_max}')
'(PT > {pt_min}) & (DLLK < {dllk_max})'
"""
cuts = ['({})'.format(c) for c in cuts]
return ' & '.join(cuts)
def _dvalgorithm_inputs(particle_inputs, pvs):
"""Return a dict suitable for a DVAlgorithm input transform."""
# ExtraInputs is added by the data handle mixin, so we bundle all inputs
# there to make them available to the scheduler
d = {'Inputs': particle_inputs, 'ExtraInputs': particle_inputs}
if pvs:
d['InputPrimaryVertices'] = pvs
return d
def _dvalgorithm_inputs_1(ParticlesA, PrimaryVertices=None):
return _dvalgorithm_inputs([ParticlesA], PrimaryVertices)
def _dvalgorithm_inputs_2(ParticlesA, ParticlesB, PrimaryVertices=None):
return _dvalgorithm_inputs([ParticlesA, ParticlesB], PrimaryVertices)
def _dvalgorithm_inputs_3(ParticlesA,
ParticlesB,
ParticlesC,
PrimaryVertices=None):
return _dvalgorithm_inputs([ParticlesA, ParticlesB, ParticlesC],
PrimaryVertices)
def _dvalgorithm_inputs_4(ParticlesA,
ParticlesB,
ParticlesC,
ParticlesD,
PrimaryVertices=None):
return _dvalgorithm_inputs(
[ParticlesA, ParticlesB, ParticlesC, ParticlesD], PrimaryVertices)
def _dvalgorithm_inputs_5(ParticlesA,
ParticlesB,
ParticlesC,
ParticlesD,
ParticlesE,
PrimaryVertices=None):
return _dvalgorithm_inputs(
[ParticlesA, ParticlesB, ParticlesC, ParticlesD, ParticlesE],
PrimaryVertices)
def _dvalgorithm_outputs(Particles):
"""Return a dict suitable for a DVAlgorithm output transform."""
# ExtraOutputs is added by the data handle mixin, so we can add the output
# there to make it available to the scheduler
# Could add, for example, output P2PV relations or refitted PVs here as
# well
d = {'Output': Particles, 'ExtraOutputs': [Particles]}
return d
def make_dvalgorithm(algorithm, ninputs=1):
"""Return a DVAlgorithm that's wrapped to make it behave nicely."""
# TODO(AP, NN): Workaround for CombineParticles accepting a list of inputs
# We need one wrapper per input multiplicity, since each input container
# requires its own named input property
input_transform = {
1: _dvalgorithm_inputs_1,
2: _dvalgorithm_inputs_2,
3: _dvalgorithm_inputs_3,
4: _dvalgorithm_inputs_4,
5: _dvalgorithm_inputs_5
}[ninputs]
def wrapped(**kwargs):
return algorithm(
input_transform=input_transform,
output_transform=_dvalgorithm_outputs,
WriteP2PVRelations=False,
ModifyLocations=False,
**kwargs)
return wrapped
filter_desktop = make_dvalgorithm(FilterDesktop)
# Map number of inputs to the combiner that should be used
combiners = {
1: make_dvalgorithm(CombineParticles),
2: make_dvalgorithm(CombineParticles, ninputs=2),
3: make_dvalgorithm(CombineParticles, ninputs=3),
4: make_dvalgorithm(CombineParticles, ninputs=4),
5: make_dvalgorithm(CombineParticles, ninputs=5)
}
threebodycombiners = {
1: make_dvalgorithm(N3BodyDecays),
2: make_dvalgorithm(N3BodyDecays, ninputs=2),
3: make_dvalgorithm(N3BodyDecays, ninputs=3),
4: make_dvalgorithm(N3BodyDecays, ninputs=4)
}
fourbodycombiners = {
1: make_dvalgorithm(N4BodyDecays),
2: make_dvalgorithm(N4BodyDecays, ninputs=2),
3: make_dvalgorithm(N4BodyDecays, ninputs=3),
4: make_dvalgorithm(N4BodyDecays, ninputs=4)
}
def ParticleFilter(particles, **kwargs):
"""Return a filter algorithm that takes `particles` as inputs.
Additional keyword arguments are forwarded to FilterDesktop.
"""
return filter_desktop(ParticlesA=particles, **kwargs).Particles
def ParticleFilterWithPVs(particles, pvs, **kwargs):
"""Return a filter algorithm that takes `particles` and `pvs` as inputs.
Additional keyword arguments are forwarded to FilterDesktop.
"""
return ParticleFilter(particles=particles, PrimaryVertices=pvs, **kwargs)
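For illustration, a minimal usage sketch of the filter wrappers; `make_pions` and `make_pvs` are hypothetical DataHandle producers and the LoKi cut strings are examples only:
# Hedged sketch: select displaced, high-PT pions (hypothetical inputs).
pions = make_pions()  # hypothetical DataHandle producer
pvs = make_pvs()      # hypothetical DataHandle producer
tight_pions = ParticleFilterWithPVs(
    particles=pions,
    pvs=pvs,
    Code=require_all('PT > 500*MeV', 'MIPCHI2DV(PRIMARY) > 4'))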
def ParticleFilterWithTMVA(name,
particles,
pvs,
mva_code,
mva_name,
xml_file,
bdt_vars,
Key="BDT",
**kwargs):
"""Return a filter algorithm that takes `particles`, the `MVACode`, the `MVA_name`,
an `XMLFile` and the `BDTVars` as inputs. The `Key` is an optional input.
Additional keyword arguments are forwarded to FilterDesktop.
"""
# Set up the name for the filter
particlefiltered = ParticleFilterWithPVs(
particles=particles,
pvs=pvs,
Code=mva_code.format(mva_name=mva_name),
**kwargs)
# Set up the names for the DictValue, TMVA and MVADict tools.
# Each tool needs to be named relative to its 'owner'.
dv_name = "{owner}.{mva_name}".format(
owner=particlefiltered.producer.name, mva_name=mva_name)
tmva_name = "{owner}.TMVA".format(owner=dv_name)
mvadict_name = "{owner}.MVAdict".format(owner=tmva_name)
tmva_source = "LoKi::Hybrid::DictOfFunctors/MVAdict"
dv_source = "LoKi::Hybrid::DictTransform<TMVATransform>/TMVA"
Options = {"XMLFile": xml_file, "Name": Key, "KeepVars": "0"}
# Just need to make sure each Configurable is instantiated; we don't need
# to assign them to anything
TMVAtransform(name=tmva_name, Options=Options, Source=tmva_source)
DictOfFunctors(name=mvadict_name, Variables=bdt_vars)
DictValue(name=dv_name, Key=Key, Source=dv_source)
return particlefiltered
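A hedged usage sketch follows; the functor in `mva_code`, the variable map and the XML path are illustrative values, not a working tune:
# Hedged sketch of the TMVA filter wrapper; all values are illustrative.
bdt_vars = {'log_pt': 'log(PT)', 'ipchi2': 'MIPCHI2DV(PRIMARY)'}
filtered = ParticleFilterWithTMVA(
    name='MyBDTFilter',
    particles=pions,  # hypothetical DataHandle
    pvs=pvs,          # hypothetical DataHandle
    mva_code="VALUE('LoKi::Hybrid::DictValue/{mva_name}') > 0.2",
    mva_name='MyBDT',
    xml_file='paths/to/MyBDT.xml',  # hypothetical path
    bdt_vars=bdt_vars)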
def ParticleCombiner(particles, my_combiners=combiners, **kwargs):
"""Return a combiner algorithm that takes `particles` as inputs.
Additional keyword arguments are forwarded to CombineParticles.
"""
particles = particles if isinstance(particles, list) else [particles]
ninputs = len(particles)
# Need to dispatch to the right combiner, based on the number of inputs
assert len(my_combiners) >= ninputs, \
'Do not have a combiner for {} inputs'.format(ninputs)
combiner = my_combiners[ninputs]
# Map each input container to an input property name
inputs = {
'Particles' + letter: p
for p, letter in zip(particles, string.ascii_uppercase)
}
# We need to merge dicts, making sure there are no overlapping keys (the
# caller really shouldn't specify ParticlesX keys anyway)
assert set(inputs).intersection(kwargs) == set()
kwargs = dict(list(inputs.items()) + list(kwargs.items()))
return combiner(**kwargs).Particles
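As a usage illustration, a hedged two-body combination sketch; `kaons` and `pions` stand in for DataHandles produced elsewhere, and the cuts are examples:
# Hedged sketch: D0 -> K- pi+ combination with illustrative cuts.
dzeros = ParticleCombiner(
    [kaons, pions],  # hypothetical DataHandles
    DecayDescriptor='[D0 -> K- pi+]cc',
    CombinationCut='AM < 2100*MeV',
    MotherCut='VFASPF(VCHI2PDOF) < 10')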
def N3BodyCombiner(particles, **kwargs):
"""Return a N3BodyDecays combiner algorithm that takes particles as inputs.
Additional keyword arguments are forwarded to N3BodyDecays.
"""
return ParticleCombiner(
particles, my_combiners=threebodycombiners, **kwargs)
def N4BodyCombiner(particles, **kwargs):
"""Return a N4BodyDecays combiner algorithm that takes particles as inputs.
Additional keyword arguments are forwarded to N4BodyDecays.
"""
return ParticleCombiner(
particles, my_combiners=fourbodycombiners, **kwargs)
def ParticleCombinerWithPVs(particles, pvs, **kwargs):
"""Return a combiner algorithm that takes `particles` and `pvs` as inputs.
Additional keyword arguments are forwarded to CombineParticles.
"""
return ParticleCombiner(particles=particles, PrimaryVertices=pvs, **kwargs)
def N3BodyCombinerWithPVs(particles, pvs, **kwargs):
"""Return a combiner algorithm that takes `particles` and `pvs` as inputs.
Additional keyword arguments are forwarded to N3BodyDecays.
"""
## TODO: eliminate duplication of code with ParticleCombinerWithPVs
return N3BodyCombiner(particles=particles, PrimaryVertices=pvs, **kwargs)
def N4BodyCombinerWithPVs(particles, pvs, **kwargs):
"""Return a combiner algorithm that takes `particles` and `pvs` as inputs.
Additional keyword arguments are forwarded to N4BodyDecays.
"""
## TODO: eliminate duplication of code with ParticleCombinerWithPVs
return N4BodyCombiner(particles=particles, PrimaryVertices=pvs, **kwargs)
def NeutralParticleCombinerWithPVs(particles, pvs, **kwargs):
"""Return a combiner algorithm that takes `particles` and `pvs` as inputs.
No vertex fit is performed, just momentum addition.
Additional keyword arguments are forwarded to CombineParticles.
"""
return NeutralParticleCombiner(
particles=particles, PrimaryVertices=pvs, **kwargs)
def NeutralParticleCombiner(particles, **kwargs):
"""Return a combiner algorithm that takes `particles` as input.
No vertex fit is performed, just momentum addition.
Additional keyword arguments are forwarded to CombineParticles.
"""
return ParticleCombiner(
particles=particles, ParticleCombiners={"": "ParticleAdder"}, **kwargs)
###############################################################################
# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
import os
import yaml
from PyConf.application import ApplicationOptions
def define_app_option_defaults(
yaml_file="$DAVINCIROOT/options/application-option-defaults.yaml"):
"""
Define/digest the default values for the application options as described in YAML.
"""
slots, doc = {}, {}
with open(os.path.expandvars(yaml_file)) as defaults:
config = yaml.safe_load(defaults)
for key, args in config.items():
for name, value in args.items():
if name == 'value':
slots[key] = value
else:
doc[key] = value
return slots, doc
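For clarity, a self-contained sketch of the YAML-to-dicts mapping performed above, using an inline document instead of the real defaults file:
# Hedged sketch: each option key maps 'value' to slots and 'text' to doc.
import yaml

snippet = '''
evt_max:
  text: 'Number of events to analyse. Default = -1.'
  value: -1
'''
config = yaml.safe_load(snippet)
slots = {k: v['value'] for k, v in config.items()}  # {'evt_max': -1}
doc = {k: v['text'] for k, v in config.items()}     # per-option docstring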
class DVAppOptions(ApplicationOptions):
"""
Enhanced PyConf.application.ApplicationOptions class
with slots defined via a YAML file.
"""
__slots__, _propertyDocDct = define_app_option_defaults()
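A quick sketch of what this buys, mirroring how `options` is created in DaVinci.config below; the attribute values are illustrative:
# Hedged sketch: YAML-defined slots become regular option attributes.
opts = DVAppOptions(_enabled=False)
opts.evt_max = 100               # slot from application-option-defaults.yaml
opts.ntuple_file = 'out.root'    # illustrative value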
###############################################################################
# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""DaVinci configured using PyConf components.
"""
from __future__ import absolute_import
import logging
from collections import namedtuple
from PyConf import configurable
from PyConf.Algorithms import DeterministicPrescaler
from PyConf.application import configure_input, configure, make_odin
from PyConf.components import Algorithm
from PyConf.control_flow import CompositeNode, NodeLogic
from DaVinci.application import DVAppOptions
log = logging.getLogger(__name__)
options = DVAppOptions(_enabled=False)
class DVSelection(namedtuple('DVSelection',
['node', 'extra_outputs'])): # noqa
"""Immutable object fully qualifiying an DaVinci selection.
Copied from HltLine without the prescaler
Attributes:
node (CompositeNode): the control flow node of the line
"""
__slots__ = () # do not add __dict__ (preserve immutability)
def __new__(cls, name, algs, extra_outputs=None):
"""Initialize a HltLine from name, algs and prescale.
Creates a control flow `CompositeNode` with the given `algs`
combined with `LAZY_AND` logic.
Args:
name (str): name of the line
algs: iterable of algorithms
extra_outputs (iterable of 2-tuple): List of (name, DataHandle) pairs.
"""
node = CompositeNode(
name,
tuple(algs),
combineLogic=NodeLogic.LAZY_AND,
forceOrder=True)
if extra_outputs is None:
extra_outputs = []
return super(DVSelection, cls).__new__(cls, node,
frozenset(extra_outputs))
@property
def name(self):
"""Selection name"""
return self.node.name
@property
def output_producer(self):
"""Return the producer that defines the output of this line.
The producer is defined as the last child in the control flow node,
i.e. the last item passed as the `algs` argument to the `DVSelection`
constructor.
If the producer creates no output, None is returned.
"""
children = self.node.children
last = children[-1]
# Could in principle have control node here; will deal with this use
# case if it arises
assert isinstance(last, Algorithm)
# If the last algorithm produces nothing, there is no 'producer'
return last if last.outputs else None
def produces_output(self):
"""Return True if this line produces output."""
return self.output_producer is not None
def dv_node(name, algs, logic=NodeLogic.NONLAZY_OR, forceOrder=False):
"""Return a CompositeNode combining the given algorithms with the given logic."""
return CompositeNode(
name, combineLogic=logic, children=algs, forceOrder=forceOrder)
def davinci_control_flow(options, dvsels=[]):
"""Return the top-level CompositeNode wrapping all DVSelection nodes."""
options.finalize()
dec = CompositeNode(
'dv_decision',
combineLogic=NodeLogic.NONLAZY_OR,
children=[dvsel.node for dvsel in dvsels],
forceOrder=False)
return CompositeNode(
'davinci',
combineLogic=NodeLogic.NONLAZY_OR,
children=[dec],
forceOrder=True)
def run_davinci(options, dvsels=[], public_tools=[]):
'''
DaVinci application control flow.
Args:
options (ApplicationOptions): holder of application options
dvsels (list): list of `DVSelection` instances to run
public_tools (list): list of public `Tool` instances to configure
'''
if not dvsels:
dummy = CompositeNode("EmptyNode", children=[])
config = configure(options, dummy, public_tools=public_tools)
else:
config = configure_input(options)
top_dv_node = davinci_control_flow(options, dvsels)
config.update(
configure(options, top_dv_node, public_tools=public_tools))
return config
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
######
### N.B. THIS FILE EXISTS TO AVOID DEPENDENCIES ON MOORE.
### IT IS NEEDED FOR TESTING PURPOSES AND MUST BE REMOVED IN PRODUCTION.
######
"""Load data from files and set up unpackers.
There are two things we have to deal with:
1. Loading the data from the file in to the TES, done by
Gaudi::Hive::FetchDataFromFile.
2. Unpacking and preparing packed containers, if the 'reconstruction' is
defined as the objects already present in the file.
In most LHCb applications, step 2 is done for you behind the scenes. The
DataOnDemandSvc is configured in LHCb/GaudiConf/DstConf.py to unpack containers
when they are requested. It also configures adding RICH, MUON, and combined PID
information to ProtoParticles when they're unpacked. Because we don't have the
DataOnDemandSvc here, we have to do this by hand.
The interesting 'user-facing' exports of this module are:
* [reco,mc]_from_file(): Dict of names to locations that can be loaded from a file.
* [reco,mc]_unpackers(): Dict from unpacked object name to Algorithm that produces a
container of those objects.
"""
from __future__ import absolute_import, division, print_function
import collections
from Gaudi.Configuration import ERROR
from Configurables import (
DataPacking__Unpack_LHCb__MuonPIDPacker_,
DataPacking__Unpack_LHCb__RichPIDPacker_, UnpackCaloHypo,
UnpackProtoParticle, UnpackRecVertex, UnpackTrackFunctional,
UnpackMCParticle, UnpackMCVertex, DataPacking__Unpack_LHCb__MCVPHitPacker_
as UnpackMCVPHit, DataPacking__Unpack_LHCb__MCUTHitPacker_ as
UnpackMCUTHit, DataPacking__Unpack_LHCb__MCFTHitPacker_ as UnpackMCFTHit,
DataPacking__Unpack_LHCb__MCRichHitPacker_ as UnpackMCRichHit,
DataPacking__Unpack_LHCb__MCEcalHitPacker_ as UnpackMCEcalHit,
DataPacking__Unpack_LHCb__MCHcalHitPacker_ as UnpackMCHcalHit,
DataPacking__Unpack_LHCb__MCMuonHitPacker_ as UnpackMCMuonHit,
DataPacking__Unpack_LHCb__MCRichDigitSummaryPacker_ as RichSumUnPack)
from PyConf.components import Algorithm, force_location
from PyConf.application import make_data_with_FetchDataFromFile
from PyConf.Tools import (ChargedProtoParticleAddRichInfo,
ChargedProtoParticleAddMuonInfo,
ChargedProtoParticleAddCombineDLLs)
def packed_reco_from_file():
return {
'PackedPVs': '/Event/pRec/Vertex/Primary',
'PackedCaloElectrons': '/Event/pRec/Calo/Electrons',
'PackedCaloPhotons': '/Event/pRec/Calo/Photons',
'PackedCaloMergedPi0s': '/Event/pRec/Calo/MergedPi0s',
'PackedCaloSplitPhotons': '/Event/pRec/Calo/SplitPhotons',
'PackedMuonPIDs': '/Event/pRec/Muon/MuonPID',
'PackedRichPIDs': '/Event/pRec/Rich/PIDs',
'PackedTracks': '/Event/pRec/Track/Best',
'PackedMuonTracks': '/Event/pRec/Track/Muon',
'PackedNeutralProtos': '/Event/pRec/ProtoP/Neutrals',
'PackedChargedProtos': '/Event/pRec/ProtoP/Charged',
}
def packed_mc_from_file():
return {
'PackedMCParticles': '/Event/pSim/MCParticles',
'PackedMCVertices': '/Event/pSim/MCVertices',
'PackedMCVPHits': '/Event/pSim/VP/Hits',
'PackedMCUTHits': '/Event/pSim/UT/Hits',
'PackedMCFTHits': '/Event/pSim/FT/Hits',
'PackedMCRichHits': '/Event/pSim/Rich/Hits',
'PackedMCEcalHits': '/Event/pSim/Ecal/Hits',
'PackedMCHcalHits': '/Event/pSim/Hcal/Hits',
'PackedMCMuonHits': '/Event/pSim/Muon/Hits',
'PackedMCRichDigitSummaries': '/Event/pSim/Rich/DigitSummaries',
}
def unpacked_reco_locations():
# If the structure is not like this, pointers point to the wrong place...
# The SmartRefs held by the unpacked reco objects only work if we unpack to these specific locations
locations = {
k: v.replace('pRec', 'Rec')
for k, v in packed_reco_from_file().items()
}
return locations
def unpacked_mc_locations():
# If the structure is not like this, pointers point to the wrong place...
# The SmartRefs held by the unpacked MC objects only work if we unpack to these specific locations
return {
'PackedMCParticles': '/Event/MC/Particles',
'PackedMCVertices': '/Event/MC/Vertices',
'PackedMCVPHits': '/Event/MC/VP/Hits',
'PackedMCUTHits': '/Event/MC/UT/Hits',
'PackedMCFTHits': '/Event/MC/FT/Hits',
'PackedMCRichHits': '/Event/MC/Rich/Hits',
'PackedMCEcalHits': '/Event/MC/Ecal/Hits',
'PackedMCHcalHits': '/Event/MC/Hcal/Hits',
'PackedMCMuonHits': '/Event/MC/Muon/Hits',
'PackedMCRichDigitSummaries': '/Event/MC/Rich/DigitSummaries',
}
def reco_from_file():
# TODO(AP) should only add the packed data if we're running on Upgrade MC
# where Brunel has already been run
packed_data = packed_reco_from_file()
# raw_event = raw_event_from_file()
# We don't want any keys accidentally overwriting each other
# assert set(packed_data.keys()).intersection(set(raw_event.keys())) == set()
# return dict(list(packed_data.items()) + list(raw_event.items()))
return packed_data
def mc_from_file():
# TODO(AP) should only add the packed data if we're running on Upgrade MC
# where Brunel has already been run
packed_data = packed_mc_from_file()
return packed_data
def reco_unpacker(key, configurable, name, **kwargs):
"""Return unpacker that reads from file and unpacks to a forced output location."""
packed_loc = reco_from_file()[key]
unpacked_loc = unpacked_reco_locations()[key]
alg = Algorithm(
configurable,
name=name,
outputs={'OutputName': force_location(unpacked_loc)},
InputName=make_data_with_FetchDataFromFile(packed_loc),
**kwargs)
return alg
def mc_unpacker(key, configurable, name, **kwargs):
"""Return unpacker that reads from file and unpacks to a forced output location."""
packed_loc = mc_from_file()[key]
unpacked_loc = unpacked_mc_locations()[key]
alg = Algorithm(
configurable,
name=name,
outputs={'OutputName': force_location(unpacked_loc)},
InputName=make_data_with_FetchDataFromFile(packed_loc),
**kwargs)
return alg
def make_mc_track_info():
return make_data_with_FetchDataFromFile('/Event/MC/TrackInfo')
def reco_unpackers():
muonPIDs = reco_unpacker('PackedMuonPIDs',
DataPacking__Unpack_LHCb__MuonPIDPacker_,
'UnpackMuonPIDs')
richPIDs = reco_unpacker(
'PackedRichPIDs',
DataPacking__Unpack_LHCb__RichPIDPacker_,
'UnpackRichPIDs',
OutputLevel=ERROR)
# The OutputLevel above suppresses the following useless warnings (plus more?)
# WARNING DataPacking::Unpack<LHCb::RichPIDPacker>:: Incorrect data version 0 for packing version > 3. Correcting data to version 2.
# Ordered so that dependents are unpacked first
d = collections.OrderedDict([
('PVs', reco_unpacker('PackedPVs', UnpackRecVertex,
'UnpackRecVertices')),
('CaloElectrons',
reco_unpacker('PackedCaloElectrons', UnpackCaloHypo,
'UnpackCaloElectrons')),
('CaloPhotons',
reco_unpacker('PackedCaloPhotons', UnpackCaloHypo,
'UnpackCaloPhotons')),
('CaloMergedPi0s',
reco_unpacker('PackedCaloMergedPi0s', UnpackCaloHypo,
'UnpackCaloMergedPi0s')),
('CaloSplitPhotons',
reco_unpacker('PackedCaloSplitPhotons', UnpackCaloHypo,
'UnpackCaloSplitPhotons')),
('MuonPIDs', muonPIDs),
('RichPIDs', richPIDs),
('Tracks',
reco_unpacker('PackedTracks', UnpackTrackFunctional,
'UnpackBestTracks')),
('MuonTracks',
reco_unpacker('PackedMuonTracks', UnpackTrackFunctional,
'UnpackMuonTracks')),
('NeutralProtos',
reco_unpacker('PackedNeutralProtos', UnpackProtoParticle,
'UnpackNeutralProtos')),
('ChargedProtos',
reco_unpacker(
'PackedChargedProtos',
UnpackProtoParticle,
'UnpackChargedProtos',
AddInfo=[
ChargedProtoParticleAddRichInfo(
InputRichPIDLocation=richPIDs.OutputName),
ChargedProtoParticleAddMuonInfo(
InputMuonPIDLocation=muonPIDs.OutputName),
ChargedProtoParticleAddCombineDLLs()
])),
])
# Make sure we have consistent names, and that we're unpacking everything
# we load from the file
assert set(['Packed' + k for k in d.keys()]) - set(
packed_reco_from_file().keys()) == set()
return d
def mc_unpackers():
# Ordered so that dependents are unpacked first
mc_vertices = mc_unpacker('PackedMCVertices', UnpackMCVertex,
'UnpackMCVertices')
# Make sure that MC particles and MC vertices are unpacked together,
# see https://gitlab.cern.ch/lhcb/LHCb/issues/57 for details.
mc_particles = mc_unpacker(
'PackedMCParticles',
UnpackMCParticle,
'UnpackMCParticles',
ExtraInputs=[mc_vertices])
mc_vp_hits = mc_unpacker('PackedMCVPHits', UnpackMCVPHit, 'UnpackMCVPHits')
mc_ut_hits = mc_unpacker('PackedMCUTHits', UnpackMCUTHit, 'UnpackMCUTHits')
mc_ft_hits = mc_unpacker('PackedMCFTHits', UnpackMCFTHit, 'UnpackMCFTHits')
mc_rich_hits = mc_unpacker('PackedMCRichHits', UnpackMCRichHit,
'UnpackMCRichHits')
mc_ecal_hits = mc_unpacker('PackedMCEcalHits', UnpackMCEcalHit,
'UnpackMCEcalHits')
mc_hcal_hits = mc_unpacker('PackedMCHcalHits', UnpackMCHcalHit,
'UnpackMCHcalHits')
mc_muon_hits = mc_unpacker('PackedMCMuonHits', UnpackMCMuonHit,
'UnpackMCMuonHits')
# RICH Digit summaries
mc_rich_digit_sums = mc_unpacker('PackedMCRichDigitSummaries',
RichSumUnPack, "RichSumUnPack")
d = collections.OrderedDict([
('MCRichDigitSummaries', mc_rich_digit_sums),
('MCParticles', mc_particles),
('MCVertices', mc_vertices),
('MCVPHits', mc_vp_hits),
('MCUTHits', mc_ut_hits),
('MCFTHits', mc_ft_hits),
('MCRichHits', mc_rich_hits),
('MCEcalHits', mc_ecal_hits),
('MCHcalHits', mc_hcal_hits),
('MCMuonHits', mc_muon_hits),
])
# Make sure we have consistent names, and that we're unpacking everything
# we load from the file
assert set(['Packed' + k for k in d.keys()]) - set(
packed_mc_from_file().keys()) == set()
return d
def boole_links_digits_mcparticles():
"""Return a dict of locations for MC linker tables (to mcparticles) created by Boole."""
locations = {
"EcalDigits": "/Event/Link/Raw/Ecal/Digits",
"FTLiteClusters": "/Event/Link/Raw/FT/LiteClusters",
"HcalDigits": "/Event/Link/Raw/Hcal/Digits",
"MuonDigits": "/Event/Link/Raw/Muon/Digits",
"UTClusters": "/Event/Link/Raw/UT/Clusters",
"VPDigits": "/Event/Link/Raw/VP/Digits",
}
return {
key: make_data_with_FetchDataFromFile(loc)
for key, loc in locations.items()
}
def boole_links_digits_mchits():
"""Return a dict of locations for MC linker tables (to mchits) created by Boole.
These locations are only propagated out of Boole for eXtended DIGI and DST types.
"""
locations = {
"FTLiteClusters": "/Event/Link/Raw/FT/LiteClusters2MCHits",
"UTClusters": "/Event/Link/Raw/UT/Clusters2MCHits",
"VPDigits": "/Event/Link/Raw/VP/Digits2MCHits",
}
return {
key: make_data_with_FetchDataFromFile(loc)
for key, loc in locations.items()
}
def brunel_links():
"""Return a dict of locations for MC linker tables created by Brunel."""
locations = {
"CaloElectrons": "/Event/Link/Rec/Calo/Electrons",
"CaloMergedPi0s": "/Event/Link/Rec/Calo/MergedPi0s",
"CaloPhotons": "/Event/Link/Rec/Calo/Photons",
"CaloSplitPhotons": "/Event/Link/Rec/Calo/SplitPhotons",
"Tracks": "/Event/Link/Rec/Track/Best",
}
return {
key: make_data_with_FetchDataFromFile(loc)
for key, loc in locations.items()
}
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""Hacks for making legacy and future code work together."""
from __future__ import absolute_import, division, print_function
from Configurables import LoKi__Hybrid__Tool
from PyConf.components import Tool
def patched_hybrid_tool(name):
"""Return a LoKi::Hybrid::Tool configured for non-DVAlgorithms.
Some modules import functors that depend on the DVAlgorithm context being
available. The LoKi::Hybrid::Tool tool loads these modules by default,
breaking algorithms that don't inherit from DVAlgorithm, so we remove them
from the list.
"""
# List of modules we will delete from the default list
dv_modules = ['LoKiPhys.decorators', 'LoKiArrayFunctors.decorators']
dummy = LoKi__Hybrid__Tool('DummyFactoryNotForUse')
return Tool(
LoKi__Hybrid__Tool,
name='{}HybridFactory'.format(name),
public=True,
Modules=[m for m in dummy.Modules if m not in dv_modules])
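A hedged usage sketch; the cut string is illustrative and the tool choice mirrors the example option file further down:
# Hedged sketch: give a non-DVAlgorithm LoKi filter the patched factory.
from PyConf.Tools import LoKi__Hybrid__ProtoParticleFilter as ProtoParticleFilter

pp_filter = ProtoParticleFilter(
    Code='PP_HASRICH',  # illustrative cut
    Factory=patched_hybrid_tool('PPFactory'))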
##############################################################################
# (c) Copyright 2020-2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
######
### N.B. THIS FILE EXISTS TO AVOID DEPENDENCIES ON MOORE.
### IT IS NEEDED FOR TESTING PURPOSES AND MUST BE REMOVED IN PRODUCTION.
######
from PyConf import configurable
from .data_from_file import reco_unpackers
def upfront_reconstruction_from_file(): # renamed from upfront_reconstruction
"""Return a list DataHandles that define the upfront reconstruction output.
This differs from `reconstruction` as it should not be used as inputs to
other algorithms, but only to define the control flow, i.e. the return
value of this function should be ran before all HLT2 lines.
"""
return list(reco_unpackers().values())
def reconstruction_from_file(): # renamed from reconstruction
"""Return a {name: DataHandle} dict that define the reconstruction output."""
return {k: v.OutputName for k, v in reco_unpackers().items()}
def make_charged_protoparticles():
return reconstruction()['ChargedProtos']
def make_neutral_protoparticles():
return reconstruction()['NeutralProtos']
def make_pvs():
return reconstruction()['PVs']
def make_tracks():
return reconstruction()['Tracks']
@configurable
def reconstruction(from_file=True):
"""Return reconstruction objects.
Note: if more than one object is needed, it is advised to use this function
rather than the individual accessors above, since calling each accessor
separately slows down the configuration.
"""
# The reconstruction itself is not run in DaVinci; everything is read from file
reco = reconstruction_from_file()
upfront_reconstruction = upfront_reconstruction_from_file()
charged_protos = reco["ChargedProtos"]
neutral_protos = reco["NeutralProtos"]
best_tracks = reco["Tracks"]
pvs = reco["PVs"]
electrons = reco["CaloElectrons"]
photons = reco["CaloPhotons"]
mergedPi0s = reco["CaloMergedPi0s"]
splitPhotons = reco["CaloSplitPhotons"]
muon_pids = reco["MuonPIDs"]
rich_pids = reco["RichPIDs"]
return {
"ChargedProtos": charged_protos,
"NeutralProtos": neutral_protos,
"Tracks": best_tracks,
"PVs": pvs,
"UpfrontReconstruction": upfront_reconstruction,
"CaloElectrons": electrons,
"CaloPhotons": photons,
"CaloMergedPi0s": mergedPi0s,
"CaloSplitPhotons": splitPhotons,
"MuonPIDs": muon_pids,
"RichPIDs": rich_pids,
}
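Since `reconstruction` is decorated with @configurable, callers can rebind its arguments through PyConf's bind mechanism; a hedged sketch:
# Hedged sketch: rebind the configurable argument within a scope.
with reconstruction.bind(from_file=True):
    pvs = make_pvs()  # the accessor picks up the bound reconstruction()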
###############################################################################
# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""
A dummy DaVinci job with no algorithm/node run.
"""
from __future__ import absolute_import
from DaVinci import DVSelection
from DaVinci.config import options, run_davinci
# Basic input-like metadata required even though there is no input data
options.input_type = "ROOT"
options.dddb_tag = "dummy"
options.conddb_tag = "dummy"
run_davinci(options)
###############################################################################
# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""
A dummy job with no algorithm/node run. Does not use the "DaVinci runner".
"""
from __future__ import absolute_import
from PyConf.application import configure
from PyConf.control_flow import CompositeNode
from DaVinci.config import options
dummy = CompositeNode("Dummy", children=[], forceOrder=True)
# Basic input-like metadata required even though there is no input data
options.input_type = "ROOT"
options.dddb_tag = "dummy"
options.conddb_tag = "dummy"
config = configure(options, dummy)
##############################################################################
# (c) Copyright 2020-2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
from PyConf import configurable
from PyConf.Algorithms import GaudiHistoAlgorithm, FunctionalParticleMaker
from DaVinci import options, run_davinci, dv_node, DVSelection
from DaVinci.hacks import patched_hybrid_tool
#
from PyConf.control_flow import NodeLogic
from PyConf.Tools import (LoKi__Hybrid__ProtoParticleFilter as
ProtoParticleFilter, LoKi__Hybrid__TrackSelector as
TrackSelector)
# print control flow and data flow graphs
options.control_flow_file = 'control_flow.gv'
options.data_flow_file = 'data_flow.gv'
options.ntuple_file = 'DVU_test-ntp.root'
options.histo_file = 'DVU_test-his.root'
# Set up the dataset
options.evt_max = 10
options.set_input_and_conds_from_testfiledb('Upgrade_Bd2KstarMuMu')
#options.set_input_and_conds_from_testfiledb('upgrade-magdown-sim09c-up02-reco-up01-minbias-ldst')
options.input_raw_format = 4.3
print(options)
from DaVinci.standard_particles import make_detached_mumu
from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
sel = DVSelection(
name="DVselection", algs=upfront_reconstruction() + [make_detached_mumu()])
# run davinci
#public_tools = [stateProvider_with_simplified_geom()]
#run_davinci(options, [sel, sel2], public_tools)
run_davinci(options, [sel]) #, public_tools)
##############################################################################
# (c) Copyright 2020-2021 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
from PyConf import configurable
from PyConf.Algorithms import GaudiHistoAlgorithm
from DaVinci import options, run_davinci, dv_node, DVSelection
# print control flow and data flow graphs
options.control_flow_file = 'control_flow.gv'
options.data_flow_file = 'data_flow.gv'
options.ntuple_file = 'DVU_test-ntp.root'
options.histo_file = 'DVU_test-his.root'
# Set up the dataset
options.evt_max = 100
options.set_input_and_conds_from_testfiledb('Upgrade_Bd2KstarMuMu')
# define algorithm
simple_histos = GaudiHistoAlgorithm(name="SimpleHistos", HistoPrint=True)
# run davinci
algs = DVSelection(name="DVselection", algs=[simple_histos])
run_davinci(options, [algs])