Commit 00ab7698 authored by Eduardo Rodrigues

Merge branch 'lbexec-docs' into 'master'

Update documentation for lbexec

See merge request !724
parents 0f85ea0e 3180a96e
@@ -33,32 +33,24 @@ def main(options):

    fields['MuPlus'] = 'J/psi(1S) -> ^mu+ mu-'

    #FunTuple: make collection of functors for Jpsi
    variables_jpsi = {
        'LOKI_P': 'P',
        'LOKI_PT': 'PT',
        'LOKI_Muonp_PT': 'CHILD(PT, 1)',
        'LOKI_Muonm_PT': 'CHILD(PT, 2)',
        'LOKI_MAXPT': 'TRACK_MAX_PT',
        'LOKI_N_HIGHPT_TRCKS': 'NINTREE(ISBASIC & HASTRACK & (PT > 1500*MeV))',
        'THOR_P': F.P,
        'THOR_PT': F.PT
    }

    #FunTuple: make collection of functors for Muplus
    variables_muplus = {'LOKI_P': 'P', 'THOR_P': F.P}

    #FunTuple: associate functor collections to field (branch) name
    variables = {}
    variables['Jpsi'] = FunctorCollection(variables_jpsi)
    variables['MuPlus'] = FunctorCollection(variables_muplus)

    #FunTuple: define list of preambles for loki
    loki_preamble = ['TRACK_MAX_PT = MAXTREE(ISBASIC & HASTRACK, PT, -1)']

@@ -77,7 +69,7 @@ def main(options):

    fields_KS['KS'] = 'KS0 -> pi+ pi-'
    #associate the functor collections to KS field name (NB: here we use functor collection used for jpsi)
    variables_KS = {}
    variables_KS['KS'] = FunctorCollection(variables_jpsi)
    #funtuple instance
    tuple_kshorts = Funtuple(
        name="KsTuple",
...
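A side note on the pattern above (a minimal sketch, not part of the diff): keeping the functors in a plain dict and wrapping them in ``FunctorCollection`` only when they are attached to a field makes a single set of variables easy to reuse across several fields, as the KS hunk does with ``variables_jpsi``:

.. code-block:: python

    import Functors as F
    from FunTuple import FunctorCollection

    # Plain dict of branch name -> functor: cheap to define once and share.
    shared_variables = {'THOR_P': F.P, 'THOR_PT': F.PT}

    # Wrap the shared dict once per field, so each field gets its own
    # independent FunctorCollection instance.
    variables = {
        'Jpsi': FunctorCollection(shared_variables),
        'KS': FunctorCollection(shared_variables),
    }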
@@ -19,46 +19,38 @@ from DaVinci import make_config

def main(options):
    # FunTuple: define fields (branches)
    fields = {
        'B0': "[B0 -> D_s- pi+]CC",
        'Ds': "[B0 -> ^D_s- pi+]CC",
        'pip': "[B0 -> D_s- ^pi+]CC",
    }

    # FunTuple: define variables for the B meson
    variables_B = {
        'LOKI_MAXPT': 'TRACK_MAX_PT',
        'LOKI_Muonp_PT': 'CHILD(PT, 1)',
        'LOKI_Muonm_PT': 'CHILD(PT, 2)',
        'LOKI_NTRCKS_ABV_THRSHLD': 'NINTREE(ISBASIC & (PT > 15*MeV))'
    }

    # FunTuple: make functor collection from the imported functor library Kinematics
    variables_all = Kinematics()

    # FunTuple: associate functor collections to field (branch) name
    variables = {
        'ALL': variables_all,  # adds variables to all fields
        'B0': FunctorCollection(variables_B),
    }

    line = "SpruceB2OC_BdToDsmPi_DsmToKpKmPim_Line"
    config = {
        "location": f"/Event/Spruce/{line}/Particles",
        "filters": [f"HLT_PASS('{line}Decision')"],
        "preamble": ['TRACK_MAX_PT = MAXTREE(ISBASIC & HASTRACK, PT, -1)'],
        "tuple": "DecayTree",
        "fields": fields,
        "variables": variables,
    }
    algs = configured_FunTuple(options, {"B0Dspi": config})
...
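The elided tail of the file is kept as-is above; for orientation only, a hypothetical completion of such a ``main`` function, following the ``make_config`` pattern that the file's own import (``from DaVinci import make_config``) suggests:

.. code-block:: python

    # Hypothetical continuation, not the file's real elided tail:
    # hand the configured algorithms to DaVinci to build the job configuration.
    return make_config(options, algs)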
@@ -25,7 +25,7 @@ def main(options):

    turbo_line = "Hlt2BsToJpsiPhi_JPsi2MuMu_PhiToKK_Line"
    input_data = force_location(f"/Event/HLT2/{turbo_line}/Particles")

    # Add a filter: we are not really filtering over particles, we are getting over a technical hurdle here.
    # The hurdle being that if the event hasn't fired an HLT2 line then no TES location exists,
    # and therefore if any algorithm tries to look for this location, we run into a problem.
    # Side-step this issue with a filter, where:
...
@@ -47,8 +47,13 @@ extensions = [

    "sphinx.ext.graphviz",
    "sphinx.ext.todo",
    "graphviz_linked",
    "sphinxcontrib.autodoc_pydantic",
]

# Control the display of the DaVinci.Options object
autodoc_pydantic_model_show_json = True
autodoc_pydantic_settings_show_json = False

# Assume unmarked references (in backticks) refer to Python objects
default_role = "py:obj"
...
DaVinci Configuration
=====================

Creating an ntuple
------------------

A basic example of creating an ntuple with FunTuple is:

.. literalinclude:: ../../DaVinciExamples/python/DaVinciExamples/tupling/basic.py
    :language: python
    :start-at: import

This example can be run using ``lbexec`` with the following ``options.yaml`` file:

.. code-block:: yaml

    input_files: root://eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Upgrade/LDST/00076720/0000/00076720_000000{02,04,43,68}_1.ldst
    input_type: ROOT
    input_raw_format: 4.3
    data_type: Upgrade
    simulation: true
    dddb_tag: dddb-20171126
    conddb_tag: sim-20171127-vc-md100
    ntuple_file: basic-funtuple-example.root
    evt_max: 10
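This example can then be launched like any other ``lbexec`` job. The module path below is inferred from the ``literalinclude`` above and the function name is assumed to be ``main``; adjust both if they differ::

    lb-run DaVinci/vXrY lbexec DaVinciExamples.tupling.basic:main options.yaml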
How to add an event pre-filter
------------------------------

@@ -335,66 +35,63 @@

To add an event pre-filter in a job, one can set the ``evt_pre_filters`` property to a dict
containing an element for each filter you want to implement, where the key is the filter name and the value is a string
containing the filter code. DaVinci will then automatically create a ``LoKi__HDRFilter`` or
``LoKi__VoidFilter`` instance, according to the filter code, for each dict entry.
For example, the ``filter_code`` can exploit the ``HLT_PASS`` feature to ensure that the trigger/sprucing line corresponding to the input location fired in that specific event.
This is done by including the following in your ``options.yaml`` file:

.. code-block:: yaml

    evt_pre_filters:
      Hlt2TopoLineFilter: HLT_PASS('Hlt2Topo2BodyLineDecision')

A filter can also be instantiated using the ``add_filter`` function:

.. code-block:: python

    from DaVinci.algorithms import add_filter
    filter = add_filter(options, "Hlt2TopoLineFilter", "HLT_PASS('Hlt2Topo2BodyLineDecision')")

Additional examples can be found `here <https://gitlab.cern.ch/lhcb/DaVinci/-/blob/master/DaVinciTests/tests/options/option_davinci_filters.py>`__ and `here <https://gitlab.cern.ch/lhcb/DaVinci/-/blob/master/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.py>`__.
Additional information on how to implement a filter code can be found `here <https://twiki.cern.ch/twiki/bin/view/LHCb/FAQ/DaVinciFAQ#How_to_process_the_stripped_DSTs>`__ and `here <https://gitlab.cern.ch/lhcb/Phys/blob/master/Phys/PhysConf/python/PhysConf/Filters.py>`__.
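Multiple pre-filters can be declared in the same dict, one entry per filter; a minimal sketch (the second line name is borrowed from the sprucing example above and is purely illustrative):

.. code-block:: yaml

    # One entry per filter; DaVinci picks LoKi__HDRFilter or
    # LoKi__VoidFilter from the filter code of each entry.
    evt_pre_filters:
      Hlt2TopoLineFilter: HLT_PASS('Hlt2Topo2BodyLineDecision')
      SpruceLineFilter: HLT_PASS('SpruceB2OC_BdToDsmPi_DsmToKpKmPim_LineDecision')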
Using the configured_FunTuple wrapper
-------------------------------------

The ``configured_FunTuple`` wrapper has been implemented to ease the configuration of a FunTuple algorithm instance in DaVinci, combining into a single step the instantiation of three different objects:

#. ``LoKi__HDRfilter``: to select only the events passing a specific set of trigger lines,
#. ``make_data_with_FetchDataFromFile``: to obtain the correct DataHandle object needed by FunTuple,
#. ``Funtuple``: object containing all the branches and variables defined by the user.

Thanks to this wrapper, the user can instantiate all three objects in the user file in the following way:

.. literalinclude:: ../../DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_configFuntuple.py
    :language: python
    :start-at: import

``configured_FunTuple`` takes as input a dictionary containing an entry for each FunTuple instance that has to be created,
where the two elements are the FunTuple name and a configuration dictionary with the following information:

- ``"location"``: string with the input location to be used as input to FunTuple,
- ``"filters"``: list of filters to be applied in front of FunTuple,
- ``"preamble"``: list of LoKi functors to simplify the code that is used to fill the FunTuple leaves,
- ``"tuple"``: name of the FunTuple tree,
- ``"fields"``: dictionary with the FunTuple fields (branches),
- ``"variables"``: dictionary with the FunTuple variables for each field.

``configured_FunTuple(...)`` returns a dictionary containing lists of all the algorithms that have to be run for all the defined FunTuple instances.
This wrapper is meant to be used only for the simplest cases, where no other algorithms have to be added between the HDRfilter and the FunTuple;
however, it can still be used as a starting point for more complex wrappers.
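Because the input is a plain dict keyed by tuple name, one call can configure several independent FunTuples; a minimal sketch, assuming ``config_b0dspi`` and ``config_kshort`` are configuration dictionaries following the schema above (both names are hypothetical):

.. code-block:: python

    # One entry per FunTuple instance; the key becomes the tuple name.
    configs = {
        "B0Dspi": config_b0dspi,
        "KsTuple": config_kshort,
    }
    algs = configured_FunTuple(options, configs)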
How to run a job on an XGEN file
--------------------------------

The DaVinci application can also be run over an XGEN (extended generator) file by setting two keys in the ``options.yaml`` file:

* set ``input_type: ROOT``,
* set ``unpack_only_mc: true`` (this is a temporary workaround until unpacking is functional).

The new FunTupleMC algorithm is used to create the tuple.
A working example can be found `here <https://gitlab.cern.ch/lhcb/DaVinci/-/blob/master/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_xgen.py>`__.
This example can be run with::

    lb-run DaVinci/vXrY lbexec DaVinciExamples.tupling.option_davinci_tupling_from_xgen:main '$DAVINCIEXAMPLESROOT/example_data/Gauss_12143001_xgen.yaml'
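A minimal sketch of how those two keys sit in an options file (the file paths and the ``ntuple_file`` value here are placeholders, not taken from the example above):

.. code-block:: yaml

    input_files:
      - Gauss_12143001_xgen.xgen   # placeholder path to an XGEN file
    input_type: ROOT               # XGEN files are read as plain ROOT
    unpack_only_mc: true           # temporary workaround until unpacking is functional
    ntuple_file: xgen-tuple.root   # placeholder output name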
@@ -4,6 +4,5 @@ DaVinci

.. toctree::

   options

Options YAML
============

The YAML provided to populate the ``options`` object passed to the user-provided function, often called ``options.yaml``, is parsed using the following model:

.. autopydantic_model:: DaVinci.Options
    :inherited-members: BaseModel
    :model-show-field-summary: False

.. autoclass:: GaudiConf.LbExec.options.DataTypeEnum
    :members:
    :undoc-members:

.. autoclass:: GaudiConf.LbExec.options.FileFormats
    :members:
    :undoc-members:

.. autoclass:: GaudiConf.LbExec.options.EventStores
    :members:
    :undoc-members:
Welcome to DaVinci's documentation!
===================================

DaVinci is the LHCb offline analysis application.
It allows users to produce the tuples in which the relevant information
about the reconstructed particles of the decay of interest is stored.
Consider it as your way to access LHCb data!

@@ -26,10 +26,10 @@ and Turbo output.

   :caption: User Guide
   :maxdepth: 3

   tutorials/running
   configuration/davinci_configuration

.. toctree::
   :caption: API Reference
   :maxdepth: 3
...
@@ -3,4 +3,5 @@

# means default value... https://github.com/sphinx-doc/sphinx/pull/8546
sphinx==4.4.0
sphinx_rtd_theme==1.0.0
gitpython
autodoc_pydantic==1.6.2
Running DaVinci
===============

From ``DaVinci/v62r0`` the DaVinci configuration has been modernized and revisited in order to improve
user accessibility and hide all the technicalities the user doesn't need to deal with.
The two major changes with respect to the old configuration are:

* the general structure à la PyConf,
* the requirement to use ``lbexec`` (see the `talk from the 104th LHCb week for details <https://indico.cern.ch/event/1160084/#249-replacing-gaudirunpy-with>`__).

The configuration of your job is now declared in two files:

* a Python function that takes an ``options`` argument and returns the PyConf configuration,
* a YAML file which declares data-specific configuration and which is used to populate the ``options`` object.

DaVinci can then be run using:

.. code-block:: bash

    lb-run DaVinci/vXrY lbexec my_module:my_function options.yaml

replacing ``lb-run DaVinci/vXrY`` with a specific version or, in the case of development builds, with the ``./run`` script.
Minimal example
---------------

Make a file named ``my_module.py`` that contains a function that takes an ``options`` argument and returns the result of ``DaVinci.make_config``:

.. code-block:: python

    from DaVinci import make_config
    from DaVinci.algorithms import add_filter
    from PyConf.Algorithms import PrintDecayTree
    from PyConf.dataflow import force_location


    def print_decay_tree(options):
        turbo_line = "Hlt2BsToJpsiPhi_JPsi2MuMu_PhiToKK_Line"
        input_data = force_location(f"/Event/HLT2/{turbo_line}/Particles")
        user_algorithms = [
            # Only run the downstream algorithm when the line actually fired
            add_filter(options, "HDRFilter_SeeNoEvil", f"HLT_PASS('{turbo_line}Decision')"),
            PrintDecayTree(name="PrintBsToJpsiPhi", Input=input_data),
        ]
        return make_config(options, user_algorithms)

Also make a file named ``options.yaml`` containing:

.. code-block:: yaml

    input_files:
      - root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/hlt2_passthrough_thor_lines.dst
    annsvc_config: root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/hlt2_passthrough_thor_lines.tck.json
    input_type: ROOT
    evt_max: 100
    ntuple_file: davinci_ntuple.root
    enable_unpack: True
    process: Turbo
    print_freq: 1
    data_type: Upgrade
    simulation: true
    conddb_tag: sim-20180530-vc-md100
    dddb_tag: dddb-20180815

This example can then be run using:

.. code-block:: bash

    lb-run DaVinci/vXrY lbexec my_module:print_decay_tree options.yaml
For a more detailed explanation of this job, as well as many more examples, see the `DaVinci tutorials repository <https://gitlab.cern.ch/lhcb/DaVinci/-/tree/master/DaVinciTutorials>`__.
Options YAML
------------
The full schema with which the ``options.yaml`` file is parsed can be found in :class:`~DaVinci.Options`.
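Since that schema is a pydantic model, an options file can also be checked without launching a job; a hypothetical sketch, assuming ``DaVinci.Options`` behaves as a standard pydantic v1 model:

.. code-block:: python

    import yaml
    from DaVinci import Options

    # Parse and validate an options file without running the application.
    with open("options.yaml") as f:
        options = Options.parse_obj(yaml.safe_load(f))
    print(options.evt_max)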