Commit d9f3f916 authored by Alex Pearce

Add tests of HLT1 MDF and DST writing and reading.

Towards #55.
parent 5e3d12fd
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""Read a file created by the `hlt1_write.py` options.
This options file must be run with a command line flag that specifies which
type of file to read, MDF or DST. This can be done like:
gaudirun.py --option 'import sys; sys.argv.append("--dst")' hlt1_read.py
Pass the `--mdf` flag instead to read an MDF file.
"""
import sys
from PyConf.environment import EverythingHandler
from PyConf.Algorithms import FTRawBankDecoder
from RecoConf.hlt1_tracking import require_gec
from Hlt1Conf.lines.track_mva import (one_track_mva_line, two_track_mva_line,
                                      debug_two_track_mva_line)
ftdec_v = 4
env = EverythingHandler(
    threadPoolSize=1, nEventSlots=1, evtMax=1000, debug=True)

with FTRawBankDecoder.bind(DecodingVersion=ftdec_v), \
        require_gec.bind(FTDecodingVersion=ftdec_v):
    builders = {
        'Hlt1TrackMVALine': one_track_mva_line,
        'Hlt1TwoTrackMVALine': two_track_mva_line,
        'Hlt1DebugTwoTrackMVALine': debug_two_track_mva_line,
    }
    for name, builder in builders.items():
        env.registerLine(name, builder())
read_mdf = '--mdf' in sys.argv
read_dst = '--dst' in sys.argv
assert (read_mdf or read_dst) and not (
    read_mdf and read_dst), 'Must specify exactly one of --mdf or --dst'
if read_mdf:
    env.setupInputFromTestFileDB('MiniBrunel_2018_MinBias_FTv4_DIGI',
                                 ['test_hlt1_persistence_mdf_write.mdf'],
                                 'MDF')
elif read_dst:
    env.setupInputFromTestFileDB('MiniBrunel_2018_MinBias_FTv4_DIGI',
                                 ['test_hlt1_persistence_dst_write.dst'],
                                 'ROOT')
env.configure()
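
The `--mdf`/`--dst` handling above is an exactly-one-of check on two boolean flags. As a minimal standalone sketch (independent of gaudirun and PyConf; the helper name `exactly_one` is ours, not part of the options files), the assertion condition is equivalent to an exclusive or:

import itertools

def exactly_one(flag_a, flag_b):
    # Same condition as the assert in the options files above
    return (flag_a or flag_b) and not (flag_a and flag_b)

# Exhaustively check the equivalence with XOR over all flag combinations
for mdf, dst in itertools.product([False, True], repeat=2):
    assert exactly_one(mdf, dst) == (mdf != dst)
    print(mdf, dst, exactly_one(mdf, dst))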
###############################################################################
# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
"""Write an HLT1-filtered file.
This options file must be run with a command line flag that specifies which
type of file to read, MDF or DST. This can be done like:
gaudirun.py --option 'import sys; sys.argv.append("--dst")' hlt1_write.py
Pass the `--mdf` flag instead to write an MDF file.
"""
import sys
from PyConf.environment import EverythingHandler
from PyConf.Algorithms import FTRawBankDecoder
from RecoConf.hlt1_tracking import require_gec
from Hlt1Conf.lines.track_mva import (one_track_mva_line, two_track_mva_line,
                                      debug_two_track_mva_line)
ftdec_v = 4
env = EverythingHandler(
    threadPoolSize=1, nEventSlots=1, evtMax=1000, debug=True)

with FTRawBankDecoder.bind(DecodingVersion=ftdec_v), \
        require_gec.bind(FTDecodingVersion=ftdec_v):
    builders = {
        'Hlt1TrackMVALine': one_track_mva_line,
        'Hlt1TwoTrackMVALine': two_track_mva_line,
        'Hlt1DebugTwoTrackMVALine': debug_two_track_mva_line,
    }
    for name, builder in builders.items():
        env.registerLine(name, builder())
write_mdf = '--mdf' in sys.argv
write_dst = '--dst' in sys.argv
assert (write_mdf or write_dst) and not (
    write_mdf and write_dst), 'Must specify exactly one of --mdf or --dst'
env.setupInputFromTestFileDB('MiniBrunel_2018_MinBias_FTv4_DIGI')
if write_mdf:
    env.setupOutput('test_hlt1_persistence_mdf_write.mdf', 'MDF')
elif write_dst:
    env.setupOutput('test_hlt1_persistence_dst_write.dst', 'ROOT')
env.configure()
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
<!--
(c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration
This software is distributed under the terms of the GNU General Public
Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
In applying this licence, CERN does not waive the privileges and immunities
granted to it by virtue of its status as an Intergovernmental Organization
or submit itself to any jurisdiction.
-->
<!--
Run HLT1 on an HLT1-filtered DST file.
-->
<extension class="GaudiTest.GaudiExeTest" kind="test">
<argument name="prerequisites"><set>
<tuple><text>persistency.dst_write</text><enumeral>PASS</enumeral></tuple>
</set></argument>
<argument name="program"><text>gaudirun.py</text></argument>
<argument name="args"><set>
<text>--option</text><text>import sys; sys.argv.append("--dst")</text>
<text>$HLT1CONFROOT/tests/options/hlt1_read.py</text>
</set></argument>
<argument name="use_temp_dir"><enumeral>true</enumeral></argument>
<argument name="validator"><text>
countErrorLines({"FATAL": 0, "ERROR": 0, "WARNING": 0})
import re
pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')
# Check that:
# 1. We processed the same number of events as filtered by the previous job
# 2. We made the same number of positive decisions (which should be 100% of input events)
nread = nselected = -1
for line in stdout.split('\n'):
    m = re.match(pattern, line)
    if m:
        nread, nselected = map(int, m.groups())
        break
else:
    causes.append('could not parse event statistics from reader stdout')
# We're running the same HLT1 configuration as the one that created the
# filtered data, so all input events should have a positive decision
if nread != nselected:
    causes.append('expected all input events to pass')
nread_writing = nselected_writing = -1
with open('test_hlt1_persistence_dst_write.stdout') as f_ref:
    for line in f_ref.readlines():
        m = re.match(pattern, line)
        if m:
            nread_writing, nselected_writing = map(int, m.groups())
            break
    else:
        causes.append('could not parse event statistics from writer stdout')
if nread != nselected_writing:
    causes.append('did not read the same number of events as written out')
</text></argument>
</extension>
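
All four validators extract the event counts by matching a scheduler summary line of the form `NONLAZY_OR: hlt_decision  #=<processed>  Sum=<selected>`. A minimal sketch of that extraction, reusing the validators' regular expression against a made-up log line (real Gaudi output may differ in spacing and surrounding columns):

import re

# Pattern copied from the QMTest validators
pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')

# Illustrative line only, not taken from a real log
sample = '   NONLAZY_OR: hlt_decision      #=1000     Sum=42'

m = pattern.match(sample)
assert m is not None
nread, nselected = map(int, m.groups())
print(nread, nselected)  # -> 1000 42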
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
<!--
(c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration
This software is distributed under the terms of the GNU General Public
Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
In applying this licence, CERN does not waive the privileges and immunities
granted to it by virtue of its status as an Intergovernmental Organization
or submit itself to any jurisdiction.
-->
<!--
Run HLT1 and save a DST file.
-->
<extension class="GaudiTest.GaudiExeTest" kind="test">
<argument name="program"><text>gaudirun.py</text></argument>
<argument name="args"><set>
<text>--option</text><text>import sys; sys.argv.append("--dst")</text>
<text>$HLT1CONFROOT/tests/options/hlt1_write.py</text>
</set></argument>
<argument name="use_temp_dir"><enumeral>true</enumeral></argument>
<argument name="validator"><text>
# Expect a single WARNING:
# HiveDataBrokerSvc WARNING non-reentrant algorithm: OutputStream
countErrorLines({"FATAL": 0, "ERROR": 0, "WARNING": 1})
import re
pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')
# Check that:
# 1. We read at least two events
# 2. We make a positive decision on at least one event
# 3. We make a negative decision on at least one event
nread = nselected = -1
for line in stdout.split('\n'):
    m = re.match(pattern, line)
    if m:
        nread, nselected = map(int, m.groups())
        break
else:
    causes.append('could not parse event statistics from stdout')
if nread &lt; 2:
    causes.append('expected at least two events to be processed')
if nselected &lt; 1:
    causes.append('expected at least one event to be selected')
if nselected == nread:
    causes.append('expected at least one event to be filtered out')
# Write out the log file so that we can compare the number of
# selected events here with the number of events processed by
# a second HLT1 job that uses the output file as input
with open('test_hlt1_persistence_dst_write.stdout', 'w') as f:
    f.write(stdout)
</text></argument>
</extension>
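
The write test saves its stdout precisely so that the matching read test can check the round trip: the number of events the reader processes should equal the number the writer selected, and since both jobs run the same HLT1 lines, the reader should select all of them again. A sketch of that invariant on illustrative counts (the `decision_counts` helper and the log fragments are ours, not part of the tests):

import re

pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')

def decision_counts(log_text):
    """Return (processed, selected) from the first matching summary line."""
    for line in log_text.split('\n'):
        m = pattern.match(line)
        if m:
            return tuple(map(int, m.groups()))
    raise ValueError('no hlt_decision summary line found')

# Illustrative logs only: the writer saw 1000 events and selected 42;
# the reader then processed exactly those 42 and selected all of them.
writer_log = '   NONLAZY_OR: hlt_decision      #=1000     Sum=42'
reader_log = '   NONLAZY_OR: hlt_decision      #=42     Sum=42'

_, n_written = decision_counts(writer_log)
n_read, n_reselected = decision_counts(reader_log)
assert n_read == n_written
assert n_read == n_reselected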
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
<!--
(c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration
This software is distributed under the terms of the GNU General Public
Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
In applying this licence, CERN does not waive the privileges and immunities
granted to it by virtue of its status as an Intergovernmental Organization
or submit itself to any jurisdiction.
-->
<!--
Run HLT1 on an HLT1-filtered MDF file.
-->
<extension class="GaudiTest.GaudiExeTest" kind="test">
<argument name="prerequisites"><set>
<tuple><text>persistency.mdf_write</text><enumeral>PASS</enumeral></tuple>
</set></argument>
<argument name="program"><text>gaudirun.py</text></argument>
<argument name="args"><set>
<text>--option</text><text>import sys; sys.argv.append("--mdf")</text>
<text>$HLT1CONFROOT/tests/options/hlt1_read.py</text>
</set></argument>
<argument name="use_temp_dir"><enumeral>true</enumeral></argument>
<argument name="validator"><text>
countErrorLines({"FATAL": 0, "ERROR": 0, "WARNING": 0})
import re
pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')
# Check that:
# 1. We processed the same number of events as filtered by the previous job
# 2. We made the same number of positive decisions (which should be 100% of input events)
nread = nselected = -1
for line in stdout.split('\n'):
    m = re.match(pattern, line)
    if m:
        nread, nselected = map(int, m.groups())
        break
else:
    causes.append('could not parse event statistics from reader stdout')
# We're running the same HLT1 configuration as the one that created the
# filtered data, so all input events should have a positive decision
if nread != nselected:
    causes.append('expected all input events to pass')
nread_writing = nselected_writing = -1
with open('test_hlt1_persistence_mdf_write.stdout') as f_ref:
    for line in f_ref.readlines():
        m = re.match(pattern, line)
        if m:
            nread_writing, nselected_writing = map(int, m.groups())
            break
    else:
        causes.append('could not parse event statistics from writer stdout')
if nread != nselected_writing:
    causes.append('did not read the same number of events as written out')
</text></argument>
</extension>
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE extension PUBLIC '-//QM/2.3/Extension//EN' 'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
<!--
(c) Copyright 2000-2018 CERN for the benefit of the LHCb Collaboration
This software is distributed under the terms of the GNU General Public
Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
In applying this licence, CERN does not waive the privileges and immunities
granted to it by virtue of its status as an Intergovernmental Organization
or submit itself to any jurisdiction.
-->
<!--
Run HLT1 and save an MDF file.
-->
<extension class="GaudiTest.GaudiExeTest" kind="test">
<argument name="program"><text>gaudirun.py</text></argument>
<argument name="args"><set>
<text>--option</text><text>import sys; sys.argv.append("--mdf")</text>
<text>$HLT1CONFROOT/tests/options/hlt1_write.py</text>
</set></argument>
<argument name="use_temp_dir"><enumeral>true</enumeral></argument>
<argument name="validator"><text>
# Expect a single WARNING:
# HiveDataBrokerSvc WARNING non-reentrant algorithm: LHCb::MDFWriter/LHCb__MDFWriter
countErrorLines({"FATAL": 0, "ERROR": 0, "WARNING": 1})
import re
pattern = re.compile(r'\s+NONLAZY_OR: hlt_decision\s+#=(\d+)\s+Sum=(\d+)')
# Check that:
# 1. We read at least two events
# 2. We make a positive decision on at least one event
# 3. We make a negative decision on at least one event
nread = nselected = -1
for line in stdout.split('\n'):
    m = re.match(pattern, line)
    if m:
        nread, nselected = map(int, m.groups())
        break
else:
    causes.append('could not parse event statistics from stdout')
if nread &lt; 2:
    causes.append('expected at least two events to be processed')
if nselected &lt; 1:
    causes.append('expected at least one event to be selected')
if nselected == nread:
    causes.append('expected at least one event to be filtered out')
# Write out the log file so that we can compare the number of
# selected events here with the number of events processed by
# a second HLT1 job that uses the output file as input
with open('test_hlt1_persistence_mdf_write.stdout', 'w') as f:
    f.write(stdout)
</text></argument>
</extension>