Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (5)
......@@ -3,7 +3,7 @@
# Maintainer : Ben Couturier
#============================================================================
package PRConfig
version v1r61
version v1r62
#============================================================================
# Structure, i.e. directories to process.
......
......@@ -4,6 +4,41 @@
! Purpose : App Configuration for performance and regression tests
!-----------------------------------------------------------------------------
========================= PRConfig v1r62 2023-08-24 =========================
! 2023-08-23 - commit 50a59a1
- Merge branch 'rjhunter-extend-BW-testing-options' into 'master'
Remove necessity to use TestFileDB in HLT2 BW tests and add new 2023 HLT2 BW
test
See merge request lhcb-datapkg/PRConfig!316
! 2023-08-21 - commit 37fe0d3
- Merge branch 'new_spruce_test_file' into 'master'
Add new Sprucing BW test files
See merge request lhcb-datapkg/PRConfig!340
! 2023-08-08 - commit 9a16765
- Merge branch 'follow-moore-2352' into 'master'
Remove DiMuonNoIP from list of modules in BW testing options files
See merge request lhcb-datapkg/PRConfig!329
! 2023-08-07 - commit 2e2a595
- Merge branch 'kmattiol_addSMOG2RealDataFile' into 'master'
Add SMOG2 real data file to TestFileDB
See merge request lhcb-datapkg/PRConfig!338
========================= PRConfig v1r61 2023-08-04 =========================
! 2023-08-04 - commit ac62563
......
......@@ -59,6 +59,7 @@ options.input_type = 'MDF'
options.simulation = True
options.dddb_tag = 'dddb-20171010'
options.conddb_tag = 'sim-20171127-vc-md100'
options.persistreco_version = 0.0
configure_input(options)
if options.input_process == 'Spruce': all_lines = sprucing_lines
......
......@@ -66,8 +66,7 @@ REPORT_TEMPLATE = jinja2.Template("""
A line is considered "problematic" if its rate is 0 Hz
or larger than 1 kHz; such lines require some attention. <br>
The rates of all lines are listed in a html page attached below. <br>
The input rate of Hlt2 (Hlt1-filtered) is 1 MHz,
and the input rate of Sprucing (output rate of inclusive Hlt2 lines) is assumed to be 100 kHz.
{{INPUT_RATE_SENTENCE}}
</p>
<object type="image/png" data="hist_dst_size.png"></object>
<p>
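The "problematic" criterion quoted in the template boils down to a simple predicate. A minimal sketch (the function name and exact threshold handling are assumptions, not part of this file):

    def is_problematic(rate_hz: float) -> bool:
        # 0 Hz (dead line) or more than 1 kHz (too hot) both need attention
        return rate_hz == 0.0 or rate_hz > 1000.0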
......@@ -534,6 +533,12 @@ if __name__ == '__main__':
choices=['Hlt2', 'Sprucing'],
required=True,
help='Which stage was the test run on')
parser.add_argument(
'-r',
'--rate',
default=500, # kHz
type=float,
help='Input rate corresponding to the input file in kHz')
args = parser.parse_args()
# Read info of all lines
......@@ -591,6 +596,7 @@ if __name__ == '__main__':
table_5stream_rates = rate_html.read()
hlt2_or_spruce_template = HLT2_REPORT_TEMPLATE.render(
WWW_BASE_URL=WWW_BASE_URL, table_5stream_rates=table_5stream_rates)
input_rate_sentence = f"The input rate to this job was {args.rate} kHz (output rate of Hlt1)."
elif args.process == 'Sprucing':
with open(f"{args.input}/rates-wg-stream-configuration.html",
"r") as rate_html:
......@@ -598,11 +604,13 @@ if __name__ == '__main__':
hlt2_or_spruce_template = SPRUCE_REPORT_TEMPLATE.render(
WWW_BASE_URL=WWW_BASE_URL,
table_wgstream_rates=table_wgstream_rates)
input_rate_sentence = f"The input rate to this job was {args.rate} kHz (output rate of Hlt2)."
with open(f"{args.input}/index.html", "w") as html_file:
html = REPORT_TEMPLATE.render(
WWW_BASE_URL=WWW_BASE_URL,
HLT2_OR_SPRUCE_TEMPLATE=hlt2_or_spruce_template)
HLT2_OR_SPRUCE_TEMPLATE=hlt2_or_spruce_template,
INPUT_RATE_SENTENCE=input_rate_sentence)
html_file.write(html)
with open(f"{args.input}/other_lines.html", "w") as html_file:
......
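As a minimal sketch of the new wiring (simplified template and an assumed rate value; the real templates are the ones defined in this file):

    import jinja2

    # INPUT_RATE_SENTENCE replaces the previously hard-coded 1 MHz / 100 kHz sentence.
    REPORT_TEMPLATE = jinja2.Template("<p>{{INPUT_RATE_SENTENCE}}</p>")
    print(REPORT_TEMPLATE.render(
        INPUT_RATE_SENTENCE="The input rate to this job was 500 kHz (output rate of Hlt1)."))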
......@@ -26,6 +26,7 @@ import socket
import tempfile
import atexit
import shutil
import yaml
# Default cache dir is the current working directory as this is most convenient for the machine
# that the test runs on periodically. It assumes the working directory is not cleaned up often,
......@@ -35,6 +36,9 @@ DEFAULT_CACHE_DIRS = {'default': ['.']}
# prefer XDG_RUNTIME_DIR which should be on tmpfs
FALLBACK_CACHE_DIR = os.getenv('XDG_RUNTIME_DIR', tempfile.gettempdir())
# Limit the size of the output log when there are many input files
MAX_NFILES_TO_PRINT_TO_LOG = 10
def default_cache_dirs():
hostname = socket.getfqdn()
......@@ -46,42 +50,38 @@ def is_remote(url):
return url.startswith('mdf:root:') or url.startswith('root:')
def run_gaudi_job(args, n_events, job_input):
def parse_yaml(path):
with open(os.path.expandvars(path), 'r') as f:
return yaml.safe_load(f)
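# Usage sketch: the path may contain environment variables, which expandvars resolves, e.g.
# config = parse_yaml('$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_input_2023.yaml')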
def run_gaudi_job(args, config, job_input):
# Build command line
# Case 1: No input is given, only test_file_db_key
if not args.input and args.test_file_db_key:
extra_options = f"""
from Moore import options
options.n_threads = {args.threads}
options.n_event_slots = {args.evtSlots}
options.evt_max = {args.events}
options.event_store = 'EvtStoreSvc'
options.set_input_and_conds_from_testfiledb('{args.test_file_db_key}')
options.use_iosvc = True
options.input_files = {job_input}
"""
# Case 2: Input and conditions are specified
# If both input and test_file_db_key are specified, key is ignored
# as checking that input and conditions match is non-trivial
elif all([args.input, args.condDB, args.DDDB, args.form, args.simulation]):
extra_options = f"""
from Moore import options
options.n_threads = {args.threads}
options.n_event_slots = {args.evtSlots}
options.evt_max = {args.events}
options.input_type = '{args.form}'
options.input_files = {args.input.split(',')}
options.use_iosvc = True
options.event_store = 'EvtStoreSvc'
options.conddb_tag = '{args.condDB}'
options.dddb_tag = '{args.DDDB}'
options.simulation = {args.simulation}
"""
extra_options = [
f"n_threads = {args.threads}", f"n_event_slots = {args.evtSlots}",
f"evt_max = {args.events}",
f"input_raw_format = {config['input_raw_format']}",
f"input_files = {job_input}"
]
if "testfiledb_key" in config.keys():
extra_options += [
f"set_conds_from_testfiledb('{config['testfiledb_key']}')",
f"input_type = '{config['input_type']}'"
]
else:
logging.info("Incorrect configuration of Moore")
extra_options += [f"simulation = {config['simulation']}"] + [
f"{opt} = '{config[opt]}'"
for opt in ['input_type', 'data_type', 'conddb_tag', 'dddb_tag']
]
cmd = ['gaudirun.py', '--option', extra_options
if args.download_input_files:
extra_options += ["event_store = 'EvtStoreSvc'", "use_iosvc = True"]
extra_options = [f"options.{opt_str}" for opt_str in extra_options]
extra_options.insert(0, "from Moore import options")
cmd = ['gaudirun.py', '--option', "\n".join(extra_options)
] + [os.path.expandvars(x) for x in args.options]
cmd.insert(1, '-T')
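For illustration, with placeholder values (8 threads, 10 event slots, 1e5 events, the TestFileDB key used elsewhere in this repository, and an assumed input_raw_format), the joined string handed to gaudirun.py --option would look roughly like:

    from Moore import options
    options.n_threads = 8
    options.n_event_slots = 10
    options.evt_max = 100000
    options.input_raw_format = 0.3
    options.input_files = ['mdf:root://eoslhcb.cern.ch//some/file.mdf']
    options.set_conds_from_testfiledb('UpgradeHLT1FilteredWithGEC')
    options.input_type = 'MDF'
    options.event_store = 'EvtStoreSvc'  # only if --download-input-files is given
    options.use_iosvc = True             # only if --download-input-files is given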
......@@ -96,6 +96,12 @@ options.simulation = {args.simulation}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument('options', nargs='+', help='Gaudi options files.')
parser.add_argument(
'-c',
'--config',
type=str,
required=True,
help='Path to yaml config file defining the input.')
parser.add_argument(
'-t',
'--threads',
......@@ -121,25 +127,15 @@ if __name__ == '__main__':
type=int,
help='average event size in input file in kB',
required=True)
parser.add_argument(
'--test-file-db-key',
default='UpgradeHLT1FilteredWithGEC',
help='TestFileDB key defining input files and tags.')
parser.add_argument(
'--debug', action='store_true', help='Debugging output')
parser.add_argument(
'-f',
'--input',
help='Names of input files, multiple names possible (defaults to '
'files from TestFileDB entry if not given)')
parser.add_argument(
'-s',
'--simulation',
#type=str,
default=None)
parser.add_argument('-c', '--condDB', type=str, default=None)
parser.add_argument('-d', '--DDDB', type=str, default=None)
parser.add_argument('-r', '--form', type=str, default=None)
'-d',
'--download-input-files',
action='store_true',
help=
"Download files to local disk before running Moore. Achieves a big speedup (5x) in Moore, but only worth it if the download is fast (probably only true if you're at CERN)."
)
parser.add_argument(
'--cache-dirs',
default=None,
......@@ -153,23 +149,29 @@ if __name__ == '__main__':
level=(logging.DEBUG if args.debug else logging.INFO))
# run at most 1e5 events for each test
n_events = args.events
if n_events == -1 or n_events > 1e5: n_events = 1e5
if args.events == -1 or args.events > 1e5: args.events = 1e5
if args.evtSlots is None:
args.evtSlots = max(int(round(1.2 * args.threads)), 1 + args.threads)
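# e.g. --threads=8 gives args.evtSlots = max(int(round(1.2 * 8)), 1 + 8) = max(10, 9) = 10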
'''
Make sure input files are available locally
(5x speed-up compared to using online)
'''
if args.input:
inputs_fns = args.input
else:
config = parse_yaml(args.config)
if "testfiledb_key" in config.keys():
from PRConfig.TestFileDB import test_file_db
inputs_fns = test_file_db[args.test_file_db_key].filenames
logging.info(inputs_fns)
# Set up local directories where inputs are cached
inputs_fns = test_file_db[config['testfiledb_key']].filenames
elif "input_files" in config.keys():
inputs_fns = config["input_files"]
else:
raise KeyError(
f'{args.config} provides neither "testfiledb_key" nor "input_files".'
)
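# The YAML config therefore takes one of two shapes (hypothetical sketches; the
# keys match the reads in run_gaudi_job, the values are placeholders):
#
#   # shape 1: files and conditions resolved from TestFileDB
#   testfiledb_key: UpgradeHLT1FilteredWithGEC
#   input_type: MDF
#   input_raw_format: 0.3
#
#   # shape 2: explicit files and conditions
#   input_files: ['mdf:root://eoslhcb.cern.ch//some/file.mdf']
#   input_type: MDF
#   input_raw_format: 0.3
#   data_type: Upgrade
#   simulation: true
#   conddb_tag: sim-20171127-vc-md100
#   dddb_tag: dddb-20171010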
job_inputs = [
inputs_fns
] # This is a list to allow for possible NUMA extension: see discussion on !316.
logging.info(inputs_fns[:MAX_NFILES_TO_PRINT_TO_LOG])
# Set up local directories where inputs are cached
if args.download_input_files:
if args.cache_dirs:
args.cache_dirs = args.cache_dirs.split(',')
else:
......@@ -184,9 +186,11 @@ if __name__ == '__main__':
# if we use the fallback directory, clean up after ourselves
atexit.register(shutil.rmtree, fallback_dir)
job_inputs = [inputs_fns]
# Now download files
for i, inputs in enumerate(job_inputs):
logging.info('Downloading input files {}'.format(inputs))
logging.info(
f'Downloading input files {inputs[:MAX_NFILES_TO_PRINT_TO_LOG]}'
)
if all(is_remote(url) for url in inputs):
from Moore.qmtest.context import download_mdf_inputs_locally
# download_mdf_inputs_locally only downloads if files
......@@ -194,11 +198,17 @@ if __name__ == '__main__':
logging.info(
'Downloading inputs for bandwidth job to {}'.format(
args.cache_dirs[i]))
kB_to_B = 1e3  # avg_evt_size is quoted in kB; the factor 1e3 converts it to bytes
job_inputs[i] = download_mdf_inputs_locally(
inputs,
args.cache_dirs[i],
max_size=args.avg_evt_size * 1e3 * args.events)
max_size=args.avg_evt_size * kB_to_B * args.events)
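# e.g. -a=200 (kB) and -n=1e5 events gives a cache budget of 200 * 1e3 * 1e5 = 2e10 bytes (20 GB)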
logging.info(inputs)
elif any(is_remote(url) for url in inputs_fns):
parser.error('inputs must either be all xrootd or all local')
run_gaudi_job(args, n_events, job_inputs[i])
else:
pass # They're all local so don't worry about it...
run_gaudi_job(args, config, job_inputs[i])
else:
run_gaudi_job(args, config, job_inputs[0])
#!/bin/bash
###############################################################################
# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
# Run with Moore/run /path/to/Moore_hlt2_2023_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
# Output (MDFs, html pages, plots of rate/bandwidth etc.) will be written to ./tmp/
export MOORE_THREADS=$(nproc)
export EVTS_STREAMLESS=1e5 # Used for both streamless and 16-stream (per WG) config
export EVTS_PRODUCTION=1e5
test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
config_file='hlt2_bandwidth_input_2023.yaml'
event_size_upper_limit=200
export HLT1_OUTPUT_RATE=1e3 # in kHz
mkdir -p tmp/MDF
mkdir -p tmp/Output
mkdir -p tmp/Output/Inter
# 1. Run options files for baseline models over the 2023 HLT1-filtered min. bias
# -d downloads the input files locally to speed up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
# Options for CALO decoding and old muon geometry are not required.
echo 'Running trigger to obtain MDF files for comparison for 2023 conditions'
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_streamless.py' # No streaming
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_PRODUCTION -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_production_streams.py' # Turbo/Turcal/Full/Monitoring/IFT
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_wg_streams.py' # Streaming per module
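# (flag legend, assumed from the parser above: -c YAML input config, -n number of events, -t threads, -a average event size in kB, -d download inputs first)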
# 2. Compute line descriptives: persist reco, extra output
echo 'Obtaining line descriptives'
time gaudirun.py $PRCONFIGROOT/python/MooreTests/line-descriptives.py
# 3. Compute similarity matrix for all lines based on streamless file
echo 'Obtaining similarity matrix and rates for all lines computed using streamless MDF file'
time python $PRCONFIGROOT/python/MooreTests/line-similarity.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json
# 4. Compute similarity matrix including each production stream
echo 'Obtaining similarity matrix for production stream configuration'
time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-production-stream-config.json -c production
# 5. Compute similarity matrix including each working-group
echo 'Obtaining similarity matrix for WG-stream configuration'
time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c wg
# 6. Computing rates per stream as well as per line (tables split by stream)
echo 'Obtaining rates and bandwidth for 5-stream configuration'
for stream in turbo full turcal; do
echo "Stream name: ${stream}"
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_PRODUCTION -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -c 'production'
done
# 7. Computing rates per WG as well as per line (tables split by WG)
echo 'Obtaining rates and bandwidth for WG-stream configuration'
for module in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
echo "Stream name: ${module}"
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMLESS -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c 'wg'
done
# 8. Combine all output into tables
echo 'Combining all tables from previous two sets of jobs'
time python $PRCONFIGROOT/python/MooreTests/combine-hlt2-rate-output.py
# 9. Check filesize of baseline models in terms of events and bytes
# Can be run on any manifest/MDF by changing the command-line arguments
echo 'Computing size for baseline models'
time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -o tmp/Output/filesize.txt
echo 'Computing filesizes for production stream allocation'
for strm in full turbo turcal; do
time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-production-"${strm}".mdf -t tmp/MDF/baseline-production-streams.tck.json -o tmp/Output/filesize.txt
done
echo 'Computing filesizes for per-WG stream allocation'
for strm in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-wg-"${strm}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -o tmp/Output/filesize.txt
done
# 10. Produce plots and HTML pages
echo 'Making plots and HTML pages'
time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -r $HLT1_OUTPUT_RATE
# force 0 return code so the handler runs even for failed jobs
exit 0
......@@ -11,27 +11,33 @@
# or submit itself to any jurisdiction. #
###############################################################################
# Run with ./Moore_hlt2_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
# Run with Moore/run /path/to/Moore_hlt2_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
# Output (MDFs, html pages, plots of rate/bandwidth etc.) will be written to ./tmp/
export MOORE_THREADS=$(nproc)
export EVTS_STREAMLESS=1e5 # Used for both streamless and 16-stream (per WG) config
export EVTS_PRODUCTION=1e5
test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
config_file='hlt2_bandwidth_input_nominal_with_gec.yaml'
event_size_upper_limit=200
export HLT1_OUTPUT_RATE=500 # in kHz
mkdir -p tmp/MDF
mkdir -p tmp/Output
mkdir -p tmp/Output/Inter
# 1. Run options files for baseline models
# -d downloads the input files locally to speed up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
echo 'Running trigger to obtain MDF files for comparison'
time python -m MooreTests.run_bandwidth_test_jobs -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=100 --test-file-db-key='UpgradeHLT1FilteredWithGEC' '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' '$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_streamless.py' # No streaming
time python -m MooreTests.run_bandwidth_test_jobs -n=$EVTS_PRODUCTION -t=$MOORE_THREADS -a=100 --test-file-db-key='UpgradeHLT1FilteredWithGEC' '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' '$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_production_streams.py' # Turbo/Turcal/Full/Monitoring/IFT
time python -m MooreTests.run_bandwidth_test_jobs -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=100 --test-file-db-key='UpgradeHLT1FilteredWithGEC' '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' '$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_wg_streams.py' # Streaming per module
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_streamless.py' # No streaming
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_PRODUCTION -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_production_streams.py' # Turbo/Turcal/Full/Monitoring/IFT
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_wg_streams.py' # Streaming per module
# 2. Compute line descriptives: persist reco, extra output
echo 'Obtaining line descriptives'
time gaudirun.py $PRCONFIGROOT/python/MooreTests/line-descriptives.py
# 3. Compute similarity matrix for all lines based on streamless file
echo 'Obtaining similarity matrix and rates for all lines computed using streamless MDF file'
time python $PRCONFIGROOT/python/MooreTests/line-similarity.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json
......@@ -47,14 +53,14 @@ time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF
echo 'Obtaining rates and bandwidth for 5-stream configuration'
for stream in turbo full turcal; do
echo "Stream name: ${stream}"
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_PRODUCTION -p Hlt2 -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -c 'production'
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_PRODUCTION -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -c 'production'
done
# 7. Computing rates per WG as well as per line (tables split by WG)
echo 'Obtaining rates and bandwidth for WG-stream configuration'
for module in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
echo "Stream name: ${module}"
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMLESS -p Hlt2 -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c 'wg'
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMLESS -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c 'wg'
done
# 8. Combine all output into tables
......@@ -79,7 +85,7 @@ done
# 10. Produce plots and HTML pages
echo 'Making plots and HTML pages'
time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2
time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -r $HLT1_OUTPUT_RATE
# force 0 return code so the handler runs even for failed jobs
exit 0
\ No newline at end of file
......@@ -11,18 +11,24 @@
# or submit itself to any jurisdiction. #
###############################################################################
# Run with ./Moore_spruce_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
# Run with Moore/run /path/to/Moore_spruce_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
export MOORE_THREADS=$(nproc)
export EVTS_STREAMLESS=1e4 # Used for the streamless config (per-WG streaming uses EVTS_STREAMED below)
export EVTS_STREAMED=1e5
test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
config_file='spruce_bandwidth_input.yaml'
export HLT2_OUTPUT_RATE=124 # in kHz, upgrade_minbias_hlt1_filtered HLT1 output rate (1.65 MHz - see MooreAnalysis#42) * HLT2 retention of file in spruce_bandwidth_input.yaml (~7.5%)
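# i.e. 1.65e3 kHz * 0.075 = 123.75 kHz, rounded to 124 kHz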
mkdir -p tmp/MDF
mkdir -p tmp/Output
mkdir -p tmp/Output/Inter
# 1. Run options files for baseline models
# -d downloads the input files locally to speed up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
echo 'Running trigger to obtain MDF files for comparison'
time python -m MooreTests.run_bandwidth_test_jobs -n=1e4 -t=$MOORE_THREADS -a=300 --test-file-db-key=upgrade-minbias-hlt2-full-output-Aug2023 '$HLT2CONFROOT/tests/options/bandwidth/spruce_bandwidth_streamless.py' # No streaming
time python -m MooreTests.run_bandwidth_test_jobs -n=1e5 -t=$MOORE_THREADS -a=300 --test-file-db-key=upgrade-minbias-hlt2-full-output-Aug2023 '$HLT2CONFROOT/tests/options/bandwidth/spruce_bandwidth_wg_streams.py' # One stream per WG
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -d -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=300 ${test_path_prefix}'spruce_bandwidth_streamless.py' # No streaming
time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -d -n=$EVTS_STREAMED -t=$MOORE_THREADS -a=300 ${test_path_prefix}'spruce_bandwidth_wg_streams.py' # One stream per WG
# 2. Compute line descriptives: persist reco, extra output
echo 'Obtaining line descriptives'
......@@ -42,7 +48,7 @@ time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Spruce -i tmp/M
echo 'Obtaining rates and bandwidth for wg-stream configuration'
for module in b_to_open_charm rd bandq charm qee b_to_charmonia slepton c_to_dimuon bnoc; do
echo "Stream name: ${module}"
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n 1e5 -p Spruce -r 124 -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/wg-stream-config.json -c wg
time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMED -p Spruce -r $HLT2_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/wg-stream-config.json -c wg
done
echo 'Combining all tables from previous two jobs'
......@@ -61,7 +67,7 @@ done
# 6. Produce plots and HTML pages
echo 'Making plots and HTML pages'
time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Sprucing
time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Sprucing -r $HLT2_OUTPUT_RATE
# force 0 return code so the handler runs even for failed jobs
exit 0
\ No newline at end of file