
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (5)
Showing 809 additions and 1237 deletions
......@@ -47,7 +47,7 @@ test-python2.6:
test-python2:
stage: test
script:
- python2 -m compileall -q -x "/python/MooreTests/" .
- python2 -m compileall -q -x "/python/MooreTests/|bandwidth_helpers" .
- xenv -x PRConfig.xenv python2 -c "import PRConfig.TestFileDB"
test-python3:
......
......@@ -3,7 +3,7 @@
# Maintainer : Ben Couturier
#============================================================================
package PRConfig
version v1r62
version v1r63
#============================================================================
# Structure, i.e. directories to process.
......
......@@ -4,6 +4,48 @@
! Purpose : App Configuration for performance and regression tests
!-----------------------------------------------------------------------------
========================= PRConfig v1r63 2023-09-25 =========================
! 2023-09-13 - commit 1fd8aa7
- Merge branch 'msaur_DC_Sim10aU1_Dst2D0pi_LowLumi.fix' into 'master'
Fix to DC_Sim10aU1_Dst2D0pi_LowLumi
See merge request lhcb-datapkg/PRConfig!347
! 2023-09-12 - commit 6f88e4a
- Merge branch 'zcjUpdateFilesFromDirac' into 'master'
Add get_access_url function for data in FilesFromDirac
See merge request lhcb-datapkg/PRConfig!343
! 2023-09-12 - commit 5efd848
- Merge branch 'erodrigu-enhance-sprucing' into 'master'
Enhance and fix Sprucing timing plot
See merge request lhcb-datapkg/PRConfig!345
! 2023-09-11 - commit 070beea
- Merge branch 'rjhunter-bw-test-extra-info' into 'master'
Add more information to BW test pages
See merge request lhcb-datapkg/PRConfig!342
! 2023-08-29 - commit 3801f15
- Merge branch 'rjhunter-fix-2023-bw-test' into 'master'
Small fixes to 2023 BW test
See merge request lhcb-datapkg/PRConfig!344
========================= PRConfig v1r62 2023-08-24 =========================
! 2023-08-23 - commit 50a59a1
......
......@@ -24,7 +24,7 @@ options.dddb_tag = "master"
options.conditions_version = "AlignmentV10_2023_05_09_LHCP" #specific DD4hep tag
#options.conditions_version = "master" #-- general settings for DD4hep builds
options.data_type = "Upgrade"
options.geometry_version = "trunk"
options.geometry_version = "run3/trunk"
options.conditions_version = "master"
#options.output_level = 0
......
......@@ -24,7 +24,7 @@ options.dddb_tag = "master"
options.conditions_version = "AlignmentV10_2023_05_09_LHCP" #specific DD4hep tag
#options.conditions_version = "master" #-- general settings for DD4hep builds
options.data_type = "Upgrade"
options.geometry_version = "trunk"
options.geometry_version = "run3/trunk"
options.conditions_version = "master"
#options.output_level = 0
......
......@@ -24,7 +24,7 @@ options.dddb_tag = "master"
options.conditions_version = "AlignmentV10_2023_05_09_LHCP" #specific DD4hep tag
#options.conditions_version = "master" #-- general settings for DD4hep builds
options.data_type = "Upgrade"
options.geometry_version = "trunk"
options.geometry_version = "run3/trunk"
options.conditions_version = "master"
#options.output_level = 0
......
......@@ -15,6 +15,6 @@ from Moore import options
from DDDB.CheckDD4Hep import UseDD4Hep
if UseDD4Hep:
from Configurables import DDDBConf
DDDBConf().GeometryVersion = 'before-rich1-geom-update-26052022'
DDDBConf().GeometryVersion = 'run3/before-rich1-geom-update-26052022'
options.evt_max = 1000
......@@ -15,6 +15,6 @@ from Moore import options
from DDDB.CheckDD4Hep import UseDD4Hep
if UseDD4Hep:
from Configurables import DDDBConf
DDDBConf().GeometryVersion = 'before-rich1-geom-update-26052022'
DDDBConf().GeometryVersion = 'run3/before-rich1-geom-update-26052022'
options.evt_max = 10000
###############################################################################
# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
''' Compare event numbers extracted from mdf outputs of different streams to
quantify the overlap between the streams by Jaccard similarity index.
Writes similarity matrix to console and out to .html for usage in BW test page.
'''
import argparse
import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances
import json
from PRConfig.bandwidth_helpers import FileNameHelper
def get_all_event_numbers(args):
fname_helper = FileNameHelper(args.process)
ret = {}
for stream in args.streams:
with open(
fname_helper.event_no_fname(args.stream_config, stream),
'r') as f:
ret.update(json.load(f))
return ret
def calculate_similarity_matrix(event_numbers_by_stream):
all_event_numbers = set([
evt_no for evt_no_list in event_numbers_by_stream.values()
for evt_no in evt_no_list
])
print(
f"Found {len(all_event_numbers)} unique event numbers across {len(event_numbers_by_stream.keys())} streams."
)
df = pd.DataFrame(
False,
index=list(all_event_numbers),
columns=event_numbers_by_stream.keys())
for stream, evt_no_list in event_numbers_by_stream.items():
for evt_no in evt_no_list:
df[stream][evt_no] = True
jaccard = 1 - pairwise_distances(
df.T.to_numpy(), metric='jaccard'
) # .T because pairwise_distances expects the items being compared (the streams) to be rows rather than columns
jaccard_sim_matrix_df = pd.DataFrame(
jaccard, columns=df.columns, index=df.columns)
return jaccard_sim_matrix_df
def save(df, htmlpath):
# Generate HTML table for similarity matrix
html = df.to_html(float_format=lambda x: f"{x:.1%}")
with open(htmlpath, 'w') as f:
f.write(html)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-p', '--process', type=str, required=True, choices=['hlt2', 'spruce'])
parser.add_argument(
'--stream-config',
type=str,
required=True,
choices=["wg", "production"])
parser.add_argument('--streams', nargs='+', type=str, required=True)
args = parser.parse_args()
fname_helper = FileNameHelper(args.process)
event_numbers = get_all_event_numbers(args)
for stream in args.streams:
print(
f"Found {len(event_numbers[stream])} events for {stream} stream.")
ofile = fname_helper.jaccard_similarities_path(args.stream_config)
sim_matrix = calculate_similarity_matrix(event_numbers)
print(
f"Calculated similarity matrix. Printing and saving to html at {ofile}."
)
print(sim_matrix)
save(sim_matrix, ofile)
if __name__ == "__main__":
main()
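
For reference, the Jaccard index used above is the size of the intersection of two streams' event-number sets divided by the size of their union. A minimal, self-contained sketch of the same pairwise computation performed by calculate_similarity_matrix (the stream names and event numbers are invented for illustration):

import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances

# Hypothetical event numbers per stream, for illustration only.
event_numbers_by_stream = {"turbo": [1, 2, 3, 4], "full": [3, 4, 5]}
all_events = sorted({e for evts in event_numbers_by_stream.values() for e in evts})
df = pd.DataFrame(False, index=all_events, columns=list(event_numbers_by_stream))
for stream, evts in event_numbers_by_stream.items():
    df.loc[evts, stream] = True

# pairwise_distances returns the Jaccard *distance*; similarity = 1 - distance.
# Transpose so that each stream becomes a row, since rows are what get compared.
jaccard = 1 - pairwise_distances(df.T.to_numpy(), metric="jaccard")
print(pd.DataFrame(jaccard, index=df.columns, columns=df.columns))
# turbo vs full: |{3, 4}| / |{1, 2, 3, 4, 5}| = 0.4
# --- end of illustrative sketch, diff continues below ---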
###############################################################################
# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
'''
Each method works the same: reads in all relevant CSV files
and combines into single dataframe.
It writes out CSV and HTML for each dataframe.
'''
import pandas as pd
def rates_all_lines():
streams = [
'b_to_open_charm', 'rd', 'bandq', 'qee', 'charm', 'b_to_charmonia',
'slepton', 'c_to_dimuon', 'bnoc'
]
frames = []
for stream in streams:
file = f'tmp/Output/Inter/rates-all-lines-wg-stream-{stream}.csv'
try:
df = pd.read_csv(file, header=None) # no lines, empty file
except:
continue
frames.append(df)
df = pd.concat(frames)
df.columns = [
'Line', 'Total Retention (%)', 'Rate (kHz)', 'Exclusive Retention(%)',
'Exclusive Rate (kHz)', 'Avg Total Event Size (kB)',
'Total Bandwidth (GB/s)', 'Avg DstData Size (kB)',
'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
df.to_csv('tmp/Output/rates-for-all-lines.csv')
def highlight_vals(val, threshold, color='red'):
if val > threshold: return 'background-color: {}'.format(color)
else: return ''
styler = df.style.applymap(
highlight_vals, subset=['Rate (kHz)'], threshold=1) # 1 kHz rate limit
styler = styler.applymap(
highlight_vals, subset=['Avg DstData Size (kB)'],
threshold=1e3) # 1 MB evt size limit
styler = styler.applymap(
highlight_vals, subset=['Avg Total Event Size (kB)'],
threshold=1e3) # 1 MB evt size limit
styler = styler.applymap(
highlight_vals, subset=['Total Bandwidth (GB/s)'],
threshold=0.2) # 200 MB/s bandwidth limit
styler = styler.applymap(
highlight_vals, subset=['DstData Bandwidth (GB/s)'],
threshold=0.2) # 200 MB/s bandwidth limit
html = styler.set_table_attributes("border=1").to_html()
with open('tmp/Output/rates-for-all-lines.html', 'w') as f:
f.write(html)
return
def rates_wg_streams():
streams = [
'b_to_open_charm', 'rd', 'bandq', 'qee', 'charm', 'b_to_charmonia',
'slepton', 'c_to_dimuon', 'bnoc'
]
frames = []
for stream in streams:
file = f'tmp/Output/Inter/rates-per-stream-wg-stream-{stream}.csv'
df = pd.read_csv(file, header=None)
frames.append(df)
df = pd.concat(frames)
df.columns = [
'Stream', 'Total Retention (%)', 'Rate (kHz)',
'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
df.to_csv('tmp/Output/rates-wg-stream-configuration.csv')
html = df.to_html()
with open('tmp/Output/rates-wg-stream-configuration.html', 'w') as f:
f.write(html)
return
rates_all_lines()
rates_wg_streams()
......@@ -15,97 +15,91 @@
'''
import pandas as pd
import glob
import re
import argparse
from PRConfig.bandwidth_helpers import FileNameHelper
COLUMNS_PER_STREAM = [
'Stream', 'Total Retention (%)', 'Rate (kHz)', 'Avg Total Event Size (kB)',
'Total Bandwidth (GB/s)', 'Avg DstData Size (kB)',
'DstData Bandwidth (GB/s)'
]
def _columns_per_line():
# An argument could be added here to vary these thresholds between hlt2 and spruce
return {
# col_name, threshold for turning it red to catch the reader's eye
'Line': None,
'Total Retention (%)': None,
'Rate (kHz)': 1,
'Exclusive Retention(%)': None,
'Exclusive Rate (kHz)': None,
'Avg Total Event Size (kB)': 1e3,
'Total Bandwidth (GB/s)': 0.2,
'Avg DstData Size (kB)': 1e3,
'DstData Bandwidth (GB/s)': 0.2
}
def _sorted_df_by_retention(df):
return df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
def rates_all_lines():
def rates_all_lines(stream_config, fname_helper):
"""Make 1 enormous table with rate/bw info per line for all lines in all streams (i.e. n_rows = n_lines).
Saves to .csv and .html.
stream_config is either "production" or "wg"
"""
frames = []
for file in glob.glob(
f'tmp/Output/Inter/rates-all-lines-production-*.csv'):
fname_helper.tmp_rate_table_per_line_path(stream_config, "*")):
df = pd.read_csv(file, header=None)
frames.append(df)
df = pd.concat(frames)
df.columns = [
'Line', 'Total Retention (%)', 'Rate (kHz)', 'Exclusive Retention(%)',
'Exclusive Rate (kHz)', 'Avg Total Event Size (kB)',
'Total Bandwidth (GB/s)', 'Avg DstData Size (kB)',
'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
df.to_csv('tmp/Output/rates-for-all-lines.csv')
df.columns = _columns_per_line().keys()
df = _sorted_df_by_retention(df)
df.to_csv(fname_helper.final_rate_table_all_lines_path("csv"))
def highlight_vals(val, threshold, color='red'):
if val > threshold: return 'background-color: {}'.format(color)
else: return ''
styler = df.style.applymap(
highlight_vals, subset=['Rate (kHz)'], threshold=1) # 1 kHz rate limit
styler = styler.applymap(
highlight_vals, subset=['Avg DstData Size (kB)'],
threshold=1e3) # 1 MB evt size limit
styler = styler.applymap(
highlight_vals, subset=['Avg Total Event Size (kB)'],
threshold=1e3) # 1 MB evt size limit
styler = styler.applymap(
highlight_vals, subset=['Total Bandwidth (GB/s)'],
threshold=0.2) # 200 MB/s bandwidth limit
styler = styler.applymap(
highlight_vals, subset=['DstData Bandwidth (GB/s)'],
threshold=0.2) # 200 MB/s bandwidth limit
return f'background-color: {color}' if val > threshold else ''
styler = None
for column, threshold in _columns_per_line().items():
# Make cell red if column value greater than threshold
if threshold:
if styler:
styler = styler.applymap(
highlight_vals, subset=[column], threshold=threshold)
else:
styler = df.style.applymap(
highlight_vals, subset=[column], threshold=threshold)
html = styler.set_table_attributes("border=1").to_html()
with open('tmp/Output/rates-for-all-lines.html', 'w') as f:
with open(fname_helper.final_rate_table_all_lines_path("html"), 'w') as f:
f.write(html)
return
def rates_all_lines_split_wg():
def make_rate_table_row_per_line(stream_config, fname_helper):
""" Makes (1 table with rate/bw info per line in the streamed mdf) for all <stream_config> streams (i.e. n_tables = n_streams).
Puts them all on 1 html page, adds hyperlinks to jump to the different streams on the page.
Saves to .html page only.
stream_config is either "production" or "wg"
"""
with open('tmp/Output/line-rates-split-wg.html', 'w') as f:
files = glob.glob('tmp/Output/Inter/rates-all-lines-wg-*.csv')
with open(
fname_helper.final_rate_table_all_lines_split_by_stream_path(
stream_config), 'w') as f:
files = glob.glob(
fname_helper.tmp_rate_table_per_line_path(stream_config, "*"))
files_by_stream = {
str(re.search("-(?!.*-)(.*).csv", file).group(1)): file
for file in files
}
f.write('<head></head>\n<p>')
f.write('Jump to:\n<ul>')
for stream in files_by_stream.keys():
f.write(
f'<li><a href="#{stream}_label"> {stream.upper()}</a></li>')
f.write('</ul>\n</p>')
for stream, file in files_by_stream.items():
f.write(f'<head>{stream.upper()}</head>')
f.write(f'<a id="{stream}_label">')
df = pd.read_csv(file, header=None)
df.columns = [
'Line', 'Total Retention (%)', 'Rate (kHz)',
'Exclusive Retention(%)', 'Exclusive Rate (kHz)',
'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'],
ascending=False).reset_index(drop=True)
f.write(df.to_html())
f.write('</a>')
f.write('<br/><br/>')
return
def rates_all_lines_split_stream():
with open('tmp/Output/line-rates-split-production.html', 'w') as f:
files = glob.glob('tmp/Output/Inter/rates-all-lines-production-*.csv')
files_by_stream = {
str(re.search("-(?!.*-)(.*).csv", file).group(1)): file
fname_helper.get_stream_from_bw_path(file): file
for file in files
}
f.write('<head></head>\n<body>\n<p>')
......@@ -119,15 +113,8 @@ def rates_all_lines_split_stream():
f.write(f'<head>{stream.upper()}</head>')
f.write(f'<a id="{stream}_label">')
df = pd.read_csv(file, header=None)
df.columns = [
'Line', 'Total Retention (%)', 'Rate (kHz)',
'Exclusive Retention(%)', 'Exclusive Rate (kHz)',
'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'],
ascending=False).reset_index(drop=True)
df.columns = _columns_per_line().keys()
df = _sorted_df_by_retention(df)
f.write(df.to_html())
f.write('</a>')
f.write('<br/><br/>')
......@@ -135,62 +122,47 @@ def rates_all_lines_split_stream():
return
def rates_production_streams():
def make_rate_table_row_per_stream(stream_config, fname_helper):
""" Makes 1 table with rate/bw info integrated over the whole streamed mdf for all <stream_config> streams (i.e. a table with n_rows = n_streams).
Saves to .html and .csv.
stream_config is either "production" or "wg"
"""
frames = []
for file in glob.glob(
f'tmp/Output/Inter/rates-per-stream-production-*.csv'):
fname_helper.tmp_rate_table_per_stream_path(stream_config, "*")):
df = pd.read_csv(file, header=None)
frames.append(df)
df = pd.concat(frames)
df.columns = [
'Stream', 'Total Retention (%)', 'Rate (kHz)',
'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
]
df.columns = COLUMNS_PER_STREAM
df = df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
df.to_csv('tmp/Output/rates-production-stream-configuration.csv')
df = _sorted_df_by_retention(df)
df.to_csv(
fname_helper.final_rate_table_all_streams_path(
stream_config, ext="csv"))
html = df.to_html()
with open('tmp/Output/rates-production-stream-configuration.html',
'w') as f:
with open(
fname_helper.final_rate_table_all_streams_path(
stream_config, ext="html"), 'w') as f:
f.write(html)
return
def rates_wg_streams():
frames = []
for file in glob.glob(f'tmp/Output/Inter/rates-per-stream-wg-*.csv'):
df = pd.read_csv(file, header=None)
frames.append(df)
df = pd.concat(frames)
df.columns = [
'Stream', 'Total Retention (%)', 'Rate (kHz)',
'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
]
df = df.sort_values(
by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
df.to_csv('tmp/Output/rates-wg-stream-configuration.csv')
html = df.to_html()
with open('tmp/Output/rates-wg-stream-configuration.html', 'w') as f:
f.write(html)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--process', type=str, required=True, choices=["hlt2", "spruce"])
args = parser.parse_args()
fname_helper = FileNameHelper(args.process)
stream_configs = ["production", "wg"] if args.process == "hlt2" else ["wg"]
rates_all_lines()
rates_all_lines_split_wg()
rates_all_lines_split_stream()
rates_production_streams()
rates_wg_streams()
rates_all_lines("production" if args.process == "hlt2" else "wg",
fname_helper)
for stream_config in stream_configs:
make_rate_table_row_per_stream(stream_config, fname_helper)
make_rate_table_row_per_line(stream_config, fname_helper)
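
The red-cell highlighting in rates_all_lines is the standard pandas Styler pattern. A minimal standalone sketch, assuming a pandas version where Styler.applymap and Styler.to_html are available (the column names, values and output path are illustrative):

import pandas as pd

df = pd.DataFrame({"Line": ["Hlt2LineA", "Hlt2LineB"], "Rate (kHz)": [0.4, 1.7]})

def highlight_vals(val, threshold, color="red"):
    # Colour a cell when its value exceeds the threshold.
    return f"background-color: {color}" if val > threshold else ""

# Extra keyword arguments to applymap (here: threshold) are forwarded to the function.
styler = df.style.applymap(highlight_vals, subset=["Rate (kHz)"], threshold=1)
html = styler.set_table_attributes("border=1").to_html()
with open("rate_table_example.html", "w") as f:
    f.write(html)  # cells above 1 kHz are rendered with a red background
# --- end of illustrative sketch, diff continues below ---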
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
import json
from Hlt2Conf.lines import (
b_to_open_charm,
inclusive_detached_dilepton,
rd,
bandq,
topological_b,
pid,
qee,
charm,
b_to_charmonia,
semileptonic,
monitoring,
charmonium_to_dimuon,
bnoc,
trackeff,
ift,
)
def module_names(modules):
names = []
for mod in modules:
try:
lines = mod.all_lines
except AttributeError:
raise AttributeError(
'line module {} does not define mandatory `all_lines`'.format(
mod.__name__))
for name, builder in lines.items():
names.append(name)
return names
def fiveConfig():
config = {
'turbo':
module_names([
b_to_open_charm, rd, bandq, charm, b_to_charmonia, semileptonic,
bnoc, charmonium_to_dimuon, qee
]),
'full':
module_names([topological_b, inclusive_detached_dilepton]),
'turcal':
module_names([pid, trackeff]),
'monitoring':
module_names([monitoring]),
'ift':
module_names([ift])
}
with open('tmp/Output/5-stream-config.json', 'w') as f:
json.dump(config, f)
return
def sixteenConfig():
names = [
'b_to_open_charm', 'rd', 'bandq', 'charm', 'b_to_charmonia', 'slepton',
'bnoc', 'c_to_dimuon', 'qee', 'pid', 'trackeff', 'monitoring',
'topo_b', 'dilepton', 'ift'
]
mods = [
b_to_open_charm, rd, bandq, charm, b_to_charmonia, semileptonic, bnoc,
charmonium_to_dimuon, qee, pid, trackeff, monitoring, topological_b,
inclusive_detached_dilepton, ift
]
config = {}
i = 1
for mod in mods:
config.update({names[i - 1]: module_names([mod])})
i = i + 1
with open('tmp/Output/16-stream-config.json', 'w') as f:
json.dump(config, f)
return
fiveConfig()
sixteenConfig()
......@@ -18,16 +18,13 @@ from PyConf.application import configured_ann_svc
import operator
from collections import Counter
import json
import re
import argparse
import csv
import os
import yaml
from PRConfig.bandwidth_helpers import FileNameHelper
'''
Run snippet with 'python line-rates.py and [1] <MDF file name> [2] <TCK config file name> [3] <JSON file name specifying configuration>'
+ '--c' flag with 'production or 'wg' for production or WG stream configuration
When running production-stream config, returns:
Per line (in form of single HTML table):
......@@ -76,7 +73,7 @@ def rawbank_sizes(rawevent, lst):
return [(name, size(i)) for i, name in lst]
def processing_events_per_line_and_stream(evt_max, lines, process='Hlt2'):
def processing_events_per_line_and_stream(evt_max, lines, process):
'''
Returns, per line:
i) How many events triggered on
......@@ -119,7 +116,7 @@ def processing_events_per_line_and_stream(evt_max, lines, process='Hlt2'):
# Run an event
appMgr.run(1)
report = evt['/Event/{}/DecReports'.format(process)]
report = evt[f'/Event/{process.capitalize()}/DecReports']
rawevent = evt['/Event/DAQ/RawEvent']
evtsize = sum(
......@@ -147,8 +144,8 @@ def processing_events_per_line_and_stream(evt_max, lines, process='Hlt2'):
return events_file, raw_size_all, dst_size_all, event_stats, exclusive, raw, dst
def rates_per_line(event_stats, exclusive, raw, dst, configname, streamname,
input_rate):
def rates_per_line(event_stats, exclusive, raw, dst, input_rate,
output_file_path):
data = []
......@@ -191,9 +188,7 @@ def rates_per_line(event_stats, exclusive, raw, dst, configname, streamname,
data.append(row_values)
with open(
f'tmp/Output/Inter/rates-all-lines-{configname}-{streamname}.csv',
'w') as f:
with open(output_file_path, 'w') as f:
csv_out = csv.writer(f)
for tup in data:
csv_out.writerow(tup)
......@@ -201,8 +196,8 @@ def rates_per_line(event_stats, exclusive, raw, dst, configname, streamname,
return
def rates_per_stream(events, raw_size, dst_size, configname, streamname,
input_rate):
def rates_per_stream(events, raw_size, dst_size, streamname, input_rate,
output_file_path):
data = []
......@@ -223,9 +218,7 @@ def rates_per_stream(events, raw_size, dst_size, configname, streamname,
data.append(row_values)
with open(
f'tmp/Output/Inter/rates-per-stream-{configname}-{streamname}.csv',
'w') as f:
with open(output_file_path, 'w') as f:
csv_out = csv.writer(f)
for tup in data:
csv_out.writerow(tup)
......@@ -236,8 +229,6 @@ def rates_per_stream(events, raw_size, dst_size, configname, streamname,
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inspect Moore output')
parser.add_argument(
'-i', '--input', type=str, help='MDF input file', required=True)
parser.add_argument(
'-n',
'--events',
......@@ -251,44 +242,33 @@ if __name__ == '__main__':
type=str,
required=True,
help='Path to yaml config file defining the input.')
parser.add_argument('-s', '--stream', type=str, required=True)
parser.add_argument(
'-r',
'--rate',
default=500, # kHz
type=float,
help='Input rate of the process in kHz')
parser.add_argument(
'-t',
'--tck',
type=str,
help='Manifest file for chosen MDF',
required=True)
parser.add_argument(
'-j',
'--json',
'-p',
'--process',
type=str,
help='Stream configuration specified as JSON',
help='Compute for Hlt2 or Sprucing lines',
choices=['hlt2', 'spruce'],
required=True)
parser.add_argument(
'-s',
'--stream-config',
type=str,
help='Choose production or per-WG stream configuration',
choices=['production', 'wg'],
required=True)
parser.add_argument(
'-p',
'--process',
type=str,
help='Compute for Hlt2 or Sprucing lines',
choices=['Hlt2', 'Spruce'],
required=True)
args = parser.parse_args()
fname_helper = FileNameHelper(args.process)
n_events = args.events
input_config = parse_yaml(args.config)
if args.process == "spruce" and args.stream_config == "production":
raise RuntimeError(
'"production" stream config not defined for sprucing. Please use "wg".'
)
LHCbApp(
DataType="Upgrade",
Simulation=True,
......@@ -306,65 +286,39 @@ if __name__ == '__main__':
bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
configurables=True)
hlt2 = [hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")]
if args.process == 'Spruce':
if args.process == 'spruce':
spruce = [
hlt_decisions(
source="Spruce", output_loc="/Event/Spruce/DecReports")
]
else:
spruce = []
decoder = decoder(input_process=args.process)
decoder = decoder(input_process=args.process.capitalize())
algs = [unpack] + hlt2 + spruce + [decoder] + [createODIN(ODIN='myODIN')]
appMgr = ApplicationMgr(TopAlg=algs)
appMgr.ExtSvc += [configured_ann_svc(json_file=args.tck)]
file = args.input
# if no line is registered in one module,
# the output file won't be created.
# In this case we just create an empty file.
if not os.path.exists(file): os.system('touch {}'.format(file))
IOHelper("MDF").inputFiles([file])
with open(args.json) as f:
config = json.load(f)
if args.process == 'Hlt2':
# Two conditions for HLT2 run:
# Use production-stream config to compute rate/size/bandwidth per line and stream
# Use wg-stream config to compute rate/size/bandwidth per line and stream
if args.stream_config == 'production': configname = 'production'
elif args.stream_config == 'wg': configname = 'wg'
elif args.process == 'Spruce':
# Three conditions for Spruce run:
# Use wg-stream config to compute rate/size/bandwidth per line
# Use wg-stream config to compute rate/size/bandwidth per stream
if not args.stream_config == 'wg': exit()
configname = 'wg-stream'
stream = str(re.search("-(?!.*-)(.*).mdf", file).group(
1)) # Finds string between last - and .mdf suffix = stream identifier
lines = config[stream]
appMgr.ExtSvc += [
configured_ann_svc(json_file=fname_helper.tck(args.stream_config))
]
IOHelper("MDF").inputFiles(
[fname_helper.mdf_fname_for_reading(args.stream_config, args.stream)])
with open(fname_helper.stream_config_json_path(args.stream_config)) as f:
lines = json.load(f)[args.stream]
appMgr = GP.AppMgr()
evt = appMgr.evtsvc()
# Calculates retention, rate and bandwidth per line and stream (file)
evts_all, rawbanks_all, dst_all, event_stats, exclusive, raw, dst = processing_events_per_line_and_stream(
LHCbApp().EvtMax, lines, process=args.process)
LHCbApp().EvtMax, lines, args.process)
rates_per_line(
event_stats,
exclusive,
raw,
dst,
configname,
stream,
input_rate=input_config['input_rate'])
event_stats, exclusive, raw, dst, input_config['input_rate'],
fname_helper.tmp_rate_table_per_line_path(args.stream_config,
args.stream))
rates_per_stream(
evts_all,
rawbanks_all,
dst_all,
configname,
stream,
input_rate=input_config['input_rate'])
evts_all, rawbanks_all, dst_all, args.stream,
input_config['input_rate'],
fname_helper.tmp_rate_table_per_stream_path(args.stream_config,
args.stream))
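
The collapsed hunks hide the actual rate and bandwidth arithmetic inside rates_per_line and rates_per_stream. As a hedged sketch only (an assumption based on the usual definitions, not the code hidden in those hunks), the quantities in the output tables are typically derived like this:

def rate_and_bandwidth(n_fired, n_processed, input_rate_khz, avg_event_size_kb):
    # Retention: fraction of processed events that the line (or stream) kept.
    retention = n_fired / n_processed
    # Output rate: retention times the input rate of the process.
    rate_khz = retention * input_rate_khz
    # Bandwidth: output rate (events/s) times average event size, converted to GB/s.
    bandwidth_gb_s = rate_khz * 1e3 * avg_event_size_kb * 1e3 / 1e9
    return retention, rate_khz, bandwidth_gb_s

print(rate_and_bandwidth(n_fired=50, n_processed=10000,
                         input_rate_khz=500, avg_event_size_kb=100))
# -> (0.005, 2.5, 0.25), i.e. 0.5% retention, 2.5 kHz, 0.25 GB/s
# --- end of illustrative sketch, diff continues below ---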
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
......@@ -10,9 +10,7 @@
###############################################################################
'''
Simple script that returns if a line has persist reco
and/or extra output flag enabled
Returns information both as CSV as HTML
and/or extra output flag enabled. Saves to .html.
'''
import pandas as pd
......@@ -20,38 +18,23 @@ from Hlt2Conf.lines import all_lines as hlt2_lines
from Hlt2Conf.lines import sprucing_lines
from PyConf.application import configure_input
from Moore import options
from PRConfig.bandwidth_helpers import FileNameHelper
def make_line(all_lines, line):
return [builder() for name, builder in all_lines.items() if name == line]
# Returns binary array describing if each line is persist reco or not
def _descriptives(lines):
data = []
for line in lines:
row = []
obj = make_line(lines, line.removesuffix('Decision'))
if obj:
row.append(line)
row.append(int(obj[0].persistreco))
if len(obj[0].extra_outputs):
row.append(int(1))
else:
row.append(int(0))
data.append(tuple(row))
def _descriptives(lines, process):
lines = [builder() for builder in lines.values()]
descript = pd.DataFrame(
data, columns=['Line', 'PersistReco', 'ExtraOutput'])
index=[l.name for l in lines], columns=['PersistReco', 'ExtraOutput'])
descript.to_csv('tmp/Output/line-descriptives.csv')
for line in lines:
descript['PersistReco'][line.name] = line.persistreco
descript['ExtraOutput'][line.name] = bool(len(line.extra_outputs))
fname_helper = FileNameHelper(process)
html = descript.to_html(justify='left')
with open('tmp/Output/line-descriptives.html', 'w') as f:
with open(fname_helper.line_descr_path(), 'w') as f:
f.write(html)
return
......@@ -62,6 +45,6 @@ options.conddb_tag = 'sim-20171127-vc-md100'
options.persistreco_version = 0.0
configure_input(options)
if options.input_process == 'Spruce': all_lines = sprucing_lines
else: all_lines = hlt2_lines
_descriptives(all_lines)
_descriptives(
sprucing_lines if options.input_process == "Spruce" else hlt2_lines,
options.input_process.lower())
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
''' Compute the overlap between streams.
Run snippet with inputs
-n: number of events to process
-p: Hlt2 or Spruce
-i: path to the output MDF file from previous steps
-t: path to the tck JSON file from previous steps
Returns Jaccard similarity matrix as csv for specified stream config
'''
from itertools import permutations
import pandas as pd
import GaudiPython as GP
from GaudiConf.reading import decoder, unpack_rawevent, hlt_decisions
from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
EventSelector, createODIN)
from Hlt2Conf.lines import all_lines as hlt2_lines
from Hlt2Conf.lines import sprucing_lines
from GaudiConf import IOHelper
from PyConf.application import configured_ann_svc
import argparse
LHCb = GP.gbl.LHCb
def processing_events(evt_max, process='Hlt2'):
'''Returns list of events that were triggered on per line'''
# Stores a list of which events each line fired on
if process == 'Hlt2': all_lines = hlt2_lines
elif process == 'Spruce': all_lines = sprucing_lines
event_stats = {
line: []
for line in [line + 'Decision' for line in list(all_lines.keys())]
}
# Loop over all events
analysed = 0
while analysed < evt_max:
analysed += 1
# Run an event
appMgr.run(1)
report = evt['/Event/{}/DecReports'.format(process)]
# Will quit running if there are no more events in the input file
if report:
for line in event_stats.keys():
if report.decReport(line):
if report.decReport(line).decision() == 1:
event_stats[line].append(
analysed) # If fired: add event to list of events
else:
break
return event_stats
def compute_intersection(fired_lines):
''' Returns similarities for each set of lines based on events'''
# Create dictionary with permuted pairs of each line
similarities = {pair: 0 for pair in list(permutations(fired_lines, 2))}
# Compute Jaccard (similarity) index for each set of keys in pair
for pair in similarities:
a = set(fired_lines[pair[0]])
b = set(fired_lines[pair[1]])
c = a.intersection(b)
if (len(a) + len(b) - len(c)) > 0:
similarities[
pair] = f'{round(float(len(c)) / (len(a) + len(b) - len(c)), 2) * 100:.1f}' + '%'
else:
similarities[pair] = '0.0%'
for s in fired_lines:
similarities.update({(s, s): '100.0%'})
ser = pd.Series(
list(similarities.values()),
index=pd.MultiIndex.from_tuples(similarities.keys()))
df = ser.unstack().fillna(0)
df.to_csv('tmp/Output/similarity-jaccard-all.csv')
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument(
'-n',
'--events',
default=-1,
type=lambda x: int(round(float(x))),
help='nb of events to process')
parser.add_argument(
'-p',
'--process',
type=str,
required=True,
choices=['Hlt2', 'Spruce'],
help='Run for Hlt2 or Sprucing')
parser.add_argument(
'-i',
'--input',
type=str,
default='tmp/MDF/baseline-streamless-all.mdf',
help='Input MDF file to process')
parser.add_argument(
'-t',
'--tck',
type=str,
default='tmp/MDF/baseline-streamless-all.tck.json',
help='Input manifest file for the MDF')
args = parser.parse_args()
n_events = args.events
if n_events == -1 or n_events > 1e5: n_events = 1e5
LHCbApp(
DataType="Upgrade",
Simulation=True,
DDDBtag="dddb-20171126",
CondDBtag="sim-20171127-vc-md100",
EvtMax=n_events)
EventSelector().PrintFreq = 10000
IODataManager(DisablePFNWarning=True)
# we have to configure the algorithms manually instead of `do_unpacking`
# because we need to set `input_process='Hlt2'` in `unpack_rawevent`
# to read MDF output from Sprucing
algs = []
unpack = unpack_rawevent(
bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
configurables=True)
hlt2 = [hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")]
if args.process == 'Spruce':
spruce = [
hlt_decisions(
source="Spruce", output_loc="/Event/Spruce/DecReports")
]
else:
spruce = []
decoder = decoder(input_process=args.process)
algs = [unpack] + hlt2 + spruce + [decoder] + [createODIN(ODIN='myODIN')]
appMgr = ApplicationMgr(TopAlg=algs)
appMgr.ExtSvc += [configured_ann_svc(json_file=args.tck)]
IOHelper("MDF").inputFiles([args.input])
appMgr = GP.AppMgr()
evt = appMgr.evtsvc()
event_stats = processing_events(LHCbApp().EvtMax, process=args.process)
compute_intersection(event_stats)
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
......@@ -8,130 +8,84 @@
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
''' Measure the file size of each stream from their output MDF
Run snippet with inputs
-n: number of events to process
-p: Hlt2 or Spruce
-i: path to the output MDF file from previous steps
-t: path to the tck JSON file from previous steps
-o: path to save the output numbers
''' Extract all event numbers saved to MDF such that similarity between files can
be calculated later. Saves list of event numbers to json file.
'''
import argparse
import GaudiPython as GP
from GaudiConf import IOHelper
from GaudiConf.reading import decoder, unpack_rawevent, hlt_decisions
from GaudiConf.reading import unpack_rawevent
from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
EventSelector, createODIN)
from PyConf.application import configured_ann_svc
import os
import argparse
def read_mdf(file):
return os.stat(file).st_size
def processing_events(appMgr, process='Hlt1'):
'''Returns filesize in terms of events'''
events = 0
appMgr.run(1)
report = evt['/Event/{}/DecReports'.format(process)]
while report:
events += 1
appMgr.run(1)
report = evt['/Event/{}/DecReports'.format(process)]
return events
from GaudiConf import IOHelper
import json
from PRConfig.bandwidth_helpers import FileNameHelper
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-n',
'--events',
default=-1,
type=lambda x: int(round(float(x))),
help='nb of events to process')
parser.add_argument(
'-p',
'--process',
type=str,
required=True,
choices=['Hlt2', 'Spruce'],
help='Run for Hlt2 or Sprucing')
help='Max number of events to process')
parser.add_argument(
'-i',
'--input',
type=str,
default='tmp/MDF/baseline-streamless-all.mdf',
help='Input MDF file to process')
'-p', '--process', type=str, required=True, choices=['hlt2', 'spruce'])
parser.add_argument(
'-t',
'--tck',
'-c',
'--stream-config',
type=str,
default='tmp/MDF/baseline-streamless-all.tck.json',
help='Input manifest file for the MDF')
required=True,
choices=["wg", "production"],
help='Name of the stream config')
parser.add_argument(
'-o',
'--output',
type=str,
default='tmp/Output/filesize.txt',
help='Output file to save the file size')
'-s', '--stream', type=str, required=True, help='Name of the stream')
args = parser.parse_args()
# EvtMax can be set arbitrarily high, will quit if there are no more events
n_events = args.events
if n_events == -1 or n_events > 1e5: n_events = 1e5
fname_helper = FileNameHelper(args.process)
# TODO this is a standard setup for the BW test analysis scripts. Share in a header.
LHCbApp(
DataType="Upgrade",
Simulation=True,
DDDBtag="dddb-20171126",
CondDBtag="sim-20171127-vc-md100",
EvtMax=n_events)
EventSelector().PrintFreq = 10000
EvtMax=args.events)
EventSelector(PrintFreq=10000)
IODataManager(DisablePFNWarning=True)
# we have to configure the algorithms manually instead of `do_unpacking`
# because we need to set `input_process='Hlt2'` in `unpack_rawevent`
# to read MDF output from Sprucing
algs = []
unpack = unpack_rawevent(
bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
configurables=True)
hlt2 = [hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")]
if args.process == 'Spruce':
spruce = [
hlt_decisions(
source="Spruce", output_loc="/Event/Spruce/DecReports")
]
else:
spruce = []
decoder = decoder(input_process=args.process)
algs = [unpack] + hlt2 + spruce + [decoder] + [createODIN(ODIN='myODIN')]
appMgr = ApplicationMgr(TopAlg=algs)
appMgr.ExtSvc += [configured_ann_svc(json_file=args.tck)]
file = args.input
# if no line is registered in one module,
# the output file won't be created.
# In this case we just create an empty file.
if not os.path.exists(file): os.system('touch {}'.format(file))
IOHelper("MDF").inputFiles([file])
appMgr = ApplicationMgr(TopAlg=[
unpack_rawevent(bank_types=['ODIN'], configurables=True),
createODIN(ODIN='myODIN')
])
IOHelper("MDF").inputFiles(
[fname_helper.mdf_fname_for_reading(args.stream_config, args.stream)])
appMgr = GP.AppMgr()
evt = appMgr.evtsvc()
n_evts = processing_events(appMgr, args.process)
file_size = read_mdf(args.input)
event_numbers = []
# Loop over all events
i_evt = 0
while i_evt < args.events:
# Iterate 1 event in file
appMgr.run(1)
header = evt["/Event/myODIN"]
if not header:
break # ran out of events in file
event_numbers.append(header.eventNumber())
i_evt += 1
ofile = fname_helper.event_no_fname(args.stream_config, args.stream)
with open(ofile, 'w') as f:
json.dump({args.stream: event_numbers}, f)
print(
f"Found {len(event_numbers)} event numbers for {args.stream} stream. Saved list to {ofile}."
)
print('Filename', args.input, 'Events', n_evts, 'Bytes', file_size)
with open(args.output, 'a') as of:
of.write(
f'Filename: {args.input}, Events: {n_evts}, Bytes: {file_size}\n')
if __name__ == "__main__":
main()
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
import json
from Hlt2Conf.lines import (
b_to_open_charm,
rd,
bandq,
qee,
charm,
b_to_charmonia,
semileptonic,
charmonium_to_dimuon,
bnoc,
)
import logging
log = logging.getLogger()
def module_names(modules):
names = []
for mod in modules:
try:
lines = mod.sprucing_lines
except AttributeError:
log.info('line module {} does not define `sprucing_lines`'.format(
mod.__name__))
continue
for name, builder in lines.items():
names.append(name)
return names
def wgConfig():
names = [
'b_to_open_charm', 'rd', 'bandq', 'qee', 'charm', 'b_to_charmonia',
'slepton', 'c_to_dimuon', 'bnoc'
]
mods = [
b_to_open_charm, rd, bandq, qee, charm, b_to_charmonia, semileptonic,
charmonium_to_dimuon, bnoc
]
config = {}
i = 1
for mod in mods:
config.update({names[i - 1]: module_names([mod])})
i = i + 1
with open('tmp/Output/wg-stream-config.json', 'w') as f:
json.dump(config, f)
return
wgConfig()
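
The JSON written by wgConfig is what the rate scripts later read back with json.load(f)[args.stream]. A small sketch of its shape and of that round trip, with real stream names but hypothetical line names:

import json

# Stream names follow the wgConfig() list above; the line names are placeholders.
config = {
    "rd": ["SpruceRD_ExampleLineA", "SpruceRD_ExampleLineB"],
    "charm": ["SpruceCharm_ExampleLine"],
}
with open("wg-stream-config-example.json", "w") as f:
    json.dump(config, f)

# A downstream rate script reads the line list for one stream back like this:
with open("wg-stream-config-example.json") as f:
    lines_for_stream = json.load(f)["rd"]
print(lines_for_stream)
# --- end of illustrative sketch, diff continues below ---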
###############################################################################
# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration #
# #
# This software is distributed under the terms of the GNU General Public #
# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". #
# #
# In applying this licence, CERN does not waive the privileges and immunities #
# granted to it by virtue of its status as an Intergovernmental Organization #
# or submit itself to any jurisdiction. #
###############################################################################
''' Compute the overlap between streams.
Run snippet with inputs
-n: number of events to process
-p: Hlt2 or Spruce
-i: path to the output MDF file from previous steps
-t: path to the tck JSON file from previous steps
-j: path to the JSON file specifying stream configuration
Returns Jaccard similarity matrix as csv for specified stream config
'''
import GaudiPython as GP
from GaudiConf.reading import decoder, unpack_rawevent, hlt_decisions
from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
EventSelector, createODIN)
from Hlt2Conf.lines import all_lines as hlt2_lines
from Hlt2Conf.lines import sprucing_lines
from GaudiConf import IOHelper
from PyConf.application import configured_ann_svc
import numpy as np
from itertools import permutations
import pandas as pd
import json
import argparse
LHCb = GP.gbl.LHCb
def line_names(lines):
return [name + 'Decision' for name, builder in lines.items()]
def processing_events(evt_max, config, lines, process='Hlt2'):
'''Return which (and number) of events that were triggered on'''
names = list(config.keys())
# List of events written to each stream
events = {n: [] for n in config.keys()}
# Loop over all events
analysed = 0
while analysed < evt_max:
analysed += 1
# Run an event
appMgr.run(1)
report = evt['/Event/{}/DecReports'.format(process)]
# Will quit running if there are no more events in the input file
if report:
conf = np.zeros([len(config.items()), 1])
for line in lines:
if report.decReport(line):
if report.decReport(line).decision() == 1:
for lst in config.values():
if line.removesuffix('Decision') in lst:
conf[list(config.values()).index(lst)] = 1
for f in range(0, len(config.items())):
if conf[f] == 1:
events[names[f]].append(analysed)
else:
break
return events
def compute_intersection(streams, htmlpath):
# Create dictionary with permuted pairs of each line
similarities = {pair: 0 for pair in list(permutations(streams, 2))}
# Compute Jaccard (similarity) index for each set of keys in pair
for pair in similarities:
a = set(streams[pair[0]])
b = set(streams[pair[1]])
c = a.intersection(b)
if (len(a) + len(b) - len(c)) > 0:
similarities[
pair] = f'{round(float(len(c)) / (len(a) + len(b) - len(c)), 2) * 100:.1f}' + '%'
else:
similarities[pair] = '0.0%'
for s in streams:
similarities.update({(s, s): '100.0%'})
# Similarities table as a pandas dataframe
ser = pd.Series(
list(similarities.values()),
index=pd.MultiIndex.from_tuples(similarities.keys()))
df = ser.unstack().fillna(0)
# Generate HTML table for similarity matrix
html = df.to_html()
with open(htmlpath, 'w') as f:
f.write(html)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument(
'-n',
'--events',
default=-1,
type=lambda x: int(round(float(x))),
help='nb of events to process')
parser.add_argument(
'-p',
'--process',
type=str,
required=True,
choices=['Hlt2', 'Spruce'],
help='Run for Hlt2 or Sprucing')
parser.add_argument(
'-i',
'--input',
type=str,
default='tmp/MDF/baseline-streamless-all.mdf',
help='Input MDF file to process')
parser.add_argument(
'-t',
'--tck',
type=str,
default='tmp/MDF/baseline-streamless-all.tck.json',
help='Input manifest file for the MDF')
parser.add_argument(
'-j',
'--json',
type=str,
required=True,
help='stream configuration JSON file.')
parser.add_argument(
'-c',
'--config',
type=str,
required=True,
help='Name of the configuration (5-stream/wg-stream).')
args = parser.parse_args()
n_events = args.events
if n_events == -1 or n_events > 1e5: n_events = 1e5
LHCbApp(
DataType="Upgrade",
Simulation=True,
DDDBtag="dddb-20171126",
CondDBtag="sim-20171127-vc-md100",
EvtMax=n_events)
EventSelector().PrintFreq = 10000
IODataManager(DisablePFNWarning=True)
# we have to configure the algorithms manually instead of `do_unpacking`
# because we need to set `input_process='Hlt2'` in `unpack_rawevent`
# to read MDF output from Sprucing
algs = []
unpack = unpack_rawevent(
bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
configurables=True)
hlt2 = [hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")]
if args.process == 'Spruce':
spruce = [
hlt_decisions(
source="Spruce", output_loc="/Event/Spruce/DecReports")
]
else:
spruce = []
decoder = decoder(input_process=args.process)
algs = [unpack] + hlt2 + spruce + [decoder] + [createODIN(ODIN='myODIN')]
appMgr = ApplicationMgr(TopAlg=algs)
appMgr.ExtSvc += [configured_ann_svc(json_file=args.tck)]
IOHelper("MDF").inputFiles([args.input])
appMgr = GP.AppMgr()
evt = appMgr.evtsvc()
if args.process == 'Hlt2': all_lines = hlt2_lines
elif args.process == 'Spruce': all_lines = sprucing_lines
with open(args.json) as f:
config = json.load(f)
events = processing_events(
LHCbApp().EvtMax, config, line_names(all_lines), process=args.process)
compute_intersection(
events, f'tmp/Output/{args.config}-similarities-jaccard.html')