
Move spruce_all_lines_analytics test to nightlies

Merged Shunan Zhang requested to merge rate-and-size-tests into master
@@ -42,15 +42,17 @@ everything.
import json, re
from Moore import options, run_moore
from PyConf.Algorithms import CombineRawBankViewsToRawEvent
from PyConf.application import all_nodes_and_algs
from RecoConf.global_tools import stateProvider_with_simplified_geom
from RecoConf.reconstruction_objects import reconstruction
import XRootD.client
from Configurables import HltANNSvc, ApplicationMgr
from PyConf.utilities import ConfigurationError
from Moore.streams_hlt2 import DETECTOR_RAW_BANK_TYPES, HLT1_REPORT_RAW_BANK_TYPES, HLT2_REPORT_RAW_BANK_TYPES
import Moore.streams_spruce
import XRootD.client
from Configurables import HltANNSvc
from Hlt2Conf.lines import sprucing_lines as all_lines
from Hlt2Conf.lines.b_to_charmonia import sprucing_lines as b2cc_lines
from Hlt2Conf.lines.b_to_open_charm import sprucing_lines as b2oc_lines
@@ -59,6 +61,8 @@ from Hlt2Conf.lines.semileptonic import sprucing_lines as sl_lines
from Hlt2Conf.lines.rd import sprucing_lines as rd_lines
#from Hlt2Conf.lines.charm import sprucing_lines as charm_lines
from Hlt2Conf.lines.qee import sprucing_lines as qee_lines
import logging
log = logging.getLogger()
########################
WGs = {
@@ -73,9 +77,38 @@ WGs = {
}
## CHANGE wg to the desired working group
wg = 'rd'
wg = 'all'
########################
assert wg in WGs.keys(), 'Working group ("wg") not recognised'
if wg not in WGs.keys():
    raise ConfigurationError(f'Working group ({wg}) not recognised.')
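One reason for replacing the `assert` with an explicit `ConfigurationError`: assertions are compiled away when Python runs with `-O`, so the guard would silently vanish. A standalone sketch of the difference (the names below are stand-ins, not from this file):

```python
# Standalone sketch: under `python -O` the assert below is compiled away and
# never fires, while the explicit raise still guards the configuration.
WGs_demo = {'rd': None, 'all': None}  # hypothetical stand-in for WGs
wg_demo = 'typo'

assert wg_demo in WGs_demo, 'silently skipped under -O'

if wg_demo not in WGs_demo:
    raise ValueError(f'Working group ({wg_demo}) not recognised.')
```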
def get_all_algs_cpp_types(top_node):
    return [alg.type() for alg in all_nodes_and_algs(top_node, True)[1]]


def is_DVCommonBase_alg(alg):
    # A Gaudi::Property registers its owner and appends it to the doc string,
    # e.g. the doc of ModifyLocations in DVCommonBase is:
    # ' if set to false, does not append /Particles to outputParticles location [DVCommonBase<GaudiAlgorithm>] '
    # So, as a proxy for inheritance from DVCommonBase, we check whether this
    # tag can be found in the property's doc string.
    return '[DVCommonBase<' in alg._propertyDocDct.get("ModifyLocations", "")


def is_GaudiHistoAlg(alg):
    return '[GaudiHistos<' in alg._propertyDocDct.get(
        "UseSequencialNumericAutoIDs", "")


def check_for_known_issues(line):
    all_algs = get_all_algs_cpp_types(line.node)
    # Filter out lines which would crash in multi-threaded mode.
    # This check is likely incomplete: what else is not thread safe?
    # For now we just look for anything that inherits from DVCommonBase
    # or GaudiHistos.
    return [
        a for a in all_algs if is_DVCommonBase_alg(a) or is_GaudiHistoAlg(a)
    ]
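To make the doc-string proxy concrete: the toy classes below mimic the `_propertyDocDct` mapping that Gaudi Configurables carry. `FakeDVAlg` and `FakeFunctionalAlg` are purely illustrative and not part of the file:

```python
# Illustrative only: a stand-in carrying the '[Owner<Base>]' tag that
# Gaudi::Property appends to the doc string of its owner.
class FakeDVAlg:
    _propertyDocDct = {
        "ModifyLocations":
        " if set to false, does not append /Particles to outputParticles"
        " location [DVCommonBase<GaudiAlgorithm>] "
    }


class FakeFunctionalAlg:
    _propertyDocDct = {}  # no DVCommonBase/GaudiHistos tag -> passes the check


assert is_DVCommonBase_alg(FakeDVAlg)
assert not is_DVCommonBase_alg(FakeFunctionalAlg)
assert not is_GaudiHistoAlg(FakeDVAlg)
```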
## Return the HltANNSvc when the TCK is stored on EOS
@@ -111,25 +144,10 @@ def tck_from_eos(url):
## Configure `HltANNSvc`
url = 'root://eoslhcb.cern.ch//eos/lhcb/wg/rta/samples/mc/Hlt1Hlt2filtered_MinBias_sprucing/hlt2_2or3bodytopo_realtime_newPacking.tck.json'
url = 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp1/hlt2_persistreco_output/hlt2_persistreco_realtime.tck.json'
tck_from_eos(url)
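The body of `tck_from_eos` is collapsed in this hunk. As a hedged sketch only, such a helper would typically stream the JSON TCK from EOS via the XRootD client and hand the selection-ID tables to `HltANNSvc`; the JSON keys and `HltANNSvc` properties used below are assumptions, not the file's actual schema:

```python
def tck_from_eos_sketch(url):
    # Sketch: read the TCK JSON over XRootD, then configure HltANNSvc with it.
    with XRootD.client.File() as f:
        status, _ = f.open(url)
        if not status.ok:
            raise RuntimeError(f"Could not open {url}: {status.message}")
        status, data = f.read()
        if not status.ok:
            raise RuntimeError(f"Could not read {url}: {status.message}")
    tck = json.loads(data)
    # Assumed schema: top-level tables mapping selection names to integer IDs.
    HltANNSvc(
        Hlt1SelectionID=tck.get("Hlt1SelectionID", {}),
        Hlt2SelectionID=tck.get("Hlt2SelectionID", {}),
        PackedObjectLocations=tck.get("PackedObjectLocations", {}))
```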
## Run over an HLT1-filtered MinBias sample that has been processed by the TOPO{2,3} HLT2 lines.
## To produce this input see `Hlt/Hlt2Conf/options/Sprucing/hlt2_2or3bodytopo_realtime.py`
input_files = [
    'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/rta/samples/mc/Hlt1Hlt2filtered_MinBias_sprucing/hlt2_2or3bodytopo_realtime_newPacking.mdf'
]
options.input_raw_format = 0.3
options.input_files = input_files
options.input_type = 'MDF'
options.output_type = 'ROOT'
options.evt_max = -1
options.simulation = True
options.data_type = 'Upgrade'
options.dddb_tag = 'dddb-20171126'
options.conddb_tag = 'sim-20171127-vc-md100'
options.monitoring_file = "monitoring.json"
# Create a new dictionary based on `sprucing_lines` with "readable" line names:
@@ -149,12 +167,38 @@ for k, v in sprucing_lines.items():
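The loop building `streamdict` is collapsed in this hunk; from the `v[0]()` usage below it must map a readable stream name to a one-element list holding a line builder. A hypothetical reconstruction (the regex is an assumption, not the file's actual naming rule):

```python
# Hypothetical sketch of the collapsed loop: derive a "readable" stream name
# by stripping the Spruce/Line decorations, each mapped to a one-builder list.
streamdict_sketch = {
    re.sub(r'^Spruce|Line$', '', name): [builder]
    for name, builder in sprucing_lines.items()
}
```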
def make_streams():
    "Makes each line its own stream"
    dict = {}
    for k, v in streamdict.items():
        # v can only contain one element
        dict[k] = [v[0]()]
    return dict
    # If we are inside cache generation we don't filter lines out. This builds
    # the cache for all lines and thus is more helpful for other tests like
    # hlt2_all_lines.
    if "THOR_DISABLE_JIT" in ApplicationMgr(
    ).Environment and "THOR_DISABLE_CACHE" in ApplicationMgr().Environment:
        print("HLT2_PP_THOR: Inside CACHE generation, not filtering lines")
        return [builder() for builder in all_lines.values()]
    lines = {}
    filtered = []

    def filtered_add(streamdict, lines, filtered):
        # Several lines still use non-thread-safe combiners or filters,
        # so we do not actually run all lines but reject the problematic ones.
        for k, v in streamdict.items():
            line = v[0]()
            if not (reason := check_for_known_issues(line)):
                lines[k] = [line]
            else:
                filtered.append((line.name, reason))

    filtered_add(streamdict, lines, filtered)
    log.info(f"Running test with {len(lines)} lines")
    log.info(f"The following {len(filtered)} lines were automatically excluded:")
    log.info(
        "Name of line ---- list of algorithms found that are known not to be thread safe"
    )
    for line_name, reason in filtered:
        log.info(f"{line_name} ---- {reason}")
    return lines
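Below this hunk the streams builder is handed to Moore. The usual invocation, implied by the `run_moore`, `reconstruction`, and `stateProvider_with_simplified_geom` imports at the top, looks like the sketch here; whether this file binds exactly these flags is not visible in the diff:

```python
# Sketch of the usual Moore entry point for a sprucing options file.
with reconstruction.bind(from_file=True, spruce=True):
    config = run_moore(
        options,
        make_streams,
        public_tools=[stateProvider_with_simplified_geom()])
```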
## Must declare the "new" streams (one for each line) and the RawBanks you wish to analyse