Commit 34ceef55 authored by Rafal Bielski, committed by Frank Winklmeier

Rewrite test_trig_mc_v1Dev_slice_reproducibility_build

Make the counts comparison based on chainDump outputs instead of
grepping for a specific pattern in the logs. Also make sure the
comparison fails with a clear error message if any input is missing.
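
For illustration, a minimal sketch of the new comparison approach, assuming the chainDump JSON layout implied by compare_counts in the diff below ('HLTChain' and 'HLTDecision' sections, each with a 'counts' map from item name to {'count': N}); the file names follow the 'ChainDump.<name>.json' pattern used by the test, and the two-file setup is an example only:

    import json

    # Load the full-menu reference and one slice result
    with open('ChainDump.FullMenu.json') as f:
        ref = json.load(f)
    with open('ChainDump.Muon.json') as f:
        slc = json.load(f)

    for count_type in ['HLTChain', 'HLTDecision']:
        ref_counts = ref[count_type]['counts']
        for name, entry in slc[count_type]['counts'].items():
            if not name.startswith('HLT_'):
                continue  # skip 'All', streams and groups
            if entry['count'] != ref_counts[name]['count']:
                print('Count difference for', name)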
parent 25cccb54
5 merge requests:
  !58791 DataQualityConfigurations: Modify L1Calo config for web display
  !46784 MuonCondInterface: Enable thread-safety checking
  !46776 Updated LArMonitoring config file for WD to match new files produced using MT
  !45405 updated ART test cron job
  !42417 Draft: DIRE and VINCIA Base Fragments for Pythia 8.3
@@ -13,13 +13,12 @@ import ROOT
 from collections import OrderedDict
 
 total_events_key = 'TotalEventsProcessed'
-json_file_name = 'chainDump.json'
 column_width = 10  # width of the count columns for print out
 name_width = 50  # width of the item name column for print out
 
 
 def get_parser():
-    parser = argparse.ArgumentParser(usage='%(prog)s [options] files',
+    parser = argparse.ArgumentParser(usage='%(prog)s [options]',
                                      description=__doc__)
     parser.add_argument('-f', '--inputFile',
                         metavar='PATH',
@@ -40,9 +39,10 @@ def get_parser():
                         default=False,
                         help='Only store out of tolerance results (does not change JSON)')
     parser.add_argument('--json',
-                        action='store_true',
-                        default=False,
-                        help='Save outputs also to {:s}'.format(json_file_name))
+                        metavar='PATH',
+                        nargs='?',
+                        const='chainDump.json',
+                        help='Save outputs also to a json file with the given name or %(const)s if no name is given')
     parser.add_argument('--fracTolerance',
                         metavar='FRAC',
                         default=0.001,
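
The reworked --json option relies on argparse's optional-argument value handling: with nargs='?' the flag may be given with or without a value, and const supplies the value when the flag appears bare. A standalone illustration (not part of the patch):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--json', metavar='PATH', nargs='?', const='chainDump.json')

    print(parser.parse_args([]).json)                      # None: no JSON output
    print(parser.parse_args(['--json']).json)              # 'chainDump.json' (const)
    print(parser.parse_args(['--json', 'out.json']).json)  # 'out.json'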
@@ -418,8 +418,8 @@ def main():
     write_txt_output(json_dict, args.diffOnly)
 
     if args.json:
-        logging.info('Writing results to %s', json_file_name)
-        with open(json_file_name, 'w') as outfile:
+        logging.info('Writing results to %s', args.json)
+        with open(args.json, 'w') as outfile:
             json.dump(json_dict, outfile)
 
     return retcode
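
With this change the script writes JSON only when --json is passed, to the path stored in args.json. The rewritten test below invokes it once per menu configuration, e.g. (command assembled from cd.args in the diff):

    chainDump.py -f onlinemon_FullMenu.root --json ChainDump.FullMenu.json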
@@ -7,108 +7,143 @@
 # If you create a grid version, check art-output in existing grid tests.
 
-import logging
-from TrigValTools.TrigValSteering import Test, ExecStep, CheckSteps, Common
-import six
-
-Common.trigvalsteering_logging_level = logging.DEBUG
-
-ex = ExecStep.ExecStep('FullMenu')
-ex.type = 'athena'
-ex.job_options = 'TriggerJobOpts/runHLT_standalone.py'
-ex.input = 'ttbar'
-ex.threads = 1
-# LS2_v1 soon to be renamed to Dev_pp_run3_v1
-ex.args = '-c "setMenu=\'LS2_v1\';doWriteBS=False;doWriteRDOTrigger=False;"'
-
-def single_slice( name, args ):
-    slice_ex = ExecStep.ExecStep( name )
-    for prop,val in six.iteritems(ex.__dict__):
-        if prop != 'name':
-            setattr( slice_ex, prop, val )
-    slice_ex.args += " " + args
-    return slice_ex
-
-from TrigValTools.TrigValSteering.Step import Step
-import re
-class CompareSlicesToFullMenuStep( Step ):
-    def __init__( self, name='CompareSlicesToFullMenuStep' ):
+from TrigValTools.TrigValSteering import Test, ExecStep, CheckSteps, Step
+import json
+import os
+
+def hist_rename_pre_exec(file_name):
+    pre_exec = ';'.join([
+        "from GaudiSvc.GaudiSvcConf import THistSvc",
+        "from AthenaCommon.AppMgr import ServiceMgr as svcMgr",
+        "svcMgr += THistSvc()",
+        "svcMgr.THistSvc.Output+=[\\\"EXPERT DATAFILE='{:s}' OPT='RECREATE'\\\"]".format(file_name),
+    ])
+    pre_exec += ';'
+    return pre_exec
+
+def generate_steps(slice_name = None):
+    name = slice_name or 'FullMenu'
+    # athena
+    ex = ExecStep.ExecStep(name)
+    ex.type = 'athena'
+    ex.job_options = 'TriggerJobOpts/runHLT_standalone.py'
+    ex.input = 'ttbar'
+    ex.threads = 1
+    hist_file_name = 'onlinemon_{:s}.root'.format(name)
+    pre_exec = hist_rename_pre_exec(hist_file_name)
+    if slice_name:
+        pre_exec += 'doEmptyMenu=True;do{:s}Slice=True;'.format(slice_name)
+    ex.args = '-c "setMenu=\'LS2_v1\';doWriteBS=False;doWriteRDOTrigger=False;{:s}"'.format(pre_exec)
+    # chainDump
+    cd = ExecStep.ExecStep('ChainDump' + name)
+    cd.type = 'other'
+    cd.executable = 'chainDump.py'
+    cd.input = ''
+    cd.args = '-f {:s} --json ChainDump.{:s}.json'.format(hist_file_name, name)
+    cd.auto_report_result = False
+    return [ex, cd]
+
+class CompareSlicesToFullMenuStep( Step.Step ):
+    def __init__( self, name='CompareSlicesToFullMenu' ):
         super( CompareSlicesToFullMenuStep, self ).__init__( name )
-        self.log_regex = re.compile(r'TrigSignatureMoniMT.*INFO.HLT.*decisions.*')
-        self.full_menu_log = None
-        self.slice_logs = None
+        self.ref_name = 'FullMenu'
+        self.slice_names = None
         self.required = True
         self.auto_report_result = True
 
     def configure( self, test ):
-        self.full_menu_log = test.exec_steps[0].name+'.log'
-        self.slice_logs = [ t.name+'.log' for t in test.exec_steps[1:] ]
+        pass
+    def compare_counts(self, data, ref_key, slice_key, log_file):
+        all_good = True
+        for count_type in ['HLTChain', 'HLTDecision']:
+            counts_ref = data[ref_key][count_type]['counts']
+            counts_slice = data[slice_key][count_type]['counts']
+            for item_name in counts_slice.keys():
+                if not item_name.startswith('HLT_'):
+                    continue  # Skip 'All', streams and groups
+                slice_count = counts_slice[item_name]['count']
+                ref_count = counts_ref[item_name]['count']
+                if slice_count != ref_count:
+                    all_good = False
+                    log_file.write('ERROR {:s} count difference {:s}, {:s}: {:d}, {:s}: {:d}\n'.format(
+                        count_type, item_name, slice_key, slice_count, ref_key, ref_count))
+                else:
+                    log_file.write('INFO {:s} count matches {:s}, {:s}: {:d}, {:s}: {:d}\n'.format(
+                        count_type, item_name, slice_key, slice_count, ref_key, ref_count))
+        return all_good
+
+    def fail_run(self, cmd):
+        cmd += ' -> failed'
+        self.result = 1
+        self.report_result()
+        return self.result, cmd
+
-    def fetch_lines( self, logname ):
-        lines = []
-        self.log.info('Scanning %s', logname )
-        with open( logname, 'r' ) as logfile:
-            for line in logfile:
-                if self.log_regex.match( line ):
-                    lines.append( line.split()[2:-1] ) #drop component INFO and the "final decision" count
-        return lines
     def run( self, dry_run=False ):
-        self.log.info( 'Running %s comparing %s with slice logs %s ',
-                       self.name, self.full_menu_log, str( self.slice_logs ) )
+        self.log.info( 'Running %s comparing %s with slices %s ',
+                       self.name, self.ref_name, str( self.slice_names ) )
+        # Command to report in commands.json
+        cmd = '# (internal) {}'.format(self.name)
         if dry_run:
             self.result = 0
-            return self.result, '# (internal) {} -> skipped'.format(self.name)
-        full_menu_test_lines = self.fetch_lines( self.full_menu_log )
-        full_menu_test_lines.sort()
+            return self.result, cmd+' -> skipped'
         error = False
-        with open('crosscheck.log', 'w') as result_log:
-            for slice_log in self.slice_logs:
-                slice_test_lines = self.fetch_lines( slice_log )
-                slice_test_lines.sort()
-                for result_in_slice in slice_test_lines:
-                    chain_name = result_in_slice[0]
-                    result_in_full_menu = [ l for l in full_menu_test_lines if l[0] == chain_name ] [0] [ :len( result_in_slice ) ] # only first line, and only first few numbers
-                    if result_in_slice != result_in_full_menu:
-                        error = True
-                        result_log.write( 'CompareSlicesToFullMenuStep ERROR Difference found in {} and {}\n'.format( slice_log, self.full_menu_log ) )
-                        result_log.write( 'CompareSlicesToFullMenuStep ERROR Slice {}\n'.format( " ".join( result_in_slice ) ) )
-                        result_log.write( 'CompareSlicesToFullMenuStep ERROR Full Menu {}\n'.format( " ".join( result_in_full_menu ) ) )
+        counts_data = {}
+        with open(self.get_log_file_name(), 'w') as log_file:
+            for key in [self.ref_name] + self.slice_names:
+                file_name = 'ChainDump.'+key+'.json'
+                if not os.path.isfile(file_name):
+                    log_file.write('ERROR the counts file {:s} does not exist\n'.format(file_name))
+                    error = True
+                    if key in self.slice_names:
+                        self.slice_names.remove(key)
+                    continue
+                with open(file_name, 'r') as json_file:
+                    counts_data[key] = json.load(json_file)
+            if self.ref_name not in counts_data.keys():
+                log_file.write('ERROR reference not loaded, cannot compare anything\n')
+                return self.fail_run(cmd)
+            for key in self.slice_names:
+                same = self.compare_counts(counts_data, self.ref_name, key, log_file)
+                if same:
+                    log_file.write('INFO Counts for {:s} are consistent with {:s}\n'.format(key, self.ref_name))
+                else:
+                    log_file.write('ERROR Counts for {:s} differ from {:s}\n'.format(key, self.ref_name))
+                    error = True
-        # Command to report in commands.json
-        cmd = '# (internal) {}'.format(self.name)
         if error:
-            self.result = 1
-            cmd += ' -> failed'
+            return self.fail_run(cmd)
         else:
             self.result = 0
-        self.report_result()
-        return self.result, cmd
+            self.report_result()
+            return self.result, cmd
+# Test configuration
+slice_names = ['Bjet', 'Bphysics', 'Egamma', 'Jet', 'MET', 'Muon', 'Tau']
+
 test = Test.Test()
 test.art_type = 'build'
-test.exec_steps = [ex,
-                   single_slice( 'Egamma', '-c "doEmptyMenu=True;doEgammaSlice=True;"' ),
-                   single_slice( 'Muon', '-c "doEmptyMenu=True;doMuonSlice=True;"' ),
-                   single_slice( 'Tau', '-c "doEmptyMenu=True;doTauSlice=True;"' ),
-                   single_slice( 'Jet', '-c "doEmptyMenu=True;doJetSlice=True;"' ),
-                   single_slice( 'Bjet', '-c "doEmptyMenu=True;doBjetSlice=True;"' ),
-                   single_slice( 'Bphysics', '-c "doEmptyMenu=True;doBphysicsSlice=True;"' ),
-                   single_slice( 'MET', '-c "doEmptyMenu=True;doMETSlice=True;"' )]
+test.exec_steps = generate_steps()  # Full menu
+for name in slice_names:
+    test.exec_steps.extend(generate_steps(name))
+
+cross_check = CompareSlicesToFullMenuStep()
+cross_check.slice_names = slice_names
 
 merge_log = CheckSteps.LogMergeStep()
-merge_log.merged_name = 'athena.all.log'
-merge_log.log_files = [ 'athena.'+x.name+'.log' for x in test.exec_steps ] + [ 'crosscheck.log']
+merge_log.merged_name = 'athena.merged.log'
+merge_log.log_files = [ step.get_log_file_name() for step in test.exec_steps ]
+merge_log.log_files.append(cross_check.get_log_file_name())
 
 check_log = CheckSteps.CheckLogStep('CheckLog')
 check_log.log_file = merge_log.merged_name
 
-cross_check_logs = CompareSlicesToFullMenuStep()
-
-test.check_steps = [ cross_check_logs, merge_log, check_log ]
+test.check_steps = [ cross_check, merge_log, check_log ]
 
 import sys
 sys.exit(test.run())
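
For reference, the exec step sequence built by the configuration above pairs one athena step with one chainDump step per menu configuration; a quick schematic (plain Python; step names taken from generate_steps above):

    slice_names = ['Bjet', 'Bphysics', 'Egamma', 'Jet', 'MET', 'Muon', 'Tau']
    steps = ['FullMenu', 'ChainDumpFullMenu']
    for name in slice_names:
        steps += [name, 'ChainDump' + name]
    print(len(steps))  # 16 steps: 8 athena+chainDump pairs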