diff --git a/Control/PerformanceMonitoring/PerfMonAna/CMakeLists.txt b/Control/PerformanceMonitoring/PerfMonAna/CMakeLists.txt
index cd6c384f07969c5793da4d1b121d6b5b597a7197..034fdd28c6ac46df39f3172c301286c8e2216647 100644
--- a/Control/PerformanceMonitoring/PerfMonAna/CMakeLists.txt
+++ b/Control/PerformanceMonitoring/PerfMonAna/CMakeLists.txt
@@ -6,11 +6,16 @@ atlas_subdir( PerfMonAna )
 # External dependencies:
 find_package( pandas )
 find_package( sqlalchemy )
+# These need to be added to Externals first, then enabled.
+# However, the fact that they're missing is not an immediate
+# showstopper...
+#find_package( matplotlib )
+#find_package( numpy )
 find_package( ROOT COMPONENTS Core PyROOT Tree MathCore Hist RIO pthread )

 # Install files from the package:
-atlas_install_python_modules( python/*.py )
-atlas_install_scripts( bin/*.py )
+atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
+atlas_install_scripts( bin/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )

 # Aliases:
 atlas_add_alias( perfgrind "perfgrind.py" )
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perf-dpmon.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perf-dpmon.py
index 112d40a56886d4e8787a8e794ee23ebb7a9ff1c3..6c3685fe1f96c98e6d39ae35ea410ca6086024bf 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perf-dpmon.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/perf-dpmon.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # @file: perf-dpmon.py
 # @purpose: analyze the output of PerfMon::StorePayloadMon to display the
 #           DataProxies' payload sizes
@@ -13,38 +13,31 @@ __doc__ = "analyze the output of PerfMon::StorePayloadMon to display the Dat

 import os, glob
 import sys
-import traceback

 def ana(fname, n_consumers):
-    import numpy as np
-    import matplotlib.pyplot as plt
-    DpLoad_dtype = np.dtype([('b0','int32'), ('b1','int32'), ('delta','int32'),
-                             ('clid','|S40'),('sg','|S40')])
-
     import imp
     mod_name = 'perf_dpmon_data_%s' % (
         os.path.splitext(os.path.basename(fname))[0],
         )
     mod_file = open(fname)
     mod = imp.load_module(mod_name, mod_file, fname, ('', '', imp.PY_SOURCE))
-    #execfile(fname)
     dp_mon_data = getattr(mod, 'data')
     nevts = len(dp_mon_data)
-    for ievt in xrange(nevts):
+    for ievt in range(nevts):
         data = dp_mon_data[ievt]
         store= data[-1]
-        print "::: evt=%4i: %10d -> %10d -- delta= %10d (= %10.3f kb)" % (
-            ievt, store[0], store[1], store[2], store[2]/1024.)
+        print( "::: evt=%4i: %10d -> %10d -- delta= %10d (= %10.3f kb)" % (
+            ievt, store[0], store[1], store[2], store[2]/1024.)
+        )
         top_consumers = [ d for d in data[:-1] ]
-        top_consumers.sort(cmp=lambda x,y: cmp(x[2], y[2]))
-        print "::: top-consumers: (%s/%s)" % (n_consumers,len(top_consumers))
+        top_consumers = sorted(top_consumers, key=lambda x: x[2])
+        print( "::: top-consumers: (%s/%s)" % (n_consumers,len(top_consumers)) )
         for c in top_consumers[:n_consumers]:
-            print "%4s %10d -> %10d -- delta= %10d (= %10.3f kb) [%s#%s]" % (
+            print( "%4s %10d -> %10d -- delta= %10d (= %10.3f kb) [%s#%s]" % (
                 '', c[0], c[1], c[2], c[2]/1024., c[3], c[4],
-                )
+                ) )
     del dp_mon_data
     del mod
@@ -71,7 +64,7 @@ def main():

     (options, args) = parser.parse_args()

-    if isinstance(options.input_files, basestring):
+    if isinstance(options.input_files, str):
         options.input_files = [ options.input_files ]

     for arg in args:
@@ -85,23 +78,23 @@ def main():
         input_files += f

     if len(input_files) == 0:
-        print "ERROR: invalid input files (do they exist ?)"
-        print "ERROR: got: %r" % options.input_files
+        print( "ERROR: invalid input files (do they exist ?)" )
+        print( "ERROR: got: %r" % options.input_files )
         return 1

     all_good = True
     for fname in input_files:
         try:
-            print ":"*80
-            print "::: analyzing: [%s]..." % (fname,)
+            print( ":"*80 )
+            print( "::: analyzing: [%s]..." % (fname,) )
             ana(fname, options.n_consumers)
-            print "::: analyzing: [%s]... [done]" % (fname,)
-            print ""
-        except Exception, err:
-            print "ERROR: caught:\n%s" % (err,)
+            print( "::: analyzing: [%s]... [done]" % (fname,) )
+            print( "" )
+        except Exception as err:
+            print( "ERROR: caught:\n%s" % (err,) )
             all_good = False
-    print "::: bye."
+    print( "::: bye." )
     if all_good:
         return 0
     return 1
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perfgrind.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perfgrind.py
index 5bd35e4950f5d76bb946b742a4dab5c2c70a7df0..e1a19d8a54ebc6654d52b9ed00a272826e169976 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perfgrind.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/perfgrind.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # @file PerfMonAna/bin/perfgrind.py
 # @purpose a little script to convert a pmon.gz file into a kcachegrind one
 # @date December 2009
@@ -10,7 +10,6 @@ __doc__ = "a little script to convert a pmon.gz file into a kcachegrind one"
 __author__ = "Sebastien Binet"

 ### imports -------------------------------------------------------------------
-import os
 import sys
 import argparse
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmon.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmon.py
index 05ad464624a61feb4ffa707fec073c3aadd38aae..63f9305f6b9721b313d0ead7c9f9bfdf6dd237d1 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmon.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmon.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 # @file:    perfmon.py
 # @purpose: launch the performance monitoring analysis script
 # @author:  Sebastien Binet <binet@.cern.ch>
@@ -42,7 +42,7 @@ def main():
        help = "comma-separated list of analyzers to be run during perfmon "\
               "processing (eg: cpu,mem,io)"
        )
-    
+
     p( "-l",
        "--labels",
        dest = "dsLabels",
@@ -97,8 +97,8 @@ def main():
         print ("ERROR: you need to give an output file name !")
         parser.print_help()
         return ExitCodes.ERROR
-    
+
-    if type(options.inputFiles) == type(""):
+    if isinstance(options.inputFiles, str):
         options.inputFiles = [ options.inputFiles ]

     for arg in args:
@@ -106,14 +106,14 @@ def main():
         options.inputFiles += [ arg ]

     from PerfMonAna.PyRootLib import importRoot
-    ROOT = importRoot( batch = options.rootBatch )
+    ROOT = importRoot( batch = options.rootBatch )  # noqa: F841

     inputFiles = []
     for f in options.inputFiles:
         f = glob.glob(os.path.expandvars(os.path.expanduser(f)))
         f.sort()
         inputFiles += f
-    
+
     ## we want to preserve the potential order of files
     ## => don't use a set
     inputFileNames = []
@@ -126,17 +126,17 @@ def main():
         print ("ERROR: got: %r" % options.inputFiles)
         #parser.print_help()
         return ExitCodes.ERROR
-    
+
     if options.outFileName is None:
         outFileName = os.path.basename(inputFileNames[0])
         options.outFileName = outFileName.replace(".pmon.gz", ".perfmon.root")
-    
+
     outFileName = os.path.expandvars(os.path.expanduser(options.outFileName))

     ## massage the supposedly comma-separated list of dataset labels
     dsLabels = None
-    if type(options.dsLabels) == type(""):
+    if isinstance(options.dsLabels, str):
         options.dsLabels = options.dsLabels.strip()
         if options.dsLabels.count(",") > 0:
             dsLabels = options.dsLabels.split(",")
@@ -149,7 +149,7 @@ def main():
         analyzers = options.analyzers.split(",")
     else:
         analyzers = ( options.analyzers, )
-    
+
     ## loads and install the user filtering function
     from PerfMonAna.UserFct import loadFilterFct
     loadFilterFct(options.selectionUri)
@@ -167,7 +167,7 @@ def main():
         traceback.print_exc( file = sys.stdout )
         sc = ExitCodes.ERROR
         pass
-    
+
     return sc


@@ -176,11 +176,11 @@ if __name__ == "__main__":
     print (":"*80)
     print ("::: perfmon analysis script :::")
     print ("")
-    
+
     sc = main()
-    
+
     print ("")
     print ("::: bye")
     print (":"*80)

     sys.exit( sc )
-    
+
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-plotter.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-plotter.py
old mode 100644
new mode 100755
index 6585790a5fcf7f9880e1bbbb5c868e69bd32528f..f1df6823bb3708a28172bb0caec02436c5a0ea8c
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-plotter.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-plotter.py
@@ -1,13 +1,12 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+
 # Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 # @author: Hasan Ozturk <haozturk@cern.ch>
-
 __author__  = "Hasan Ozturk <haozturk@cern.ch>"

 __doc__ = "A python module which parses the PerfMonMTSvc results and makes plots"
-
 import json

 import matplotlib
@@ -96,7 +95,7 @@ def plotSnapshotLevel(snapshotData, plotname):
   stepNames, dCPUVals, dWallVals, dVmemVals, dRssVals, dPssVals, dSwapVals = [],[],[],[],[],[],[]
   for step in ['Finalize', 'Execute', 'Initialize', 'Configure']:
     meas = snapshotData[step]
-    
+
     # Show in seconds
     dCPU = meas["dCPU"] * 0.001
     dWall = meas["dWall"] * 0.001
@@ -106,7 +105,7 @@ def plotSnapshotLevel(snapshotData, plotname):
     dRss = meas["dRss"] * 0.001
     dPss = meas["dPss"] * 0.001
     dSwap = meas["dSwap"] * 0.001
-    
+
     stepNames.append(step)
     dCPUVals.append(dCPU)
     dWallVals.append(dWall)
@@ -162,7 +161,7 @@ def plotSnapshotLevel(snapshotData, plotname):
     "ylabelFontSize": 40,
     "legendFontSize": 30
   }
-  
+
   plotBarChart(timeMonParams)
   plotBarChart(memMonParams)

@@ -176,7 +175,7 @@

 def plotComponentLevel(componentLevelData, compCountPerPlot):
-  
+
   timeMonFig = plt.figure(figsize=(35,105))
   memMonFig = plt.figure(figsize=(35,105))

@@ -292,7 +291,7 @@ def plotEventLevel(eventLevelData):

   timeMonParams = {
     "ax": timeMonAx,
-    "yVals": timeMonVals, 
+    "yVals": timeMonVals,
     "xVals": eventVals, # Maybe x ticks?
     "xlabel": "Events",
     "ylabel": "Time [sec]",
@@ -317,7 +316,7 @@ def plotEventLevel(eventLevelData):

   memMonFig.set_tight_layout(True)
   memMonFig.savefig("Event_Level_Memory")
-  
+

 def main():
   ''' Main function for producing plots from PerfMonMT JSON file.'''
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-printer.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-printer.py
index 507fbaf6c16ae399520209a58d10ca9a2ab82f4a..b9e645a363cd68ee6cb4a5483cd61b20b5907ba1 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-printer.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/perfmonmt-printer.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python3

 # Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/perfrtt.py b/Control/PerformanceMonitoring/PerfMonAna/bin/perfrtt.py
deleted file mode 100755
index a0b5973bd3bb089c5f9a1def0c82ac8b3840ae8a..0000000000000000000000000000000000000000
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/perfrtt.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-# @file: perfrtt.py
-# @purpose: thin layer on top of @c perfmon.py for RTT harvesting
-# @author: Sebastien Binet <binet@.cern.ch>
-# $Id: perfrtt.py,v 1.1 2007-07-18 23:02:25 binet Exp $
-
-__author__  = "Sebastien Binet"
-__version__ = "$Revision: 1.1 $"
-__doc__     = "thin layer on top of @c perfmon.py for RTT harvesting"
-
-import os
-import sys
-import traceback
-import glob
-
-def importRoot( batch = True ):
-    import sys
-    _oldArgs = sys.argv
-    if batch: sys.argv = sys.argv[:1] + ['-b'] + sys.argv[1:]
-    import ROOT
-    sys.argv = _oldArgs
-    del _oldArgs
-    return ROOT
-
-class Rtt:
-    Home = "/afs/cern.ch/atlas/project/RTT"
-    Nightlies = [ 'bugfix', 'val', 'mig0', 'dev' ]
-    Builds    = [ '10', '11', '12', '13' ]
-    Platforms = [ 'slc3', 'slc4' ]
-
-    class Release( object ):
-        """A simple modeling of an Athena release (nightly,release,platform,...)
- """ - def __init__(self, id): - object.__init__(self) - - self.name = None - self.platform = None - self._parseId(id) - return - - def _parseId(self, id): - id = [ i.lower() for i in id.split(',') ] - print id - name = None - relName = [] - archName = [] - isStable = False - for i in id: - if 'rel' in i: - relName.append(i) - elif i in Rtt.Nightlies: - relName.append(i) - elif i.replace('.','').isdigit(): # 13.0.20 -> 13020' - isStable = True - name = i - else: - archName.append(i) - - if len(relName) == 0: - assert(isStable) - self.name = name - else: - self.name = [ i for i in relName if i not in Rtt.Nightlies ]+\ - [ i for i in relName if i in Rtt.Nightlies ] - - - if len(archName) == 0: - archName.append('opt') - self.platform = '-'.join(archName) - - print "name:",self.name - print "arch:",self.platform - return - - def isNightly(self): - return True - - def isStable(self): - return not self.isNightly() - - def _id(self): - if self.isNightly(): return self.name - else: return [self.name] - - def root(self): - return [ Rtt.Home, 'Results' ] + self._id() - - def cmtconfig(self): - return "i686-slc4-gcc34-"+self.platform - - pass # class RttRelease - - pass # class Rtt - -def rttPath( rel, pkgName = 'RecExRecoTest' ): - root = os.sep.join( - rel.root() + ['build', rel.cmtconfig(), 'offline', pkgName,'*'] - ) - rtt = [f for f in glob.glob(root) \ - if os.path.isdir(f) and os.path.basename(f) != "AthenaATN" ] - assert(len(rtt)==1) - return rtt[0] - -def main(): - """main entry point""" - sc = 0 - from optparse import OptionParser - parser = OptionParser( usage = "usage: %prog [options]" ) -## parser.add_option( -## "-f", -## "--file", -## dest = "chkFileName", -## help = "path to the performance monitoring file to analyze" -## ) -## parser.add_option( -## "-r", -## "--ref", -## dest = "refFileName", -## default = "", -## help = "path to the (reference) performance monitoring file (if any)" -## ) - -## parser.add_option( -## "-o", -## "--out", -## dest = "outFileName", -## default = "", -## help = "path to the output file which will contain analyzed performance monitoring data/infos" -## ) - - parser.add_option( - "--no-batch", - action = "store_false", - dest = "rootBatch", - default = False, - help = "Switch to tell ROOT to load graphics libraries" - ) - - parser.add_option( - "-b", - "--batch", - action = "store_true", - dest = "rootBatch", - default = True, - help = "Switch to tell ROOT _NOT_ to load graphics libraries" - ) - - (options, args) = parser.parse_args() - -## if len(args) > 0 and args[0][0] != "-": -## options.chkFileName = args[0] -## pass - -## if len(args) > 1 and args[1][0] != "-": -## options.refFileName = args[1] -## pass - -## if len(args) > 2 and args[2][0] != "-": -## options.outFileName = args[2] -## pass - -## ROOT = importRoot( batch = options.rootBatch ) - -## from PerfMonAna.PerfMonProcessing import ExitCodes -## if options.chkFileName == None: -## str(parser.print_help() or "ERROR: no help to print !!") -## return ExitCodes.ERROR - - rel = Rtt.Release( "rel_2,opt,val" ) - rtt= rttPath(rel) - print rtt - print os.listdir(rtt) -## chkFileName = os.path.expandvars(os.path.expanduser(options.chkFileName)) -## refFileName = os.path.expandvars(os.path.expanduser(options.refFileName)) -## outFileName = os.path.expandvars(os.path.expanduser(options.outFileName)) - -## try: -## from PerfMonAna import PerfMonProcessing as pm -## ana = pm.AnaMgr( chkFileName, refFileName, outFileName ) -## sc = ana.run() -## except Exception, err: -## print "::: Caught:",err -## 
traceback.print_exc( file = sys.stdout ) -## sc = ExitCodes.ERROR -## pass - - return sc - - - -if __name__ == "__main__": - print ":"*80 - print "::: perfRTT analysis script :::" - print "" - - sc = main() - - print "" - print "::: bye" - print ":"*80 - sys.exit( sc ) - diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/pmonsd.py b/Control/PerformanceMonitoring/PerfMonAna/bin/pmonsd.py index 22bfec7510080bc45535e4c8cbd53fb7aca881f3..aa56e2ec6616fd2cd52bf492812c5e903bdded94 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/bin/pmonsd.py +++ b/Control/PerformanceMonitoring/PerfMonAna/bin/pmonsd.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration __doc__ ='Script for parsing and basic analysis of Semi-Detailed PerfMon (PMonSD) output. More info at https://twiki.cern.ch/twiki/bin/viewauth/Atlas/PerfMonSD' @@ -9,31 +9,31 @@ import sys def _usage(ec=0): import os appname=os.path.basename(sys.argv[0]) - print "Usage, one of the following:" - print - print "1) List most significant performance differences in PMonSD outputs:" - print " <EXPERIMENTAL FEATURE>" - print " %s --diff INFILE1 INFILE2"%appname - print - print "2) Parse PMonSD text output and create pickle file:" - print " %s --parse INFILE [--output=OUTFILE[.psd[.gz]]]"%appname - print - print "3) Print content in ASCII format:" - print " %s --print INFILE"%appname - print - print "4) Print content as a raw python dictionary:" - print " %s --print-raw INFILE [KEYS]"%appname - print - print "INFILE : can be a logfile, a PerfMon tar-ball with PMonSD info inside, or a pickle file" - print " produced earlier from one of those by this script. Infiles can be gzipped if" - print " they have the .gz extension. PerfMon tar-balls must have .pmon.gz extension," - print " and Pickle files must have a .psd or .psd.gz extension." - print "OUTFILE : File for storing parsed information in python pickle format." - print "KEYS : Specify a list of keys to \"dive into\" the extracted dictionary" - print - print "Note that if you prefer to work entirely in python, you can access all of the above" - print "functionality through the module PerfMonComps.PMonSD and the functions therein:" - print "PMonSD.parse(infile,outfile=None), PMonSD.print_ascii(infile) and PMonSD.diff(infile1,infile2)." + print( "Usage, one of the following:" ) + print( ) + print( "1) List most significant performance differences in PMonSD outputs:" ) + print( " <EXPERIMENTAL FEATURE>" ) + print( " %s --diff INFILE1 INFILE2"%appname ) + print( ) + print( "2) Parse PMonSD text output and create pickle file:" ) + print( " %s --parse INFILE [--output=OUTFILE[.psd[.gz]]]"%appname ) + print( ) + print( "3) Print content in ASCII format:" ) + print( " %s --print INFILE"%appname ) + print( ) + print( "4) Print content as a raw python dictionary:" ) + print( " %s --print-raw INFILE [KEYS]"%appname ) + print( ) + print( "INFILE : can be a logfile, a PerfMon tar-ball with PMonSD info inside, or a pickle file" ) + print( " produced earlier from one of those by this script. Infiles can be gzipped if" ) + print( " they have the .gz extension. PerfMon tar-balls must have .pmon.gz extension," ) + print( " and Pickle files must have a .psd or .psd.gz extension." ) + print( "OUTFILE : File for storing parsed information in python pickle format." 
) + print( "KEYS : Specify a list of keys to \"dive into\" the extracted dictionary" ) + print( ) + print( "Note that if you prefer to work entirely in python, you can access all of the above" ) + print( "functionality through the module PerfMonComps.PMonSD and the functions therein:" ) + print( "PMonSD.parse(infile,outfile=None), PMonSD.print_ascii(infile) and PMonSD.diff(infile1,infile2)." ) sys.exit(ec) def main(args): @@ -47,12 +47,14 @@ def main(args): #Differences in the two files is not a failure. return 0 elif n in [2,3] and args[0]=='--parse': - if n==3: outfile=args[2] - else: outfile=args[1] + if n==3: + outfile=args[2] + else: + outfile=args[1] PerfMonComps.PMonSD.parse(args[1],outfile) return 0 elif n==2 and args[0]=='--print': - if PerfMonComps.PMonSD.print_ascii(args[1])==False:#todo: actually return false in case of problems + if not PerfMonComps.PMonSD.print_ascii(args[1]):#todo: actually return false in case of problems return 1 return 0 elif n>=2 and args[0]=='--print-raw': @@ -63,33 +65,33 @@ def main(args): #list index keys[0]=int(keys[0]) if keys[0]>=len(d): - print "ERROR: Index out of range: %i (Only found %i PMonSD summaries in input)"%(keys[0],len(d)) + print( "ERROR: Index out of range: %i (Only found %i PMonSD summaries in input)"%(keys[0],len(d)) ) return 1 d=d[int(keys[0])] keys=keys[1:] elif len(d)>1: - print "Parsed list of length %i. Specify index (0,1,...) to pick out specific dictionary"%len(d) + print( "Parsed list of length %i. Specify index (0,1,...) to pick out specific dictionary"%len(d) ) return 0 elif len(d)==1: d=d[0] else: - print "Did not parse any PMonSD info" + print( "Did not parse any PMonSD info" ) return 1 #Dive in, according to keys (all strings we assume): while keys: k=keys.pop(0) if not type(d)==dict: - print "ERROR: Can't dive further into dictionary. Remaining objects are:" - print ' '+str(d) + print( "ERROR: Can't dive further into dictionary. Remaining objects are:" ) + print( ' '+str(d) ) return 1 - if not k in d.keys(): - print "ERROR: Invalid key '%s'. Valid keys are:" - print ' '+str(d.keys()) + if k not in d.keys(): + print( "ERROR: Invalid key '%s'. Valid keys are:" ) + print( ' '+str(d.keys()) ) return 1 d=d[k] - print d + print( d ) if type(d)==dict: - print "Next keys: %s"%str(d.keys()) + print( "Next keys: %s"%str(d.keys()) ) return 0 elif n in [2,3] and args[0]=='--validate': #hidden feature to use by validation scripts. @@ -97,12 +99,12 @@ def main(args): #file is a pickle with result of previous parsings. 
        #a) Test that we didn't change results from parsing:
        if n==3:
-            if PerfMonComps.PMonSD._validate_identical(args[1],args[2])!=True:
-                print 'ERROR: Detected differences in information loaded from %s and %s'%(args[1],args[2])
+            if not PerfMonComps.PMonSD._validate_identical(args[1],args[2]):
+                print( 'ERROR: Detected differences in information loaded from %s and %s'%(args[1],args[2]) )
                return 1
        #b) Test that we can reproduce the output with the deparsing ability:
-        if PerfMonComps.PMonSD._validate_deparsing(args[1])!=True:
-            print 'ERROR: Errors detected in deparsing of %s'%args[1]
+        if not PerfMonComps.PMonSD._validate_deparsing(args[1]):
+            print( 'ERROR: Errors detected in deparsing of %s'%args[1] )
            return 1
        return 0
    _usage(1)
diff --git a/Control/PerformanceMonitoring/PerfMonAna/bin/pmontree.py b/Control/PerformanceMonitoring/PerfMonAna/bin/pmontree.py
index 66eeb52fb0eacbc3d56cc543affaf199cdc3d9c2..bf1e8f938cafafa73e6ac5c1d289f7e898743428 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/bin/pmontree.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/bin/pmontree.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 __author__  = "Frank Winklmeier"
 __version__ = "$Revision: 270227 $"
@@ -9,7 +9,6 @@ __doc__ = "Script to create dependency tree of perfmon stats"
 import sys
 import operator
 import re
-import tarfile

 import PerfMonComps.PerfMonSerializer as pmon_ser

@@ -63,14 +62,15 @@ class ResUser(Resource):
         indent = ('  '*level*(not opt.flat))
         s = '\n' + indent + self._node()
-        for d in self.dep: s += d._show(level+1, showFct)
+        for d in self.dep:
+            s += d._show(level+1, showFct)
         return s

     def calcSelf(self, children=None):
         self.dvmem_self = self.dvmem

     def show(self, showFct=None):
-        print self._show(0, showFct),
+        print( self._show(0, showFct), end='' )

     def _node(self):
         return self.name
@@ -80,13 +80,15 @@ class ResUser(Resource):
         """
         # Mark dependents for deletion
         for d in self.dep:
-            if purgeFct(d): d.name = None
+            if purgeFct(d):
+                d.name = None

         # Recursively call children
-        for d in self.dep: d.purge(purgeFct)
+        for d in self.dep:
+            d.purge(purgeFct)

         # Remove from list
-        self.dep = [ d for d in self.dep if d.name!=None ]
+        self.dep = [ d for d in self.dep if d.name is not None ]


 class Comp(ResUser):
     """Component (Algorithm, Tool, Service, etc.)
@@ -155,10 +157,12 @@ def getResUser(f, resTree, steps=['ini'], current=None):
         parent = current

     for line in f:
-        if line.startswith(('#','/io/')): continue
+        if line.startswith(('#','/io/')):
+            continue

         step,name,idx = sliceCompIdx(line)
-        if step not in steps: continue
+        if step not in steps:
+            continue

         # startAud
         if idx==0:
@@ -182,7 +186,7 @@ def getResUser(f, resTree, steps=['ini'], current=None):
         # stopAud
         if idx==1:
             if name != current.name:
-                raise RuntimeError, "stop for %s within scope of %s" % (name, current.name)
+                raise RuntimeError( "stop for %s within scope of %s" % (name, current.name) )
             else:
                 current.set(line, idx)
                 current.wrapup()
@@ -190,48 +194,60 @@ def getResUser(f, resTree, steps=['ini'], current=None):
             if step=='cbk':
                 offset = current.name.split(']+')[1]
                 i = current.name.find('[')
-                if i>0: current.name = '%s{+%s}' % (current.name[:i],offset)
+                if i>0:
+                    current.name = '%s{+%s}' % (current.name[:i],offset)

             if parent is None:
                 current=None
                 continue
-            else: return
+            else:
+                return


 def readEvents(f):
     """Read components for evt slice
     """
-    reEvent = re.compile('AthenaEventLoopMgr\s*INFO\s*===>>> done processing event.*')
+    reEvent = re.compile(r'AthenaEventLoopMgr\s*INFO\s*===>>> done processing event.*')

     evt = None
     comps = []   # [ {name : [Comp]} ]
     for line in f:
         m = reEvent.match(line)
         if m:
-            if evt: evt+=1
-            else: evt=0
+            if evt:
+                evt+=1
+            else:
+                evt=0
             comps.append({})

-        if evt is None: continue
-
+        if evt is None:
+            continue
+
+        '''
+        ## FIX ME : This bit needs to be checked
+        ## It's not obvious what reAud is intended to be...
         m = reAud.match(line)
-        if m and m.group('slice')!='evt': continue
+        if m and m.group('slice')!='evt':
+            continue

         if m and m.group('action')=='start':
             comp = Comp(m.group('comp'))
             comp.set(m, 0)
-            if not comp.name in comps[evt]: comps[evt][comp.name] = []
+            if comp.name not in comps[evt]:
+                comps[evt][comp.name] = []
             comps[evt][comp.name].append(comp)

         if m and m.group('action')=='stop':
             comp = comps[evt][comp.name][-1]
             comp.set(m, 1)
             comp.wrapup()
+        '''

     return comps


 def resAvg(res):
     """Calculate average of list of resources"""
-    if len(res)==0: return None
+    if len(res)==0:
+        return None

     a = Comp(res[0].name)
     a.step = res[0].step
     for r in res:
@@ -250,7 +266,8 @@ def calcEventAvg(comps, sliceObj=slice(None)):
     tmp = {}   # { comp: [] }
     for evt in comps[sliceObj]:
         for comp in evt.keys():
-            if not comp in tmp: tmp[comp] = []
+            if comp not in tmp:
+                tmp[comp] = []
             tmp[comp] += evt[comp]

     avg = []
@@ -267,8 +284,10 @@ def getCompList(resTree, resList):
     Call with resList = [].
""" for r in resTree: - if isinstance(r, ResUser): resList.append(r) - for d in r.dep: getCompList([d], resList) + if isinstance(r, ResUser): + resList.append(r) + for d in r.dep: + getCompList([d], resList) return resList[:] @@ -279,11 +298,12 @@ def diff(table, opt, attrgetter=operator.attrgetter('dvmem')): for i,t in enumerate(table): for comp in t: label = comp.symbol + ' ' + comp.name - if not label in tmp: tmp[label] = [0]*len(table) + if label not in tmp: + tmp[label] = [0]*len(table) tmp[label][i] = attrgetter(comp) # Convert to list - if opt.min!=None: + if opt.min is not None: limit = opt.min + 0.00001 cmpTable = [ [k]+v for k,v in tmp.iteritems() if abs(v[1]-v[0])>limit ] else: @@ -292,12 +312,12 @@ def diff(table, opt, attrgetter=operator.attrgetter('dvmem')): if opt.diff and len(table)==2: cmpTable.sort( lambda x,y : (int(x[2]-x[1])-int(y[2]-y[1])), reverse=True ) for c in cmpTable: - print "%-60s %10.0f %10.0f %10.0f" % (c[0],c[1],c[2],c[2]-c[1]) + print( "%-60s %10.0f %10.0f %10.0f" % (c[0],c[1],c[2],c[2]-c[1]) ) else: cmpTable.sort( lambda x,y : int(x[1]-y[1]), reverse=True) for c in cmpTable: - print "%-60s" % c[0], - print "%10.0f "*(len(c)-1) % tuple(c[1:]) + print( "%-60s" % c[0], ) + print( "%10.0f "*(len(c)-1) % tuple(c[1:]) ) return @@ -308,8 +328,8 @@ def printTable(compList, opt): avgmalloc = c.dmalloc*1024/c.nmalloc else: avgmalloc = 0 - print "%-60s %10.0f %10.0f %10.0f %10.0f" %\ - (c.name,c.dvmem,c.dmalloc,c.nmalloc,avgmalloc) + print( "%-60s %10.0f %10.0f %10.0f %10.0f" %\ + (c.name,c.dvmem,c.dmalloc,c.nmalloc,avgmalloc) ) def main(): import argparse @@ -348,7 +368,7 @@ def main(): return 1 if opt.diff and len(opt.files)!=2: - print "Can only calculate difference if two files are given" + print( "Can only calculate difference if two files are given" ) return 1 slices = [opt.slice] @@ -368,26 +388,29 @@ def main(): # Read files resTreeList = [] for f in opt.files: - l = [] + z = [] fstream = pmon_ser.extract_pmon_files(f)['data'] - getResUser(fstream, l, slices) + getResUser(fstream, z, slices) del fstream - resTreeList.append(l[:]) + resTreeList.append(z[:]) # Calculate self-VMem - if not opt.libself: children = [SharedLib] - else: children = None - for r in resTreeList[-1]: r.calcSelf(children) + if not opt.libself: + children = [SharedLib] + else: + children = None + for r in resTreeList[-1]: + r.calcSelf(children) # Diff if len(opt.files)>1: - print '#'*80 + print( '#'*80 ) for i,f in enumerate(opt.files): - print "# [%d] %s" % (i+1,f) + print( "# [%d] %s" % (i+1,f) ) if opt.diff: - print "# [3] difference [2]-[1]" - print '#'*80 + print( "# [3] difference [2]-[1]" ) + print( '#'*80 ) table = [ getCompList(t,[]) for t in resTreeList ] if opt.self: @@ -399,12 +422,16 @@ def main(): # Only one file resTree = resTreeList[0] - if opt.min!=None: + if opt.min is not None: # Use VMem or self-VMem for filtering - vmem = lambda c : c.dvmem_self if (opt.self==True and hasattr(c,'dvmem_self')) else c.dvmem - for r in resTree: r.show(lambda c: vmem(c)>opt.min) + def vmem( c ): + result = c.dvmem_self if (opt.self is True and hasattr(c,'dvmem_self')) else c.dvmem + return result + for r in resTree: + r.show(lambda c: vmem(c)>opt.min) else: - for r in resTree: r.show() + for r in resTree: + r.show() return 0 @@ -412,9 +439,11 @@ def main(): if __name__ == "__main__": try: sys.exit(main()) - except IOError, e: + except IOError as e: (code, msg) = e - if (code==32): pass # ignore broken pipe exception - else: raise e + if (code==32): + pass # ignore broken pipe exception + 
else: + raise e except KeyboardInterrupt: sys.exit(1) diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/Analyzer.py b/Control/PerformanceMonitoring/PerfMonAna/python/Analyzer.py index e50c95346b64249904c1fd4beff1d35d4367a1fa..fc2ff754944164fdbb6610f3aa98592d72ec8543 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/python/Analyzer.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/Analyzer.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @file: Analyzer.py # @purpose: a set of classes to analyze data from a perfmon tuple @@ -15,7 +15,6 @@ __version__ = "$Revision: 1.15 $" __doc__ = "A set of classes to analyze data from a perfmon tuple." import logging -import os from PerfMonAna.PyRootLib import importRoot @@ -37,7 +36,8 @@ def getAnalyzer(monVarName, monName): klass_path = monName[0].split('.') m = __import__('.'.join(klass_path[:-1]), fromlist=klass_path[-1:]) Klass = getattr(m, klass_path[-1]) - if len(monName)==2: monName = monName[1] + if len(monName)==2: + monName = monName[1] else: raise RuntimeError('unknown analyzer [monVarName=%r, monName=%r]' %\ (monVarName, monName)) @@ -96,7 +96,8 @@ def mon_project(tree, monInfos, id, varexp, selection="", opt="", hEvol = ROOT.TH1F(id, "%s;%s;%s" % (monInfos[0], monInfos[1], monInfos[2]), n, v2[0]-binWide/2., v2[n-1]+binWide/2.) _fill = hEvol.Fill - for i in xrange(n): _fill(v2[i], v1[i]) + for i in range(n): + _fill(v2[i], v1[i]) return (hEvol,hDistrib) def make_stack( canvas, pad_nbr, title, drawopt="nostack" ): @@ -112,48 +113,49 @@ def make_stack( canvas, pad_nbr, title, drawopt="nostack" ): def make_canvas(name, title, items, shape=None): ROOT = importRoot() nItems = len(items) - if shape is None: shape=(1,nItems) + if shape is None: + shape=(1,nItems) c = ROOT.gROOT.FindObject(name) #DR if c is None: - if not c: + if not c: drawOpt = "" c = ROOT.TCanvas(name, title) - setattr(c, '_stacks', [make_stack(c,i,title) for i in xrange(nItems)]) + setattr(c, '_stacks', [make_stack(c,i,title) for i in range(nItems)]) setattr(c, '_shape', shape) def _plot(self): return #DR if self._shape is None: return - if not self._shape : return - for ipad in xrange(self._shape[0]*self._shape[1]): - pad = self.cd(ipad+1) + if not self._shape : + return + for ipad in range(self._shape[0]*self._shape[1]): stack = self._stacks[ipad] print ("-->",ipad,self.GetName(),stack.GetName()) stack.Draw("nostack") - for gr in stack._graphs: gr.Draw("SAME") + for gr in stack._graphs: + gr.Draw("SAME") return setattr(c, '_plot', _plot) - if nItems>=1: c.Divide(shape[0], shape[1]) - elif nItems==0: return c + if nItems>=1: + c.Divide(shape[0], shape[1]) + elif nItems==0: + return c else: drawOpt = " SAME" if nItems>=1: for i,o in enumerate(items): - pad = c.cd(i+1); pad.SetGrid() - stack = c._stacks[i] + pad = c.cd(i+1) + pad.SetGrid() drawOpt = o[1]+drawOpt o = o[0] if isinstance(o, ROOT.TGraph): - o.GetHistogram().Draw(drawOpt); + o.GetHistogram().Draw(drawOpt) o.Draw(drawOpt) -## stack._graphs.append(o) -## stack.Add(o.GetHistogram(), drawOpt) else: -## stack.Add(o, drawOpt) o.Draw(drawOpt) #c._plot(c) return c - + class Analyzer(object): """ The base object for analyzing data from a perfmon tuple @@ -163,14 +165,14 @@ class Analyzer(object): object.__init__(self) self.msg = logging.getLogger( "Analyzer" ) self.msg.setLevel( logging.INFO ) - + self.name = name self.typeName = typeName self.nEntries = None self.minEvt = None 
        self.maxEvt = None
-        
+
        self.histos   = { }
        return
@@ -182,7 +184,7 @@ class Analyzer(object):
            self.bookHistos( monComp )

        return
-        
+
    def run(self, monComp):

        if self.visit( monComp ):
@@ -191,15 +193,9 @@ class Analyzer(object):
            self.fitHistos ( monComp )
        return

-##     def __bookHistos(self):
-##         return
-
-##     def __fillHistos(self):
-##         return
-
    def fitHistos(self, monComp):
        return
-    
+
##
class NoopAnalyzer(Analyzer):
diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/App.py b/Control/PerformanceMonitoring/PerfMonAna/python/App.py
index fd8dcd5d5ccbec90e07f0f768ee53d67eb01e9bc..46c170b9743db6e10c1871c053b8ed7c33a59f1e 100644
--- a/Control/PerformanceMonitoring/PerfMonAna/python/App.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/python/App.py
@@ -19,46 +19,36 @@ __doc__ = "A set of classes and utilities to post-process/analyze a (set of)
 import sys
 import os
 import logging
-from array import array
 import numpy
 import six

-#import pyximport
-#pyximport.install(pyimport=True)
-#from pyximport import pyxbuild
-#pyxbuild.DEBUG = 0
-
 import tempfile
 mplconfig_dir = tempfile.mkdtemp(prefix='matplotlib-%s-' % os.getpid())
 os.environ['MPLCONFIGDIR'] = mplconfig_dir
 import atexit
 def del_mplconfig_dir():
-    #print ("... removing [%s] ..." % mplconfig_dir)
     import os
     if os.system('/bin/rm -rf %s' % mplconfig_dir):
-        print ("** could not remove temporary $MPLCONFIGDIR ** (sc=%s)" % (sc,))
+        print ("** could not remove temporary $MPLCONFIGDIR **")
     return
 atexit.register(del_mplconfig_dir)
 #print ("===>",os.environ['MPLCONFIGDIR'])

 import matplotlib
-if not 'matplotlib.backends' in sys.modules:
+if 'matplotlib.backends' not in sys.modules:
     matplotlib.use('pdf')
 import matplotlib.pyplot as pyplot
 _rc = pyplot.rcParams
 _rc['legend.fontsize'] = 'medium'
 _rc['axes.titlesize'] = 'medium'
-#_rc['text.fontsize'] = 'smaller'
 _rc['xtick.labelsize'] = 'small'
 _rc['ytick.labelsize'] = 'small'
 _rc['font.size'] = 7.0
-#_rc['figure.autolayout'] = True
 _rc['figure.dpi'] = 100
 _rc['figure.subplot.bottom'] = 0.05
 _rc['figure.subplot.hspace'] = 0.3
 _rc['figure.subplot.right'] = 0.95
-#_rc['figure.subplot.left'] = 0.05
 _rc['figure.subplot.top'] = 0.95
 _rc['figure.subplot.wspace'] = 0.3

@@ -71,7 +61,7 @@ pyplot.legend = my_legend
 import pylab
 pylab.legend = my_legend
 del my_legend
-    
+
 from .DataLoader import DataLoader

 __do_monitoring = False
@@ -102,13 +92,17 @@ def _installLogger( lvl = logging.INFO ):
     logging.getLogger('').addHandler(logger)

     ## pre-defining some loggers with default logging-level
-    log = logging.getLogger("AppMgr");      log.setLevel( lvl )
+    log = logging.getLogger("AppMgr")
+    log.setLevel( lvl )
     for i in range(10):
-        log = logging.getLogger("AnaMgr-" + str(i).zfill(3));
+        log = logging.getLogger("AnaMgr-" + str(i).zfill(3))
         log.setLevel( lvl )
-    log = logging.getLogger("Ana-chk");  log.setLevel( logging.ERROR )
-    log = logging.getLogger("Ana-ref");  log.setLevel( logging.ERROR )
-    log = logging.getLogger("Analyzer"); log.setLevel( logging.ERROR )
+    log = logging.getLogger("Ana-chk")
+    log.setLevel( logging.ERROR )
+    log = logging.getLogger("Ana-ref")
+    log.setLevel( logging.ERROR )
+    log = logging.getLogger("Analyzer")
+    log.setLevel( logging.ERROR )

     return

 ## install the logger at py-module import and clean-up
@@ -159,14 +153,16 @@ class AppMgr(object):

         if fitSlice is None:
             self._fitSlice = '1:'
-        elif not ':' in fitSlice:
+        elif ':' not in fitSlice:
             bins = self.anaMgrs[0].bins
             nbins = len(bins[1:])
-            try: fitSlice = float(fitSlice)
-            except ValueError: raise
+            try:
+                fitSlice = float(fitSlice)
+            except ValueError:
+                raise
             if fitSlice <= 0. or fitSlice > 1.:
                 raise ValueError (
-                    "You have to give a fitSlice in (0.,1.] (got: %r)" % 
+                    "You have to give a fitSlice in (0.,1.] (got: %r)" %
                     fitSlice)
             # get the last x percent of the total range,
             _ratio = (1.- float(fitSlice))*nbins
@@ -179,7 +175,8 @@ class AppMgr(object):
                 self._fitSlice += "95"
             elif nbins > 120  :
                 self._fitSlice = "105:"
-            else: self._fitSlice = fitSlice
+            else:
+                self._fitSlice = fitSlice
         self.msg.info( "fit slice: [%s]", self._fitSlice )
         self.analyzers = analyzers
         self.msg.info( "Scheduled analyzers: %r", self.analyzers )
@@ -190,7 +187,6 @@ class AppMgr(object):
         """
         main entry point to run the post-processing of a perfmon job
         """
-        msg=self.msg
         self.msg.info( "running app..." )
         ## check everybody has the same bins
         for i in range(len(self.anaMgrs)):
@@ -204,7 +200,7 @@ class AppMgr(object):
                     self.msg.warning( " [%s] : %r",
                                       self.anaMgrs[j].name,
                                       self.anaMgrs[j].bins )
-        
+
         self.msg.info( "nbr of datasets: %i", len(DataSetMgr.instances.keys()) )
         from . import Analyzer
         self.msg.info( "running analyzers..." )
@@ -232,10 +228,10 @@ class AppMgr(object):
         self.__writeRootFile()
         self.__writeAsciiFile()
         self.__writePdfFile()
-        
+
         self.msg.info( "running app... [DONE]" )
         return ExitCodes.SUCCESS
-    
+
     def __filter(self, monComp):
         """hook for the user to filter out some MonitoredComponent"""
         ## user filtering fct
@@ -251,12 +247,13 @@ class AppMgr(object):
         outFile = ROOT.fopen( outName, 'RECREATE' )
         for dsName in DataSetMgr.names():
             outFile.cd( "/" )
-            outFile.mkdir( dsName ); outFile.cd( dsName );
+            outFile.mkdir( dsName )
+            outFile.cd( dsName )
             for m in MonitoredComponent.instances.values():
-                if (not m.name.startswith('PerfMonSlice') and 
+                if (not m.name.startswith('PerfMonSlice') and
                     not self.__filter(m)):
                     continue
-                if not dsName in m.data:
+                if dsName not in m.data:
                     continue

                 for h in m.data[dsName]['histos'].values():
@@ -269,15 +266,17 @@ class AppMgr(object):
                        outName )
         self.msg.debug( "create ROOT file... [DONE]" )
         return
-    
+
     def __writePdfFile(self):

         figs = []

         for k in [ 'ini', '1st', 'evt', 'fin', 'io' ]:
             if 'fig' in self.summary.sum[k]:
                 f = self.summary.sum[k]['fig']
-                if type(f) == type([]): figs.extend(f)
-                else:                   figs += [ f ]
+                if isinstance(f, list):
+                    figs.extend(f)
+                else:
+                    figs += [ f ]

         jobSlice = MonitoredComponent.instances['PerfMonSlice']
         for k in [ 'cpu', 'mem', 'io' ]:
             fig = 'evt/%s' % k
@@ -289,10 +288,13 @@ class AppMgr(object):
         for m in MonitoredComponent.instances.values():
             if not self.__filter(m):
                 continue
-            if m.type in ('alg','algtool','svc'): algFigs += m.figs.values()
-            elif m.type == 'io' : ioFigs += m.figs.values()
-            else: continue
-            
+            if m.type in ('alg','algtool','svc'):
+                algFigs += m.figs.values()
+            elif m.type == 'io':
+                ioFigs += m.figs.values()
+            else:
+                continue
+
         figs += algFigs
         figs += ioFigs

@@ -310,7 +312,7 @@ class AppMgr(object):
     def __writeAsciiFile(self):
         """Fill an ASCII with the summary data in a 'nice' format
         """
-        outName = self.outputFile+".summary.txt" 
+        outName = self.outputFile+".summary.txt"
         o = open( outName, 'w' )
         _txt = self.summary.txt
         print (":"*80, file=o)
@@ -318,18 +320,18 @@ class AppMgr(object):
             for i in ( 'ini','1st','evt','fin'):
                 print ("=== [%s - %s] ===" % (i,c), file=o)
                 for j in ( 'mem', 'cpu', 'allocs', ):
-                    for l in _txt[i][j][c]:
-                        print (l, file=o)
+                    for z in _txt[i][j][c]:
+                        print (z, file=o)
         print (":"*80, file=o)
         print ("="*80, file=o)
         o.close()
-        
+
         if os.path.exists( outName ):
             self.msg.info( " --> (%10.3f kB) [%s]",
                            os.stat(outName).st_size / 1024.,
                            outName )
         return
-    
+
 class AnaMgr(object):
     """
     The main class to analyze the content of a perfmon tuple
@@ -364,24 +366,20 @@ class AnaMgr(object):
                                data['meta']['iocontainers'] )
         storeNames = [ k for k in six.iterkeys(data['data']) if k != 'meta' ]
         compNames = [ c for c in compNames ]
-        
+
         _monitor('4')
         dataSetName = self.name
-        ## print (">>>",len(compNames),len(data.keys()))
-        _data_keys = list(data.keys())
         for compName in compNames:
-            ## print (":::::::::",compName)
             monComp = MonitoredComponent(compName, dataSetName)
             monData = monComp.data[dataSetName]
             for storeName in storeNames:
-                ## print ("--",storeName)
                 try:
                     monData[storeName] = data['data'][storeName][compName]
                 except KeyError:
                     monData[storeName] = None
                 if storeName == 'io' and compName == 'PerfMonSlice':
                     monData[storeName] = data['data']['io']['PerfMonSliceIo']
-            
+
             pass
         self.bins = numpy.arange(len(data['data']['evt']['PerfMonSlice']))
         _monitor('5')
@@ -390,7 +388,7 @@ class AnaMgr(object):
         _comps   = list(data['meta']['components' ].keys())
         _ioconts = data['meta']['iocontainers']
         for monComp in MonitoredComponent.instances.values():
-            if monComp.type != None:
+            if monComp.type is not None:
                 continue
             monName = monComp.name
             if monName in _comps:
@@ -400,18 +398,20 @@ class AnaMgr(object):
                 ## FIXME: not there yet...
                 ## monComp.domain = domain(_compsDb[monName]['class'],
                 ##                         _compsDb[monName]['module'])
-            elif monName in _ioconts : monComp.type = 'io'
-            else                     : monComp.type = 'usr'
+            elif monName in _ioconts:
+                monComp.type = 'io'
+            else:
+                monComp.type = 'usr'
             pass
         _monitor('6')

         ## push the data into the according dataset
         dataSet = DataSetMgr(dataSetName, data)
         dataSet.bins = self.bins
-        
+
         self.msg.debug( "Loading perfmon data... [OK]" )
         return
-    
+
 class MonitoredComponent(object):
     """
     An object modelling a (Gaudi) component which has been monitored with the
@@ -426,9 +426,12 @@ class MonitoredComponent(object):
         'type' : None
         }
     def __new__(cls, *p, **kw):
-        if len(p) > 0: kw['name']        = p[0]
-        if len(p) > 1: kw['dataSetName'] = p[1]
-        if len(p) > 2: kw['data']        = p[2]
+        if len(p) > 0:
+            kw['name'] = p[0]
+        if len(p) > 1:
+            kw['dataSetName'] = p[1]
+        if len(p) > 2:
+            kw['data'] = p[2]

         # already created...
         if kw['name'] in cls.instances.keys():
@@ -440,12 +443,12 @@ class MonitoredComponent(object):

         for k in cls.__slots__.keys():
             setattr(obj, k, cls.__slots__[k])
-        
+
         # update repository of instances
         cls.instances[kw['name']] = obj
-        
+
         return obj
-    
+
     def __init__(self, name, dataSetName):
         object.__init__(self)

@@ -453,10 +456,10 @@ class MonitoredComponent(object):

         if not self.data:
             self.data = {}
-        
+
         if not self.figs:
             self.figs = {}
-        
+
         if dataSetName not in self.data:
             self.data[dataSetName] = {}

@@ -476,7 +479,7 @@ class MonitoredComponent(object):
         for storeName,store in ds.items():
             monKeys += [ k.split("/")[0] for k in store.keys() ]
         return [ k for k in set(monKeys) ]
-    
+
 class DataSetMgr(object):
     """Borg-class (python-singleton) to hold the different 'dataset'
     """
@@ -489,9 +492,12 @@ class DataSetMgr(object):
        }

     def __new__(cls, *args, **kw):
-        if len(args) > 0: kw['name' ] = args[0]
-        if len(args) > 1: kw['data' ] = args[1]
-        if len(args) > 2: kw['label'] = args[2]
+        if len(args) > 0:
+            kw['name' ] = args[0]
+        if len(args) > 1:
+            kw['data' ] = args[1]
+        if len(args) > 2:
+            kw['label'] = args[2]

         # already created ?
         if kw['name'] in cls.instances.keys():
@@ -506,17 +512,17 @@ class DataSetMgr(object):

         # update repository of instances
         cls.instances[kw['name']] = obj
-        
+
         return obj
-    
+
     @staticmethod
     def labels( keys = None ):
-        if keys == None:
+        if keys is None:
             keys = list(DataSetMgr.instances.keys())
             keys.sort()
         return [DataSetMgr.instances[k].label for k in keys]
-    
+
     @staticmethod
     def names():
         keys = list(DataSetMgr.instances.keys())
@@ -529,22 +535,25 @@ class DataSetMgr(object):
         # skip indigo...
         color = iter(list(pylab.cm.colors.cnames.keys())[1:])
         return color
-    
+
     def __init__(self, name, data, label=None):
         object.__init__(self)
         self.name = name
-        if not self.data: self.data = data
-        if not self.bins: self.bins = []
+        if not self.data:
+            self.data = data
+        if not self.bins:
+            self.bins = []
+
+        if not self.label:
+            self.label = name

-        if not self.label: self.label = name
-        
-        if label == None:
+        if label is None:
             self.label = self.name
-        
+
         return
-    
-    
+
+
 class PdfMgr(object):
     """Borg-class (python-singleton) to hold different Pdf files,
     containing multiple figures
@@ -556,8 +565,10 @@ class PdfMgr(object):
        }

     def __new__(cls, *args, **kw):
-        if len(args) > 0: kw['name'] = args[0]
-        if len(args) > 1: kw['figs'] = args[1]
+        if len(args) > 0:
+            kw['name'] = args[0]
+        if len(args) > 1:
+            kw['figs'] = args[1]

         # already created ?
         if kw['name'] in cls.instances.keys():
@@ -572,10 +583,10 @@ class PdfMgr(object):

         # update repository of instances
         cls.instances[kw['name']] = obj
-        
+
         return obj
-    
-    
+
+
     def __init__(self, name, figs = None):
         object.__init__(self)

@@ -585,11 +596,8 @@ class PdfMgr(object):

     def save(self, pdfFileName, figs, orientation='portrait'):
         from matplotlib.backends.backend_pdf import PdfPages
-
-        tmpFiles = []
+
         import os
-        os_close = os.close
-        from tempfile import mkstemp
         _monitor('7')
         if os.path.exists( pdfFileName ):
             os.remove( pdfFileName )
@@ -597,13 +605,14 @@ class PdfMgr(object):
         for idx,fig in enumerate(figs):
             out.savefig(fig)
             ## closing canvas to recover some memory
-            fig.clear(); del fig
+            fig.clear()
+            del fig
             figs[idx] = None
-        
+
         _monitor('8')
         out.close()
         return
-    
+

 """
 def legend(*args, **kwargs):
diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/CpuAnalyzer.py b/Control/PerformanceMonitoring/PerfMonAna/python/CpuAnalyzer.py
index 51f1a2ddd76c2f7a6191b80d4763c91a39c62b76..fc0c320b79d974944df7f3876ee3d581f001c18a 100755
--- a/Control/PerformanceMonitoring/PerfMonAna/python/CpuAnalyzer.py
+++ b/Control/PerformanceMonitoring/PerfMonAna/python/CpuAnalyzer.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 # @file: CpuAnalyzer.py
 # @purpose: a set of classes to analyze (CPU) data from a perfmon tuple
@@ -12,12 +12,11 @@ __author__  = 'Sebastien Binet'
 __version__ = "$Revision: 1.19 $"
 __doc__     = "A set of classes to analyze (CPU) data from a perfmon tuple."

-import os
 import logging
-import numpy,pylab
+import numpy
 import matplotlib.pyplot as plt
 from .PyRootLib import importRoot
-from .Analyzer import Analyzer,bookAvgHist,mon_project,make_canvas
+from .Analyzer import Analyzer,bookAvgHist
 from .Constants import Units

 class CpuAnalyzer( Analyzer ):
@@ -26,7 +25,7 @@ class CpuAnalyzer( Analyzer ):
     the initialize and finalize steps (on a per-algorithm basis) and
     the execute step (on a per-event and per-algorithm basis).
""" - + def __init__(self, name): Analyzer.__init__(self, name, 'cpu') self.msg = logging.getLogger( "Cpu-%s" % name ) @@ -34,15 +33,16 @@ class CpuAnalyzer( Analyzer ): return def visit(self, monComp): - if not monComp.type in ['alg','user']: + if monComp.type not in ['alg','user']: self.msg.debug( " skipping %s [%s]",monComp.name,monComp.type ) return False return True - + def bookHistos(self, monComp): ROOT = importRoot() #Analyzer.__bookHistos(self) - from .PyRootLib import setupRootStyle; setupRootStyle(); + from .PyRootLib import setupRootStyle + setupRootStyle() from .App import DataSetMgr for dataSetName in monComp.data.keys(): @@ -53,7 +53,7 @@ class CpuAnalyzer( Analyzer ): ## print ":::",dataSetName,minEvt,maxEvt,nEntries histos = monComp.data[dataSetName]['histos'] - + monCompName = monComp.name.replace( "/", "#" ) hId = 'cpu_%s.%s' % (monCompName, dataSetName) hName = 'cpu_%s' % dataSetName @@ -68,35 +68,29 @@ class CpuAnalyzer( Analyzer ): from .App import DataSetMgr self.msg.debug("filling histograms...") - + # short-hands msg = self.msg # milliseconds ms = Units.ms - monName = monComp.name dsNames = DataSetMgr.names() yMin = [] yMax = [] allGood = True - - figs = monComp.figs for dsName in dsNames: if dsName not in monComp.data: continue data = monComp.data[dsName] - ## print "..",dsName,data.keys() - if not 'evt' in data: + if 'evt' not in data: continue data = data['evt'] if data is None: continue - - ## print "..",dsName,data.keys() - - if not 'cpu' in data.dtype.names: + + if 'cpu' not in data.dtype.names: allGood = False msg.debug('component [%s] has empty cpu/user infos for ' 'dataset [%s]', @@ -116,18 +110,18 @@ class CpuAnalyzer( Analyzer ): if len(yMin) == 0 and len(yMax) == 0: msg.debug("Component [%s] has no 'evt' level data", monComp.name) return - + yMin = min(yMin) yMax = max(yMax) - + for dsName in dsNames: if dsName not in monComp: continue data = monComp.data[dsName] - if not 'evt' in data: + if 'evt' not in data: continue bins = DataSetMgr.instances[dsName].bins - if not 'evt/cpu' in monComp.figs: + if 'evt/cpu' not in monComp.figs: monComp.figs['evt/cpu'] = plt.figure() monComp.figs['evt/cpu'].add_subplot(211).hold(True) monComp.figs['evt/cpu'].add_subplot(212).hold(True) @@ -135,14 +129,12 @@ class CpuAnalyzer( Analyzer ): ax = fig.axes[0] cpu = data['evt']['cpu'] cpu_c = cpu['cpu'] - cpu_u = cpu['user'] - cpu_s = cpu['sys'] binMax = len(cpu_c[self.minEvt:len(bins)]) - pl = ax.plot(bins[self.minEvt:binMax], - cpu_c[self.minEvt:binMax,2]*ms, - linestyle = 'steps', - label = dsName) + ax.plot(bins[self.minEvt:binMax], + cpu_c[self.minEvt:binMax,2]*ms, + linestyle = 'steps', + label = dsName) ax.grid(True) ax.set_title ( "CPU time [%s]" % monComp.name ) ax.set_ylabel( 'CPU time [ms]' ) @@ -161,7 +153,7 @@ class CpuAnalyzer( Analyzer ): ax.set_xlabel( 'CPU time [ms]' ) ax.set_ylim( (ax.get_ylim()[0], ax.get_ylim()[1]*1.1) ) - + h = data['histos']['cpu_%s' % dsName] hAvg = bookAvgHist(h, cpu_c[:,2] * ms) data['histos'][h.GetName()] = hAvg @@ -173,7 +165,7 @@ class CpuAnalyzer( Analyzer ): hAvg.Fill( cpuTime ) pass # loop over datasets - + for ax in monComp.figs['evt/cpu'].axes: ax.legend( DataSetMgr.labels( dsNames ), loc='best' ) @@ -185,8 +177,8 @@ class CpuAnalyzer( Analyzer ): ROOT = importRoot(batch=True) RootFct = ROOT.TF1 dummyCanvas = ROOT.TCanvas( 'dummyFitCanvas' ) - - histos = [ h for h in self.histos.values() + + histos = [ h for h in self.histos.values() if hasattr(h, 'tag') and h.tag == 'summary' and \ not h.GetName().startswith("cfg.") ] @@ 
-196,7 +188,7 @@ class CpuAnalyzer( Analyzer ): xMax = x.GetXmax() name = h.GetName() - modelFct = prl.Polynom( degree = 1 ) + prl.Polynom( degree = 1 ) fct = RootFct( 'fitFct_%s' % name, "pol1", xMin, xMax ) ## Q: quiet ## R: Use the range specified in the function range @@ -210,27 +202,8 @@ class CpuAnalyzer( Analyzer ): ( name, fitRes, fct.GetChisquare(), fct.GetNDF() ) self.msg.info( msg ) pass - - # FIXME: not yet for prod! -## histos = [ h for h in self.histos.values() -## if h.GetName().count("avg_") > 0 ] - -## for h in histos: -## x = h.GetXaxis() -## xMin, xMax = x.GetXmin(), x.GetXmax() -## name = h.GetName() -## fct = RootFct( "fitFct_%s" % name, "gaus", xMin, xMax ) -## fct.SetParameter( 1, h.GetMaximum() ) -## h.Fit(fct, "QOR") -## nPars = fct.GetNpar() -## self.msg.info( "[%-50s] %s", name, "\t".join( -## "p[%i] = %12.3f ms " % (i, fct.GetParameter(i)) \ -## for i in range(nPars) ) -## ) - -## pass - + del dummyCanvas return - + pass # class CpuAnalyzer diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/DataLoader.py b/Control/PerformanceMonitoring/PerfMonAna/python/DataLoader.py index 64e916beff8aa304710e5d79d48394c997f51cf8..78c4195d8148bc1e5195c8c2ca0432fd6456e787 100644 --- a/Control/PerformanceMonitoring/PerfMonAna/python/DataLoader.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/DataLoader.py @@ -1,32 +1,29 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @file: PerfMonAna/python/DataLoader.py # @purpose: handles various data formats and loads data from these files # @author: Sebastien Binet <binet@cern.ch> -import shelve,os,tempfile,glob,atexit,shutil -import cppyy +import shelve,os class DataFormatHandler(object): pass # class DataFormatHandler class DataHdlr_v000000(object): def __init__(self, fileName): - import cppyy object.__init__(self) self._fileName = fileName self._outFileName = None def cnv(self): return - + pass # class DataHdlr_v000000 class DataHdlr_v000100(object): """Data handler for pure python format. 
       v00.01.00
     """
     def __init__(self, fileName, scratchDir):
-        import cppyy
         object.__init__(self)
         self._fileName = fileName
         self._tmpdir = scratchDir
@@ -35,7 +32,7 @@ class DataHdlr_v000100(object):

     def cnv(self):
         data = {}
-        import tempfile,os,glob
+        import tempfile,os
         import gzip
         import shutil

@@ -49,6 +46,7 @@ class DataHdlr_v000100(object):
             if self._fileName.endswith('.gz'):
                 f = gzip.GzipFile(fileobj=f)
                 f.seek(0)
+            tmpFileName = 'foo.bar'
             tmpFile = open(tmpFileName, 'w')
             shutil.copyfileobj(f, tmpFile)
             f.close()
@@ -69,69 +67,24 @@ class DataHdlr_v000100(object):
                 'components'   : _data['meta://components'],
                 'iocontainers' : _data['meta://iocontainers'],
                 }
-            compNames = set(_data['components'].keys() +
-                            _data['iocontainers'])
-            from PyRootLib import importRoot
-            ROOT = importRoot()
-            root = ROOT.fopen(
-                os.path.join([tmpdir,self._fileName+".root"]),
-                "recreate"
-                )
-
-##             dataSetName = self.name
-##             ## print ">>>",len(compNames),len(data.keys())
-##             _data_keys = data.keys()
-##             for compName in compNames:
-##                 ## print ":::::::::",compName
-##                 monComp = MonitoredComponent(compName, dataSetName)
-##                 monData = monComp.data[dataSetName]
-##                 for storeName in storeNames:
-##                     ## print compName,storeName
-##                     if not monData.has_key(storeName):
-##                         monData[storeName] = {}
-##                     compNameHdr1 = compName + '://'  + storeName
-##                     compNameHdr2 = compName + ':///' + storeName
-##                     for k in _data_keys:
-##                         if k.startswith( compNameHdr1 ) or \
-##                            k.startswith( compNameHdr2 ):
-##                             monKey = k[k.find(storeName+'/')+len(storeName)+1:]
-##                             monData[storeName][monKey] = numpy.array(data[k])
-##                     if storeName == 'evt' and monData[storeName].has_key('evtNbr'):
-##                         self.bins = monData[storeName]['evtNbr']
-##                     pass
-##                 pass
-##             _monitor('5')
-
-##             _compsDb = data['meta://components' ]
-##             _comps   = data['meta://components' ].keys()
-##             _ioconts = data['meta://iocontainers']
-##             for monComp in MonitoredComponent.instances.values():
-##                 if monComp.type != None:
-##                     continue
-##                 monName = monComp.name
-##                 if   monName in _comps   : monComp.type = _compsDb[monName]
-##                 elif monName in _ioconts : monComp.type = 'io'
-##                 else                     : monComp.type = 'usr'
-##                 pass
-
-        finally: os.chdir(wkdir)
+        finally:
+            os.chdir(wkdir)
         return data
-    
+
     pass # class DataHdlr_v000100

 class DataHdlr_v000200(object):
     """Data handler for mixed ROOT/TTree-python format.
v00.02.00 """ def __init__(self, fileName, scratchDir): - import cppyy object.__init__(self) self._fileName = fileName self._tmpdir = scratchDir def cnv(self): data = {} - import tempfile,os,glob + import os,glob origdir = os.getcwd() tmpdir = self._tmpdir try: @@ -148,21 +101,21 @@ class DataHdlr_v000200(object): else: db = shelve.open(fname) data['meta'] = {} - for k in db.iterkeys(): data['meta'][k] = db[k] + for k in db.iterkeys(): + data['meta'][k] = db[k] db.close() - -## print "version:",data['meta']['version_id'] from PyRootLib import importRoot ROOT = importRoot() root = ROOT.fopen(glob.glob("*.root")[0], "read") - for k in ('ini','evt','fin'): data[k] = root.Get("perfmon/%s"%k) + for k in ('ini','evt','fin'): + data[k] = root.Get("perfmon/%s"%k) data['meta']['rootFile'] = root finally: os.chdir(origdir) - + return data - + pass # class DataHdlr_v000200 @@ -177,6 +130,6 @@ class DataLoader(object): infos, data = pmon_ser.pmon_load(self.fileName) return {'meta':infos, 'data':data} - + pass # class DataLoader diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/IoAnalyzer.py b/Control/PerformanceMonitoring/PerfMonAna/python/IoAnalyzer.py index 705d06329ab7f487fe7ab67998dbdaac18279c81..79e029d79fd66797efe9e9b535c64fd680e6245d 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/python/IoAnalyzer.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/IoAnalyzer.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @file: IoAnalyzer.py # @purpose: a set of classes to analyze (I/O) data from a perfmon tuple @@ -34,11 +34,11 @@ class IoAnalyzer( Analyzer ): return def visit(self, monComp): - if not monComp.type in ['io']:#'cfg']: + if monComp.type not in ['io']:#'cfg']: self.msg.debug( " skipping %s [%s]",monComp.name,monComp.type ) return False return True - + def bookHistos(self, monComp): ROOT = importRoot() #Analyzer.__bookHistos(self) @@ -94,8 +94,10 @@ class IoAnalyzer( Analyzer ): ms = Units.ms dsNames = DataSetMgr.names() - yMinRead = []; yMaxRead = [] - yMinWrite = []; yMaxWrite = [] + yMinRead = [] + yMaxRead = [] + yMinWrite = [] + yMaxWrite = [] for dsName in dsNames: if dsName not in monComp.data: continue @@ -117,7 +119,7 @@ class IoAnalyzer( Analyzer ): yMaxRead = max(yMaxRead) yMinWrite = min(yMinWrite) yMaxWrite = max(yMaxWrite) - + for dsName in dsNames: if dsName not in monComp.data: continue @@ -136,11 +138,11 @@ class IoAnalyzer( Analyzer ): fig = monComp.figs['evt/io'] ax = fig.axes[0] _iy = self.minEvt + len(bins[self.minEvt:]) - pl = ax.plot( bins[self.minEvt:], - data['evt']['io/cpu/r'][self.minEvt:_iy] * ms, - linestyle = 'steps', - label = dsName ) - + ax.plot( bins[self.minEvt:], + data['evt']['io/cpu/r'][self.minEvt:_iy] * ms, + linestyle = 'steps', + label = dsName ) + ax.grid( True ) ax.set_title ( "[%s]" % monComp.name ) ax.set_ylabel( '(R) CPU time [ms]' ) @@ -160,15 +162,15 @@ class IoAnalyzer( Analyzer ): ax.set_xlabel( '(R) CPU time [ms]' ) ax.set_ylim( (ax.get_ylim()[0], ax.get_ylim()[1]*1.1) ) - + ## write fig = monComp.figs['evt/io'] ax = fig.axes[2] _iy = self.minEvt + len(bins[self.minEvt:]) - pl = ax.plot( bins[self.minEvt:], - data['evt']['io/cpu/w'][self.minEvt:_iy] * ms, - linestyle = 'steps', - label = dsName ) + ax.plot( bins[self.minEvt:], + data['evt']['io/cpu/w'][self.minEvt:_iy] * ms, + linestyle = 'steps', + label = dsName ) ax.grid( True ) ax.set_title ( "[%s]" % monComp.name ) ax.set_ylabel( '(W) CPU time 
[ms]' ) @@ -233,10 +235,10 @@ class IoAnalyzer( Analyzer ): fig = monComp.figs['evt/rio'] ax = fig.axes[0] _iy = self.minEvt + len(bins[self.minEvt:]) - pl = ax.plot( bins[self.minEvt:], - data['evt']['io/cpu/rr'][self.minEvt:_iy] * ms, - linestyle = 'steps', - label = dsName ) + ax.plot( bins[self.minEvt:], + data['evt']['io/cpu/rr'][self.minEvt:_iy] * ms, + linestyle = 'steps', + label = dsName ) ax.set_title ( "[%s]" % monComp.name ) ax.set_ylabel( '(RR) CPU time [ms]' ) ax.set_xlabel( 'Event number' ) @@ -259,22 +261,23 @@ class IoAnalyzer( Analyzer ): ratios = [] for idx,num in enumerate(data['evt']['io/cpu/rr'][self.minEvt:_iy]): den = data['evt']['io/cpu/r'][idx] - if den == 0.: r = 0 - else : r = num/den*100. + if den == 0.: + r = 0 + else: + r = num/den*100. ratios.append(r) -## print "%3i %8.3f %8.3f %8.3f" % (idx,num,den,r) - + ratios = numpy.array(ratios) yMinRatio = min(ratios) yMaxRatio = max(ratios) - + ## pure ROOT read over T/P read fig = monComp.figs['evt/rio'] ax = fig.axes[2] - pl = ax.plot( bins[self.minEvt:], - ratios, - linestyle = 'steps', - label = dsName ) + ax.plot( bins[self.minEvt:], + ratios, + linestyle = 'steps', + label = dsName ) ax.set_title ( "[%s]" % monComp.name ) ax.set_ylabel( 'Pure-ROOT over Full read CPU time (%)' ) ax.set_xlabel( 'Event number' ) diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/MemAnalyzer.py b/Control/PerformanceMonitoring/PerfMonAna/python/MemAnalyzer.py index ed435879a6c5caa77e67fed328d0d0f34a0b6e63..dca9fd6180763f73d4308a25a77bc0426fd1e246 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/python/MemAnalyzer.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/MemAnalyzer.py @@ -13,12 +13,12 @@ __version__ = "$Revision: 1.21 $" __doc__ = "A set of classes to analyze (Mem) data from a perfmon tuple." import logging -import numpy, pylab +import numpy import matplotlib.pyplot as plt from .PyRootLib import importRoot -from .Analyzer import Analyzer, bookAvgHist, mon_project, make_canvas +from .Analyzer import Analyzer, bookAvgHist from .Constants import Units - + class MemAnalyzer( Analyzer ): """analyzer working on memory related quantities. 
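
The rewritten ratio loop above guards against a zero denominator element by element before forming the pure-ROOT over full-read percentage. Since numpy is already in hand, the same guarded ratio can be formed in one vectorized step; a sketch with stand-in values, not code from the patch:

import numpy

num = numpy.array([4.0, 2.0, 0.0])   # stands in for io/cpu/rr samples
den = numpy.array([8.0, 0.0, 5.0])   # stands in for io/cpu/r samples
# divide only where the denominator is non-zero, 0 elsewhere
safe = numpy.divide(num, den, out=numpy.zeros_like(num), where=(den != 0.))
ratios = safe * 100.                 # [50. 0. 0.]
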
It reads the perfmon tuple and extracts virtual memory and resident set size memory consumptions @@ -36,17 +36,16 @@ class MemAnalyzer( Analyzer ): return def visit(self, monComp): - if not monComp.type in ['alg','user']: + if monComp.type not in ['alg','user']: self.msg.debug( " skipping %s [%s]",monComp.name,monComp.type ) return False - + return True - + def bookHistos(self, monComp): ROOT = importRoot() - from .PyRootLib import setupRootStyle; setupRootStyle(); - - #Analyzer.__bookHistos(self) + from .PyRootLib import setupRootStyle + setupRootStyle() from .App import DataSetMgr for dataSetName in monComp.data.keys(): @@ -82,46 +81,40 @@ class MemAnalyzer( Analyzer ): return def fillHistos(self, monComp): - + from .App import DataSetMgr self.msg.debug("filling histograms...") # short-hands msg = self.msg - - # convert page-size into MB - Mb = Units.Mb + # convert page-size into kB kb = Units.kb - monName = monComp.name dsNames = DataSetMgr.names() ymin = {'vmem':[], 'malloc':[]} ymax = {'vmem':[], 'malloc':[]} allGood = True - + for dsName in dsNames: - if not dsName in monComp.data: + if dsName not in monComp.data: continue - #print "---",dsName,monComp.name data = monComp.data[dsName] - if not 'evt' in data: + if 'evt' not in data: continue data = data['evt'] if data is None: ## print "---",dsName,monComp.name,"data['evt'] is None" continue - if not 'mem' in data.dtype.names: + if 'mem' not in data.dtype.names: allGood = False msg.debug('component [%s] has empty mem infos for ' 'dataset [%s]', monComp.name, dsName) - ## print "--->",dsName,monComp.name,data.dtype.names continue - ## print "+++",dsName,monComp.name mem = data['mem'] dvmem = mem['vmem'][:,2] ymin['vmem'].append(dvmem[self.minEvt:].min()*kb) @@ -142,28 +135,28 @@ class MemAnalyzer( Analyzer ): self.msg.debug("Component [%s] has no 'evt' level data", monComp.name) return - + for k in ymin.keys(): ymin[k] = min(ymin[k]) ymax[k] = max(ymax[k]) pass - + for dsName in dsNames: - if not dsName in monComp.data: + if dsName not in monComp.data: continue data = monComp.data[dsName] - if not 'evt' in data: + if 'evt' not in data: continue data = data['evt'] if data is None: continue - - if not 'mem' in data.dtype.names: + + if 'mem' not in data.dtype.names: continue bins = DataSetMgr.instances[dsName].bins - if not 'evt/mem' in monComp.figs: + if 'evt/mem' not in monComp.figs: monComp.figs['evt/mem'] = plt.figure() monComp.figs['evt/mem'].add_subplot(221).hold( True ) monComp.figs['evt/mem'].add_subplot(222).hold( True ) @@ -174,21 +167,21 @@ class MemAnalyzer( Analyzer ): mem = data['mem'] dvmem = mem['vmem'][:,2] dmall = mem['mall'][:,2] - + ## VMem ax = fig.axes[0] binMax = len(dvmem[self.minEvt:len(bins)]) - pl = ax.plot(bins[self.minEvt:binMax], - dvmem[self.minEvt:binMax] * kb, - linestyle = 'steps', - label = dsName) + ax.plot(bins[self.minEvt:binMax], + dvmem[self.minEvt:binMax] * kb, + linestyle = 'steps', + label = dsName) ax.grid(True) ax.set_title ('Delta V-Mem\n[%s]' % monComp.name) ax.set_ylabel('Delta V-Mem [kb]') ax.set_xlabel('Event number') ax.set_ylim((ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)) - + h,b = numpy.histogram( dvmem[self.minEvt:binMax] * kb, bins = 20, @@ -204,17 +197,17 @@ class MemAnalyzer( Analyzer ): ## Malloc ax = fig.axes[1] - pl = ax.plot(bins[self.minEvt:binMax], - dmall[self.minEvt:binMax] * kb, - linestyle = 'steps', - label = dsName) + ax.plot(bins[self.minEvt:binMax], + dmall[self.minEvt:binMax] * kb, + linestyle = 'steps', + label = dsName) ax.grid(True) ax.set_title ('Delta 
Malloc\n[%s]' % monComp.name) ax.set_ylabel('Delta Malloc [kb]') ax.set_xlabel('Event number') ax.set_ylim((ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)) - + h,b = numpy.histogram( dmall[self.minEvt:binMax] * kb, bins = 20, @@ -254,8 +247,6 @@ class MemAnalyzer( Analyzer ): return def fitHistos(self, monComp): - # convert page-size into MB - Mb = Units.Mb # convert page-size into kB kb = Units.kb @@ -263,12 +254,12 @@ class MemAnalyzer( Analyzer ): ROOT = importRoot() RootFct = ROOT.TF1 dummyCanvas = ROOT.TCanvas( 'dummyFitCanvas' ) - - histos = [ h for h in self.histos.values() + + histos = [ h for h in self.histos.values() if hasattr(h, 'tag') and h.tag == 'summary' and \ not h.GetName().startswith("cfg.") ] - + for h in histos: x = h.GetXaxis() xMin = x.GetXmin() @@ -279,7 +270,7 @@ class MemAnalyzer( Analyzer ): # could make sense for mem-leak... xMin = xMin + ( xMax-xMin) / 2. - modelFct = prl.Polynom( degree = 1 ) + prl.Polynom( degree = 1 ) fct = RootFct( 'fitFct_%s' % name, "pol1", xMin, xMax ) ## Q: quiet ## R: Use the range specified in the function range @@ -294,7 +285,7 @@ class MemAnalyzer( Analyzer ): self.msg.info( msg ) pass del dummyCanvas - + return - + pass # class MemAnalyzer diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/PyRootLib.py b/Control/PerformanceMonitoring/PerfMonAna/python/PyRootLib.py index 3efeb8834144104188631459a3acaa2fc0137b4f..29523c85fc94a4b637254af0fa939768d97a0a8a 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/python/PyRootLib.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/PyRootLib.py @@ -23,7 +23,7 @@ def importRoot( batch = True ): if 'DISPLAY' in os.environ: x_display = os.environ['DISPLAY'] del os.environ['DISPLAY'] - + ## few customizations ROOT.gErrorIgnoreLevel = ROOT.kError if ROOT.gROOT.GetVersionInt() >= 51800: @@ -38,7 +38,7 @@ def importRoot( batch = True ): ROOT.kMyRed = ROOT.kRed ROOT.kMyGreen = ROOT.kGreen+100 ROOT.kDarkGreen = ROOT.kGreen +100 - + ## if batch: if x_display: @@ -63,7 +63,7 @@ def importRoot( batch = True ): def fopen(*args): global _root_files f = ROOT.TFile.Open(*args) - if not f in _root_files: + if f not in _root_files: _root_files.append(f) return f ROOT.fopen = fopen @@ -73,7 +73,7 @@ ROOT = importRoot() def setupRootStyle(): """Somehow beautify the ugly default ROOT display style""" - + style = ROOT.gROOT.GetStyle( "Plain" ) # no borders, white color @@ -91,14 +91,14 @@ def setupRootStyle(): style.SetTitleOffset(1.1) style.SetTitleSize(0.035,"Y") style.SetTitleOffset(1.1,"Y") - + # canvas stuff style.SetCanvasBorderSize(0) style.SetCanvasDefH( 800) style.SetCanvasDefW(1000) style.SetFrameBorderMode(0) style.SetFrameBorderSize(0) - + style.SetStatX(0.95) style.SetStatY(0.9) style.SetStatW(0.18) @@ -119,7 +119,7 @@ def setupRootStyle(): style.SetPalette(1) #style.SetOptStat(111111) style.SetOptStat(0) - + ROOT.gROOT.SetStyle( "Plain" ) ROOT.gROOT.ForceStyle() @@ -144,7 +144,7 @@ class Polynom: self.n = degree def __call__(self, x, par): return sum( (x[0]**i) + par[i] for i in range(self.n+1) ) - + class OptionStyle: """ Struct to hold options (color/width/style) for a TStyle-like object @@ -175,12 +175,14 @@ class Style(object): self.line = lineOptions self.marker = markerOptions self.fillStyle = fillStyle - - if self.line == None: self.line = OptionStyle() - if self.marker == None: self.marker = OptionStyle() + + if self.line is None: + self.line = OptionStyle() + if self.marker is None: + self.marker = OptionStyle() return def getLine(self): return self.line def getMarker(self): 
return self.marker - + diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/Rtt.py b/Control/PerformanceMonitoring/PerfMonAna/python/Rtt.py deleted file mode 100644 index f1de91a7f9f8776941997b4f8b8df1acdd4b7577..0000000000000000000000000000000000000000 --- a/Control/PerformanceMonitoring/PerfMonAna/python/Rtt.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration - -# @file: PerfMonAna/python/Rtt.py -# @purpose: a set of classes to interact with RTT -# @author: Sebastien Binet <binet@cern.ch> -# $Id: Rtt.py,v 1.2 2007-07-19 18:27:49 binet Exp $ - -from __future__ import print_function - -""" a set of classes to interact with RTT -""" -# -# -__author__ = 'Sebastien Binet' -__version__ = "$Revision: 1.2 $" -__doc__ = "a set of classes to interact with RTT" - -import os, sys - -class RttHelper: - """A set of helpers to deal with RTT XML summary files""" - - @staticmethod - def getText(nodeList): - return "".join( [ node.data for node in nodeList.childNodes \ - if node.nodeType == node.TEXT_NODE ] ) - - @staticmethod - def val(path, element): - paths = path.split('/') - curGenEls = [element] - for p in paths: - nextGenEls = [] - if p == 'text()': - texts = [] - [texts.extend(RttHelper.getText(e)) for e in curGenEls] - text = reduce(lambda x,y:x+y, texts,'') - return text - else: - [nextGenEls.extend(e.childNodes) for e in curGenEls] - nextGenEls = [n for n in nextGenEls if n.nodeName == p] - curGenEls = nextGenEls - return curGenEls.strip() - - @staticmethod - def makeDict(node): - elements = [ elem for elem in node.childNodes \ - if elem.nodeType == elem.ELEMENT_NODE ] - d = { } - for elem in elements: - d[elem.tagName.strip()] = RttHelper.val('text()', elem) - return d - -class Rtt(object): - Home = "/afs/cern.ch/atlas/project/RTT" - Nightlies = [ 'bugfix', 'val', 'mig0', 'dev' ] - Builds = [ '10', '11', '12', '13' ] - Platforms = [ 'slc3', 'slc4' ] - - RelNbrKey = "rel_nbr" - RelTypeKey = "rel_type" - PlatformKey = "platform" - - RelNbr = "%("+RelNbrKey+")s" - RelType = "%("+RelTypeKey+")s" - Platform = "%("+PlatformKey+")s" - - - # template to generate a path to results - path = os.path.join( Home, "Results", RelNbr, RelType, "build", - Platform, "offline" ) - -class RttDb(object): - """A naive repository of RTT test packages""" - - def __init__(self): - object.__init__(self) - - self.dbName = os.path.expanduser( - os.path.join( "~", ".perfrtt", "rttPkgs.db" ) - ) - - self.updatePkgDb() - - def updatePkgDb(self): - - if not os.path.exists( os.path.dirname(self.dbName) ): - os.mkdir( os.path.dirname(self.dbName) ) - return self.createPkgDb() - - if not os.path.exists( self.dbName ): - return self.createPkgDb() - - rootXml = os.path.join( Rtt.Home, 'Results', 'page1.xml' ) - - # compare last modification of our repository and the - # master RTT file from which that repository was built - if os.path.getmtime( self.dbName ) < os.path.getmtime( rootXml ): - return self.createPkgDb() - - return - - def createPkgDb(self): - if not os.path.exists( os.path.dirname(self.dbName) ): - os.mkdir( os.path.dirname(self.dbName) ) - - val = RttHelper.val - - # get list of releases, nightlies... 
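
The deleted RttHelper below walks minidom trees by hand; its val() helper relied on the bare Python 2 builtin reduce to concatenate text nodes. For reference, a py3-safe equivalent of that text-extraction step (a standalone sketch, not a proposed replacement for the removed module):

import xml.dom.minidom as Xml

def node_text(node):
    # Concatenate the TEXT_NODE children of a minidom node, as the
    # deleted RttHelper.getText did; ''.join replaces the py2
    # reduce(lambda x,y: x+y, texts, '') used in RttHelper.val.
    return "".join(child.data for child in node.childNodes
                   if child.nodeType == child.TEXT_NODE)

dom = Xml.parseString("<overview><releaseName>bugfix/rel_2</releaseName></overview>")
print(node_text(dom.getElementsByTagName("releaseName")[0]))  # bugfix/rel_2
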
- rootXml = open( os.path.join( Rtt.Home, 'Results', 'page1.xml' ), 'r' ) - import xml.dom.minidom as Xml - dom = Xml.parse(rootXml) - rootXml.close() - - releases = [] - for rel in dom.getElementsByTagName('overview'): - relDb = { - 'name' : val('releaseName/text()', rel).strip(), - 'type' : val('originalBranch/text()', rel).strip(), - 'nbr' : val('release/text()', rel).strip(), - 'results' : val('resReleaseDir/text()', rel).strip(), - 'platform': val('targetCMTCONFIG/text()',rel).strip(), - } - ## we want 'bugfix/rel_2' instead of 'rel_2/bugfix' - ## so it better meshes with the Athena AFS install naming scheme - if relDb['name'].count('/') > 0: - relDb['name'] = relDb['name'].split('/') - relDb['name'].reverse() - relDb['name'] = "/".join( relDb['name'] ) - - releases.append( relDb ) - - pkgs = set() - jobDb = {} - idDups = {} - releases.sort( lambda x,y: cmp(x['name'],y['name']) ) - for r in releases: - print ("%10s" % r['type'],"%10s" % r['nbr'],"\t\t",r['name'],) - print (r['platform']) -## if r['type'] != 'bugfix': -## continue - sumFileName = os.path.join(r['results'], 'RTTSummary.xml') - summary = Xml.parse( open(sumFileName, 'r') ) - for pkgNode in summary.getElementsByTagName('package'): - pkgName = val( 'packageName/text()', pkgNode ).strip() - pkgs.add( pkgName ) - for m in pkgNode.getElementsByTagName('minder'): - pkg = { - 'pkgName' : pkgName, - 'jobId' : int(val('jobID/text()', m).strip()), - 'idName' : val('identifiedName/text()', m).strip(), - 'results' : val('resultsPath/text()', m).strip(), - 'jobName' : val('jobName/text()', m).strip(), - 'docStr' : val('jobDocString/text()', m).strip(), - 'status' : val('status/text()', m).strip(), - } - res = pkg['results'] - res = res.replace( "/%s/" % r['nbr'], - "/"+Rtt.RelNbr+"/" ) - res = res.replace( "/%s/" % r['type'], - "/"+Rtt.RelType+"/" ) - res = res.replace( "/%s/" % r['platform'], - "/"+Rtt.Platform+"/" ) - pkg['path'] = res - jobId = pkg['jobId'] - if res.count(Rtt.RelType) <= 0: - continue - - if jobDb.has_key(jobId): - o = jobDb[jobId] - if o['idName'] != pkg['idName']: - if not idDups.has_key(jobId): - idDups[jobId] = [] - if pkg['results'] not in idDups[jobId]: - idDups[jobId] += [pkg['results']] - if o['results'] not in idDups[jobId]: - idDups[jobId] += [o['results']] - else: - jobDb[jobId] = pkg - - print ("pkgs:",len(pkgs)) - for id in idDups.keys(): - print (">>> warning...",id) - idDups[id].sort() - for r in idDups[id]: - print (" ",r) - print ("dups: %i/%i" % (len(idDups.keys()), len(jobDb.keys()))) - return - - pass # RttDb - - diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/SkelAnalyzer.py b/Control/PerformanceMonitoring/PerfMonAna/python/SkelAnalyzer.py deleted file mode 100755 index c993d53f6fd3e93506c9b02edbaccb31c89e37bc..0000000000000000000000000000000000000000 --- a/Control/PerformanceMonitoring/PerfMonAna/python/SkelAnalyzer.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# @file: SkelAnalyzer.py -# @purpose: a set of classes to analyze (Skel) data from a perfmon tuple -# @author: Sebastien Binet <binet@cern.ch> - -""" A set of classes to analyze (Skel) data from a perfmon tuple -""" -# -# -__author__ = 'Sebastien Binet' -__version__ = "$Revision: 1.3 $" -__doc__ = "A set of classes to analyze (Skel) data from a perfmon tuple." 
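
Rtt.py above and SkelAnalyzer.py here are removed outright rather than ported; both were still full of Python 2 idioms (cmp-based sorts, dict.has_key). Had they been kept, the mechanical translations applied elsewhere in this commit would have looked like the following sketch, with made-up data:

from operator import itemgetter

releases = [{'name': 'val/rel_3'}, {'name': 'bugfix/rel_2'}]
# py3 spelling of: releases.sort(lambda x, y: cmp(x['name'], y['name']))
releases.sort(key=itemgetter('name'))

jobDb, jobId = {}, 42
# py3 spelling of: jobDb.has_key(jobId)
print(jobId in jobDb)   # False
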
- -import logging -from PerfMonAna.PyRootLib import importRoot -from PerfMonAna.Data import Data -from PerfMonAna.Analyzer import Analyzer - -class SkelAnalyzer( Analyzer ): - - typeName = 'Skel' - - def __init__(self, name, fileName): - Analyzer.__init__(self, name, fileName, '<none>') - return - - def bookHistos(self): - ROOT = importRoot() - #Analyzer.__bookHistos(self) - - meta = self.meta.skel - - nJobOpts = meta.cfg().size()+1 - return - - def fillHistos(self): - - self.msg.debug("filling histograms...") - - self.msg.debug("filling histograms... [DONE]") - return - - pass # class SkelAnalyzer diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/SummaryCreator.py b/Control/PerformanceMonitoring/PerfMonAna/python/SummaryCreator.py index d794223364cf4ad5dd76c6b83c918bdbbb005ab4..c4f118992956a78580750a38c7702dbfa62055fe 100644 --- a/Control/PerformanceMonitoring/PerfMonAna/python/SummaryCreator.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/SummaryCreator.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @file: SummaryCreator.py # @purpose: a set of classes to create a summary from a perfmon tuple @@ -12,12 +12,11 @@ __author__ = 'Sebastien Binet' __version__ = "$Revision: 1.20 $" __doc__ = "a set of classes to create a summary from a perfmon tuple" -import logging,os +import logging import numpy import numpy.lib.polynomial import matplotlib.pyplot as plt -from .Analyzer import Analyzer, bookAvgHist,mon_project,project,make_canvas -from .PyRootLib import importRoot +from .Analyzer import bookAvgHist,project from .Constants import Units def array_mean(a): @@ -39,8 +38,10 @@ class SummaryCreator(object): } self._fitSlice = [] for i in fitSlice.split(":"): - try: self._fitSlice += [ int(i) ] - except ValueError: self._fitSlice += [ None ] + try: + self._fitSlice += [ int(i) ] + except ValueError: + self._fitSlice += [ None ] # text format of the summary self.txt = {} for i in ( 'ini','1st','evt','fin'): @@ -50,7 +51,7 @@ class SummaryCreator(object): 'slice' : [], 'comps' : [], } - + # maximal number of top-XXX consumers to display self.max = 20 @@ -62,9 +63,10 @@ class SummaryCreator(object): from .PyRootLib import importRoot ROOT = importRoot(batch=True) - from .PyRootLib import setupRootStyle; setupRootStyle() + from .PyRootLib import setupRootStyle + setupRootStyle() c = ROOT.TCanvas('c_default') - + for m in [ self.processIni, self.processFirstEvt, self.processEvt, @@ -85,7 +87,7 @@ class SummaryCreator(object): begSlice = 0, endSlice = 1 ) return - + def processFirstEvt(self, dataSetMgr, monCompMgr): ## get top-20 consumers @@ -96,40 +98,39 @@ class SummaryCreator(object): begSlice = 0, endSlice = 1 ) return - + def processEvt(self, dataSetMgr, monCompMgr): from .App import DataSetMgr from .PyRootLib import importRoot ROOT = importRoot(batch=True) - + ## RootFct = ROOT.TF1 ## dummyCanvas = ROOT.TCanvas("dummyFitCanvas") ## import PyRootLib as prl - + ## self.sum['evt']['histos'] = [] ## self.sum['evt']['fig'] = [] - + ## short-hand ms = Units.ms kb = Units.kb Mb = Units.Mb - Mb2Kb = 1000. 
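
SummaryCreator.__init__ above parses a colon-separated fitSlice string into start/stop integers, mapping non-integer tokens to None. A self-contained sketch of the same parsing, with a hypothetical helper name, showing how such a pair feeds a Python slice:

def parse_fit_slice(fit_slice):
    # Turn "10:90", ":50" or ":" into a slice object; non-integer
    # tokens become None, matching the try/except in __init__.
    parts = []
    for token in fit_slice.split(":"):
        try:
            parts.append(int(token))
        except ValueError:
            parts.append(None)
    return slice(*parts)

evts = list(range(100))
print(evts[parse_fit_slice("90:")])   # the last ten events
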
- msg = self.msg - + ## get top-20 consumers dsNames = DataSetMgr.names() - color = DataSetMgr.colorIter() _txt = self.txt['evt'] monComp = monCompMgr['PerfMonSlice'] - yMinCpu = []; yMaxCpu = [] - yMinIo = []; yMaxIo = [] + yMinCpu = [] + yMaxCpu = [] + yMinIo = [] + yMaxIo = [] for dsName in dsNames: data = monComp.data[dsName] histos = data['histos'] = {} - if not 'evt' in data: + if 'evt' not in data: continue data = data['evt'] if data is None: @@ -144,7 +145,7 @@ class SummaryCreator(object): dcpu_s= cpu_s[:,2] dcpu_r= cpu_r[:,2] dcpu_c= cpu_c[:,2] - + mem = data['mem'] vmem = mem['vmem'] dvmem = vmem[:,2] @@ -155,7 +156,7 @@ class SummaryCreator(object): nallocs = mem['nmall'][:,2] nfrees = mem['nfree'][:,2] - + yMinCpu.append(dcpu_c[self.minEvt:].min() * ms) yMaxCpu.append(dcpu_c[self.minEvt:].max() * ms) @@ -164,9 +165,6 @@ class SummaryCreator(object): yMinIo.append(io_c[self.minEvt:].min() * ms) yMaxIo.append(io_c[self.minEvt:].max() * ms) -## data['mem/vmem/d'] = data['mem/vmem/1'] - data['mem/vmem/0'] -## data['mem/rss/d' ] = data['mem/rss/1' ] - data['mem/rss/0' ] - ## fill-in some data for ASCII summary if dsName == '000': _txt['cpu']['slice'] += [ @@ -192,12 +190,12 @@ class SummaryCreator(object): nfrees[self.minEvt:].mean(), ), ] - + ## book ROOT histos nEntries = len(dataSetMgr[dsName].bins)-1 minEvt = dataSetMgr[dsName].bins[self.minEvt] maxEvt = dataSetMgr[dsName].bins[-1] - + hId = 'cpu_%s.%s' % (monComp.name, dsName) hName = 'cpu_%s' % dsName histos[hName] = ROOT.TH1F( @@ -242,7 +240,7 @@ class SummaryCreator(object): nEntries, minEvt, maxEvt ) pass - + yMinIo = min(yMinIo) yMaxIo = max(yMaxIo) yMinCpu = min(yMinCpu) @@ -250,11 +248,11 @@ class SummaryCreator(object): def markForLegend(p): setattr(p, '_markedForLegend', True) def isMarked(p): return hasattr(p, '_markedForLegend') - + memLeak = [] for dsName in dsNames: - if not 'evt' in monComp.data[dsName]: + if 'evt' not in monComp.data[dsName]: continue data = monComp.data[dsName] @@ -262,11 +260,11 @@ class SummaryCreator(object): cpu = data['evt']['cpu'] cpu_c = cpu['cpu'] dcpu_c= cpu_c[:,2] - + ## CPU bins = dataSetMgr[dsName].bins xbins= bins[self.minEvt:] - if not 'evt/cpu' in monComp.figs: + if 'evt/cpu' not in monComp.figs: monComp.figs['evt/cpu'] = plt.figure() monComp.figs['evt/cpu'].add_subplot(211) monComp.figs['evt/cpu'].add_subplot(212) @@ -285,7 +283,7 @@ class SummaryCreator(object): ax.set_ylim((ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)) markForLegend(pl[0]) - + h,b = numpy.histogram( dcpu_c[self.minEvt:] * ms, bins = 20, @@ -306,14 +304,14 @@ class SummaryCreator(object): cpuTime = dcpu_c[i] * ms h.Fill( float(bins[i]), cpuTime ) hAvg.Fill( cpuTime ) - + ## Mem mem = data['evt']['mem'] vmem = mem['vmem'] dvmem = vmem[:,2] dmall = mem['mall'][:,2] - - if not 'evt/mem' in monComp.figs: + + if 'evt/mem' not in monComp.figs: monComp.figs['evt/mem'] = plt.figure() monComp.figs['evt/mem'].add_subplot(311) monComp.figs['evt/mem'].add_subplot(312) @@ -368,7 +366,7 @@ class SummaryCreator(object): ax.get_ylim()[1]*1.1) ) ax.grid( True ) markForLegend( pl[0] ) - + ax = fig.axes[2] pl = ax.plot( xbins, dmall[self.minEvt:] * Mb, @@ -380,7 +378,7 @@ class SummaryCreator(object): ax.get_ylim()[1]*1.1) ) ax.grid( True ) markForLegend( pl[0] ) - + h = data['histos']['vmem_%s' % dsName] hAvg = bookAvgHist(h, mem['vmem'][:,1] * Mb) @@ -399,7 +397,7 @@ class SummaryCreator(object): hAvg.Fill( rss ) ## I/O - if not 'evt/io' in monComp.figs: + if 'evt/io' not in monComp.figs: monComp.figs['evt/io'] = plt.figure() 
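
Most rewrites in these hunks follow the same mechanical pattern: `not x in d` and `d.has_key(x)` become `x not in d`. One instance in processIo, a few hunks below, appears to have picked up the wrong name: the added line reads `if dsNames not in cppTypes[cppType]:` while the surrounding loop iterates `for dsName in dsNames:`. Since cppTypes[cppType] is a dict and dsNames is a list, the committed test would raise TypeError (unhashable type: 'list') the first time it runs. A runnable sketch of what was presumably intended, with shortened names:

cppTypes = {'McEventCollection': {}}
dsNames = ['000', '001']
for dsName in dsNames:
    # membership test on the loop variable, not on the whole list
    if dsName not in cppTypes['McEventCollection']:
        cppTypes['McEventCollection'][dsName] = {}
print(cppTypes)   # {'McEventCollection': {'000': {}, '001': {}}}
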
monComp.figs['evt/io'].add_subplot(211) monComp.figs['evt/io'].add_subplot(212) @@ -441,7 +439,7 @@ class SummaryCreator(object): cpuTime = io_c[i] * ms h.Fill( float(bins[i]), cpuTime ) hAvg.Fill( cpuTime ) - + pass # loop over data sets ## handle mem-leak text @@ -458,9 +456,10 @@ class SummaryCreator(object): transform = ax.transAxes ) for figName,fig in monComp.figs.items(): loc = 'best' - if figName == 'evt/mem': loc = 'lower right' + if figName == 'evt/mem': + loc = 'lower right' for ax in fig.axes: - objs = [ l for l in ax.lines if isMarked(l) ] + objs = [ z for z in ax.lines if isMarked(z) ] ax.legend( objs, DataSetMgr.labels(), #loc='lower right' #loc='best' @@ -476,7 +475,7 @@ class SummaryCreator(object): begSlice = self.minEvt, endSlice = None ) return - + def processFin(self, dataSetMgr, monCompMgr): self._top_consumers( @@ -488,28 +487,26 @@ class SummaryCreator(object): endSlice = None ) return - + def processIo(self, dataSetMgr, monCompMgr): - from .App import DataSetMgr ## short-hand ms = Units.ms - kb = Units.kb - Mb = Units.Mb dsNames = dataSetMgr.keys() dsNames.sort() monComps = [ ] - cppTypes = {}; + cppTypes = {} for monComp in monCompMgr.values(): if monComp.type != 'io': continue - + monCompKeys = monComp.data.keys() monCompKeys.sort() - if monCompKeys != dsNames: continue + if monCompKeys != dsNames: + continue monComps.append( monComp ) cppTypes[monComp.name.split("#")[0]] = {} @@ -527,36 +524,41 @@ class SummaryCreator(object): not k.startswith( "meta://outputPoolFiles/" ): continue n,i = k.split( "PoolFiles/" ) - if n.startswith( "meta://input" ): f = inputFiles - elif n.startswith( "meta://output"): f = outputFiles - else: continue + if n.startswith( "meta://input" ): + f = inputFiles + elif n.startswith( "meta://output"): + f = outputFiles + else: + continue try: f[dsName][int(i)]= d[k] - except KeyError: f[dsName] = {}; f[dsName][int(i)]= d[k] + except KeyError: + f[dsName] = {} + f[dsName][int(i)]= d[k] def _findFolders( files, pattern, dsName ): d = {} cppType = pattern[0] sgKey = pattern[1] - if files.has_key(dsName): + if dsName in files: for file in files[dsName].values(): -## print ":"*80,"-->file" for f in file['data']: n = f['name'] pat = n.startswith( cppType+"_" ) and \ n.endswith ( "_"+sgKey ) -# pat = n.endswith ( "_"+sgKey ) -# pat = n.count( sgKey ) > 0 if pat: size = f['diskSize'] / float( f['nEntries'] ) - try: d[n] += size - except KeyError: d[n] = size -## print "\t-%5s" % str(pat),n + try: + d[n] += size + except KeyError: + d[n] = size keys = d.keys() if len(keys) > 1: raise RuntimeError ("len(d) > 1: %r" % d) - if len(keys) == 0: return [] - else: return [ d.items()[0] ] + if len(keys) == 0: + return [] + else: + return [ d.items()[0] ] ## helper function def io_proj(data, varexp): @@ -570,12 +572,9 @@ class SummaryCreator(object): for monComp in monComps: monName = monComp.name cppType, sgKey = monComp.name.split("#") -## print "---- %-30s %-30s ----" % (cppType,sgKey) - #print "monComp:",monComp.name,cppType,sgKey - #tp_pattern = re.compile( r'%s_.*?%s' % ( cppType, sgKey ) ) tp_pattern = ( cppType, sgKey ) for dsName in dsNames: - if not cppTypes[cppType].has_key( dsName ): + if dsNames not in cppTypes[cppType]: cppTypes[cppType][dsName] = {} data = monComp.data[dsName]['evt'] @@ -586,9 +585,6 @@ class SummaryCreator(object): ior = ( ior, io_proj(data, varexp=monName+".r.user") * ms ) iorr= ( iorr, io_proj(data, varexp=monName+".rr.user") * ms ) iow = ( iow, io_proj(data, varexp=monName+".w.user") * ms ) -## print "\tio-r ",ior -## 
print "\tio-rr",iorr -## print "\tio-w ",iow if ior[0] > 0.: d = { 'size' : _findFolders( inputFiles, tp_pattern, dsName ), @@ -597,7 +593,8 @@ class SummaryCreator(object): } monComp.data[dsName]['io/in'] = d if len(d['size']) > 0: - try: io_in = cppTypes[cppType][dsName]['io/in'] + try: + io_in = cppTypes[cppType][dsName]['io/in'] except KeyError: cppTypes[cppType][dsName]['io/in'] = { 'size' : [], 'r' : [], 'rr' : [] @@ -613,7 +610,8 @@ class SummaryCreator(object): } monComp.data[dsName]['io/out']= d if len(d['size']) > 0: - try: io_out = cppTypes[cppType][dsName]['io/out'] + try: + io_out = cppTypes[cppType][dsName]['io/out'] except KeyError: cppTypes[cppType][dsName]['io/out'] = { 'size' : [], 'w' : [] @@ -621,14 +619,8 @@ class SummaryCreator(object): io_out = cppTypes[cppType][dsName]['io/out'] io_out['size'] += [ d['size'][0][1] ] io_out['w' ] += [ iow ] -## try: -## print "in: ",monComp.data['000']['io/in'] -## except KeyError: print "<none>";pass -## try: -## print "out:",monComp.data['000']['io/out'] -## except KeyError: print "<none>";pass pass - + self.sum['io']['fig'] = [] def _createFig( ioKey, speedKey, cppTypes, dsNames, figTitle ): @@ -640,7 +632,7 @@ class SummaryCreator(object): from .App import DataSetMgr color = DataSetMgr.colorIter() - + # to hold 'cpu' for each dataSet axes = [ [], [], [] ] @@ -650,7 +642,7 @@ class SummaryCreator(object): names[dsName] = [] for cppType in cppTypes.keys(): store = cppTypes[cppType][dsName] - if not store.has_key(ioKey): + if ioKey not in store: continue store = store[ioKey] if len(store['size']) <= 0: @@ -691,8 +683,8 @@ class SummaryCreator(object): } ) table = numpy.array( table, dtype = descr ) nData = len(table) -## print speedKey,nData,names - if nData == 0: return None + if nData == 0: + return None table.sort( order=('size',) ) @@ -722,7 +714,7 @@ class SummaryCreator(object): ax.set_yticks( pos + 0.5 ) ax.set_yticklabels( labels, fontsize =6, horizontalalignment = 'right' ) - + # display 'user' part only data = table[speedKey+'/user'] pos = numpy.arange(nData) @@ -746,7 +738,7 @@ class SummaryCreator(object): color = 'g', label = dsName, lw = lw ) - + ## -- Speed ax = fig.axes[1] @@ -767,7 +759,7 @@ class SummaryCreator(object): ax.set_yticks( pos + 0.5 ) ax.set_yticklabels( labels, fontsize =6, horizontalalignment = 'right' ) - + # display 'user' part only data = table[speedKey+'/user'] data = table['speed'] @@ -800,7 +792,7 @@ class SummaryCreator(object): ax.set_yticks( pos + 0.5 ) ax.set_yticklabels( labels, fontsize =6, horizontalalignment = 'right' ) - + # display 'user' part only data = table[speedKey+'/userFreq'] pos = numpy.arange(nData) @@ -820,18 +812,16 @@ class SummaryCreator(object): fig = _createFig( 'io/in', 'r', cppTypes, dsNames, figTitle = '[P->T] transformations' ) - if fig: self.sum['io']['fig'] += [ fig ] + if fig: + self.sum['io']['fig'] += [ fig ] -## fig = _createFig( 'io/in', 'rr', cppTypes, dsNames, -## figTitle = '[P->T] transformations (ROOT part)' ) -## if fig: self.sum['io']['fig'] += [ fig ] - fig = _createFig( 'io/out', 'w', cppTypes, dsNames, figTitle = '[T->P] transformations' ) - if fig: self.sum['io']['fig'] += [ fig ] + if fig: + self.sum['io']['fig'] += [ fig ] return - + def _top_consumers(self, dataSetMgr, monCompMgr, compTypes, title, storeName, @@ -845,16 +835,16 @@ class SummaryCreator(object): kb = Units.kb Mb = Units.Mb msg = self.msg - + dsNames = list(dataSetMgr.keys()) dsNames.sort() monComps = [ ] for monComp in monCompMgr.values(): - if not monComp.type in compTypes: + if 
monComp.type not in compTypes: continue - + monCompKeys = list(monComp.data.keys()) monCompKeys.sort() if monCompKeys != dsNames: @@ -863,9 +853,9 @@ class SummaryCreator(object): if monComp.name in ('AthMasterSeq', 'AthAlgSeq',): continue monComps.append(monComp) - + if len(monComps) == 0: - msg.debug("Could not find any monitored component for" + msg.debug("Could not find any monitored component for" " _top_consumers_ analysis !") return @@ -880,7 +870,7 @@ class SummaryCreator(object): # to hold 'cpu' for each dataSet axes = [ [], [], [], [] ] - + color = DataSetMgr.colorIter() timings = {} @@ -892,16 +882,10 @@ class SummaryCreator(object): timings[dsName] = [] for monComp in _monComps: - if not storeName in monComp.data[dsName]: + if storeName not in monComp.data[dsName]: continue - usrKey = 'cpu/user' - sysKey = 'cpu/sys' - realKey= 'cpu/real' - vm0Key = 'mem/vmem/0' - vm1Key = 'mem/vmem/1' - malKey = 'mem/malloc/d' - + store = monComp.data[dsName][storeName] if store is None: ## if storeName == 'evt': @@ -917,13 +901,13 @@ class SummaryCreator(object): cpu_s = cpu['sys'][:,2] cpu_c = cpu['cpu'][:,2] cpu_r = cpu['real'][:,2] - + mem = store['mem'] vmem= mem['vmem'] mall= mem['mall'] dvmem = vmem[:,2] dmall = mall[:,2] - + monComps.append(monComp) timings[dsName].append(monComp.name) table.append( @@ -952,7 +936,7 @@ class SummaryCreator(object): table[-1] = tuple(new_value) msg.warn(" +%s", table[-1]) pass - + pass # loop over components descr = numpy.dtype( { @@ -965,7 +949,7 @@ class SummaryCreator(object): } ) try: table = numpy.array( table, dtype = descr ) - + nData = min( self.max, len([ m for m in monComps if m.type in compTypes ]) @@ -977,7 +961,7 @@ class SummaryCreator(object): title, storeName, sliceName, begSlice, endSlice) nData = 0 continue - + if nData == 0: _warn = msg.warning _warn("in top_consumers: no data to plot for [%s]!", title) @@ -985,7 +969,7 @@ class SummaryCreator(object): _warn("dsname=[%s] storename=[%s]", dsName, storeName) _warn("no component found with type %s", compTypes) continue - + # linewidth for horizontal bars lw = 0.01 names = timings[dsName] @@ -1013,7 +997,7 @@ class SummaryCreator(object): ax.set_yticklabels( labels, fontsize = 6, horizontalalignment = 'right' ) - + # display 'user' part only data = table['cpu/user'][-nData:] pos = numpy.arange(nData) @@ -1036,9 +1020,10 @@ class SummaryCreator(object): _c += [ "[cpu/sys] %10.3f %10.3f (ms) | %s" % _i ] - else: self.msg.error( "%s contains weird data !!",_i[-1] ) + else: + self.msg.error( "%s contains weird data !!",_i[-1] ) _c.reverse() - + ## real-time table.sort( order=('cpu/real',) ) labels = [ names[i] for i in table['name'][-nData:] ] @@ -1092,7 +1077,8 @@ class SummaryCreator(object): [ names[i] for i in table['name'] ] ): if not numpy.isnan(_i[0]): _c += [ "dVmem|dMalloc %10.3f %10.3f kB | %s" % _i ] - else: self.msg.error( "%s contains weird data !!",_i[-1] ) + else: + self.msg.error( "%s contains weird data !!",_i[-1] ) _c.reverse() ## malloc @@ -1129,13 +1115,14 @@ class SummaryCreator(object): [ names[i] for i in table['name'] ] ): if not numpy.isnan(_i[0]): _c += [ "alloc|free %10i %10i | %s" % _i ] - else: self.msg.error( "%s contains weird data !!",_i[-1] ) + else: + self.msg.error( "%s contains weird data !!",_i[-1] ) _c.reverse() pass # loop over data sets if nData != 0: - + for ix, ax in zip(axes, fig.axes): ax.legend(ix[::nData], DataSetMgr.labels(), loc='lower right') else: @@ -1144,7 +1131,7 @@ class SummaryCreator(object): ## dsName, monComp.name, storeName) ## return 
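
_top_consumers collects one row per component into a numpy structured array precisely so the same table can be re-sorted in place by any column (cpu/user, cpu/real, vmem, malloc) before slicing off the top entries. A minimal standalone illustration of that pattern, with made-up numbers:

import numpy

descr = numpy.dtype({'names':   ['name', 'cpu/user', 'mem/vmem/d'],
                     'formats': ['int64', 'float64', 'float64']})
table = numpy.array([(0, 12.5, 1024.0),
                     (1,  3.2,  512.0),
                     (2, 40.1,  128.0)], dtype=descr)

table.sort(order=('cpu/user',))   # in-place, ascending
top = table[-2:]                  # the heaviest consumers sit at the end
print(top['cpu/user'])            # [12.5 40.1]
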
pass - + m = monCompMgr['PerfMonSlice'] d = m.data['000'][storeName] cpu = "%-20s [%10.3f ms %10.3f ms\t real= %10.3f ms ]" % ( @@ -1157,7 +1144,7 @@ class SummaryCreator(object): dvmem = d['mem']['vmem'][:,2] drss = d['mem']['rss'][:,2] dmall = d['mem']['mall'][:,2] - + mem = "%-20s [%10.3f MB -> %10.3f MB\t delta= %10.3f kB ]\n" \ "%-20s [%10.3f MB -> %10.3f MB\t delta= %10.3f kB ]\n" \ "%-20s [%10.3f MB -> %10.3f MB\t delta= %10.3f kB ]\n" \ @@ -1187,5 +1174,5 @@ class SummaryCreator(object): _txt['cpu']['slice'] += [ cpu ] _txt['mem']['slice'] += [ mem ] - + return diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/UserFct.py b/Control/PerformanceMonitoring/PerfMonAna/python/UserFct.py index b2cdeb10db1265c88e4801d6908f164c2fd68f1f..1bb4bfe2d3adab066c5f85c6063c9aa51e188bd5 100644 --- a/Control/PerformanceMonitoring/PerfMonAna/python/UserFct.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/UserFct.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @file: UserFct.py # @purpose: classes to allow users to specialize (and load) filtering functions @@ -21,7 +21,7 @@ class PluginFct: class FilterFct: __sharedState = { ## default selector: selects everything - PluginFct.Name : lambda x: x != None + PluginFct.Name : lambda x: x is not None } def __init__(self): @@ -32,7 +32,7 @@ class FilterFct: def setFilter(self, fct): self.__sharedState[PluginFct.Name] = fct - + def loadFilterFct( uri ): """Load and inspect a 'URI'-like resource. If this URI looks like a file, then it will be loaded and inspected for any function whose name is @@ -61,6 +61,6 @@ def loadFilterFct( uri ): def userFct (m): return eval (uri) filterFct.setFilter( userFct ) - + return filterFct - + diff --git a/Control/PerformanceMonitoring/PerfMonAna/python/root_pickle.py b/Control/PerformanceMonitoring/PerfMonAna/python/root_pickle.py index 5444844e0e107c3d1cef351f6b2b52bc5480d2af..5cf9e3c37c59cd7b0df54146cab7f9fe39c1e2f9 100755 --- a/Control/PerformanceMonitoring/PerfMonAna/python/root_pickle.py +++ b/Control/PerformanceMonitoring/PerfMonAna/python/root_pickle.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # # $Id: root_pickle.py,v 1.1 2007-07-22 01:51:43 binet Exp $ @@ -156,7 +156,7 @@ Root objects. """Clears the pickler's internal memo.""" self.__pickle.memo.clear() return - + def _persistent_id (self, o): @@ -169,7 +169,7 @@ Root objects. pid = "%s;%d" % (k.GetName(), k.GetCycle()) return pid return - + _compat_hooks = None @@ -182,11 +182,11 @@ class Root_Proxy: self.__o = None return def __getattr__ (self, a): - if self.__o == None: + if self.__o is None: self.__o = self.__f.Get (self.__pid) return getattr (self.__o, a) def __obj (self): - if self.__o == None: + if self.__o is None: self.__o = self.__f.Get (self.__pid) return self.__o class Unpickler: @@ -211,7 +211,8 @@ FILE should be a Root TFile. def load (self): """Read a pickled object representation from the open file.""" o = None - if _compat_hooks: save = _compat_hooks[0]() + if _compat_hooks: + save = _compat_hooks[0]() try: self.__n += 1 s = self.__file.Get ('_pickle;%d' % self.__n) @@ -219,9 +220,10 @@ FILE should be a Root TFile. 
o = self.__unpickle.load() self.__io.reopen () finally: - if _compat_hooks: save = _compat_hooks[1](save) + if _compat_hooks: + save = _compat_hooks[1](save) return o - + def _persistent_load (self, pid): if self.__use_proxy: o = Root_Proxy (self.__file, pid) @@ -253,7 +255,7 @@ FILE should be a Root TFile. setattr (mod, name, Dummy) return Dummy return - + def compat_hooks (hooks): @@ -262,7 +264,7 @@ If this is set, then hooks[0] is called before loading, and hooks[1] is called after loading. hooks[1] is called with the return value of hooks[0] as an argument. This is useful for backwards compatibility in some situations.""" - _compat_hooks = hooks + _compat_hooks = hooks # noqa: F841 return diff --git a/Control/PerformanceMonitoring/PerfMonEvent/python/__init__.py b/Control/PerformanceMonitoring/PerfMonEvent/python/__init__.py deleted file mode 100755 index 0f21b42a6739d2259c4edacf72a26e59c647ace3..0000000000000000000000000000000000000000 --- a/Control/PerformanceMonitoring/PerfMonEvent/python/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -## hook for the PerfMonEvent py-module - -def setup(): - """bring in the extension module""" - import os - __path__.append( os.path.join( __path__[0], os.environ[ 'CMTCONFIG' ] ) ) - import DataStore - return -setup() -del setup diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/CMakeLists.txt b/Control/PerformanceMonitoring/PerfMonGPerfTools/CMakeLists.txt index 4a1e58b1c1f8b55d3df509a3c9c3e1e1cfd92615..cc532c76b37137bda55a2b4216b5e444f6c51dc1 100644 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/CMakeLists.txt +++ b/Control/PerformanceMonitoring/PerfMonGPerfTools/CMakeLists.txt @@ -5,6 +5,7 @@ atlas_subdir( PerfMonGPerfTools ) # External dependencies: find_package( gperftools COMPONENTS profiler ) +find_package( pprof ) # Component(s) in the package: atlas_add_component( PerfMonGPerfTools @@ -14,6 +15,5 @@ atlas_add_component( PerfMonGPerfTools LINK_LIBRARIES ${GPERFTOOLS_LIBRARIES} GaudiKernel AthenaBaseComps AthenaKernel ) # Install files from the package: -atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) -atlas_install_joboptions( share/*.py ) +atlas_install_joboptions( share/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) atlas_install_scripts( scripts/*.py scripts/gathena ) diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/python/Utils.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/python/Utils.py deleted file mode 100644 index 6d8eb92c83fb9047d12a7b37dc32f2832b62761e..0000000000000000000000000000000000000000 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/python/Utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - -# $Id: Utils.py 491963 2012-03-30 14:53:13Z krasznaa $ -# -# Python utility functions used in multiple scripts. -# - -## -# @short Function finding AtlasGPerfTools/cmt/setup.sh -# -# The script needs to source the setup script of the GPerfTools glue package -# internally. This function finds the location of the script in the filesystem -# that should be used. 
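
Back in root_pickle.py, the `# noqa: F841` added to `_compat_hooks = hooks` silences flake8, but the warning points at a real quirk: inside compat_hooks() that assignment binds a local name, so the module-level _compat_hooks consulted by Unpickler.load() is never updated. If the documented behaviour is wanted, the conventional fix is a global declaration; a sketch, not what the patch does:

_compat_hooks = None

def compat_hooks(hooks):
    # Rebind the module-level name; without `global` the assignment
    # only creates a local that is never read (which is what F841 saw).
    global _compat_hooks
    _compat_hooks = hooks
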
-# -# @returns The full path of the setup script to be used -# -def locateSetupScript(): - - # Set up a logger object: - from AthenaCommon.Logging import logging - logger = logging.getLogger( "locateSetupScript" ) - - # Locate the AtlasGPerfTools package using CMT: - import subprocess - process = subprocess.Popen( [ "cmt", "show", "versions", - "External/AtlasGPerfTools" ], - stdout = subprocess.PIPE, - stderr = subprocess.PIPE ) - ( result, errors ) = process.communicate() - - # Check that the CMT command was successful: - if len( errors ): - logger.error( "Couldn't execute cmt show versions command" ) - return "" - - # Select the first result: - first_version = result.split( '\n' )[ 0 ] - logger.verbose( "Preferred AtlasGPerfTools version: " + first_version ) - - # The line should look like this: - # [package name] [package version] [path] - package_info = first_version.split( ' ' ) - if len( package_info ) != 3: - logger.error( "Couldn't interpret: " + first_version ) - return "" - - # Now, construct the path: - path = package_info[ 2 ] + "/" + package_info[ 0 ] + "/cmt/setup.sh" - logger.debug( "Setup script found under: " + path ) - - return path diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/scripts/aprof.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/scripts/aprof.py index eb11a59063afd67d28b83975d0d0d72ba1e0a451..6812f1671367745b560bdb8124d755774599a0c8 100755 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/scripts/aprof.py +++ b/Control/PerformanceMonitoring/PerfMonGPerfTools/scripts/aprof.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # # $Id: aprof.py 783179 2016-11-09 11:13:54Z limosani $ # @@ -77,19 +77,9 @@ def makePdf( input_file, output_file ): from AthenaCommon.Logging import logging logger = logging.getLogger( "makePdf" ) - import os - # CMAKE - if os.environ.get( 'CMTPATH', '' ) == '': - commandprefix = "" - else: - # CMT - # Locate the setup script: - from PerfMonGPerfTools.Utils import locateSetupScript - script = locateSetupScript() - # Construct and run the command: import os - command = "source " + script + " && pprof --pdf --nodecount=200 --nodefraction=0.001 " \ + command = "pprof --pdf --nodecount=200 --nodefraction=0.001 " \ "--edgefraction=0.0002 `which python` " + input_file + " > " + output_file logger.info( "Running command: " + command ) return os.system( command ) @@ -109,13 +99,9 @@ def makeCallgrind( input_file, output_file ): from AthenaCommon.Logging import logging logger = logging.getLogger( "makeCallgrind" ) - # Locate the setup script: - from PerfMonGPerfTools.Utils import locateSetupScript - script = locateSetupScript() - # Construct and run the command: import os - command = "source " + script + " && pprof --callgrind `which python` " + \ + command = "pprof --callgrind `which python` " + \ input_file + " > " + output_file logger.info( "Running command: " + command ) return os.system( command ) diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisableCoreDumpSvc_postInclude.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisableCoreDumpSvc_postInclude.py index 4c0832c32feb7ea3827d41f727aa76a4b390d5c2..b0e325dd0a2952169af48186388dbadbf99f7c02 100644 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisableCoreDumpSvc_postInclude.py +++ 
b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisableCoreDumpSvc_postInclude.py @@ -1,3 +1,5 @@ +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + # This posInclude fragment is used when setting up GPT profiling of an Athena job # to turn off CoreDumpSvc. CoreDumpSvc hooks into certain signal events, which # seems to interfere with GPerfTools and causes random crashes diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisablePerfMon_jobOFragment.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisablePerfMon_jobOFragment.py index a43ac3849c12aa67d44bbb258f45960b80096602..d9a823f8806517a088258054da04a9b75f17ee7f 100644 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisablePerfMon_jobOFragment.py +++ b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/DisablePerfMon_jobOFragment.py @@ -1,3 +1,5 @@ +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + # $Id: DisablePerfMon_jobOFragment.py 631453 2014-11-27 09:26:40Z will $ # # This jobO fragment is used when setting up the profiling of an Athena job @@ -6,7 +8,7 @@ # # It should only be loaded once: -include.block( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) +include.block( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) # noqa: F821 # Disable PerfMon as much as we can: from PerfMonComps.PerfMonFlags import jobproperties as pmon_properties diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileEventLoop_preInclude.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileEventLoop_preInclude.py index 8d4a39effb4d42bdcfc620327c6ec106ecbf30e5..29718f612c0736a60144ee7ed30205cfa04977fc 100644 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileEventLoop_preInclude.py +++ b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileEventLoop_preInclude.py @@ -1,3 +1,5 @@ +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + # $Id: ProfileEventLoop_preInclude.py 496380 2012-04-18 12:28:09Z ritsch $ # # This jobO fragment can be pre-included in Reco_trf.py jobs to profile @@ -5,7 +7,7 @@ # # First off, let's disable PerfMon. It doesn't mix well with GPT. 
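
The `# noqa: F821` markers added to these share/ fragments, above and below, deserve a word: jobOptions files are not imported but exec'd by the Athena bootstrap with names such as include and theApp already injected into the namespace, so the flake8 check now run at install time flags them as undefined. A rough sketch of the mechanism, with illustrative names only (not the actual Athena bootstrap code):

def run_fragment(path, include, theApp):
    # Execute a jobOptions fragment with the driver-provided names
    # pre-seeded into its namespace, which static analysis cannot see.
    ns = {'include': include, 'theApp': theApp}
    with open(path) as f:
        exec(compile(f.read(), path, 'exec'), ns)
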
-include( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) +include( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) # noqa: F821 # Set up the profiler service: from AthenaCommon.AppMgr import ServiceMgr @@ -18,7 +20,7 @@ ServiceMgr.ProfilerService.InitEvent = 10 ServiceMgr.ProfilerService.ProfileFileName = "gpt-execute.profile" # Set up the profiler service as the first service to be created: -theApp.CreateSvc.insert( 0, "GPT::ProfilerService/ProfilerService" ) +theApp.CreateSvc.insert( 0, "GPT::ProfilerService/ProfilerService" ) # noqa: F821 # Print a message with what happened: from AthenaCommon.Logging import logging diff --git a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileJob_preInclude.py b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileJob_preInclude.py index a149ea324a98998fd40ba1494e189e0a9f1ad0ef..507e2a5601c7c48dd4945c87ae8bde3a38b3c798 100644 --- a/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileJob_preInclude.py +++ b/Control/PerformanceMonitoring/PerfMonGPerfTools/share/ProfileJob_preInclude.py @@ -1,3 +1,5 @@ +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + # $Id: ProfileJob_preInclude.py 496380 2012-04-18 12:28:09Z ritsch $ # # This job fragment can be pre-included in Reco_trf.py jobs to profile @@ -11,7 +13,7 @@ # # First off, let's disable PerfMon. It doesn't mix well with GPT. -include( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) +include( "PerfMonGPerfTools/DisablePerfMon_jobOFragment.py" ) # noqa: F821 # Set up the profiler service: from AthenaCommon.AppMgr import ServiceMgr @@ -23,7 +25,7 @@ ServiceMgr.ProfilerService.InitEvent = -1 ServiceMgr.ProfilerService.ProfileFileName = "gpt-fulljob.profile" # Set up the profiler service as the first service to be created: -theApp.CreateSvc.insert( 0, "GPT::ProfilerService/ProfilerService" ) +theApp.CreateSvc.insert( 0, "GPT::ProfilerService/ProfilerService" ) # noqa: F821 # Print a message with what happened: from AthenaCommon.Logging import logging diff --git a/Control/PerformanceMonitoring/PerfMonVTune/CMakeLists.txt b/Control/PerformanceMonitoring/PerfMonVTune/CMakeLists.txt index 51ae73f5f861936e0c7e2a7917ea2067ad9f859b..dd30bd6d2996d5527d8cedca28aa4c73f0d4d563 100644 --- a/Control/PerformanceMonitoring/PerfMonVTune/CMakeLists.txt +++ b/Control/PerformanceMonitoring/PerfMonVTune/CMakeLists.txt @@ -5,7 +5,7 @@ atlas_subdir( PerfMonVTune ) #### # VTune hack for the time-being -find_program( VTUNE_EXECUTABLE amplxe-cl ) +find_program( VTUNE_EXECUTABLE vtune ) get_filename_component( VTUNE_DIR ${VTUNE_EXECUTABLE} PATH ) set( ITT_PREFIX ${VTUNE_DIR}/.. ) @@ -13,6 +13,14 @@ find_path( ITT_INCLUDE_DIR NAMES ittnotify.h HINTS ${ITT_PREFIX}/include ) find_library( ITT_LIBRARY NAMES ittnotify HINTS ${ITT_PREFIX}/lib64 ) include_directories(${ITT_INCLUDE_DIR}) + +if ( NOT ITT_LIBRARY OR NOT ITT_INCLUDE_DIR ) + message( FATAL_ERROR + "\nYou must have VTune setup properly for compiling PerfMonVTune.\n" + "From within CERN this can be accomplished by doing:\n" + "source /cvmfs/projects.cern.ch/intelsw/psxe/linux/all-setup.sh\n" + "*BEFORE* setting up Athena." 
) +endif() #### # Component(s) in the package: @@ -22,6 +30,6 @@ atlas_add_component( PerfMonVTune LINK_LIBRARIES GaudiKernel AthenaBaseComps AthenaKernel ${ITT_LIBRARY} ${CMAKE_DL_LIBS} ) # Install files from the package: -atlas_install_python_modules( python/*.py ) -atlas_install_joboptions( share/*.py ) -atlas_install_scripts( scripts/*.py ) +atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) +atlas_install_joboptions( share/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) +atlas_install_scripts( scripts/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} ) diff --git a/Control/PerformanceMonitoring/PerfMonVTune/scripts/vtune_athena.py b/Control/PerformanceMonitoring/PerfMonVTune/scripts/vtune_athena.py index 673ebba0b9e564aacc78f0115e49e96a46454b21..8306d0c464861f082f1907efa4e88bc77b91b09c 100755 --- a/Control/PerformanceMonitoring/PerfMonVTune/scripts/vtune_athena.py +++ b/Control/PerformanceMonitoring/PerfMonVTune/scripts/vtune_athena.py @@ -1,14 +1,10 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 -# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration -import glob import logging -import multiprocessing -import os import subprocess import sys -import uuid # Setting logging options fmt = '%(asctime)s :: %(levelname)-8s :: %(message)s' @@ -25,32 +21,32 @@ console.setFormatter(formatter) logger.addHandler(console) #### -## Check Athena Setup +## Check Athena Setup #### def checkAthenaSetup(): try: - a = subprocess.check_output(['athena','--version']) + a = subprocess.check_output(['athena','--version']) logger.debug('Athena version information \n %s',a) - except: + except Exception: logger.fatal('Athena is not setup!') - sys.exit(-1) + sys.exit(-1) #### ## Check VTune Setup #### def checkVTuneSetup(): try: - a = subprocess.check_output(['amplxe-cl','--version']) + a = subprocess.check_output(['vtune','--version']) logger.debug('VTune version information \n %s',a) - except: + except Exception: logger.fatal('VTune is not setup!') - sys.exit(-1) + sys.exit(-1) #### -## AutoGen a jobOptions fragment +## AutoGen a jobOptions fragment #### def generateJOFragment(fileName,firstEvent,lastEvent): - logger.info('Creating jOptions fragment %s', fileName) + logger.info('Creating jOptions fragment %s', fileName) with open('{}'.format(fileName),'w') as f: f.write('# Auto generated jobOptions fragment to setup Athena VTune profiler') f.write('\ninclude(\'PerfMonVTune/VTuneProfileEventLoop_preInclude.py\')') @@ -122,11 +118,11 @@ def main(): checkAthenaSetup() checkVTuneSetup() - # Perpare the JO fragment + # Perpare the JO fragment joFragment = 'PerfMonVTune_autoSetup.py' - generateJOFragment(joFragment, options.start, options.stop) + generateJOFragment(joFragment, options.start, options.stop) - # Prepare the transformation command to execute + # Prepare the transformation command to execute if not options.tf: logger.fatal('The transformation command is empty, quitting...') sys.exit(-1) @@ -139,7 +135,7 @@ def main(): args.extend(['--preInclude',joFragment]) # Run the command - cmd = ( 'amplxe-cl' + + cmd = ( 'vtune' + ' -collect ' + options.collect + ' -strategy ' + options.strategy + ' -start-paused -- ' ) diff --git a/Control/PerformanceMonitoring/PerfMonVTune/share/VTuneProfileEventLoop_preInclude.py b/Control/PerformanceMonitoring/PerfMonVTune/share/VTuneProfileEventLoop_preInclude.py index 4240d9f37e01659bc1113164fa96764073f9a9d7..88d01d7a86a113218f92c88cb86ce75f7593ec37 100644 --- 
a/Control/PerformanceMonitoring/PerfMonVTune/share/VTuneProfileEventLoop_preInclude.py +++ b/Control/PerformanceMonitoring/PerfMonVTune/share/VTuneProfileEventLoop_preInclude.py @@ -1,3 +1,5 @@ +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + # Set up the profiler service: from AthenaCommon.AppMgr import ServiceMgr from PerfMonVTune.PerfMonVTuneConf import VTuneProfilerService @@ -6,7 +8,7 @@ ServiceMgr += VTuneProfilerService("VTuneProfilerService") ServiceMgr.VTuneProfilerService.ResumeEvent = 10 # Set up the profiler service as the first service to be created: -theApp.CreateSvc.insert( 0, "VTuneProfilerService/VTuneProfilerService" ) +theApp.CreateSvc.insert( 0, "VTuneProfilerService/VTuneProfilerService" ) # noqa: F821 # Print a message with what happened: from AthenaCommon.Logging import logging diff --git a/Generators/AtlasHepMC/AtlasHepMC/GenParticle.h b/Generators/AtlasHepMC/AtlasHepMC/GenParticle.h index 54d6e42689467f790ea23339684518348d20fa7b..ca59ea1d9ec41f17fa6a062645ef1ef4c4b1f2f4 100644 --- a/Generators/AtlasHepMC/AtlasHepMC/GenParticle.h +++ b/Generators/AtlasHepMC/AtlasHepMC/GenParticle.h @@ -40,6 +40,7 @@ using HepMC3::GenParticle; } #else #include "HepMC/GenParticle.h" +#include <memory> namespace HepMC { typedef GenParticle* GenParticlePtr; typedef const GenParticle* ConstGenParticlePtr; @@ -48,8 +49,9 @@ inline GenParticlePtr newGenParticlePtr(const HepMC::FourVector &mom = HepMC::Fo } inline int barcode(GenParticle p) { return p.barcode(); } template <class T> inline int barcode(T p) { return p->barcode(); } -template <class T> bool suggest_barcode(T p, int i) {return p->suggest_barcode(i);} -inline bool suggest_barcode(GenParticle p, int i) {return p.suggest_barcode(i);} +template <class T> bool suggest_barcode(T& p, int i) {return p.suggest_barcode(i);} +//Smart pointers should not be used with HepMC2. But it happens. +template <> inline bool suggest_barcode<std::unique_ptr<HepMC::GenParticle> >(std::unique_ptr<HepMC::GenParticle>& p, int i) {return p->suggest_barcode(i);} template <class T> bool suggest_barcode(T* p, int i) {return p->suggest_barcode(i);} namespace Print { inline void line(std::ostream& os,const GenParticle& p) {p.print(os);} diff --git a/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx b/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx index 29972fe36750267a5ee40265e7a1494cc4a226d4..63764b8d0e072dc3b919d73619456d7f991774ce 100644 --- a/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx +++ b/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx @@ -74,7 +74,7 @@ std::vector<HepMC::GenEvent> xAODtoHepMCTool :: getHepMCEvents(const xAOD::Truth // Insert into McEventCollection mcEventCollection.push_back(hepmcEvent); if( doPrint ) ATH_MSG_DEBUG("XXX Printing HepMC Event"); - if( doPrint ) hepmcEvent.print(); + if( doPrint ) HepMC::Print::line(std::cout,hepmcEvent); // Quit if signal only if( m_signalOnly ) break; } @@ -93,7 +93,7 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt, // PARTICLES AND VERTICES // Map of existing vertices - needed for the tree linking - std::map<const xAOD::TruthVertex*,HepMC::GenVertex*> vertexMap; + std::map<const xAOD::TruthVertex*,HepMC::GenVertexPtr> vertexMap; // Loop over all of the particles in the event, call particle builder // Call suggest_barcode only after insertion! 
diff --git a/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx b/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx
index 29972fe36750267a5ee40265e7a1494cc4a226d4..63764b8d0e072dc3b919d73619456d7f991774ce 100644
--- a/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx
+++ b/Generators/TruthConverters/Root/xAODtoHepMCTool.cxx
@@ -74,7 +74,7 @@ std::vector<HepMC::GenEvent> xAODtoHepMCTool :: getHepMCEvents(const xAOD::Truth
     // Insert into McEventCollection
     mcEventCollection.push_back(hepmcEvent);
     if( doPrint ) ATH_MSG_DEBUG("XXX Printing HepMC Event");
-    if( doPrint ) hepmcEvent.print();
+    if( doPrint ) HepMC::Print::line(std::cout,hepmcEvent);
     // Quit if signal only
     if( m_signalOnly ) break;
   }
@@ -93,7 +93,7 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt,
   // PARTICLES AND VERTICES
   // Map of existing vertices - needed for the tree linking
-  std::map<const xAOD::TruthVertex*,HepMC::GenVertex*> vertexMap;
+  std::map<const xAOD::TruthVertex*,HepMC::GenVertexPtr> vertexMap;
   // Loop over all of the particles in the event, call particle builder
   // Call suggest_barcode only after insertion!
@@ -117,7 +117,11 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt,
     // Create GenParticle
     //presumably the GenEvent takes ownership of this, but creating a unique_ptr here as that will only happen if there's an associated vertex
+#ifdef HEPMC3
+    auto hepmcParticle=createHepMCParticle(xPart) ;
+#else
     std::unique_ptr<HepMC::GenParticle> hepmcParticle( createHepMCParticle(xPart) );
+#endif
     int bcpart = xPart->barcode();

     // status 10902 should be treated just as status 2
@@ -129,18 +133,22 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt,
       // skip production vertices with barcode > 200000 --> Geant4 secondaries
       if ( std::abs(xAODProdVtx->barcode()) > 200000 ) continue;
       bool prodVtxSeenBefore(false); // is this new?
-      HepMC::GenVertex* hepmcProdVtx = vertexHelper(xAODProdVtx,vertexMap,prodVtxSeenBefore);
+      auto hepmcProdVtx = vertexHelper(xAODProdVtx,vertexMap,prodVtxSeenBefore);
       // Set the decay/production links
+#ifdef HEPMC3
+      hepmcProdVtx->add_particle_out(hepmcParticle);
+#else
       hepmcProdVtx->add_particle_out(hepmcParticle.release());
+#endif
       // Insert into Event
       if (!prodVtxSeenBefore){
         genEvt.add_vertex(hepmcProdVtx);
-        if( !hepmcProdVtx->suggest_barcode(xAODProdVtx->barcode()) ){
+        if( !HepMC::suggest_barcode(hepmcProdVtx,xAODProdVtx->barcode()) ){
           ATH_MSG_WARNING("suggest_barcode failed for vertex "<<xAODProdVtx->barcode());
           ++m_badSuggest;
         }
       }
-      if( !hepmcParticle->suggest_barcode(bcpart) ){
+      if( !HepMC::suggest_barcode(hepmcParticle,bcpart) ){
         ATH_MSG_DEBUG("suggest_barcode failed for particle " <<bcpart);
         ++m_badSuggest;
       }
@@ -152,22 +160,26 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt,
     if( xPart->hasDecayVtx() ){
       const xAOD::TruthVertex* xAODDecayVtx = xPart->decayVtx();
       // skip decay vertices with barcode > 200000 --> Geant4 secondaries
-      if ( fabs(xAODDecayVtx->barcode()) > 200000 ) continue;
+      if ( std::abs(xAODDecayVtx->barcode()) > 200000 ) continue;
       bool decayVtxSeenBefore(false); // is this new?
-      HepMC::GenVertex* hepmcDecayVtx = vertexHelper(xAODDecayVtx,vertexMap,decayVtxSeenBefore);
+      auto hepmcDecayVtx = vertexHelper(xAODDecayVtx,vertexMap,decayVtxSeenBefore);
       // Set the decay/production links
+#ifdef HEPMC3
+      hepmcDecayVtx->add_particle_in(hepmcParticle);
+#else
       hepmcDecayVtx->add_particle_in(hepmcParticle.release());
+#endif
       // Insert into Event
       if (!decayVtxSeenBefore){
         genEvt.add_vertex(hepmcDecayVtx);
-        if( !hepmcDecayVtx->suggest_barcode(xAODDecayVtx->barcode()) ){
+        if( !HepMC::suggest_barcode(hepmcDecayVtx,xAODDecayVtx->barcode()) ){
           ATH_MSG_WARNING("suggest_barcode failed for vertex " <<xAODDecayVtx->barcode());
           ++m_badSuggest;
         }
       }
       if( bcpart != 0 ){
-        if( !hepmcParticle->suggest_barcode(bcpart) ){
+        if( !HepMC::suggest_barcode(hepmcParticle,bcpart) ){
           ATH_MSG_DEBUG("suggest_barcode failed for particle " <<bcpart);
           ++m_badSuggest;
         }
@@ -185,12 +197,12 @@ HepMC::GenEvent xAODtoHepMCTool::createHepMCEvent(const xAOD::TruthEvent* xEvt,
 // Helper to check whether a vertex exists or not using a map;
 // calls createHepMCVertex if not
-HepMC::GenVertex* xAODtoHepMCTool::vertexHelper(const xAOD::TruthVertex* xaodVertex,
-                                                std::map<const xAOD::TruthVertex*,HepMC::GenVertex*> &vertexMap,
+HepMC::GenVertexPtr xAODtoHepMCTool::vertexHelper(const xAOD::TruthVertex* xaodVertex,
+                                                  std::map<const xAOD::TruthVertex*,HepMC::GenVertexPtr> &vertexMap,
                                                   bool &seenBefore) const {
-  HepMC::GenVertex* hepmcVertex;
-  std::map<const xAOD::TruthVertex*,HepMC::GenVertex*>::iterator vMapItr;
+  HepMC::GenVertexPtr hepmcVertex;
+  std::map<const xAOD::TruthVertex*,HepMC::GenVertexPtr>::iterator vMapItr;
   vMapItr=vertexMap.find(xaodVertex);
   // Vertex seen before?
   if (vMapItr!=vertexMap.end()) {
@@ -209,20 +221,20 @@ HepMC::GenVertex* xAODtoHepMCTool::vertexHelper(const xAOD::TruthVertex* xaodVer
 // Create the HepMC GenParticle
 // Call suggest_barcode after insertion!
-HepMC::GenParticle* xAODtoHepMCTool::createHepMCParticle(const xAOD::TruthParticle* particle) const {
+HepMC::GenParticlePtr xAODtoHepMCTool::createHepMCParticle(const xAOD::TruthParticle* particle) const {
   ATH_MSG_VERBOSE("Creating GenParticle for barcode " <<particle->barcode());
   const HepMC::FourVector fourVec( m_momFac * particle->px(), m_momFac * particle->py(), m_momFac * particle->pz(), m_momFac * particle->e() );
-  HepMC::GenParticle* hepmcParticle=new HepMC::GenParticle(fourVec, particle->pdgId(), particle->status());
+  auto hepmcParticle=HepMC::newGenParticlePtr(fourVec, particle->pdgId(), particle->status());
   hepmcParticle->set_generated_mass( m_momFac * particle->m());
   return hepmcParticle;
 }

 // Create the HepMC GenVertex
 // Call suggest_barcode after insertion!
-HepMC::GenVertex* xAODtoHepMCTool::createHepMCVertex(const xAOD::TruthVertex* vertex) const {
+HepMC::GenVertexPtr xAODtoHepMCTool::createHepMCVertex(const xAOD::TruthVertex* vertex) const {
   ATH_MSG_VERBOSE("Creating GenVertex for barcode " <<vertex->barcode());
   HepMC::FourVector prod_pos( m_lenFac * vertex->x(), m_lenFac * vertex->y(),m_lenFac * vertex->z(), m_lenFac * vertex->t() );
-  HepMC::GenVertex* genVertex=new HepMC::GenVertex(prod_pos);
+  auto genVertex=HepMC::newGenVertexPtr(prod_pos);
   return genVertex;
 }
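Note: the #ifdef HEPMC3 blocks in this file encode the two ownership models of the HepMC generations. GenParticlePtr is a std::shared_ptr in HepMC3 but a raw pointer in HepMC2, which is why the HepMC2 path parks the new particle in a std::unique_ptr and release()s it exactly when a vertex (and, through it, the GenEvent) takes ownership. Condensed to its essence, assuming the typedefs from AtlasHepMC:

    // C++ sketch of the ownership hand-off in the two builds.
    #ifdef HEPMC3
      auto p = HepMC::newGenParticlePtr();     // shared_ptr: ownership is shared
      vtx->add_particle_out(p);                // vertex holds another shared reference
    #else
      std::unique_ptr<HepMC::GenParticle> p(HepMC::newGenParticlePtr()); // raw ptr, guarded
      vtx->add_particle_out(p.release());      // the event takes sole ownership here
    #endif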
diff --git a/HLT/Trigger/TrigControl/TrigExamples/TrigExPartialEB/python/MTCalibPebConfig.py b/HLT/Trigger/TrigControl/TrigExamples/TrigExPartialEB/python/MTCalibPebConfig.py
index ffb90908cb75b923bfee7019eb5016522dd18a92..3ec760429f6007b268b9d36f54be85991d70e81c 100644
--- a/HLT/Trigger/TrigControl/TrigExamples/TrigExPartialEB/python/MTCalibPebConfig.py
+++ b/HLT/Trigger/TrigControl/TrigExamples/TrigExPartialEB/python/MTCalibPebConfig.py
@@ -306,7 +306,7 @@ def make_summary_algs(hypo_algs):
     summMaker.FinalStepDecisions = {}
     for hypo in hypo_algs:
         for tool in hypo.HypoTools:
-            summMaker.FinalStepDecisions[tool.getName()] = str(hypo.HypoOutputDecisions)
+            summMaker.FinalStepDecisions[tool.getName()] = [str(hypo.HypoOutputDecisions)]

     log.info('summMaker = %s', summMaker)
     return [summary, summMaker]
diff --git a/HLT/Trigger/TrigControl/TrigServices/python/TrigServicesConfig.py b/HLT/Trigger/TrigControl/TrigServices/python/TrigServicesConfig.py
index 06c8c51f1692b29a0678dc9bf0dd6923b11495e0..e1fdb3b88d41c522100893bab41860c8bd236d29 100644
--- a/HLT/Trigger/TrigControl/TrigServices/python/TrigServicesConfig.py
+++ b/HLT/Trigger/TrigControl/TrigServices/python/TrigServicesConfig.py
@@ -52,7 +52,7 @@ def setupMessageSvc():
     MessageSvc = svcMgr.MessageSvc
     MessageSvc.OutputLevel = theApp.OutputLevel

-    MessageSvc.Format = "% F%40W%S%4W%R%e%s%8W%R%T %0W%M"
+    MessageSvc.Format = "% F%40W%C%4W%R%e%s%8W%R%T %0W%M"
     # Add timestamp when running in partition
     if os.environ.get('TDAQ_PARTITION','') != 'athenaHLT':
         MessageSvc.Format = "%t " + MessageSvc.Format
diff --git a/HLT/Trigger/TrigControl/TrigServices/python/TriggerUnixStandardSetup.py b/HLT/Trigger/TrigControl/TrigServices/python/TriggerUnixStandardSetup.py
index 8904eec9b96bbff0a3a90f796577e51d334b2e40..f8ac2b0ecfa752e37ac626f94ee424c74d6dc49d 100644
--- a/HLT/Trigger/TrigControl/TrigServices/python/TriggerUnixStandardSetup.py
+++ b/HLT/Trigger/TrigControl/TrigServices/python/TriggerUnixStandardSetup.py
@@ -23,9 +23,6 @@ def setupCommonServices():
     # Create our own logger
     log = logging.getLogger( 'TriggerUnixStandardSetup::setupCommonServices:' )

-    from TrigServices.TrigServicesConfig import setupMessageSvc
-    setupMessageSvc()
-
     # Do the default Atlas job configuration first
     import AthenaCommon.AtlasUnixStandardJob   # noqa: F401

@@ -179,6 +176,9 @@ def setupCommonServicesEnd():
     # Set default properties for some important services after all user job options
     log.info('Configure core services for online running')

+    from TrigServices.TrigServicesConfig import setupMessageSvc
+    setupMessageSvc()
+
     svcMgr.CoreDumpSvc.CoreDumpStream = "stdout"
     svcMgr.CoreDumpSvc.CallOldHandler = False
     svcMgr.CoreDumpSvc.StackTrace = True
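Note: the TriggerUnixStandardSetup change is purely about ordering. setupMessageSvc() used to run at the start of configuration, where any later user job option could override it; moving it into setupCommonServicesEnd() makes the online message settings an enforced default applied after all user options. The effect reduces to "last assignment wins" for Gaudi properties, sketched here with a stand-in class rather than the real svcMgr.MessageSvc:

    # Python sketch: property assignments are plain attribute writes, so order decides.
    class MessageSvcStub:
        Format = "%default"

    svc = MessageSvcStub()
    svc.Format = "%user-job-option"                  # processed first
    svc.Format = "% F%40W%C%4W%R%e%s%8W%R%T %0W%M"   # setupMessageSvc(), now applied last: wins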
diff --git a/InnerDetector/InDetConditions/SCT_ConditionsAlgorithms/python/SCT_SiliconConditionsTestAlgConfig.py b/InnerDetector/InDetConditions/SCT_ConditionsAlgorithms/python/SCT_SiliconConditionsTestAlgConfig.py
index 158e55b26266ae24f57b98dd44d89d7700a00098..f9cc5080ae16217c8f7ea757f34ea0868cff3665 100644
--- a/InnerDetector/InDetConditions/SCT_ConditionsAlgorithms/python/SCT_SiliconConditionsTestAlgConfig.py
+++ b/InnerDetector/InDetConditions/SCT_ConditionsAlgorithms/python/SCT_SiliconConditionsTestAlgConfig.py
@@ -1,4 +1,4 @@
-"""Define method to configure and test SCT_SiliconConditionsTestAlgConfig
+"""Define method to configure and test SCT_SiliconConditionsTestAlg

 Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 """
diff --git a/InnerDetector/InDetDetDescr/InDetTrackingGeometry/src/SiLayerBuilder.cxx b/InnerDetector/InDetDetDescr/InDetTrackingGeometry/src/SiLayerBuilder.cxx
index 520f05d0f6bf33098d8c9939868e6c5d0ec92f06..ef70dd63c6f859d3f155ecffb152856ae7ea6571 100755
--- a/InnerDetector/InDetDetDescr/InDetTrackingGeometry/src/SiLayerBuilder.cxx
+++ b/InnerDetector/InDetDetDescr/InDetTrackingGeometry/src/SiLayerBuilder.cxx
@@ -995,6 +995,8 @@ std::vector< const Trk::DiscLayer* >* InDet::SiLayerBuilder::createDiscLayers(st
                                             new Trk::DiscBounds(rMin,rMax),
                                             *passiveLayerMaterial,
                                             1.*Gaudi::Units::mm);
+          // cleanup of the layer material --------------------------------------------------------------
+          delete passiveLayerMaterial;
       } else
           passiveLayer = new Trk::DiscLayer(passiveDiscTransf, new Trk::DiscBounds(rMin,rMax), 0);
       ATH_MSG_DEBUG( " -> At Z - Position : " << *addLayerIter );
diff --git a/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecExampleConfigDb.py b/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecExampleConfigDb.py
index b0641d06257ea332c0591173d061cfafa5450a71..9c67652a919d9f0b0f8cbcf4cc25acfcb8b7507f 100644
--- a/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecExampleConfigDb.py
+++ b/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecExampleConfigDb.py
@@ -39,6 +39,7 @@ addTool( "MuonRecExample.MuonRecTools.MuonHoughPatternTool", "MuonHoughPatternTo
 addTool( "MuonRecExample.MuonRecTools.MuonHoughPatternFinderTool", "MuonHoughPatternFinderTool" )

 addService("MuonRecExample.MuonRecTools.AtlasTrackingGeometrySvc","AtlasTrackingGeometrySvc")
+addService("MuonRecExample.MuonRecTools.TrackingVolumesSvc","TrackingVolumesSvc")

 addTool( "MuonRecExample.MuonRecTools.MuonNavigator", "MuonNavigator" )

diff --git a/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecTools.py b/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecTools.py
index 7bf882c5ee1f381f035efade7dfef76e8dd9dcaf..1480d72a1ab003ae5cef93d1592a4cb4c4417782 100644
--- a/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecTools.py
+++ b/MuonSpectrometer/MuonReconstruction/MuonRecExample/python/MuonRecTools.py
@@ -188,6 +188,9 @@ def AtlasTrackingGeometrySvc(name="AtlasTrackingGeometrySvc",**kwargs):
     from TrkDetDescrSvc.AtlasTrackingGeometrySvc import AtlasTrackingGeometrySvc
     return AtlasTrackingGeometrySvc

+def TrackingVolumesSvc(name="TrackingVolumesSvc",**kwargs):
+    from TrkDetDescrSvc.TrkDetDescrSvcConf import Trk__TrackingVolumesSvc
+    return Trk__TrackingVolumesSvc("TrackingVolumesSvc")

 # default muon navigator
 def MuonNavigator(name = "MuonNavigator",**kwargs):
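Note: the pair of Muon changes above registers a new TrackingVolumesSvc factory in the MuonRecExample config db, so other configuration code can instantiate the service lazily by name instead of importing MuonRecTools directly. Two details worth flagging: the factory ignores its name and kwargs arguments and always returns an instance called "TrackingVolumesSvc", and lookups would typically go through CfgGetter, roughly as sketched here (assuming the usual AthenaCommon config-db mechanics):

    # Python sketch: resolving the service registered via addService() above.
    from AthenaCommon.CfgGetter import getService
    svc = getService("TrackingVolumesSvc")  # invokes MuonRecTools.TrackingVolumesSvc()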
diff --git a/Tools/Tier0ChainTests/test/test_q431.sh b/Tools/Tier0ChainTests/test/test_q431.sh
index 67043285c0acba0a9a9abfcbd01886eebda4c2e1..47d12d3c523e56673b7b9b938351530d8ae685ee 100755
--- a/Tools/Tier0ChainTests/test/test_q431.sh
+++ b/Tools/Tier0ChainTests/test/test_q431.sh
@@ -8,7 +8,7 @@
 # art-include: 21.3/Athena
 # art-include: 21.9/Athena

-Reco_tf.py --AMI=q431 --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --outputHISTFile=myHIST.root --imf False
+Reco_tf.py --AMI=q431 --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --outputHISTFile=myHIST.root --imf False --maxEvents=1000

 echo "art-result: $? Reco"

 ArtPackage=$1
diff --git a/Tools/Tier0ChainTests/test/test_q431_mp.sh b/Tools/Tier0ChainTests/test/test_q431_mp.sh
index 724d6fb7c05e5a17d81c6fb625b9e5126d3ed7b2..c3682da02683f638dfe30715b740bf2ea620f30e 100755
--- a/Tools/Tier0ChainTests/test/test_q431_mp.sh
+++ b/Tools/Tier0ChainTests/test/test_q431_mp.sh
@@ -8,7 +8,7 @@
 # art-include: 21.3/Athena
 # art-include: 21.9/Athena

-Reco_tf.py --AMI=q431 --athenaopts='--nprocs=2' --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --outputHISTFile=myHIST.root --imf False
+Reco_tf.py --AMI=q431 --athenaopts='--nprocs=2' --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --outputHISTFile=myHIST.root --imf False --maxEvents=1000

 echo "art-result: $? Reco"

 ArtPackage=$1
diff --git a/Tools/Tier0ChainTests/test/test_q431_mt.sh b/Tools/Tier0ChainTests/test/test_q431_mt.sh
index 0c60e7dd53c3ebfc599407d92df9c66e3bf799f7..a755a1d7b1f1f7f0a78e99d775e795c1e6c564c6 100755
--- a/Tools/Tier0ChainTests/test/test_q431_mt.sh
+++ b/Tools/Tier0ChainTests/test/test_q431_mt.sh
@@ -8,7 +8,7 @@
 # art-include: 21.3/Athena
 # art-include: 21.9/Athena

-Reco_tf.py --AMI=q431 --athenaopts='--threads=1' --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --imf False
+Reco_tf.py --AMI=q431 --athenaopts='--threads=2' --outputAODFile=myAOD.pool.root --outputESDFile=myESD.pool.root --imf False --maxEvents=1000

 echo "art-result: $? Reco"

 ArtPackage=$1
diff --git a/Tracking/TrkExtrapolation/TrkExRungeKuttaIntersector/src/RungeKuttaIntersector.cxx b/Tracking/TrkExtrapolation/TrkExRungeKuttaIntersector/src/RungeKuttaIntersector.cxx
index 022ab47d4f78c696c5bcea1bb49da0a7e2148fdf..3206f7acf0cfe724159d1bceaa067b0a96cf147f 100755
--- a/Tracking/TrkExtrapolation/TrkExRungeKuttaIntersector/src/RungeKuttaIntersector.cxx
+++ b/Tracking/TrkExtrapolation/TrkExRungeKuttaIntersector/src/RungeKuttaIntersector.cxx
@@ -119,8 +119,8 @@ RungeKuttaIntersector::finalize()
                       << " step reductions and"
                       << std::setw(5) << std::setprecision(2) << norm*static_cast<double>(m_countShortStep)
                       << " short final steps";
-    }
     msg(MSG::INFO) << endmsg;
+    }

     return StatusCode::SUCCESS;
 }
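Note: the RungeKuttaIntersector hunk fixes a message-stream bug rather than anything numerical: the INFO statistics were composed inside the brace, but the endmsg flush sat outside it, so finalize() emitted a stray INFO line even when the statistics block was skipped. The safe pattern keeps composition and flush under the same guard, sketched here with Gaudi's messaging idiom:

    // C++ sketch of the guarded msg/endmsg pattern.
    if (msgLvl(MSG::INFO)) {          // or whatever condition guards the message body
      msg(MSG::INFO) << "summary statistics ...";
      msg(MSG::INFO) << endmsg;       // flush inside the guard: no stray empty lines
    }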
diff --git a/Tracking/TrkTools/TrkTrackSummaryTool/CMakeLists.txt b/Tracking/TrkTools/TrkTrackSummaryTool/CMakeLists.txt
index f7c99f2a718722c8abd55c11d14a218c6a4b3acf..3535e938d9dd82822ad5c61d1361863ea5fb5ffc 100644
--- a/Tracking/TrkTools/TrkTrackSummaryTool/CMakeLists.txt
+++ b/Tracking/TrkTools/TrkTrackSummaryTool/CMakeLists.txt
@@ -7,7 +7,17 @@ atlas_subdir( TrkTrackSummaryTool )
 atlas_add_component( TrkTrackSummaryTool
                      src/*.cxx
                      src/components/*.cxx
-                     LINK_LIBRARIES AthenaBaseComps GaudiKernel TrkParameters TrkTrackSummary TrkToolInterfaces AtlasDetDescr Identifier TrkDetElementBase TrkGeometry TrkCompetingRIOsOnTrack TrkEventPrimitives TrkMeasurementBase TrkRIO_OnTrack TrkTrack TRT_ElectronPidToolsLib )
+                     LINK_LIBRARIES AthenaBaseComps GaudiKernel TrkTrackSummary TrkToolInterfaces AtlasDetDescr Identifier TrkDetElementBase TrkGeometry TrkCompetingRIOsOnTrack TrkEventPrimitives TrkMeasurementBase TrkRIO_OnTrack TrkTrack TRT_ElectronPidToolsLib )
+
+
+atlas_add_test(TrackSummaryTool_test
+               SOURCES test/TrackSummaryTool_test.cxx
+               INCLUDE_DIRS ${Boost_INCLUDE_DIRS} ${ROOT_INCLUDE_DIRS}
+               LINK_LIBRARIES ${Boost_LIBRARIES} ${ROOT_LIBRARIES} AthenaBaseComps GaudiKernel IdDictParser StoreGateLib TrkToolInterfaces AtlasDetDescr Identifier TrkDetElementBase TrkGeometry TrkCompetingRIOsOnTrack TrkEventPrimitives TrkMeasurementBase TrkRIO_OnTrack TrkTrack TRT_ElectronPidToolsLib
+               POST_EXEC_SCRIPT "nopost.sh" )
+
+# Install files from the package:
+atlas_install_joboptions( share/*.txt )

 # Install files from the package:
 atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
diff --git a/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryTool_test.txt b/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryTool_test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..50845d9e77c3389d21def868c360960c90ae061d
--- /dev/null
+++ b/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryTool_test.txt
@@ -0,0 +1,8 @@
+EventDataSvc.ForceLeaves = true;
+EventDataSvc.RootCLID = 1;
+ApplicationMgr.Dlls += { "AthenaServices" };
+ApplicationMgr.ExtSvc = {"StoreGateSvc"};
+ApplicationMgr.EvtMax = 1;
+ApplicationMgr.EvtSel = "TestEvtSelector";
+ApplicationMgr.HistogramPersistency = "NONE";
+MessageSvc.OutputLevel = 5;
diff --git a/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryUpdater_test.txt b/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryUpdater_test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..77d6b87f291234c46480cabefe4d83f06d6a15d6
--- /dev/null
+++ b/Tracking/TrkTools/TrkTrackSummaryTool/share/TrackSummaryUpdater_test.txt
@@ -0,0 +1,8 @@
+EventDataSvc.ForceLeaves = true;
+EventDataSvc.RootCLID = 1;
+ApplicationMgr.Dlls += { "AthenaServices", "StoreGate" };
+ApplicationMgr.ExtSvc = {"StoreGateSvc", "StoreGateSvc/DetectorStore"};
+ApplicationMgr.EvtMax = 1;
+ApplicationMgr.EvtSel = "TestEvtSelector";
+ApplicationMgr.HistogramPersistency = "NONE";
+MessageSvc.OutputLevel = 3;
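Note: the two share/*.txt files are old-style Gaudi "FILE" job options, which the new unit test loads through the ApplicationMgr JobOptionsPath property (see the fixture in the test source below). The syntax is compact enough to summarize; the component names here are generic stand-ins, not taken from the patch:

    // Gaudi text job-options syntax in brief (sketch, generic names):
    SomeSvc.SomeScalar = 42;               // scalar property assignment
    ApplicationMgr.Dlls += { "SomeLib" };  // append to a list property
    MessageSvc.OutputLevel = 3;            // Gaudi levels: 3 = INFO, 5 = ERROR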
diff --git a/Tracking/TrkTools/TrkTrackSummaryTool/src/TrackSummaryTool.cxx b/Tracking/TrkTools/TrkTrackSummaryTool/src/TrackSummaryTool.cxx
index e86b1766abd7e7dc9c98b97af0c4be7375ec302a..99a7a8c445f952d10e1973cf022a4c95ac1a156b 100755
--- a/Tracking/TrkTools/TrkTrackSummaryTool/src/TrackSummaryTool.cxx
+++ b/Tracking/TrkTools/TrkTrackSummaryTool/src/TrackSummaryTool.cxx
@@ -66,8 +66,7 @@ StatusCode Trk::TrackSummaryTool::initialize(){
   ATH_CHECK( detStore()->retrieve(m_detID, "AtlasID" ));

   if (m_idTool.empty() && m_muonTool.empty()) {
-    ATH_MSG_ERROR ("Could get neither InDetHelperTool nor MuonHelperTool. Must abort.");
-    return StatusCode::FAILURE;
+    ATH_MSG_WARNING ("Could get neither InDetHelperTool nor MuonHelperTool.");
   }
   if (not m_idTool.empty()) ATH_CHECK(m_idTool.retrieve());
   if (not m_eProbabilityTool.empty()) ATH_CHECK(m_eProbabilityTool.retrieve());
@@ -305,7 +304,9 @@ Trk::TrackSummaryTool::updateSharedHitCount(
   TrackSummary& summary) const
 {
   // first check if track has no summary - then it is recreated
-  m_idTool->updateSharedHitCount(track, prdToTrackMap, summary);
+  if (m_idTool){
+    m_idTool->updateSharedHitCount(track, prdToTrackMap, summary);
+  }
 }

 void
@@ -330,10 +331,12 @@ Trk::TrackSummaryTool::updateAdditionalInfo(const Track& track,
   if (track.info().trackFitter() != TrackInfo::Unknown && !m_dedxtool.empty()) {
     dedx = m_dedxtool->dEdx(track, nHitsUsed_dEdx, nOverflowHits_dEdx);
   }
-  m_idTool->updateAdditionalInfo(summary, eProbability,dedx, nHitsUsed_dEdx,nOverflowHits_dEdx);
-  m_idTool->updateExpectedHitInfo(track, summary);
-  if (m_addInDetDetailedSummary) {
-    m_idTool->addDetailedTrackSummary(track,summary);
+  if (m_idTool){
+    m_idTool->updateAdditionalInfo(summary, eProbability,dedx, nHitsUsed_dEdx,nOverflowHits_dEdx);
+    m_idTool->updateExpectedHitInfo(track, summary);
+    if (m_addInDetDetailedSummary) m_idTool->addDetailedTrackSummary(track,summary);
+  } else {
+    ATH_MSG_INFO("No updates attempted, as the SummaryHelperTool is not defined.");
   }
 }
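Note: with initialize() no longer failing when both helper tools are absent, m_idTool can legitimately be an empty handle at run time, so every use has to be guarded, which is exactly what the hunks above add. The pattern, reduced to a sketch with the same names as the patch:

    // C++ sketch: treating a ToolHandle as an optional dependency.
    if (!m_idTool.empty()) {
      ATH_CHECK(m_idTool.retrieve());   // in initialize(): retrieve only if configured
    }
    // ... later, at each use:
    if (m_idTool) {
      m_idTool->updateSharedHitCount(track, prdToTrackMap, summary);
    } else {
      ATH_MSG_INFO("No update attempted: summary helper tool not configured.");
    }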
diff --git a/Tracking/TrkTools/TrkTrackSummaryTool/test/TrackSummaryTool_test.cxx b/Tracking/TrkTools/TrkTrackSummaryTool/test/TrackSummaryTool_test.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..437c700d56fb93c636940533e382bb81712b1afc
--- /dev/null
+++ b/Tracking/TrkTools/TrkTrackSummaryTool/test/TrackSummaryTool_test.cxx
@@ -0,0 +1,150 @@
+/*
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+*/
+/**
+ * @file TrkTrackSummaryTool/test/TrackSummaryTool_test.cxx
+ * @author Shaun Roe
+ * @date Dec, 2020
+ * @brief Some tests for the TrackSummaryTool in the Boost framework
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MAIN
+#define BOOST_TEST_MODULE TEST_TRKTRACKSUMMARYTOOL
+//
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#include <boost/test/unit_test.hpp>
+
+#pragma GCC diagnostic pop
+
+//
+#include "GaudiKernel/ISvcLocator.h"
+#include "StoreGate/StoreGateSvc.h"
+#include "CxxUtils/checker_macros.h"
+#include "TInterpreter.h"
+//
+#include "GaudiKernel/IAppMgrUI.h"
+#include "GaudiKernel/SmartIF.h"
+#include "GaudiKernel/EventContext.h"
+#include "AthenaBaseComps/AthAlgTool.h"
+#include "CxxUtils/ubsan_suppress.h"
+#include "IdDictParser/IdDictParser.h"
+#include "AtlasDetDescr/AtlasDetectorID.h"
+
+#include "TrkTrackSummaryTool/TrackSummaryTool.h"
+ATLAS_NO_CHECK_FILE_THREAD_SAFETY; // This test uses global svcLoc.
+
+// Gaudi fixture
+class GaudiFixture {
+ public:
+  ISvcLocator *
+  svcLoc(){
+    return m_svcLoc;
+  }
+
+  IToolSvc *
+  toolSvc(){
+    return m_toolSvc;
+  }
+
+  StoreGateSvc*
+  storeGateSvc(){
+    return m_sg;
+  }
+  StoreGateSvc*
+  detStore(){
+    return m_detStore;
+  }
+
+  GaudiFixture(const std::string & joPath = "TrkTrackSummaryUpdater/TrackSummaryUpdater_test.txt") {
+    setUpGaudi(joPath);
+  }
+
+  ~GaudiFixture() {
+    tearDownGaudi();
+  }
+
+ private:
+  void
+  setUpGaudi(const std::string & joPath) {
+    CxxUtils::ubsan_suppress ([]() { TInterpreter::Instance(); } );
+    m_appMgr = Gaudi::createApplicationMgr();
+    m_svcLoc = m_appMgr;
+    m_svcMgr = m_appMgr;
+    m_propMgr = m_appMgr;
+    m_propMgr->setProperty( "EvtSel", "NONE" ).ignore() ;
+    m_propMgr->setProperty( "JobOptionsType", "FILE" ).ignore();
+    m_propMgr->setProperty( "JobOptionsPath", joPath ).ignore();
+    m_toolSvc = m_svcLoc->service("ToolSvc");
+    m_appMgr->configure().ignore();
+    m_appMgr->initialize().ignore();
+    m_sg = nullptr;
+    m_svcLoc->service ("StoreGateSvc", m_sg).ignore();
+    m_svcLoc->service ("StoreGateSvc/DetectorStore", m_detStore).ignore();
+  }
+
+  void
+  tearDownGaudi() {
+    m_svcMgr->finalize().ignore();
+    m_appMgr->finalize().ignore();
+    m_appMgr->terminate().ignore();
+    m_svcLoc->release();
+    m_svcMgr->release();
+    Gaudi::setInstance( static_cast<IAppMgrUI*>(nullptr) );
+  }
+
+  StoreGateSvc*
+  evtStore(){
+    return m_sg;
+  }
+
+  //member variables for Core Gaudi components
+  IAppMgrUI* m_appMgr{nullptr};
+  SmartIF<ISvcLocator> m_svcLoc;
+  SmartIF<ISvcManager> m_svcMgr;
+  SmartIF<IToolSvc> m_toolSvc;
+  SmartIF<IProperty> m_propMgr;
+  StoreGateSvc* m_sg{ nullptr };
+  StoreGateSvc * m_detStore{nullptr};
+};
+
+BOOST_AUTO_TEST_SUITE(TrackSummaryUpdaterTest)
+
+  GaudiFixture g("TrkTrackSummaryTool/TrackSummaryTool_test.txt");
+  auto pSvcLoc=g.svcLoc();
+  auto pToolSvc=g.toolSvc();
+  auto pDetStore=g.detStore();
+  IAlgTool* pToolInterface{};
+
+  BOOST_AUTO_TEST_CASE( sanityCheck ){
+    const bool svcLocatorIsOk=(pSvcLoc != nullptr);
+    BOOST_TEST(svcLocatorIsOk);
+    const bool toolSvcIsOk = ( pToolSvc != nullptr);
+    BOOST_TEST(toolSvcIsOk);
+    const bool detStoreIsOk = (pDetStore != nullptr);
+    BOOST_TEST(detStoreIsOk);
+  }
+
+  BOOST_AUTO_TEST_CASE(retrieveTool){
+    static IdDictParser parser;
+    parser.register_external_entity ("InnerDetector","IdDictInnerDetector.xml");
+    parser.register_external_entity ("MuonSpectrometer","IdDictMuonSpectrometer_S.02.xml");
+    parser.register_external_entity ("Calorimeter","IdDictCalorimeter_L1Onl.xml");
+    IdDictMgr& idDict = parser.parse ("IdDictParser/ATLAS_IDS.xml");
+    auto atlasId = std::make_unique<AtlasDetectorID>();
+    atlasId->initialize_from_dictionary (idDict);
+    if (pDetStore and (not pDetStore->contains<AtlasDetectorID>("AtlasID"))) {
+      BOOST_TEST ( pDetStore->record (std::move (atlasId), "AtlasID").isSuccess() );
+    }
+    BOOST_TEST ( pToolSvc->retrieveTool("Trk::TrackSummaryTool", pToolInterface).isSuccess());
+    BOOST_TEST(pToolInterface -> initialize());
+  }
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/Trigger/TrigAlgorithms/TrigPartialEventBuilding/python/TrigPartialEventBuildingConfig.py b/Trigger/TrigAlgorithms/TrigPartialEventBuilding/python/TrigPartialEventBuildingConfig.py
index a47d8e6ae65b6b521ae909fe94efb452c429143f..de2bfa27b2cd2e724be714e714c2ab1eb41ffcc1 100644
--- a/Trigger/TrigAlgorithms/TrigPartialEventBuilding/python/TrigPartialEventBuildingConfig.py
+++ b/Trigger/TrigAlgorithms/TrigPartialEventBuilding/python/TrigPartialEventBuildingConfig.py
@@ -2,13 +2,13 @@
 # Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
 #
-from TrigPartialEventBuilding.TrigPartialEventBuildingConf import StaticPEBInfoWriterTool, RoIPEBInfoWriterTool
+from AthenaConfiguration.ComponentFactory import CompFactory
 from TrigEDMConfig.DataScoutingInfo import getFullHLTResultID
 from libpyeformat_helper import SourceIdentifier, SubDetector
 from RegionSelector import RegSelToolConfig


-class StaticPEBInfoWriterToolCfg(StaticPEBInfoWriterTool):
+def StaticPEBInfoWriterToolCfg(name='StaticPEBInfoWriterTool'):
     def addROBs(self, robs):
         self.ROBList.extend(robs)

@@ -23,8 +23,17 @@ class StaticPEBInfoWriterToolCfg(StaticPEBInfoWriterTool):
         ctpResultSID = SourceIdentifier(SubDetector.TDAQ_CTP, moduleId)
         self.addROBs([ctpResultSID.code()])

+    CompFactory.StaticPEBInfoWriterTool.addROBs = addROBs
+    CompFactory.StaticPEBInfoWriterTool.addSubDets = addSubDets
+    CompFactory.StaticPEBInfoWriterTool.addHLTResultToROBList = addHLTResultToROBList
+    CompFactory.StaticPEBInfoWriterTool.addCTPResultToROBList = addCTPResultToROBList

-class RoIPEBInfoWriterToolCfg(RoIPEBInfoWriterTool):
+    tool = CompFactory.StaticPEBInfoWriterTool(name)
+
+    return tool
+
+
+def RoIPEBInfoWriterToolCfg(name='RoIPEBInfoWriterTool'):
     def addRegSelDets(self, detNames):
         '''
         Add RegionSelector tools for given detector look-up tables to build PEB list of ROBs
@@ -51,7 +60,7 @@ class RoIPEBInfoWriterToolCfg(RoIPEBInfoWriterTool):
     def addSubDets(self, dets):
         '''Add extra fixed list of SubDets independent of RoI'''
-        self.ExtraSubDets.extend(dets)
+        self.ExtraSubDets.extend([int(detid) for detid in dets])

     def addHLTResultToROBList(self, moduleId=getFullHLTResultID()):
         hltResultSID = SourceIdentifier(SubDetector.TDAQ_HLT, moduleId)
@@ -60,3 +69,13 @@ class RoIPEBInfoWriterToolCfg(RoIPEBInfoWriterTool):
     def addCTPResultToROBList(self, moduleId=0):
         ctpResultSID = SourceIdentifier(SubDetector.TDAQ_CTP, moduleId)
         self.addROBs([ctpResultSID.code()])
+
+    CompFactory.RoIPEBInfoWriterTool.addRegSelDets = addRegSelDets
+    CompFactory.RoIPEBInfoWriterTool.addROBs = addROBs
+    CompFactory.RoIPEBInfoWriterTool.addSubDets = addSubDets
+    CompFactory.RoIPEBInfoWriterTool.addHLTResultToROBList = addHLTResultToROBList
+    CompFactory.RoIPEBInfoWriterTool.addCTPResultToROBList = addCTPResultToROBList
+
+    tool = CompFactory.RoIPEBInfoWriterTool(name)
+
+    return tool
diff --git a/Trigger/TrigHypothesis/TrigHLTJetHypo/TrigHLTJetHypo/TrigHLTJetHypoUtils/AllJetsGrouper.h b/Trigger/TrigHypothesis/TrigHLTJetHypo/TrigHLTJetHypo/TrigHLTJetHypoUtils/AllJetsGrouper.h
index 9bc4ae4008f925838d45a82f05162d0f03d551de..47cdba67e25b3dad7025dcccf721bae6d73f2e9d 100644
--- a/Trigger/TrigHypothesis/TrigHLTJetHypo/TrigHLTJetHypo/TrigHLTJetHypoUtils/AllJetsGrouper.h
+++ b/Trigger/TrigHypothesis/TrigHLTJetHypo/TrigHLTJetHypo/TrigHLTJetHypoUtils/AllJetsGrouper.h
@@ -16,7 +16,7 @@ class AllJetsGrouper: public IJetGrouper{
   std::vector<HypoJetGroupVector> group(HypoJetIter&,
                                         HypoJetIter&) const override;
-  std::optional<HypoJetGroupVector> next();
+  virtual std::optional<HypoJetGroupVector> next() override;
   std::string getName() const override;
   std::string toString() const override;
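Note: StaticPEBInfoWriterToolCfg and RoIPEBInfoWriterToolCfg change from subclasses of the generated Conf classes into factory functions. Because CompFactory produces the component classes, the helper methods are now attached to those classes at configuration time and a plain instance is returned, which keeps the component's canonical type while still offering the helpers. The general shape of the pattern, with placeholder names (MyTool and ThingList are hypothetical, not trigger code):

    # Python sketch of the CompFactory helper-attachment pattern.
    from AthenaConfiguration.ComponentFactory import CompFactory

    def MyToolCfg(name='MyTool'):
        def addThings(self, things):
            self.ThingList.extend(things)          # ThingList: a list property of MyTool
        CompFactory.MyTool.addThings = addThings   # attach the helper to the generated class
        return CompFactory.MyTool(name)            # instances now expose addThings()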
diff --git a/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetConditionConfig_capacitychecked.cxx b/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetConditionConfig_capacitychecked.cxx
index f98c2407a8eb2e99f76ee381482b2b8fb1e2af9c..6104c6517a7def1ae9b0089a50d6dc18ee70201a 100644
--- a/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetConditionConfig_capacitychecked.cxx
+++ b/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetConditionConfig_capacitychecked.cxx
@@ -43,7 +43,7 @@ TrigJetConditionConfig_capacitychecked::getCapacityCheckedCondition() const {

 StatusCode TrigJetConditionConfig_capacitychecked::checkVals() const {
-  if (m_multiplicity < 1) {
+  if (m_multiplicity < 1u) {
     ATH_MSG_ERROR("m_multiplicity = " + std::to_string(m_multiplicity) +
                   "expected > 0");
     return StatusCode::FAILURE;
diff --git a/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetHypoToolConfig_fastreduction.cxx b/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetHypoToolConfig_fastreduction.cxx
index f407bcf776c44e383135eb661b09e5397aa64acd..c819ee3580b170c7d76d5fd5fe25f403a6890a10 100644
--- a/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetHypoToolConfig_fastreduction.cxx
+++ b/Trigger/TrigHypothesis/TrigHLTJetHypo/src/TrigJetHypoToolConfig_fastreduction.cxx
@@ -116,7 +116,7 @@ TrigJetHypoToolConfig_fastreduction::getCapacityCheckedConditions() const {

   // return an invalid optional if any src signals a problem
   for(const auto& cm : m_conditionMakers){
-    conditions.push_back(std::move(cm->getCapacityCheckedCondition()));
+    conditions.push_back(cm->getCapacityCheckedCondition());
   }

   return std::make_optional<ConditionPtrs>(std::move(conditions));
@@ -128,7 +128,7 @@ TrigJetHypoToolConfig_fastreduction::getConditions() const {

   ConditionsMT conditions;
   for(const auto& cm : m_conditionMakers){
-    conditions.push_back(std::move(cm->getCapacityCheckedCondition()));
+    conditions.push_back(cm->getCapacityCheckedCondition());
   }

   return std::make_optional<ConditionsMT>(std::move(conditions));
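Note: dropping std::move in the two loops above is correct because getCapacityCheckedCondition() returns by value: the returned temporary is already an rvalue, so wrapping it in std::move changes nothing and several compilers flag it (clang's redundant/pessimizing-move diagnostics, for example). A stand-alone illustration with a stand-in type:

    // C++ sketch: std::move on a returned temporary is redundant.
    #include <utility>
    #include <vector>

    std::vector<int> make();   // returns a prvalue

    void fill(std::vector<std::vector<int>>& out) {
      out.push_back(make());                 // fine: the temporary binds to push_back(T&&)
      // out.push_back(std::move(make()));   // redundant: it is already an rvalue
    }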
diff --git a/Trigger/TrigMonitoring/TrigMETMonitoring/python/TrigMETMonitorAlgorithm.py b/Trigger/TrigMonitoring/TrigMETMonitoring/python/TrigMETMonitorAlgorithm.py
index b6b12f34db129d05445d1bc91df301ddb2cc5365..593320d39019cf9df17022412d208a2577a90cbe 100644
--- a/Trigger/TrigMonitoring/TrigMETMonitoring/python/TrigMETMonitorAlgorithm.py
+++ b/Trigger/TrigMonitoring/TrigMETMonitoring/python/TrigMETMonitorAlgorithm.py
@@ -52,7 +52,7 @@ def TrigMETMonConfig(inputFlags):

     ### check Run2 or Run3 MT
     mt_chains = True
-    if ( inputFlags.Trigger.EDMDecodingVersion < 3 ) :
+    if ( inputFlags.Trigger.EDMVersion < 3 ) :
         mt_chains = False

     ### container name selection
diff --git a/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16a_build.py b/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16a_build.py
index 70405a41232dc99470841167ee3278cb3ed10954..1dc4b34b9c84b11eca5aa3d12777c657fa0a032c 100755
--- a/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16a_build.py
+++ b/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16a_build.py
@@ -56,7 +56,7 @@ rdo2rdotrig.input = ''
 rdo2rdotrig.imf = False
 rdo2rdotrig.explicit_input = True
 rdo2rdotrig.args = '--inputRDOFile=RDO.pool.root --outputRDO_TRIGFile=RDO_TRIG.pool.root'
-rdo2rdotrig.args += ' --asetup="RDOtoRDOTrigger:Athena,21.0-mc16a,slc6,latest"'
+rdo2rdotrig.args += ' --asetup="RDOtoRDOTrigger:Athena,21.0-mc16a,latest"'
 rdo2rdotrig.args += ' --triggerConfig="MCRECO:MC_pp_v6_tight_mc_prescale"'
 rdo2rdotrig.args += ' --imf="all:True"'
 rdo2rdotrig.args += ' --preExec="all:from TriggerJobOpts.TriggerFlags import TriggerFlags; TriggerFlags.run2Config=\'2016\'"'
diff --git a/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16d_build.py b/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16d_build.py
index c383a46b3eb66a277d30d362e8fc9179b8c8b0fb..21ff5c44fb324cbfb496e869bbb9ed31402081af 100755
--- a/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16d_build.py
+++ b/Trigger/TrigValidation/TrigAnalysisTest/test/test_trigAna_HITtoAOD_trigRel21_mc16d_build.py
@@ -56,7 +56,7 @@ rdo2rdotrig.input = ''
 rdo2rdotrig.imf = False
 rdo2rdotrig.explicit_input = True
 rdo2rdotrig.args = '--inputRDOFile=RDO.pool.root --outputRDO_TRIGFile=RDO_TRIG.pool.root'
-rdo2rdotrig.args += ' --asetup="RDOtoRDOTrigger:Athena,21.0-mc16d,slc6,latest"'
+rdo2rdotrig.args += ' --asetup="RDOtoRDOTrigger:Athena,21.0-mc16d,latest"'
 rdo2rdotrig.args += ' --triggerConfig="MCRECO:MC_pp_v7_tight_mc_prescale"'
 rdo2rdotrig.args += ' --imf="all:True"'
diff --git a/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py b/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py
index 1ef81f81c0c1dd414a8e2f56762fe74fb4c01877..3ea6726e9574e337e36ffe3e2db53ac1eb777cb3 100644
--- a/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py
+++ b/Trigger/TriggerCommon/TriggerJobOpts/share/runHLT_standalone.py
@@ -504,12 +504,6 @@ if not opt.createHLTMenuExternally:


-#Needed to get full output from TrigSignatureMoniMT with a large menu: see ATR-21487
-#Can be removed once chainDump.py is used instead of log file parsing
-svcMgr.MessageSvc.infoLimit=10000
-
-
-
 from TrigConfigSvc.TrigConfigSvcCfg import getHLTConfigSvc
 svcMgr += conf2toConfigurable( getHLTConfigSvc(ConfigFlags) )

@@ -626,9 +620,13 @@ if opt.reverseViews or opt.filterViews:
     include("TriggerTest/disableChronoStatSvcPrintout.py")

 #-------------------------------------------------------------
-# Disable spurious warnings from HepMcParticleLink, ATR-21838
+# MessageSvc
 #-------------------------------------------------------------
+svcMgr.MessageSvc.Format = "% F%40W%C%4W%R%e%s%8W%R%T %0W%M"
+svcMgr.MessageSvc.enableSuppression = False
+
 if ConfigFlags.Input.isMC:
+    # Disable spurious warnings from HepMcParticleLink, ATR-21838
     svcMgr.MessageSvc.setError += ['HepMcParticleLink']

 #-------------------------------------------------------------
diff --git a/Trigger/TriggerCommon/TriggerMenuMT/CMakeLists.txt b/Trigger/TriggerCommon/TriggerMenuMT/CMakeLists.txt
index 7a03fb29a6723ae7381504434209e08b7f79721b..aec1bf6342f44cffd13852b2c26dbce60362be3e 100644
--- a/Trigger/TriggerCommon/TriggerMenuMT/CMakeLists.txt
+++ b/Trigger/TriggerCommon/TriggerMenuMT/CMakeLists.txt
@@ -151,6 +151,12 @@ atlas_add_test( LS2_emu_menu_DH
                 PROPERTIES TIMEOUT 500
                 POST_EXEC_SCRIPT nopost.sh )

+file( MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/unitTestRun_EventBuildingSequenceSetup )
+atlas_add_test( EventBuildingSequenceSetup
+                SCRIPT python -m TriggerMenuMT.HLTMenuConfig.CommonSequences.EventBuildingSequenceSetup
+                PROPERTIES WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/unitTestRun_EventBuildingSequenceSetup
+                POST_EXEC_SCRIPT nopost.sh )
+
 #----------------------------------
 # List of menus to be created:
 atlas_build_lvl1_trigger_menu( LS2_v1 )
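Note: the new EventBuildingSequenceSetup test is wired as a plain script test: atlas_add_test invokes `python -m TriggerMenuMT.HLTMenuConfig.CommonSequences.EventBuildingSequenceSetup`, which runs the module's `if __name__ == "__main__"` block (added in the next diff) and uses the process exit code as the verdict. The minimal shape of such a self-testing module, with a hypothetical check function:

    # Python sketch: module-level self-test driven by `python -m pkg.module`.
    if __name__ == "__main__":
        import sys
        failures = run_self_checks()   # hypothetical: returns the number of failures
        sys.exit(failures)             # a non-zero exit code marks the test as failed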
diff --git a/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/CommonSequences/EventBuildingSequenceSetup.py b/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/CommonSequences/EventBuildingSequenceSetup.py
index e3cdd1e782339ead5cbfd2b88265d312b70d13ea..5f5fb419daf15ef5874939c04527e01a7aebf460 100644
--- a/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/CommonSequences/EventBuildingSequenceSetup.py
+++ b/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/CommonSequences/EventBuildingSequenceSetup.py
@@ -8,7 +8,7 @@ from TriggerMenuMT.HLTMenuConfig.Menu.MenuComponents import ChainStep, MenuSeque
 from TrigPartialEventBuilding.TrigPartialEventBuildingConf import PEBInfoWriterAlg
 from TrigPartialEventBuilding.TrigPartialEventBuildingConfig import StaticPEBInfoWriterToolCfg, RoIPEBInfoWriterToolCfg
 from DecisionHandling import DecisionHandlingConf
-from libpyeformat_helper import SubDetector
+from libpyeformat_helper import SourceIdentifier, SubDetector
 from AthenaCommon.CFElements import seqAND, findAlgorithm
 from AthenaCommon.Logging import logging
 log = logging.getLogger('EventBuildingSequenceSetup')
@@ -169,3 +169,46 @@ def alignEventBuildingSteps(all_chains):
             numStepsNeeded = maxPebStepPosition[ebt] - pebStepPosition
             log.debug('Aligning PEB step for chain %s by adding %d empty steps', chainDict['chainName'], numStepsNeeded)
             chainConfig.insertEmptySteps('EmptyPEBAlign', numStepsNeeded, pebStepPosition-1)
+
+
+# Unit test
+if __name__ == "__main__":
+    failures = 0
+    for eb_identifier in EventBuildingInfo.getAllEventBuildingIdentifiers():
+        tool = None
+        try:
+            tool = pebInfoWriterTool('TestTool_'+eb_identifier, eb_identifier)
+        except Exception as ex:
+            failures += 1
+            log.error('Caught exception while configuring PEBInfoWriterTool for %s: %s', eb_identifier, ex)
+            continue
+
+        if not tool:
+            failures += 1
+            log.error('No tool created for %s', eb_identifier)
+            continue
+
+        if tool.__cpp_type__ not in ['StaticPEBInfoWriterTool', 'RoIPEBInfoWriterTool']:
+            failures += 1
+            log.error('Unexpected tool type for %s: %s', eb_identifier, tool.__cpp_type__)
+            continue
+
+        robs = tool.ROBList if tool.__cpp_type__ == 'StaticPEBInfoWriterTool' else tool.ExtraROBs
+        dets = tool.SubDetList if tool.__cpp_type__ == 'StaticPEBInfoWriterTool' else tool.ExtraSubDets
+        robs_check_passed = True
+        for rob_id in robs:
+            rob_sid = SourceIdentifier(rob_id)
+            rob_det_id = rob_sid.subdetector_id()
+            if int(rob_det_id) in dets:
+                robs_check_passed = False
+                log.error('Redundant configuration for %s: ROB %s added to the ROB list while full SubDetector '
+                          '%s is already in the SubDets list', eb_identifier, rob_sid.human(), str(rob_det_id))
+
+        if not robs_check_passed:
+            failures += 1
+            continue
+
+        log.info('%s correctly configured', tool.name)
+
+    import sys
+    sys.exit(failures)
diff --git a/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/Menu/StreamInfo.py b/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/Menu/StreamInfo.py
index f91ed07976aa2b77371cded74c9ad6477547e603..0b63eb0f903020ee41285663dc7f18c042245371 100644
--- a/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/Menu/StreamInfo.py
+++ b/Trigger/TriggerCommon/TriggerMenuMT/python/HLTMenuConfig/Menu/StreamInfo.py
@@ -49,7 +49,7 @@ _all_streams = [
     StreamInfo('express', 'express', True, True),
     # MONITORING STREAMS
     StreamInfo('IDMonitoring', 'monitoring', True, True),
-    StreamInfo('CSC', 'monitoring', True, True),
+    StreamInfo('CSC', 'monitoring', True, False),
     # CALIBRATION STREAMS
     StreamInfo('BeamSpot', 'calibration', True, False),
     StreamInfo('LArCells', 'calibration', False, False),
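Note: the StreamInfo change and the new menu check below are two halves of one constraint. Judging by the lookup in PartialEventBuildingChecks, the last StreamInfo flag is whether the stream forces full event building, and a stream fed by PEB chains (such as CSC here) must have it set to False. Condensed, the rule the checker enforces is roughly the following, where is_peb_chain() is a stand-in for the identifier matching done in run():

    # Python sketch of the consistency rule (see the full check in the next diff):
    if is_peb_chain(chain_name) and config['streams'][stream_name]['forceFullEventBuilding']:
        failures.append('PEB chain %s streamed to full-event-building stream %s'
                        % (chain_name, stream_name))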
diff --git a/Trigger/TriggerCommon/TriggerMenuMT/scripts/menu_config_tests.py b/Trigger/TriggerCommon/TriggerMenuMT/scripts/menu_config_tests.py
index 04ae8bc1e243e1a54d9bd051551547b7c042fb81..72a6380828e9ce4a96f416aff9f359777cde98ee 100644
--- a/Trigger/TriggerCommon/TriggerMenuMT/scripts/menu_config_tests.py
+++ b/Trigger/TriggerCommon/TriggerMenuMT/scripts/menu_config_tests.py
@@ -155,11 +155,55 @@ class RestrictedCTPIDs(MenuVerification):
                         if ctp_id > 512]
         self.failures.extend(over_max_ids)

+
+class PartialEventBuildingChecks(MenuVerification):
+    def __init__(self):
+        super(PartialEventBuildingChecks, self).__init__(
+            description='Config consistency of Partial Event Building')
+
+    def run(self, config):
+        from TriggerMenuMT.HLTMenuConfig.Menu import EventBuildingInfo
+        eb_identifiers = EventBuildingInfo.getAllEventBuildingIdentifiers()
+
+        for chain_name, chain_config in config['chains'].items():
+            peb_identifiers = [idf for idf in eb_identifiers if idf in chain_name]
+            peb_writers = [seq for seq in chain_config['sequencers'] if 'PEBInfoWriter' in seq]
+
+            if len(peb_identifiers) == 0 and len(peb_writers) == 0:
+                # Not a PEB chain
+                continue
+
+            if len(peb_identifiers) != 1:
+                self.failures.append(
+                    '{:s} has {:d} event building identifiers'.format(chain_name, len(peb_identifiers)))
+
+            if len(peb_writers) != 1:
+                self.failures.append(
+                    '{:s} has {:d} PEBInfoWriter sequences'.format(chain_name, len(peb_writers)))
+
+            if peb_identifiers and peb_writers and not peb_writers[0].endswith(peb_identifiers[0]):
+                self.failures.append(
+                    '{:s} PEB sequence name {:s} doesn\'t end with PEB identifier {:s}'.format(
+                        chain_name, peb_writers[0], peb_identifiers[0]))
+
+            for stream_name in chain_config['streams']:
+                if stream_name not in config['streams']:
+                    self.failures.append(
+                        'Stream {:s} for chain {:s} is not defined in streaming configuration'.format(
+                            stream_name, chain_name))
+
+                if config['streams'][stream_name]['forceFullEventBuilding']:
+                    self.failures.append(
+                        'PEB chain {:s} streamed to a full-event-building stream {:s}'.format(
+                            chain_name, stream_name))
+
+
 menu_tests = {
     TriggerLevel.HLT: [
         UniqueChainNames(),
         ConsecutiveChainCounters(),
         StructuredChainNames(TriggerLevel.HLT),
+        PartialEventBuildingChecks()
     ],
     TriggerLevel.L1: [
         RestrictedCTPIDs(),
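Note: PartialEventBuildingChecks plugs into the same MenuVerification protocol as the other entries in menu_tests: construct, call run(config) with the parsed menu, then read .failures. One caveat as transcribed: when a chain references a stream missing from config['streams'], run() records the failure but still evaluates config['streams'][stream_name]['forceFullEventBuilding'] immediately afterwards, which would raise a KeyError; a continue after the first append would avoid that. Standalone usage would look roughly like:

    # Python sketch: running the new verification by hand on a menu dict.
    check = PartialEventBuildingChecks()
    check.run(config)              # config: dict with 'chains' and 'streams' sections
    for message in check.failures:
        print('FAIL:', message)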