From 048b02b1f047d77f318aa00cff81f6a79b3db297 Mon Sep 17 00:00:00 2001 From: Pascal Rene Baehr <pascal.baehr@cern.ch> Date: Sun, 9 Jun 2019 11:20:13 +0000 Subject: [PATCH] Futurize stage 2 changes for test files, scripts and shared files. Also various changes to comparisons. --- .../EvgenJobTransforms/python/download.py | 6 +- .../scripts/EvtGenFromEVNT_tf.py | 3 +- .../EvgenJobTransforms/scripts/Generate_tf.py | 10 +- .../share/Generate_randomseeds.py | 2 +- .../share/skeleton.ABtoEVGEN.py | 31 +-- .../share/skeleton.EVGENtoRivet.py | 7 +- .../share/skeleton.EvtGenFromEVNT.py | 3 +- .../share/skeleton.GENtoEVGEN.py | 34 +-- .../PATJobTransforms/python/DPDUtils.py | 6 +- .../python/PATTransformUtils.py | 3 +- .../PATJobTransforms/scripts/rhadd.py | 7 +- .../share/CommonSkeletonJobOptions.py | 4 +- .../share/skeleton.AODtoDAOD_tf.py | 15 +- .../share/skeleton.AODtoDPD_tf.py | 15 +- .../share/skeleton.AODtoRED_tf.py | 13 +- .../share/skeleton.ESDtoDPD_tf.py | 15 +- .../share/skeleton.NTUPtoRED_tf.py | 15 +- .../share/skeleton.PhysicsValidation_tf.py | 4 +- .../RecJobTransforms/python/ConfigUtils.py | 35 +-- .../RecJobTransforms/python/DPDUtils.py | 2 +- .../python/MixStreamConfig.py | 45 ++-- .../RecJobTransforms/python/MixingSelector.py | 14 +- .../RecJobTransforms/python/RDOFilePeeker.py | 26 ++- .../RecJobTransforms/python/recoTransforms.py | 6 +- .../RecJobTransforms/python/streaming_arg.py | 1 + .../RecJobTransforms/scripts/Reco_tf.py | 4 +- .../RecJobTransforms/scripts/TrainReco_tf.py | 18 +- .../share/IDTrackingPtMin400MeV.py | 9 +- .../RecJobTransforms/share/UseFrontier.py | 3 +- .../share/skeleton.AODtoTAG_tf.py | 3 +- .../share/skeleton.ESDtoAOD.py | 3 +- .../share/skeleton.ESDtoAOD_tf.py | 5 +- .../share/skeleton.ESDtoESD.py | 3 +- .../share/skeleton.MergePool_tf.py | 15 +- .../share/skeleton.MergeRDO_tf.py | 9 +- .../share/skeleton.RAWtoALL_tf.py | 3 +- .../share/skeleton.RAWtoESD.py | 5 +- .../share/skeleton.RAWtoESD_tf.py | 3 +- .../share/skeleton.RDOtoRDOtrigger.py | 2 + .../share/skeleton.csc_MergeHIST_trf.py | 37 ++-- .../python/HitsFilePeeker.py | 30 +-- .../python/SimBeamSpotShapeFilter.py | 1 + .../python/SimTransformUtils.py | 2 + .../share/skeleton.EVGENtoHIT.py | 1 + .../share/skeleton.EVGENtoHIT_ISF.py | 1 + .../share/skeleton.EVGENtoHIT_MC12.py | 1 + .../share/skeleton.FilterHit.py | 4 +- .../share/skeleton.HITSMerge.py | 12 +- .../share/skeleton.HITStoHIST_SIM.py | 6 +- .../share/skeleton.HITtoRDO.py | 5 +- .../share/skeleton.RDOtoHIST_DIGI.py | 6 +- .../share/skeleton.TestBeam.py | 4 +- Tools/PyJobTransforms/python/transform.py | 55 ++--- Tools/PyJobTransforms/python/trfAMI.py | 60 ++--- Tools/PyJobTransforms/python/trfArgClasses.py | 205 +++++++++--------- Tools/PyJobTransforms/python/trfArgs.py | 13 +- Tools/PyJobTransforms/python/trfDecorators.py | 20 +- Tools/PyJobTransforms/python/trfEnv.py | 7 +- Tools/PyJobTransforms/python/trfExe.py | 167 +++++++------- Tools/PyJobTransforms/python/trfExitCodes.py | 1 + .../python/trfFileUtils-lite.py | 36 +-- Tools/PyJobTransforms/python/trfFileUtils.py | 8 +- .../python/trfFileValidationFunctions.py | 1 + Tools/PyJobTransforms/python/trfGraph.py | 55 +++-- Tools/PyJobTransforms/python/trfJobOptions.py | 85 ++++---- Tools/PyJobTransforms/python/trfMPTools.py | 18 +- Tools/PyJobTransforms/python/trfReports.py | 86 ++++---- Tools/PyJobTransforms/python/trfSignal.py | 4 +- Tools/PyJobTransforms/python/trfUtils.py | 99 +++++---- .../python/trfValidateRootFile.py | 10 +- Tools/PyJobTransforms/python/trfValidation.py | 90 
++++---- .../PyJobTransforms/scripts/Asetup_report.py | 3 +- Tools/PyJobTransforms/scripts/EVNTMerge_tf.py | 3 +- Tools/PyJobTransforms/scripts/GetTfCommand.py | 13 +- Tools/PyJobTransforms/scripts/Merge_tf.py | 2 +- Tools/PyJobTransforms/scripts/ScanLog.py | 5 +- .../scripts/makeTrfJSONSignatures.py | 12 +- Tools/PyJobTransforms/share/UseFrontier.py | 5 +- .../test/test_AtlasG4_SimTTBar_tf.py | 4 +- .../test/test_Reco_AthenaMP_tf.py | 6 +- .../PyJobTransforms/test/test_Reco_EOS_tf.py | 2 +- Tools/PyJobTransforms/test/test_Reco_MC_tf.py | 6 +- .../test/test_Reco_Tier0_tf.py | 6 +- .../PyJobTransforms/test/test_Reco_q222_tf.py | 2 +- Tools/PyJobTransforms/test/test_Reco_tf.py | 8 +- .../test/test_trfArgClasses.py | 23 +- .../test/test_trfArgClassesATLAS.py | 23 +- Tools/PyJobTransforms/test/test_trfEnv.py | 6 +- Tools/PyJobTransforms/test/test_trfMPTools.py | 5 +- Tools/PyJobTransforms/test/test_trfReports.py | 7 +- .../test/test_trfSubstepIntegration.py | 5 +- Tools/PyJobTransforms/test/test_trfUtils.py | 13 +- .../test/test_trfUtilsDBRelease.py | 4 +- .../test/test_trfValidation.py | 34 +-- 94 files changed, 970 insertions(+), 764 deletions(-) diff --git a/Generators/EvgenJobTransforms/python/download.py b/Generators/EvgenJobTransforms/python/download.py index 377332f64a3..25b0ad26d8c 100644 --- a/Generators/EvgenJobTransforms/python/download.py +++ b/Generators/EvgenJobTransforms/python/download.py @@ -1,3 +1,5 @@ +from future import standard_library +standard_library.install_aliases() # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration def downloadUsingProxy(url, filename=None): @@ -24,8 +26,8 @@ def downloadUsingProxy(url, filename=None): del os.environ['http_proxy'] cmd = "wget --waitretry=5 --tries=3 --connect-timeout=20 --read-timeout=120 -O %s %s" % (filename, url) msg += "Trying to retrieve '%s' using proxy '%s' via: %s\n" % (url, proxy, cmd) - import commands - status, output = commands.getstatusoutput(cmd) + import subprocess + status, output = subprocess.getstatusoutput(cmd) if status == 0: msg += "Downloaded %s using proxy '%s'\n" % (url, proxy) break diff --git a/Generators/EvgenJobTransforms/scripts/EvtGenFromEVNT_tf.py b/Generators/EvgenJobTransforms/scripts/EvtGenFromEVNT_tf.py index cca31344052..6de8db970d9 100755 --- a/Generators/EvgenJobTransforms/scripts/EvtGenFromEVNT_tf.py +++ b/Generators/EvgenJobTransforms/scripts/EvtGenFromEVNT_tf.py @@ -2,6 +2,7 @@ # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +from __future__ import print_function import sys import time @@ -34,7 +35,7 @@ def main(): def getTransform(): executorSet = set() - print type(executorSet) + print(type(executorSet)) executorSet.add(athenaExecutor(name = 'EvtGenFromEVNT', skeletonFile = 'EvgenJobTransforms/skeleton.EvtGenFromEVNT.py',inData = ['EVNT'], outData = ['EVNT_MRG'])) trf = transform(executor = executorSet) diff --git a/Generators/EvgenJobTransforms/scripts/Generate_tf.py b/Generators/EvgenJobTransforms/scripts/Generate_tf.py index 3b2645e5aee..4090a48e7e7 100755 --- a/Generators/EvgenJobTransforms/scripts/Generate_tf.py +++ b/Generators/EvgenJobTransforms/scripts/Generate_tf.py @@ -4,6 +4,8 @@ # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration Run event simulation and produce an EVNT file. 
""" +from __future__ import print_function + import os, sys, time, shutil from PyJobTransforms.trfLogger import msg @@ -91,12 +93,12 @@ class EvgenExecutor(athenaExecutor): local_path = None if ("localPath" in self._trf.argdict ): local_path = self._trf.argdict["localPath"].value - print("local path",local_path) + print(("local path",local_path)) cvmfs_path = os.path.join(sw_base, "atlas.cern.ch") if ((local_path is not None) and (os.path.exists(local_path))) : mk_jo_proxy(local_path, "MC15JobOptions","_joproxy15") - print("JO fragments taken from local path i.e. ",local_path) + print(("JO fragments taken from local path i.e. ",local_path)) elif os.path.exists(cvmfs_path): # TODO: Make the package name configurable if "MC14" in str(joparam): @@ -127,8 +129,8 @@ class EvgenExecutor(athenaExecutor): def move_files(main_dir,tmp_dir,whitelist): files = os.listdir(tmp_dir) - print("list of files ",files) - print("white list ",whitelist) + print(("list of files ",files)) + print(("white list ",whitelist)) files.sort() for f in files: for i in whitelist: diff --git a/Generators/EvgenJobTransforms/share/Generate_randomseeds.py b/Generators/EvgenJobTransforms/share/Generate_randomseeds.py index c6087a12d1a..c517e473be8 100644 --- a/Generators/EvgenJobTransforms/share/Generate_randomseeds.py +++ b/Generators/EvgenJobTransforms/share/Generate_randomseeds.py @@ -52,7 +52,7 @@ else: ## Pass the random seed from the transform command line into each used generator's seed config string seedstrs = [] for gen in evgenConfig.generators: - if genseeds.has_key(gen): + if gen in genseeds: for seedtemplate in genseeds[gen]: seed = runArgs.randomSeed if runArgs.trfSubstepName == 'afterburn': diff --git a/Generators/EvgenJobTransforms/share/skeleton.ABtoEVGEN.py b/Generators/EvgenJobTransforms/share/skeleton.ABtoEVGEN.py index 78fbd5dcaed..1634728c1a2 100644 --- a/Generators/EvgenJobTransforms/share/skeleton.ABtoEVGEN.py +++ b/Generators/EvgenJobTransforms/share/skeleton.ABtoEVGEN.py @@ -1,4 +1,9 @@ """Functionality core of the Generate_tf transform""" +from __future__ import print_function +from __future__ import division + +from future import standard_library +standard_library.install_aliases() ##============================================================== ## Basic configuration @@ -436,7 +441,7 @@ def mk_symlink(srcfile, dstfile): os.remove(dstfile) if not os.path.exists(dstfile): evgenLog.info("Symlinking %s to %s" % (srcfile, dstfile)) - print "Symlinking %s to %s" % (srcfile, dstfile) + print("Symlinking %s to %s" % (srcfile, dstfile)) os.symlink(srcfile, dstfile) else: evgenLog.debug("Symlinking: %s is already the same as %s" % (dstfile, srcfile)) @@ -463,30 +468,30 @@ if _checkattr("description", required=True): msg = evgenConfig.description if _checkattr("notes"): msg += " " + evgenConfig.notes - print "MetaData: %s = %s" % ("physicsComment", msg) + print("MetaData: %s = %s" % ("physicsComment", msg)) if _checkattr("generators", required=True): - print "MetaData: %s = %s" % ("generatorName", "+".join(gennames)) + print("MetaData: %s = %s" % ("generatorName", "+".join(gennames))) if _checkattr("process"): - print "MetaData: %s = %s" % ("physicsProcess", evgenConfig.process) + print("MetaData: %s = %s" % ("physicsProcess", evgenConfig.process)) if _checkattr("tune"): - print "MetaData: %s = %s" % ("generatorTune", evgenConfig.tune) + print("MetaData: %s = %s" % ("generatorTune", evgenConfig.tune)) if _checkattr("hardPDF"): - print "MetaData: %s = %s" % ("hardPDF", evgenConfig.hardPDF) + print("MetaData: 
%s = %s" % ("hardPDF", evgenConfig.hardPDF)) if _checkattr("softPDF"): - print "MetaData: %s = %s" % ("softPDF", evgenConfig.softPDF) + print("MetaData: %s = %s" % ("softPDF", evgenConfig.softPDF)) if _checkattr("keywords"): - print "MetaData: %s = %s" % ("keywords", ", ".join(evgenConfig.keywords).lower()) + print("MetaData: %s = %s" % ("keywords", ", ".join(evgenConfig.keywords).lower())) if _checkattr("specialConfig"): - print "MetaData: %s = %s" % ("specialConfig", evgenConfig.specialConfig) + print("MetaData: %s = %s" % ("specialConfig", evgenConfig.specialConfig)) # TODO: Require that a contact / JO author is always set if _checkattr("contact"): - print "MetaData: %s = %s" % ("contactPhysicist", ", ".join(evgenConfig.contact)) + print("MetaData: %s = %s" % ("contactPhysicist", ", ".join(evgenConfig.contact))) # Output list of generator filters used filterNames = [alg.getType() for alg in acas.iter_algseq(filtSeq)] excludedNames = ['AthSequencer', 'PyAthena::Alg', 'TestHepMC'] filterNames = list(set(filterNames) - set(excludedNames)) -print "MetaData: %s = %s" % ("genFilterNames", ", ".join(filterNames)) +print("MetaData: %s = %s" % ("genFilterNames", ", ".join(filterNames))) ##============================================================== @@ -498,8 +503,8 @@ runPars = RunArguments() runPars.minevents = evgenConfig.minevents runPars.maxeventsstrategy = evgenConfig.maxeventsstrategy with open("config.pickle", 'w') as f: - import cPickle - cPickle.dump(runPars, f) + import pickle + pickle.dump(runPars, f) ##============================================================== diff --git a/Generators/EvgenJobTransforms/share/skeleton.EVGENtoRivet.py b/Generators/EvgenJobTransforms/share/skeleton.EVGENtoRivet.py index ba0ce1b485d..50b5ee5519d 100644 --- a/Generators/EvgenJobTransforms/share/skeleton.EVGENtoRivet.py +++ b/Generators/EvgenJobTransforms/share/skeleton.EVGENtoRivet.py @@ -1,5 +1,8 @@ """Functionality core of the Rivet_tf transform""" +from future import standard_library +standard_library.install_aliases() + ##============================================================== ## Basic configuration ##============================================================== @@ -84,8 +87,8 @@ acas.dumpMasterSequence() from PyJobTransformsCore.runargs import RunArguments runPars = RunArguments() with open("config.pickle", 'w') as f: - import cPickle - cPickle.dump(runPars, f) + import pickle + pickle.dump(runPars, f) ##============================================================== diff --git a/Generators/EvgenJobTransforms/share/skeleton.EvtGenFromEVNT.py b/Generators/EvgenJobTransforms/share/skeleton.EvtGenFromEVNT.py index 68fc68f19c8..455c440d300 100644 --- a/Generators/EvgenJobTransforms/share/skeleton.EvtGenFromEVNT.py +++ b/Generators/EvgenJobTransforms/share/skeleton.EvtGenFromEVNT.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################# ## basic jobO configuration include("PATJobTransforms/CommonSkeletonJobOptions.py") @@ -23,7 +24,7 @@ if hasattr(runArgs,"preExec"): ## Pre-include if hasattr(runArgs,"preInclude"): for fragment in runArgs.preInclude: - print "preInclude",fragment + print("preInclude",fragment) include(fragment) diff --git a/Generators/EvgenJobTransforms/share/skeleton.GENtoEVGEN.py b/Generators/EvgenJobTransforms/share/skeleton.GENtoEVGEN.py index cc7faf5a6ed..72fd318d239 100644 --- a/Generators/EvgenJobTransforms/share/skeleton.GENtoEVGEN.py +++ b/Generators/EvgenJobTransforms/share/skeleton.GENtoEVGEN.py @@ -1,4 +1,10 @@ """Functionality core 
of the Generate_tf transform""" +from __future__ import print_function +from __future__ import division +from builtins import filter + +from future import standard_library +standard_library.install_aliases() ##============================================================== ## Basic configuration @@ -475,7 +481,7 @@ def mk_symlink(srcfile, dstfile): os.remove(dstfile) if not os.path.exists(dstfile): evgenLog.info("Symlinking %s to %s" % (srcfile, dstfile)) - print "Symlinking %s to %s" % (srcfile, dstfile) + print("Symlinking %s to %s" % (srcfile, dstfile)) os.symlink(srcfile, dstfile) else: evgenLog.debug("Symlinking: %s is already the same as %s" % (dstfile, srcfile)) @@ -530,26 +536,26 @@ if _checkattr("description", required=True): msg = evgenConfig.description if _checkattr("notes"): msg += " " + evgenConfig.notes - print "MetaData: %s = %s" % ("physicsComment", msg) + print("MetaData: %s = %s" % ("physicsComment", msg)) if _checkattr("generators", required=True): - print "MetaData: %s = %s" % ("generatorName", "+".join(gennames)) + print("MetaData: %s = %s" % ("generatorName", "+".join(gennames))) if _checkattr("process"): - print "MetaData: %s = %s" % ("physicsProcess", evgenConfig.process) + print("MetaData: %s = %s" % ("physicsProcess", evgenConfig.process)) if _checkattr("tune"): - print "MetaData: %s = %s" % ("generatorTune", evgenConfig.tune) + print("MetaData: %s = %s" % ("generatorTune", evgenConfig.tune)) if _checkattr("hardPDF"): - print "MetaData: %s = %s" % ("hardPDF", evgenConfig.hardPDF) + print("MetaData: %s = %s" % ("hardPDF", evgenConfig.hardPDF)) if _checkattr("softPDF"): - print "MetaData: %s = %s" % ("softPDF", evgenConfig.softPDF) + print("MetaData: %s = %s" % ("softPDF", evgenConfig.softPDF)) if _checkattr("keywords"): - print "MetaData: %s = %s" % ("keywords", ", ".join(evgenConfig.keywords).lower()) + print("MetaData: %s = %s" % ("keywords", ", ".join(evgenConfig.keywords).lower())) if _checkattr("specialConfig"): - print "MetaData: %s = %s" % ("specialConfig", evgenConfig.specialConfig) + print("MetaData: %s = %s" % ("specialConfig", evgenConfig.specialConfig)) # TODO: Require that a contact / JO author is always set if _checkattr("contact"): - print "MetaData: %s = %s" % ("contactPhysicist", ", ".join(evgenConfig.contact)) + print("MetaData: %s = %s" % ("contactPhysicist", ", ".join(evgenConfig.contact))) #if _checkattr( "randomSeed") : -print "MetaData: %s = %s" % ("randomSeed", str(runArgs.randomSeed)) +print("MetaData: %s = %s" % ("randomSeed", str(runArgs.randomSeed))) @@ -558,7 +564,7 @@ print "MetaData: %s = %s" % ("randomSeed", str(runArgs.randomSeed)) filterNames = [alg.getType() for alg in acas.iter_algseq(filtSeq)] excludedNames = ['AthSequencer', 'PyAthena::Alg', 'TestHepMC'] filterNames = list(set(filterNames) - set(excludedNames)) -print "MetaData: %s = %s" % ("genFilterNames", ", ".join(filterNames)) +print("MetaData: %s = %s" % ("genFilterNames", ", ".join(filterNames))) ##============================================================== @@ -570,8 +576,8 @@ runPars = RunArguments() runPars.minevents = evgenConfig.minevents runPars.maxeventsstrategy = evgenConfig.maxeventsstrategy with open("config.pickle", 'w') as f: - import cPickle - cPickle.dump(runPars, f) + import pickle + pickle.dump(runPars, f) ##============================================================== diff --git a/PhysicsAnalysis/PATJobTransforms/python/DPDUtils.py b/PhysicsAnalysis/PATJobTransforms/python/DPDUtils.py index 23128fe095c..cb44a15d096 100644 --- 
a/PhysicsAnalysis/PATJobTransforms/python/DPDUtils.py +++ b/PhysicsAnalysis/PATJobTransforms/python/DPDUtils.py @@ -10,7 +10,7 @@ from RecExConfig.RecoFunctions import AddValidItemToList def SetupOutputDPDs(runArgs,flagContainerList): DPDMakerScripts=[] for flagContainer in flagContainerList: - for flagName in flagContainer.__dict__.keys(): + for flagName in flagContainer.__dict__: flag=getattr(flagContainer,flagName) if hasattr(flag,"StreamName"): dpdName=flag.StreamName.lstrip("Stream") @@ -45,7 +45,7 @@ def SetupDPDIncludes(runArgs,flagContainerList,includeType): logger = logging.getLogger( "SetupDPDIncludes" ) # Loop over all DPD job properties: for flagContainer in flagContainerList: - for flagName in flagContainer.__dict__.keys(): + for flagName in flagContainer.__dict__: flag=getattr(flagContainer,flagName) # Check if this DPD type has been requested in the job: if hasattr(flag,"StreamName"): @@ -68,7 +68,7 @@ def SetupDPDIncludes(runArgs,flagContainerList,includeType): includeTypeList.append(argName) # Make sure that we have a list of scripts, not just one: includes = getattr(flag,includeType) - if type(includes) != list: + if not isinstance(includes, list): includes = [includes] pass # Now include all the specified scripts: diff --git a/PhysicsAnalysis/PATJobTransforms/python/PATTransformUtils.py b/PhysicsAnalysis/PATJobTransforms/python/PATTransformUtils.py index 05f1f8f5671..e4afc2a2805 100644 --- a/PhysicsAnalysis/PATJobTransforms/python/PATTransformUtils.py +++ b/PhysicsAnalysis/PATJobTransforms/python/PATTransformUtils.py @@ -1,3 +1,4 @@ +from builtins import zip # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @brief Module with PAT transform options and substeps @@ -59,7 +60,7 @@ def addNTUPMergeSubsteps(executorSet): extraNTUPs = getExtraDPDList(NTUPOnly = True) for ntup in extraNTUPs: executorSet.add(NTUPMergeExecutor(name='NTUPLEMerge'+ntup.name.replace('_',''), exe='hadd', inData=[ntup.name], outData=[ntup.name+'_MRG'], exeArgs=[])) - except ImportError, e: + except ImportError as e: msg.warning("Failed to get D3PD lists - probably D3PDs are broken in this release: {0}".format(e)) diff --git a/PhysicsAnalysis/PATJobTransforms/scripts/rhadd.py b/PhysicsAnalysis/PATJobTransforms/scripts/rhadd.py index 6068ecf7626..b69b4679262 100755 --- a/PhysicsAnalysis/PATJobTransforms/scripts/rhadd.py +++ b/PhysicsAnalysis/PATJobTransforms/scripts/rhadd.py @@ -1,3 +1,6 @@ +from __future__ import division +from builtins import object +from builtins import range #! /usr/bin/env python # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration @@ -65,7 +68,7 @@ class haddJob(object): logging.info('Will now execute merge: %s' % ' '.join(mergeCmd)) output = [] job = Popen(mergeCmd, stdout=PIPE, stderr=STDOUT, bufsize=1, close_fds=True) - while job.poll() == None: + while job.poll() is None: output.append(job.stdout.readline().strip()) self._exitCode = job.returncode if self._exitCode != 0: @@ -93,7 +96,7 @@ class haddStep(object): def _defineMergeJobs(self): # How many merges to do in this step? 
- nMerges = (len(self._inputFiles)-1) / self._bunchNumber + 1 + nMerges = (len(self._inputFiles)-1) // self._bunchNumber + 1 logging.debug('Need %d merges for level %d' % (nMerges, self._level)) if nMerges == 1: logging.debug('Final merge job: %s -> %s' % (self._inputFiles, self._inputFiles)) diff --git a/PhysicsAnalysis/PATJobTransforms/share/CommonSkeletonJobOptions.py b/PhysicsAnalysis/PATJobTransforms/share/CommonSkeletonJobOptions.py index 0f34bc8661b..15ac1d891dd 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/CommonSkeletonJobOptions.py +++ b/PhysicsAnalysis/PATJobTransforms/share/CommonSkeletonJobOptions.py @@ -1,3 +1,5 @@ +from past.builtins import basestring + ###################################################################### # # # Place holder for numerous common job options of skeleton.XXX files # @@ -36,7 +38,7 @@ else: athenaCommonFlags.EvtMax=-1 #RecExCommon configuration if hasattr(runArgs,"geometryVersion"): inputGeometryVersion = runArgs.geometryVersion - if type(inputGeometryVersion) == str and inputGeometryVersion.endswith("_VALIDATION"): + if isinstance(inputGeometryVersion, basestring) and inputGeometryVersion.endswith("_VALIDATION"): inputGeometryVersion = inputGeometryVersion.replace("_VALIDATION", "") globalflags.DetDescrVersion.set_Value_and_Lock( inputGeometryVersion ) if hasattr(runArgs,"conditionsTag"): globalflags.ConditionsTag.set_Value_and_Lock( runArgs.conditionsTag ) diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDAOD_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDAOD_tf.py index 0cd75325c1f..95545c85dce 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDAOD_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDAOD_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Skeleton file for AOD to DAOD (Reduction framework) job # # $Id: skeleton.AODtoDAOD_tf.py 731616 2016-03-22 15:25:39Z cranshaw $ @@ -10,7 +11,7 @@ msg.info( '****************** STARTING AOD->DAOD MAKING *****************' ) def getSubSequences(sequence,sequenceList): sequenceList.append(sequence) for item in sequence: - if type(item).__name__ == 'AthSequencer': + if isinstance(item, AthSequencer): getSubSequences(item,sequenceList) return @@ -18,7 +19,7 @@ if hasattr(runArgs, "reductionConf"): msg.info('Will attempt to make the following reduced formats: {0}'.format(runArgs.reductionConf)) else: msg.error('AOD Reduction job started, but with no "reductionConf" array - aborting') - raise RuntimeError, "No reductions configured" + raise RuntimeError("No reductions configured") include("RecJobTransforms/CommonRecoSkeletonJobOptions.py") @@ -29,7 +30,7 @@ try: release = project + '-' + version rec.AtlasReleaseVersion = release except: - print "WARNING: Unable to construct AtlasReleaseVersion from environment" + print("WARNING: Unable to construct AtlasReleaseVersion from environment") if hasattr(runArgs,"inputAODFile"): globalflags.InputFormat.set_Value_and_Lock('pool') @@ -57,7 +58,7 @@ elif hasattr(runArgs,'inputEVNTFile') or hasattr(runArgs,'jobConfig'): # Leave the remainder for the internal setup else: msg.error('AOD Reduction job started, but with no AOD inputs - aborting') - raise RuntimeError, "No AOD input" + raise RuntimeError("No AOD input") listOfFlags=[] @@ -65,7 +66,7 @@ try: from DerivationFrameworkCore.DerivationFrameworkProdFlags import derivationFlags listOfFlags.append(derivationFlags) except ImportError: - print "WARNING DerivationFrameworkProdFlags not available." 
+ print("WARNING DerivationFrameworkProdFlags not available.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) @@ -74,7 +75,7 @@ passThroughMode = False if hasattr(runArgs,"passThrough"): passThroughMode = runArgs.passThrough -#if (passThroughMode==True): +#if (passThroughMode is True): # msg.warning("Pass through mode is ON: decision of derivation kernels will be IGNORED!") # rec.doDPD.passThroughMode = True @@ -105,7 +106,7 @@ if passThroughMode: getSubSequences(mainSeq,sequenceList) for seq in sequenceList: for item in seq: - if type(item).__name__=='DerivationFramework__DerivationKernel': + if isinstance(item, DerivationFramework__DerivationKernel): item.SkimmingTools = [] msg.info( 'Pass through mode was requested. Skimming tools have been removed from all kernels.') diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDPD_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDPD_tf.py index 7c7a75059e7..90ebf104c29 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDPD_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoDPD_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for AOD->DPD @@ -20,22 +21,22 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D2PDMaker.D2PDFlags import D2PDFlags listOfFlags.append(D2PDFlags) except ImportError: - print "WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis." + print("WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis.") try: from TopPhysD2PDMaker.TopPhysD2PDFlags import topPhysDPD listOfFlags.append(topPhysDPD) except ImportError: - print "WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D3PDMakerConfig.D3PDProdFlags import prodFlags listOfFlags.append( prodFlags ) except ImportError: - print "WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) @@ -153,7 +154,7 @@ if hasattr(runArgs,"preInclude"): from PATJobTransforms.DPDUtils import SetupDPDPreIncludes dpdPreInclude = SetupDPDPreIncludes(runArgs, listOfFlags) # Examine if we have more output types -if type(dpdPreInclude) is not list: +if not isinstance(dpdPreInclude, list): recoLog.warning("Old return value from SetupDPDPostIncludes() - no check for output incompatibility done") else: additionalOutputs = set( [ outfile for outfile in dir(runArgs) if outfile.startswith('output') and outfile.endswith('File') ] ) - set(dpdPreInclude) @@ -174,7 +175,7 @@ try: ## Make "old style" D3PDs. for c in SetupOutputDPDs(runArgs, [oldProdFlags]): c() except ImportError: - print "WARNING: Old prod flags could not be included this. 
Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING: Old prod flags could not be included this. Only OK if you're using job transforms without the AtlasAnalysis project.") ## Offline prescales (has to be *after* the topOptions) if hasattr(runArgs,"prescales"): @@ -192,7 +193,7 @@ if hasattr(runArgs,"postInclude"): ## Post-includes defined for the DPDs: from PATJobTransforms.DPDUtils import SetupDPDPostIncludes dpdPostIncludeUsed = SetupDPDPostIncludes(runArgs, listOfFlags) -if type(dpdPostIncludeUsed) is not list: +if not isinstance(dpdPostIncludeUsed, list): recoLog.warning("Old return value from SetupDPDPostIncludes() - no check for output incompatibility done") else: additionalOutputs = set( [ outfile for outfile in dir(runArgs) if outfile.startswith('output') and outfile.endswith('File') ] ) - set(dpdPostIncludeUsed) diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoRED_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoRED_tf.py index b1734b3ad9a..16659687ea5 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoRED_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.AODtoRED_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Skeleton file for AOD to DAOD (Reduction framework) job # # $Id$ @@ -10,7 +11,7 @@ if hasattr(runArgs, "reductionConf"): msg.info('Will attempt to make the following reduced formats: {0}'.format(runArgs.reductionConf)) else: msg.error('AOD Reduction job started, but with no "reductionConf" array - aborting') - raise RuntimeError, "No reductions configured" + raise RuntimeError("No reductions configured") include("RecJobTransforms/CommonRecoSkeletonJobOptions.py") @@ -20,22 +21,22 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D2PDMaker.D2PDFlags import D2PDFlags listOfFlags.append(D2PDFlags) except ImportError: - print "WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis." + print("WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis.") try: from TopPhysD2PDMaker.TopPhysD2PDFlags import topPhysDPD listOfFlags.append(topPhysDPD) except ImportError: - print "WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D3PDMakerConfig.D3PDProdFlags import prodFlags listOfFlags.append( prodFlags ) except ImportError: - print "WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING D3PDProdFlags not available. 
Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) @@ -47,7 +48,7 @@ if hasattr(runArgs,"inputAODFile"): athenaCommonFlags.PoolAODInput.set_Value_and_Lock( runArgs.inputAODFile ) else: msg.error('AOD Reduction job started, but with no AOD inputs - aborting') - raise RuntimeError, "No AOD input" + raise RuntimeError("No AOD input") ## Pre-exec diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.ESDtoDPD_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.ESDtoDPD_tf.py index 62dce6ec412..d4c8788bc22 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.ESDtoDPD_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.ESDtoDPD_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for ESD->DPD @@ -18,17 +19,17 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D2PDMaker.D2PDFlags import D2PDFlags listOfFlags.append(D2PDFlags) except ImportError: - print "Unable to import listAODtoD2PD. This requires D2PDMaker-00-00-55-08 or D2PDMaker-00-00-62" + print("Unable to import listAODtoD2PD. This requires D2PDMaker-00-00-55-08 or D2PDMaker-00-00-62") try: from D3PDMakerConfig.D3PDProdFlags import prodFlags listOfFlags.append( prodFlags ) except ImportError: - print "WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs @@ -144,17 +145,17 @@ if hasattr(runArgs,"outputNTUP_FastCaloSimFile"): try: from D3PDMakerConfig.D3PDProdFlags import oldProdFlags except(ImportError): - print "WARNING oldProdFlags not available. " + print("WARNING oldProdFlags not available. ") pass try: from D3PDMakerConfig.D3PDMakerFlags import D3PDMakerFlags except(ImportError): - print "WARNING D3PDMakerFlags not available. " + print("WARNING D3PDMakerFlags not available. ") pass try: from SUSYD3PDMaker.SUSYD3PDFlags import SUSYD3PDFlags except(ImportError): - print "WARNING SUSYD3PDFlags not available. " + print("WARNING SUSYD3PDFlags not available. 
") pass ## Pre-exec @@ -180,7 +181,7 @@ else: include( "RecExCommon/RecExCommon_topOptions.py" ) try: for c in SetupOutputDPDs(runArgs, [oldProdFlags]): c() except NameError: - print "WARNING: oldProdFlags not available" + print("WARNING: oldProdFlags not available") pass ## Offline prescales (has to be *after* the topOptions) diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.NTUPtoRED_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.NTUPtoRED_tf.py index ca74590e62a..6a4b17c8952 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.NTUPtoRED_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.NTUPtoRED_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # @@ -18,22 +19,22 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D2PDMaker.D2PDFlags import D2PDFlags listOfFlags.append(D2PDFlags) except ImportError: - print "WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis." + print("WARNING D2PDFlags not available. Requires D2PDMaker-00-00-50 in AtlasAnalysis.") try: from TopPhysD2PDMaker.TopPhysD2PDFlags import topPhysDPD listOfFlags.append(topPhysDPD) except ImportError: - print "WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING TopPhysD2PDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") try: from D3PDMakerConfig.D3PDProdFlags import prodFlags listOfFlags.append( prodFlags ) except ImportError: - print "WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING D3PDProdFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) @@ -43,7 +44,7 @@ if hasattr(runArgs, "reductionConf"): msg.info('Will attempt to make the following reduced formats: {0}'.format(runArgs.reductionConf)) else: msg.error('NTUP Reduction job started, but with no "reductionConf" array - aborting') - raise RuntimeError, "No reductions configured" + raise RuntimeError("No reductions configured") ## max/skip events from AthenaCommon.AthenaCommonFlags import athenaCommonFlags @@ -69,7 +70,7 @@ for arg in dir(runArgs): inFile = getattr(runArgs,arg) athenaCommonFlags.FilesInput.set_Value_and_Lock(inFile) inFileArgs+=1 - print "Using argument ", arg, " = ",inFile, ", tree name = ", prodFlags.TreeName() + print("Using argument ", arg, " = ",inFile, ", tree name = ", prodFlags.TreeName()) if inFileArgs!=1: raise TransformArgumentError(message='Wrong number of inputNTUPXXXFile arguments: {0:d} instead of 1. Stopping!'.format(inFileArgs)) @@ -78,7 +79,7 @@ listOfFlags=[] try: listOfFlags.append( prodFlags ) except ImportError: - print "WARNING NTUPtoNTUPProdFlags.py is not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING NTUPtoNTUPProdFlags.py is not available. 
Only OK if you're using job transforms without the AtlasAnalysis project.") from NTUPtoNTUPCore.NTUPUtils import SetupOutputNTUPs from AthenaCommon.JobProperties import jobproperties diff --git a/PhysicsAnalysis/PATJobTransforms/share/skeleton.PhysicsValidation_tf.py b/PhysicsAnalysis/PATJobTransforms/share/skeleton.PhysicsValidation_tf.py index df71e0643a2..de3a07c2cd4 100644 --- a/PhysicsAnalysis/PATJobTransforms/share/skeleton.PhysicsValidation_tf.py +++ b/PhysicsAnalysis/PATJobTransforms/share/skeleton.PhysicsValidation_tf.py @@ -1,3 +1,5 @@ +from future.utils import iteritems + ############################################################### # # Skeleton top job options for Physics Validation @@ -108,7 +110,7 @@ monMan.FileKey = "PhysVal" # Schedule individual validations from PyJobTransforms.trfUtils import findFile -for validationType, enabled in validationDict.iteritems(): +for validationType, enabled in iteritems(validationDict): if enabled: JOFile = 'PhysValMonitoring/PhysVal{0}_jobOptions.py'.format(validationType) if findFile(os.environ['JOBOPTSEARCHPATH'], JOFile): diff --git a/Reconstruction/RecJobTransforms/python/ConfigUtils.py b/Reconstruction/RecJobTransforms/python/ConfigUtils.py index d2b9f983f93..fe7189dfba9 100644 --- a/Reconstruction/RecJobTransforms/python/ConfigUtils.py +++ b/Reconstruction/RecJobTransforms/python/ConfigUtils.py @@ -5,26 +5,27 @@ # First define a list of functions # Then call them from PredefinedConfigFunctions(key,runArgs) at the end +from __future__ import print_function def ConfigureFieldAndGeo(runArgs): - print "Method ConfigureFieldAndGeo..." + print("Method ConfigureFieldAndGeo...") from PATJobTransforms.GeoConfig import GetRunNumber RunNumber=GetRunNumber(runArgs) - print "Configuring geometry and field from RunNuber",RunNumber + print("Configuring geometry and field from RunNuber",RunNumber) from RecExCommission.RecExCommission_MagneticFieldConfig import setFieldConfig setFieldConfig(RunNumber) return #------------------------ def SetBeamType(runArgs): - print "Method SetBeamType..." + print("Method SetBeamType...") BeamType='NONE' if hasattr(runArgs,"beamType"): BeamType=runArgs.beamType elif hasattr(runArgs,"outputESDFile") or hasattr(runArgs,"outputAODFile") or hasattr(runArgs,"inputESDFile") or hasattr(runArgs,"inputAODFile"): - print "Attempting to set beamType from outputESD or outputAOD file..." 
- print "known cosmics projects: data08_cos, data08_cosmag" - print "known single beam projects: data08_1beam, data08_1beammag" - print "know collisions projects: data08_coll900, data08 " + print("Attempting to set beamType from outputESD or outputAOD file...") + print("known cosmics projects: data08_cos, data08_cosmag") + print("known single beam projects: data08_1beam, data08_1beammag") + print("know collisions projects: data08_coll900, data08 ") if hasattr(runArgs,"outputESDFile"): fileName=runArgs.outputESDFile elif hasattr(runArgs,"outputAODFile"): fileName=runArgs.outputAODFile @@ -40,40 +41,40 @@ def SetBeamType(runArgs): firstName=subNames[0] #from run08_cos.ESD.pool.root, split by dots and project is the first part project = firstName.split('.')[0] - print 'project = ',project + print('project = ',project) if project=="data08_cos" or project=="data08_cosmag": BeamType='cosmics' elif project=="data08_1beam" or project=="data08_1beammag": BeamType='singlebeam' elif project=="data08" or project=="data08_coll900": BeamType='collisions' else: - print "\n\nWARNING - unknown project name '%s'\n\n"%project - print "Will try to guess project from 'data', '_cos', '_1beam', '_coll' keywords..." + print("\n\nWARNING - unknown project name '%s'\n\n"%project) + print("Will try to guess project from 'data', '_cos', '_1beam', '_coll' keywords...") if project.startswith('data'): if project.rfind('_cos')>0: BeamType='cosmics' elif project.rfind('_1beam')>0: BeamType='singlebeam' elif project.rfind('_coll')>0: BeamType='collisions' - if BeamType is 'NONE': - print "Unexpected project name. Maybe the project position is wrong?" - print "Trying to search the complete file name for known projects... [%s]"%fileName + if BeamType == 'NONE': + print("Unexpected project name. Maybe the project position is wrong?") + print("Trying to search the complete file name for known projects... 
[%s]"%fileName) if fileName.rfind("data08_cos")>0 or fileName.rfind("data08_cosmag")>0: BeamType='cosmics' elif fileName.rfind("data08_1beam")>0 or fileName.rfind("data08_1beammag")>0: BeamType='singlebeam' elif fileName.rfind("data08.")>0 or fileName.rfind("data08_coll900")>0: BeamType='collisions' else: raise RuntimeError("(2) Unable to determine beamType from file named '%s'"%fileName) - if BeamType is 'NONE': + if BeamType == 'NONE': raise RuntimeError("(3) Unable to determine beamType from file name nor from direct argument") else: from AthenaCommon.BeamFlags import jobproperties jobproperties.Beam.beamType.set_Value_and_Lock( BeamType ) - print "The beamType is set to",BeamType + print("The beamType is set to",BeamType) return def PredefinedConfigFunctions(key,runArgs): - if key is "FieldAndGeo": + if key == "FieldAndGeo": ConfigureFieldAndGeo(runArgs) - elif key is "BeamType": + elif key == "BeamType": SetBeamType(runArgs) else: raise RuntimeError("Unknown key '%s' in PredefinedConfigFunctions(key,runArgs)"%key) diff --git a/Reconstruction/RecJobTransforms/python/DPDUtils.py b/Reconstruction/RecJobTransforms/python/DPDUtils.py index 7fb9f8b3b2f..f31d5db49f2 100644 --- a/Reconstruction/RecJobTransforms/python/DPDUtils.py +++ b/Reconstruction/RecJobTransforms/python/DPDUtils.py @@ -7,7 +7,7 @@ from RecExConfig.RecoFunctions import AddValidItemToList def SetupOutputDPDs(runArgs,flagContainerList): DPDMakerScripts=[] for flagContainer in flagContainerList: - for flagName in flagContainer.__dict__.keys(): + for flagName in flagContainer.__dict__: flag=getattr(flagContainer,flagName) if hasattr(flag,"StreamName"): dpdName=flag.StreamName.lstrip("Stream") diff --git a/Reconstruction/RecJobTransforms/python/MixStreamConfig.py b/Reconstruction/RecJobTransforms/python/MixStreamConfig.py index 0b1c66bf5aa..b871caa6e48 100644 --- a/Reconstruction/RecJobTransforms/python/MixStreamConfig.py +++ b/Reconstruction/RecJobTransforms/python/MixStreamConfig.py @@ -1,3 +1,10 @@ +from __future__ import print_function +from __future__ import division +from builtins import object +from future.utils import iteritems + +from builtins import int + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ####################################################################################################### @@ -28,7 +35,7 @@ class ListOfSelectors(UniqueList): # check that all entries are strings for v in value: valType = type(v).__name__ - if valType != 'MixingSelector': + if not isinstance(v, MixingSelector): raise JobConfigError("Entry %r in %s is not a MixingSelector (but an %s)" % \ (v,variableName,valType) ) return value @@ -42,13 +49,13 @@ class FloatList(Descriptor): def _checkType(self,variableName,value): """Check that <value> is a list or tuple, and make the tuple a list.""" valType = type(value).__name__ - if valType != 'list' and valType != 'tuple': + if not isinstance(value, list) and not isinstance(value, tuple): raise JobConfigError('%s should be a list or tuple. Got %s instead.' 
& (variableName, valType)) - if valType == 'tuple': + if isinstance(value, tuple): value = list(value) for v in value: valType = type(v).__name__ - if valType != 'float': + if not isinstance(v, float): raise JobConfigError("Entry %r in %s is not a float (but an %s)" % (v,variableName,valType) ) return value @@ -64,7 +71,7 @@ class FloatList(Descriptor): newAllowed=[] if not allowedValues: return newAllowed - if type(allowedValues[0]).__name__ == 'int' and allowedValues[0] >= 0: + if isinstance(allowedValues[0], int) and allowedValues[0] >= 0: newAllowed += [ allowedValues[0] ] return newAllowed @@ -121,7 +128,7 @@ if not 'mixStreamConfig' in dir(): # ...StreamAlg.SampleWeights = m.ListOfSampleWeights() ############################################ -class MixingPartitioner: +class MixingPartitioner(object): """ This collects input files to add to the stager, and generates python to configure EventMixer and your optional MCRunNumber-based event weighting service. """ def __init__(self): self.__ThisPartition = -1 @@ -140,14 +147,14 @@ class MixingPartitioner: self.__ThisPartition = thisPartition if self.__ThisPartition < 0 : - print "*** MixingPartitioner: WARNING you requested an invalid partition number: setting to 0. *** " + print("*** MixingPartitioner: WARNING you requested an invalid partition number: setting to 0. *** ") self.__ThisPartition = 0; else: if self.__ThisPartition >= (self.__NumPartitions - 1): - print "*** MixingPartitioner: WARNING you requested an invalid (big) partition number: setting to", self.__NumPartitions - 1, ". *** " + print("*** MixingPartitioner: WARNING you requested an invalid (big) partition number: setting to", self.__NumPartitions - 1, ". *** ") self.__ThisPartition = self.__NumPartitions - 1; else: - print "*** MixingPartitioner: INFO you requested partition number", self.__ThisPartition + print("*** MixingPartitioner: INFO you requested partition number", self.__ThisPartition) self.__ThisScaleFactor = mixStreamConfig.ThisLuminosityFraction(self.__ThisPartition) #the job config knows partitioning self.__ScaleFactorSum = mixStreamConfig.FinishedLuminosityFraction(self.__ThisPartition) #the job config knows partitioning @@ -180,21 +187,21 @@ class MixingPartitioner: def SoFar(self): return self.__ScaleFactorSum + self.PerSFOnow() * (self.__ThisPartition % mixStreamConfig.NumberOfSFO) def ConfigureSelectors(self): - print "*** MixingPartitioner: INFO Partition ", self.__ThisPartition, " has a LB beginning after ", self.__ScaleFactorSum, "% of the input has been processed." - print "*** MixingPartitioner: INFO Partition ", self.__ThisPartition, " has a LB reading ", self.__ThisScaleFactor * 100, "% of the input. " + print("*** MixingPartitioner: INFO Partition ", self.__ThisPartition, " has a LB beginning after ", self.__ScaleFactorSum, "% of the input has been processed.") + print("*** MixingPartitioner: INFO Partition ", self.__ThisPartition, " has a LB reading ", self.__ThisScaleFactor * 100, "% of the input. ") totalLumi = 1 - print ":::::::::: STREAMING JOB CONFIGURATION: LHC INSTANTANEOUS LUMINOSITY= %f x 10^%i cm^-2 s^-1" % (self.ScaleFactor(), 31) + print(":::::::::: STREAMING JOB CONFIGURATION: LHC INSTANTANEOUS LUMINOSITY= %f x 10^%i cm^-2 s^-1" % (self.ScaleFactor(), 31)) soFar = self.SoFar() perSFOnow = self.PerSFOnow() for ksample in self.__Selectors: sel = self.__Selectors[ksample] if not sel.isSufficient(): # prescale weighting is a TASK-GLOBAL! Read more, don't scale more. 
- print "*** MixingPartitioner: WARNING not enough events for %s -- (%s) will be weighted." % \ - (sel.name(), ','.join([str(ali) for ali in sel.Equivalents()]) ) + print("*** MixingPartitioner: WARNING not enough events for %s -- (%s) will be weighted." % \ + (sel.name(), ','.join([str(ali) for ali in sel.Equivalents()]) )) for aliasedID in sel.Equivalents(): self.__DatasetsToWeight[ aliasedID ] = sel.weight() - print "*** MixingPartitioner: INFO \t%s FirstFile=%s EvOffset=%i NeV=%f" % \ - (sel.name(), sel.firstFileIndex(soFar), sel.firstEventInFile(soFar), sel.totalEventsThisJob(perSFOnow)) + print("*** MixingPartitioner: INFO \t%s FirstFile=%s EvOffset=%i NeV=%f" % \ + (sel.name(), sel.firstFileIndex(soFar), sel.firstEventInFile(soFar), sel.totalEventsThisJob(perSFOnow))) def preStageInputFiles(self,CastorOrDCache = 'Castor'): from PyJobTransformsCore.FilePreStager import theFileStagerRobot @@ -205,12 +212,12 @@ class MixingPartitioner: fileList = self.ListOfFilesToStage() # NOTE THAT FILES THAT DON'T START WITH PNFS OR DCACHE WILL NOT BE CHECKED. filesNeedingAction = theFileStagerRobot.addFilesToStagerIfNeeded( fileList ) - print "This job muust stage %i files" % len(filesNeedingAction) - for f in filesNeedingAction: print f + print("This job muust stage %i files" % len(filesNeedingAction)) + for f in filesNeedingAction: print(f) filesNotStaged = theFileStagerRobot.waitUntilAllFilesStaged() if filesNotStaged: problemFiles = '' - for filename,status in filesNotStaged.items(): + for filename,status in iteritems(filesNotStaged): problemFiles += os.linesep + "%s:%s" % (filename,status) raise IOError("Could not stage following files from tape:%s" % problemFiles ) diff --git a/Reconstruction/RecJobTransforms/python/MixingSelector.py b/Reconstruction/RecJobTransforms/python/MixingSelector.py index 8532d0547ac..fc45efc6628 100644 --- a/Reconstruction/RecJobTransforms/python/MixingSelector.py +++ b/Reconstruction/RecJobTransforms/python/MixingSelector.py @@ -1,3 +1,7 @@ +from __future__ import print_function +from __future__ import division + +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ##################################### @@ -17,7 +21,7 @@ class MixingSelector(object): self.__evPerFile = eventsPerFile self.__aliases = [ datasetName ] self.__physical = [] - print "*** MixingPartitioner: INFO Created new selector Selector%i requesting an average of %f events per job ***" % (datasetName, eventsRequired) + print("*** MixingPartitioner: INFO Created new selector Selector%i requesting an average of %f events per job ***" % (datasetName, eventsRequired)) def name(self): return "Selector"+str(self.__dsid) def __str__(self): @@ -35,16 +39,16 @@ class MixingSelector(object): def addAlias(self,newAlias): """If some downstream module is weighting prescales based on MC run number, inform it that run number newAlias is equivalent to this __dsid.""" if not newAlias in self.__aliases: - print "*** MixingPartitioner: INFO \tEventInfo run number %i interpreted like %i. ***" % (newAlias, self.__dsid) + print("*** MixingPartitioner: INFO \tEventInfo run number %i interpreted like %i. ***" % (newAlias, self.__dsid)) self.__aliases += [ newAlias ] def addNewCatalog(self, pfnlist): if len(pfnlist) == 0: - print "*** MixingPartitioner: WARNING Adding empty list to %s?" % self.name() + print("*** MixingPartitioner: WARNING Adding empty list to %s?" % self.name()) return if self.numFiles(): - print "*** MixingPartitioner: INFO Files (%s ...) 
will be appended to %s. ***" % (pfnlist[0], self.name()) + print("*** MixingPartitioner: INFO Files (%s ...) will be appended to %s. ***" % (pfnlist[0], self.name())) else: - print "*** MixingPartitioner: INFO Using files (%s ...) to initialize %s. ***" % (pfnlist[0], self.name()) + print("*** MixingPartitioner: INFO Using files (%s ...) to initialize %s. ***" % (pfnlist[0], self.name())) self.__physical += pfnlist ### functions to calculate staging and mixer configuration ### ) def evAvailablePerJob(self): diff --git a/Reconstruction/RecJobTransforms/python/RDOFilePeeker.py b/Reconstruction/RecJobTransforms/python/RDOFilePeeker.py index c6449733f15..96aca3b8f51 100644 --- a/Reconstruction/RecJobTransforms/python/RDOFilePeeker.py +++ b/Reconstruction/RecJobTransforms/python/RDOFilePeeker.py @@ -1,3 +1,5 @@ +from past.builtins import basestring + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration def RDOFilePeeker(runArgs, skeletonLog): @@ -7,7 +9,7 @@ def RDOFilePeeker(runArgs, skeletonLog): except AssertionError: skeletonLog.error("Failed to open input file: %s", runArgs.inputRDOFile[0]) #check evt_type of input file - if 'evt_type' in f.infos.keys(): + if 'evt_type' in f.infos: import re if not re.match(str(f.infos['evt_type'][0]), 'IS_SIMULATION') : skeletonLog.error('This input file has incorrect evt_type: %s',str(f.infos['evt_type'])) @@ -18,14 +20,14 @@ def RDOFilePeeker(runArgs, skeletonLog): else : skeletonLog.warning('Could not find \'evt_type\' key in athfile.infos. Unable to that check evt_type is correct.') metadatadict = dict() - if 'metadata' in f.infos.keys(): - if '/Digitization/Parameters' in f.infos['metadata'].keys(): + if 'metadata' in f.infos: + if '/Digitization/Parameters' in f.infos['metadata']: metadatadict = f.infos['metadata']['/Digitization/Parameters'] if isinstance(metadatadict, list): skeletonLog.warning("%s inputfile: %s contained %s sets of Dititization Metadata. 
Using the final set in the list.",inputtype,inputfile,len(metadatadict)) metadatadict=metadatadict[-1] ##Get IOVDbGlobalTag - if 'IOVDbGlobalTag' not in metadatadict.keys(): + if 'IOVDbGlobalTag' not in metadatadict: try: assert f.fileinfos['metadata']['/TagInfo']['IOVDbGlobalTag'] is not None metadatadict['IOVDbGlobalTag'] = f.fileinfos['metadata']['/TagInfo']['IOVDbGlobalTag'] @@ -37,7 +39,7 @@ def RDOFilePeeker(runArgs, skeletonLog): skeletonLog.warning("Failed to find IOVDbGlobalTag.") else: ##Patch for older hit files - if 'DigitizedDetectors' not in metadatadict.keys(): + if 'DigitizedDetectors' not in metadatadict: metadatadict['DigitizedDetectors'] = ['pixel','SCT','TRT','BCM','Lucid','LAr','Tile','MDT','CSC','TGC','RPC','Truth'] import re @@ -46,15 +48,15 @@ def RDOFilePeeker(runArgs, skeletonLog): ## Configure DetDescrVersion if hasattr(runArgs,"geometryVersion"): inputGeometryVersion = runArgs.geometryVersion - if type(inputGeometryVersion) == str and inputGeometryVersion.endswith("_VALIDATION"): + if isinstance(inputGeometryVersion, basestring) and inputGeometryVersion.endswith("_VALIDATION"): inputGeometryVersion = inputGeometryVersion.replace("_VALIDATION", "") - if 'DetDescrVersion' in metadatadict.keys(): + if 'DetDescrVersion' in metadatadict: if not re.match(metadatadict['DetDescrVersion'], inputGeometryVersion): skeletonLog.warning("command-line geometryVersion (%s) does not match the value used in the Simulation step (%s) !", inputGeometryVersion, metadatadict['DetDescrVersion']) globalflags.DetDescrVersion.set_Value_and_Lock( inputGeometryVersion ) skeletonLog.info("Using geometryVersion from command-line: %s", globalflags.DetDescrVersion.get_Value()) - elif 'DetDescrVersion' in metadatadict.keys(): + elif 'DetDescrVersion' in metadatadict: globalflags.DetDescrVersion.set_Value_and_Lock( metadatadict['DetDescrVersion'] ) skeletonLog.info("Using geometryVersion from RDO file metadata %s", globalflags.DetDescrVersion.get_Value()) else: @@ -62,20 +64,20 @@ def RDOFilePeeker(runArgs, skeletonLog): ## Configure ConditionsTag if hasattr(runArgs,"conditionsTag"): - if 'IOVDbGlobalTag' in metadatadict.keys(): + if 'IOVDbGlobalTag' in metadatadict: if not re.match(metadatadict['IOVDbGlobalTag'], runArgs.conditionsTag): skeletonLog.warning("command-line conditionsTag (%s) does not match the value used in the Simulation step (%s) !", runArgs.conditionsTag, metadatadict['IOVDbGlobalTag']) #globalflags.ConditionsTag.set_Value_and_Lock( runArgs.conditionsTag ) ## already done in CommonSkeletonJobOptions.py skeletonLog.info("Using conditionsTag from command-line: %s", globalflags.ConditionsTag.get_Value()) - elif 'IOVDbGlobalTag' in metadatadict.keys(): + elif 'IOVDbGlobalTag' in metadatadict: globalflags.ConditionsTag.set_Value_and_Lock( metadatadict['IOVDbGlobalTag'] ) skeletonLog.info("Using conditionsTag from RDO file metadata %s", globalflags.ConditionsTag.get_Value()) else: raise SystemExit("conditionsTag not found in RDO file metadata or on transform command-line!") ## Configure DetFlags - if 'DigitizedDetectors' in metadatadict.keys(): + if 'DigitizedDetectors' in metadatadict: from AthenaCommon.DetFlags import DetFlags # by default everything is off DetFlags.all_setOff() @@ -84,7 +86,7 @@ def RDOFilePeeker(runArgs, skeletonLog): cmd='DetFlags.%s_setOn()' % subdet skeletonLog.debug(cmd) try: - exec cmd + exec(cmd) except: skeletonLog.warning('Failed to switch on subdetector %s',subdet) #hacks to reproduce the sub-set of DetFlags left on by RecExCond/AllDet_detDescr.py diff 
--git a/Reconstruction/RecJobTransforms/python/recoTransforms.py b/Reconstruction/RecJobTransforms/python/recoTransforms.py index b0f3f60035d..ac7bddd6e13 100644 --- a/Reconstruction/RecJobTransforms/python/recoTransforms.py +++ b/Reconstruction/RecJobTransforms/python/recoTransforms.py @@ -47,9 +47,9 @@ class skimRawExecutor(scriptExecutor): # the fast hash search against a dictionary rawEventList[runstr + "-" + evtstr] = True msg.debug("Identified run {0}, event {1} in input RAW files".format(runstr, evtstr)) - except ValueError, e: + except ValueError as e: msg.warning("Failed to understand this line from AtlListBSEvents: {0}".format(line)) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: errMsg = "Call to AtlListBSEvents failed: {0}".format(e) msg.error(erMsg) raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_EXEC_SETUP_FAIL"), errMsg) @@ -66,7 +66,7 @@ class skimRawExecutor(scriptExecutor): msg.debug("Found run {0}, event {1} in master filter list".format(runstr, evtstr)) os.write(slimFF.fileno(), line) count += 1 - except ValueError, e: + except ValueError as e: msg.warning("Failed to understand this line from master filter file: {0} {1}".format(line, e)) if count == 0: # If there are no matched events, create a bogus request for run and event 0 to keep diff --git a/Reconstruction/RecJobTransforms/python/streaming_arg.py b/Reconstruction/RecJobTransforms/python/streaming_arg.py index 708153d521b..c19e18271a2 100644 --- a/Reconstruction/RecJobTransforms/python/streaming_arg.py +++ b/Reconstruction/RecJobTransforms/python/streaming_arg.py @@ -1,3 +1,4 @@ + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration __author__ = "Ayana.Holloway@cern.ch" diff --git a/Reconstruction/RecJobTransforms/scripts/Reco_tf.py b/Reconstruction/RecJobTransforms/scripts/Reco_tf.py index cf235f291da..7ac5d363682 100755 --- a/Reconstruction/RecJobTransforms/scripts/Reco_tf.py +++ b/Reconstruction/RecJobTransforms/scripts/Reco_tf.py @@ -77,7 +77,7 @@ def getTransform(RAWtoALL=False): simStepSet = set() addDigitizationSubstep(simStepSet) trf.appendToExecutorSet(list(simStepSet)[0]) - except ImportError, e: + except ImportError as e: msg.warning('Failed to import digitisation arguments ({0}). Digitisation substep will not be available.'.format(e)) # Again, protect core functionality from too tight a dependence on EventOverlay @@ -87,7 +87,7 @@ def getTransform(RAWtoALL=False): addOverlayTrfArgs(trf.parser) addOverlayPoolTrfArgs(trf.parser) appendOverlay_PoolSubstep(trf, True) - except ImportError, e: + except ImportError as e: msg.warning('Failed to import overlay arguments ({0}). 
Event overlay substep will not be available.'.format(e)) # Again, protect core functionality from too tight a dependence on PATJobTransforms diff --git a/Reconstruction/RecJobTransforms/scripts/TrainReco_tf.py b/Reconstruction/RecJobTransforms/scripts/TrainReco_tf.py index 9f78b03dae9..6e8544935a4 100755 --- a/Reconstruction/RecJobTransforms/scripts/TrainReco_tf.py +++ b/Reconstruction/RecJobTransforms/scripts/TrainReco_tf.py @@ -10,8 +10,12 @@ # Train_reco_tf.py --inputNTUP_COMMONFile=NTUP_COMMON.01316372._001218.root.1 --outputDNTUPFile DNTUP --reductionConf SM1 #================================================================== +from __future__ import print_function +from builtins import next +from future import standard_library +standard_library.install_aliases() import re -import os, commands +import os, subprocess from time import sleep import sys import pickle @@ -19,7 +23,7 @@ import tempfile def get_filename(subname): try: - return os.listdir('.')[(i for i, name in enumerate(os.listdir('.')) if subname in name).next()] + return os.listdir('.')[next((i for i, name in enumerate(os.listdir('.')) if subname in name))] except: return None @@ -39,7 +43,7 @@ if __name__ == '__main__': for tmpKeyVal in sys.argv[1:]: try: tmpMatch = re.search('^([^=]+)=(.+)$',tmpKeyVal) - if tmpMatch != None: + if tmpMatch is not None: mapKey = tmpMatch.group(1) mapVal = tmpMatch.group(2) search = mapKey.find('--output') @@ -54,7 +58,7 @@ if __name__ == '__main__': # use string argMap[mapKey] = mapVal except: - print "warning: %s arg not recognised, skipping it" % tmpKeyVal + print("warning: %s arg not recognised, skipping it" % tmpKeyVal) #print "Reco_wrap_tf.py: arguments : " + str(sys.argv[1:]) #print "Reco_wrap_tf.py: arg map : " + str(argMap) @@ -65,7 +69,7 @@ if __name__ == '__main__': # execute original trf com = '%s %s ' % (trfName,trfArgs) sys.stdout.flush() - print "Reco_tf.py: running %s" % com + print("Reco_tf.py: running %s" % com) retStat = os.system(com) try: os.remove(tmpName) @@ -76,10 +80,10 @@ if __name__ == '__main__': xmlfile = open('metadata.xml','r') newxmlfile = open('new.xml','w') xmlcontents = xmlfile.read() - for key in fileNameMap.keys(): + for key in fileNameMap: mvString = "mv %s %s" % (key,fileNameMap[key]) sys.stdout.flush() - print "Renaming file %s --> %s" % (key,fileNameMap[key]) + print("Renaming file %s --> %s" % (key,fileNameMap[key])) retStat = os.system(mvString) newxml = xmlcontents.replace(key,fileNameMap[key]) xmlcontents = newxml diff --git a/Reconstruction/RecJobTransforms/share/IDTrackingPtMin400MeV.py b/Reconstruction/RecJobTransforms/share/IDTrackingPtMin400MeV.py index e31ccc382c5..fac54617026 100644 --- a/Reconstruction/RecJobTransforms/share/IDTrackingPtMin400MeV.py +++ b/Reconstruction/RecJobTransforms/share/IDTrackingPtMin400MeV.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from past.builtins import basestring + #syntax : # preInclude_r2e=RecJobTransforms/IDTrackingPtMin400MeV.py # in addition one can change the effective cut @@ -7,14 +10,14 @@ b=['from InDetRecExample.ConfiguredNewTrackingCuts import ConfiguredNewTrackingCuts','InDetNewTrackingCuts=ConfiguredNewTrackingCuts("Offline")','InDetNewTrackingCuts._ConfiguredNewTrackingCuts__minPT=400.0'] #if rec.UserFlags is a non empty string, make it a vector a=rec.UserFlags() -if a!="" and type(a)==type(""): +if a!="" and isinstance(a, basestring): a=[a] -if type(a)==type([]) and len(a)>0: +if isinstance(a, list) and len(a)>0: rec.UserFlags=b+a else: rec.UserFlags=b del a,b -print 
"IDTrackingPtMin400MeV.py setting rec.UserFlags to ", rec.UserFlags() +print("IDTrackingPtMin400MeV.py setting rec.UserFlags to ", rec.UserFlags()) diff --git a/Reconstruction/RecJobTransforms/share/UseFrontier.py b/Reconstruction/RecJobTransforms/share/UseFrontier.py index 85d3a2d2465..139d502d69a 100644 --- a/Reconstruction/RecJobTransforms/share/UseFrontier.py +++ b/Reconstruction/RecJobTransforms/share/UseFrontier.py @@ -1,4 +1,5 @@ +from __future__ import print_function ## This is a stub, redirecting to the new location in PyJobTransforms -print "UseFrontier.py: Deprecation Warning - please use PyJobTransforms/UseFrontier.py instead" +print("UseFrontier.py: Deprecation Warning - please use PyJobTransforms/UseFrontier.py instead") include("PyJobTransforms/UseFrontier.py") diff --git a/Reconstruction/RecJobTransforms/share/skeleton.AODtoTAG_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.AODtoTAG_tf.py index b49b3008fb7..e1e51ef97e9 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.AODtoTAG_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.AODtoTAG_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for AOD->TAG @@ -71,7 +72,7 @@ if rec.doWriteAOD: try: StreamAOD.ExtendProvenanceRecord = False except: - print "StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags." + print("StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags.") ## Post-include if hasattr(runArgs,"postInclude"): diff --git a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD.py b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD.py index 120ec726a93..d62dfaa4e74 100755 --- a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for ESD->AOD @@ -93,7 +94,7 @@ if hasattr(runArgs,"preExec"): ## Pre-include if hasattr(runArgs,"preInclude"): for fragment in runArgs.preInclude: - print "preInclude",fragment + print("preInclude",fragment) include(fragment) #======================================================== diff --git a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py index 74d54239f16..7ab7eaf49de 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoAOD_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for ESD->AOD @@ -96,7 +97,7 @@ if hasattr(runArgs,"preExec"): ## Pre-include if hasattr(runArgs,"preInclude"): for fragment in runArgs.preInclude: - print "preInclude",fragment + print("preInclude",fragment) include(fragment) #======================================================== @@ -111,7 +112,7 @@ if hasattr(runArgs,"outputAODFile"): try: StreamAOD.ExtendProvenanceRecord = False except: - print "StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags." + print("StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. 
Check your flags.") #D3PDMaker outputs if hasattr(runArgs,"outputNTUP_MINBIASFile"): diff --git a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoESD.py b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoESD.py index fc6a404ab2b..c9e88d930c7 100755 --- a/Reconstruction/RecJobTransforms/share/skeleton.ESDtoESD.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.ESDtoESD.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for ESD->ESD (reprocessing) @@ -50,7 +51,7 @@ if hasattr(runArgs,"preExec"): ## Pre-include if hasattr(runArgs,"preInclude"): for fragment in runArgs.preInclude: - print "preInclude",fragment + print("preInclude",fragment) include(fragment) #======================================================== diff --git a/Reconstruction/RecJobTransforms/share/skeleton.MergePool_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.MergePool_tf.py index 5ae81287e25..7990ec240cc 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.MergePool_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.MergePool_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for ESD/AOD/DPD merging @@ -17,10 +18,10 @@ recoLog.info( '****************** STARTING POOL FILE MERGING *****************' ## Input # Deal with generic case first of all if hasattr(runArgs, "inputPOOL_MRG_INPUTFile"): - if runArgs.inputPOOL_MRG_INPUTFileType is "AOD": + if runArgs.inputPOOL_MRG_INPUTFileType == "AOD": runArgs.inputAODFile = runArgs.inputPOOL_MRG_INPUTFile runArgs.outputAOD_MRGFile = runArgs.outputPOOL_MRG_OUTPUTFile - elif runArgs.inputPOOL_MRG_INPUTFileType is "ESD": + elif runArgs.inputPOOL_MRG_INPUTFileType == "ESD": runArgs.inputESDFile = runArgs.inputPOOL_MRG_INPUTFile runArgs.outputESD_MRGFile = runArgs.outputPOOL_MRG_OUTPUTFile else: @@ -28,11 +29,11 @@ if hasattr(runArgs, "inputPOOL_MRG_INPUTFile"): # DAOD comes in many flavours, so automate transforming this into a "standard" AOD argument DAOD_Input_Key = [ k for k in dir(runArgs) if k.startswith("inputDAOD") and k.endswith("File") ] -if len(DAOD_Input_Key) is 1: +if len(DAOD_Input_Key) == 1: runArgs.inputAODFile = getattr(runArgs, DAOD_Input_Key[0]) DAOD_Output_Key = [ k for k in dir(runArgs) if k.startswith("outputDAOD") and k.endswith("_MRGFile") ] -if len(DAOD_Output_Key) is 1: +if len(DAOD_Output_Key) == 1: runArgs.outputAOD_MRGFile = getattr(runArgs, DAOD_Output_Key[0]) if hasattr(runArgs,"inputFile"): athenaCommonFlags.FilesInput.set_Value_and_Lock( runArgs.inputFile ) @@ -77,16 +78,16 @@ if rec.doWriteAOD: if rec.readAOD(): StreamAOD.ExtendProvenanceRecord = False else: - print "StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags." + print("StreamAOD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags.") if rec.doWriteESD: if rec.readESD(): StreamESD.ExtendProvenanceRecord = False else: - print "StreamESD was not defined, cannot set ExtendProvenanceRecord = False. Check your flags." + print("StreamESD was not defined, cannot set ExtendProvenanceRecord = False. 
Check your flags.") # Fast merge options -if hasattr(runArgs,"fastPoolMerge") and runArgs.fastPoolMerge == True: +if hasattr(runArgs,"fastPoolMerge") and runArgs.fastPoolMerge is True: recoLog.info("Using CopyEventStreamInfo") from OutputStreamAthenaPool.OutputStreamAthenaPoolConf import CopyEventStreamInfo if rec.doWriteAOD: stream = StreamAOD diff --git a/Reconstruction/RecJobTransforms/share/skeleton.MergeRDO_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.MergeRDO_tf.py index f5afeb2e9c8..418f7cf270a 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.MergeRDO_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.MergeRDO_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for RDO merging @@ -127,7 +128,7 @@ ServiceMgr.EventSelector.InputCollections = athenaCommonFlags.FilesInput() try: ServiceMgr.EventSelector.CollectionType = CollType except: - print "Reading from file" + print("Reading from file") SkipEvents=0 ServiceMgr.EventSelector.SkipEvents = SkipEvents @@ -151,15 +152,15 @@ StreamRDO.ForceRead=TRUE; #force read of output data objs try: StreamRDO.AcceptAlgs = AcceptList except: - print "No accept algs indicated in AcceptList" + print("No accept algs indicated in AcceptList") try: StreamRDO.RequireAlgs = RequireList except: - print "No accept algs indicated in RequireList" + print("No accept algs indicated in RequireList") try: StreamRDO.VetoAlgs = VetoList except: - print "No accept algs indicated in VetoList" + print("No accept algs indicated in VetoList") # Perfmon from PerfMonComps.PerfMonFlags import jobproperties as pmon_properties diff --git a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoALL_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoALL_tf.py index a33f1f71dae..1a018126f36 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoALL_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoALL_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for RAW->ALL @@ -110,7 +111,7 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") from RecJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) diff --git a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD.py b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD.py index bc1e5a49330..4bf44486d46 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for RAW->ESD @@ -19,7 +20,7 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. 
Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) @@ -128,7 +129,7 @@ if hasattr(runArgs, "outputTXT_FTKIPFile"): rec.UserAlgs=["FastTrackSimWrap/FastTrackSimWrap_jobOptions.py"] # Trigger already run in this RDO? -if hasattr(runArgs, "doRDOTrigger") and runArgs.doRDOTrigger == True: +if hasattr(runArgs, "doRDOTrigger") and runArgs.doRDOTrigger is True: TriggerFlags.doTriggerConfigOnly=True # Event display tarballs diff --git a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD_tf.py b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD_tf.py index 59c331a419e..66a3b03f546 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD_tf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.RAWtoESD_tf.py @@ -1,3 +1,4 @@ +from __future__ import print_function ############################################################### # # Skeleton top job options for RAW->ESD @@ -22,7 +23,7 @@ try: from PrimaryDPDMaker.PrimaryDPDFlags import primDPD listOfFlags.append(primDPD) except ImportError: - print "WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project." + print("WARNING PrimaryDPDFlags not available. Only OK if you're using job transforms without the AtlasAnalysis project.") from PATJobTransforms.DPDUtils import SetupOutputDPDs rec.DPDMakerScripts.append(SetupOutputDPDs(runArgs,listOfFlags)) diff --git a/Reconstruction/RecJobTransforms/share/skeleton.RDOtoRDOtrigger.py b/Reconstruction/RecJobTransforms/share/skeleton.RDOtoRDOtrigger.py index 2f82d47117c..f42d5e734cf 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.RDOtoRDOtrigger.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.RDOtoRDOtrigger.py @@ -1,3 +1,5 @@ +from future.utils import iteritems + #################################################################### # # Skeleton top job options for RDO->RDOprime (RDO with trigger done) diff --git a/Reconstruction/RecJobTransforms/share/skeleton.csc_MergeHIST_trf.py b/Reconstruction/RecJobTransforms/share/skeleton.csc_MergeHIST_trf.py index b08ab791bd9..efbdbd5f0c0 100644 --- a/Reconstruction/RecJobTransforms/share/skeleton.csc_MergeHIST_trf.py +++ b/Reconstruction/RecJobTransforms/share/skeleton.csc_MergeHIST_trf.py @@ -1,6 +1,9 @@ +from __future__ import print_function + +from future import standard_library +standard_library.install_aliases() import sys import os -import commands import subprocess from AthenaCommon.Logging import logging @@ -52,7 +55,7 @@ recoLog.info( outFileName ) cmd = "rm -f hist_merge.log " recoLog.info( cmd ) -(retcode,error) = commands.getstatusoutput(cmd) +(retcode,error) = subprocess.getstatusoutput(cmd) newcommand= "DQHistogramMerge.py %s %s %s"%(filename,outFileName,runPostProcessing) recoLog.info(newcommand) ################################### @@ -66,20 +69,20 @@ dqhistpipe=subprocess.Popen(["DQHistogramMerge.py", filename, outFileName, "%s"% stdout=tmpbuff, stderr=tmpbuff, shell=False) status=dqhistpipe.wait() -print "---------------------------------------------------------------------------------------" -print '## Output of \'' + newcommand + '\':' +print("---------------------------------------------------------------------------------------") +print('## Output of \'' + newcommand + '\':') #print output try: logFile=open('hist_merge.log','w') tmpbuff.seek(0) for line in tmpbuff: - print line, + print(line, end=' ') 
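The csc_MergeHIST hunks above combine two recurring futurize stage-2 changes: the removed Python 2 commands module is replaced by subprocess.getstatusoutput, and print statements become print() calls with an explicit end argument. The following standalone sketch (illustrative only, not part of the patch; the shell command is a placeholder) shows the pattern, assuming the future package that the patch itself imports:

    from __future__ import print_function
    from future import standard_library
    standard_library.install_aliases()   # exposes subprocess.getstatusoutput on Python 2

    import subprocess

    # replaces the old commands.getstatusoutput(cmd)
    status, output = subprocess.getstatusoutput("echo hello")
    print("retcode = %s" % status)
    for line in output.splitlines(True):
        print(line, end='')              # old "print line," becomes an explicit end argument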
logFile.write(line) finally: tmpbuff.close() logFile.close() -print '## DQHistogramMerge.py finished with retcode = %s' % (status) -print "---------------------------------------------------------------------------------------" +print('## DQHistogramMerge.py finished with retcode = %s' % (status)) +print("---------------------------------------------------------------------------------------") leavecode = '## DQHistogramMerge.py finished with retcode = %s' % (status) recoLog.info( leavecode ) @@ -90,19 +93,19 @@ dqhistpipe=subprocess.Popen(["DQHistogramMerge.py", testname,"dummy.root", "Fals stdout=tmpbuff, stderr=tmpbuff, shell=False) status=dqhistpipe.wait() -print "---------------------------------------------------------------------------------------" -print '## Output of \'' + newcommand + '\':' +print("---------------------------------------------------------------------------------------") +print('## Output of \'' + newcommand + '\':') try: logFile=open('validate_merge.log','w') tmpbuff.seek(0) for line in tmpbuff: - print line, + print(line, end=' ') logFile.write(line) finally: tmpbuff.close() logFile.close() -print '## DQHistogramMerge.py finished with retcode = %s' % (status) -print "---------------------------------------------------------------------------------------" +print('## DQHistogramMerge.py finished with retcode = %s' % (status)) +print("---------------------------------------------------------------------------------------") leavecode = '## DQHistogramMerge.py validate finished with retcode = %s' % (status) leavecodecmd = "echo \"" + leavecode + "\" >> hist_merge.log" os.system(leavecodecmd) @@ -112,19 +115,19 @@ tmpbuff=os.tmpfile() dqhistpipe=subprocess.Popen(["ScanHistFile.py", outFileName], stdout=tmpbuff, stderr=tmpbuff, shell=False) status=dqhistpipe.wait() -print "---------------------------------------------------------------------------------------" -print '## Output of \'' + newcommand + '\':' +print("---------------------------------------------------------------------------------------") +print('## Output of \'' + newcommand + '\':') try: logFile=open('ScanHistFile.log','w') tmpbuff.seek(0) for line in tmpbuff: - print line, + print(line, end=' ') logFile.write(line) finally: tmpbuff.close() logFile.close() -print '## ScanHistFile.py finished with retcode = %s' % (status) -print "---------------------------------------------------------------------------------------" +print('## ScanHistFile.py finished with retcode = %s' % (status)) +print("---------------------------------------------------------------------------------------") leavecode = '## ScanHistFile.py validate finished with retcode = %s' % (status) leavecodecmd = "echo \"" + leavecode + "\" >> hist_merge.log" os.system(leavecodecmd) diff --git a/Simulation/SimuJobTransforms/python/HitsFilePeeker.py b/Simulation/SimuJobTransforms/python/HitsFilePeeker.py index 3bae078b703..8460fc6c7f8 100644 --- a/Simulation/SimuJobTransforms/python/HitsFilePeeker.py +++ b/Simulation/SimuJobTransforms/python/HitsFilePeeker.py @@ -1,3 +1,5 @@ +from past.builtins import basestring + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration def hitColls2SimulatedDetectors(inputlist): @@ -12,7 +14,7 @@ def hitColls2SimulatedDetectors(inputlist): 'CSC_Hits': 'CSC', 'TGC_Hits': 'TGC', 'RPC_Hits': 'RPC', 'TruthEvent': 'Truth'} #'': 'ALFA', '': 'ZDC', for entry in inputlist: - if entry[1] in simulatedDictionary.keys(): + if entry[1] in simulatedDictionary: if simulatedDictionary[entry[1]] not in 
simulatedDetectors: simulatedDetectors += [simulatedDictionary[entry[1]]] return simulatedDetectors @@ -35,7 +37,7 @@ def HitsFilePeeker(runArgs, skeletonLog): except AssertionError: skeletonLog.error("Failed to open input file: %s", getHITSFile(runArgs)) #check evt_type of input file - if 'evt_type' in f.infos.keys(): + if 'evt_type' in f.infos: import re if not re.match(str(f.infos['evt_type'][0]), 'IS_SIMULATION') : skeletonLog.error('This input file has incorrect evt_type: %s',str(f.infos['evt_type'])) @@ -46,14 +48,14 @@ def HitsFilePeeker(runArgs, skeletonLog): else : skeletonLog.warning('Could not find \'evt_type\' key in athfile.infos. Unable to that check evt_type is correct.') metadatadict = dict() - if 'metadata' in f.infos.keys(): - if '/Simulation/Parameters' in f.infos['metadata'].keys(): + if 'metadata' in f.infos: + if '/Simulation/Parameters' in f.infos['metadata']: metadatadict = f.infos['metadata']['/Simulation/Parameters'] if isinstance(metadatadict, list): skeletonLog.warning("%s inputfile: %s contained %s sets of Simulation Metadata. Using the final set in the list.",inputtype,inputfile,len(metadatadict)) metadatadict=metadatadict[-1] ##Get IOVDbGlobalTag - if 'IOVDbGlobalTag' not in metadatadict.keys(): + if 'IOVDbGlobalTag' not in metadatadict: try: assert f.fileinfos['metadata']['/TagInfo']['IOVDbGlobalTag'] is not None metadatadict['IOVDbGlobalTag'] = f.fileinfos['metadata']['/TagInfo']['IOVDbGlobalTag'] @@ -65,8 +67,8 @@ def HitsFilePeeker(runArgs, skeletonLog): skeletonLog.warning("Failed to find IOVDbGlobalTag.") else: ##Patch for older hit files - if 'SimulatedDetectors' not in metadatadict.keys(): - if 'eventdata_items' in f.infos.keys(): + if 'SimulatedDetectors' not in metadatadict: + if 'eventdata_items' in f.infos: metadatadict['SimulatedDetectors'] = hitColls2SimulatedDetectors(f.infos['eventdata_items']) else : metadatadict['SimulatedDetectors'] = ['pixel','SCT','TRT','BCM','Lucid','LAr','Tile','MDT','CSC','TGC','RPC','Truth'] @@ -77,15 +79,15 @@ def HitsFilePeeker(runArgs, skeletonLog): ## Configure DetDescrVersion if hasattr(runArgs,"geometryVersion"): inputGeometryVersion = runArgs.geometryVersion - if type(inputGeometryVersion) == str and inputGeometryVersion.endswith("_VALIDATION"): + if isinstance(inputGeometryVersion, basestring) and inputGeometryVersion.endswith("_VALIDATION"): inputGeometryVersion = inputGeometryVersion.replace("_VALIDATION", "") - if 'SimLayout' in metadatadict.keys(): + if 'SimLayout' in metadatadict: if not re.match(metadatadict['SimLayout'], inputGeometryVersion): skeletonLog.warning("command-line geometryVersion (%s) does not match the value used in the Simulation step (%s) !", inputGeometryVersion, metadatadict['SimLayout']) globalflags.DetDescrVersion.set_Value_and_Lock( inputGeometryVersion ) skeletonLog.info("Using geometryVersion from command-line: %s", globalflags.DetDescrVersion.get_Value()) - elif 'SimLayout' in metadatadict.keys(): + elif 'SimLayout' in metadatadict: globalflags.DetDescrVersion.set_Value_and_Lock( metadatadict['SimLayout'] ) skeletonLog.info("Using geometryVersion from HITS file metadata %s", globalflags.DetDescrVersion.get_Value()) else: @@ -93,7 +95,7 @@ def HitsFilePeeker(runArgs, skeletonLog): ## Configure ConditionsTag if hasattr(runArgs,"conditionsTag"): - if 'IOVDbGlobalTag' in metadatadict.keys(): + if 'IOVDbGlobalTag' in metadatadict: if not re.match(metadatadict['IOVDbGlobalTag'], runArgs.conditionsTag): skeletonLog.warning("command-line conditionsTag (%s) does not match the value used 
in the Simulation step (%s) !", runArgs.conditionsTag, metadatadict['IOVDbGlobalTag']) @@ -102,7 +104,7 @@ def HitsFilePeeker(runArgs, skeletonLog): skeletonLog.info("Using conditionsTag from command-line: %s", globalflags.ConditionsTag.get_Value()) else: skeletonLog.info("globalflags.ConditionsTag already locked to %s - will not alter it.", globalflags.ConditionsTag.get_Value()) - elif 'IOVDbGlobalTag' in metadatadict.keys(): + elif 'IOVDbGlobalTag' in metadatadict: globalflags.ConditionsTag.set_Value_and_Lock( metadatadict['IOVDbGlobalTag'] ) skeletonLog.info("Using conditionsTag from HITS file metadata %s", globalflags.ConditionsTag.get_Value()) else: @@ -110,7 +112,7 @@ def HitsFilePeeker(runArgs, skeletonLog): raise SystemExit("conditionsTag not found in HITS file metadata or on transform command-line!") ## Configure DetFlags - if 'SimulatedDetectors' in metadatadict.keys(): + if 'SimulatedDetectors' in metadatadict: from AthenaCommon.DetFlags import DetFlags # by default everything is off DetFlags.all_setOff() @@ -119,7 +121,7 @@ def HitsFilePeeker(runArgs, skeletonLog): cmd='DetFlags.%s_setOn()' % subdet skeletonLog.debug(cmd) try: - exec cmd + exec(cmd) except: skeletonLog.warning('Failed to switch on subdetector %s',subdet) DetFlags.simulateLVL1.all_setOff() diff --git a/Simulation/SimuJobTransforms/python/SimBeamSpotShapeFilter.py b/Simulation/SimuJobTransforms/python/SimBeamSpotShapeFilter.py index dd1e2b11c45..8ba7c87ae5d 100644 --- a/Simulation/SimuJobTransforms/python/SimBeamSpotShapeFilter.py +++ b/Simulation/SimuJobTransforms/python/SimBeamSpotShapeFilter.py @@ -3,6 +3,7 @@ """ This library defines a class for filtering events based on the beamspot size in athena. """ +from __future__ import division __author__ = 'Anthony Morley' __version__ = '$Id $' diff --git a/Simulation/SimuJobTransforms/python/SimTransformUtils.py b/Simulation/SimuJobTransforms/python/SimTransformUtils.py index 0d1c60da634..45e5a1b2d12 100644 --- a/Simulation/SimuJobTransforms/python/SimTransformUtils.py +++ b/Simulation/SimuJobTransforms/python/SimTransformUtils.py @@ -1,3 +1,5 @@ +from __future__ import division +from builtins import range # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @brief Module with Digitization transform options and substep diff --git a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT.py b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT.py index 7155eecc3a1..5730fd8dfe3 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT.py +++ b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT.py @@ -1,3 +1,4 @@ + include("SimuJobTransforms/CommonSkeletonJobOptions.py") if hasattr(runArgs, "jobNumber"): diff --git a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_ISF.py b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_ISF.py index c3c868aa953..a595db1d48c 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_ISF.py +++ b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_ISF.py @@ -1,3 +1,4 @@ + ## Get the logger from AthenaCommon.Logging import * atlasG4log = logging.getLogger('ISF') diff --git a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_MC12.py b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_MC12.py index df188984e2c..b0b667918d3 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_MC12.py +++ b/Simulation/SimuJobTransforms/share/skeleton.EVGENtoHIT_MC12.py @@ -1,3 +1,4 @@ + ## Get the logger from AthenaCommon.Logging import * atlasG4log = 
logging.getLogger('AtlasG4') diff --git a/Simulation/SimuJobTransforms/share/skeleton.FilterHit.py b/Simulation/SimuJobTransforms/share/skeleton.FilterHit.py index 6ffb33f281b..4d60828a1be 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.FilterHit.py +++ b/Simulation/SimuJobTransforms/share/skeleton.FilterHit.py @@ -1,3 +1,5 @@ +from __future__ import print_function + import traceback from AthenaCommon.Logging import logging @@ -283,4 +285,4 @@ if hasattr(runArgs,"postExec"): filterHitLog.info(cmd) exec(cmd) #-------------------------------------------------------------- -print topSequence +print(topSequence) diff --git a/Simulation/SimuJobTransforms/share/skeleton.HITSMerge.py b/Simulation/SimuJobTransforms/share/skeleton.HITSMerge.py index 5a0b4a2d4ba..846f13c8e09 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.HITSMerge.py +++ b/Simulation/SimuJobTransforms/share/skeleton.HITSMerge.py @@ -1,3 +1,5 @@ +from __future__ import print_function + #import glob, os, re import traceback @@ -93,7 +95,7 @@ EventSelector.InputCollections = In try: EventSelector.CollectionType = CollType except: - print "Reading from file" + print("Reading from file") SkipEvents=0 if hasattr(runArgs,"skipEvents"): @@ -120,15 +122,15 @@ StreamHITS.ForceRead=TRUE; #force read of output data objs try: StreamHITS.AcceptAlgs = AcceptList except: - print "No accept algs indicated in AcceptList" + print("No accept algs indicated in AcceptList") try: StreamHITS.RequireAlgs = RequireList except: - print "No accept algs indicated in RequireList" + print("No accept algs indicated in RequireList") try: StreamHITS.VetoAlgs = VetoList except: - print "No accept algs indicated in VetoList" + print("No accept algs indicated in VetoList") # Perfmon from PerfMonComps.PerfMonFlags import jobproperties as pmon_properties @@ -168,4 +170,4 @@ if hasattr(runArgs,"postExec"): merHitLog.info(cmd) exec(cmd) #-------------------------------------------------------------- -print topSequence +print(topSequence) diff --git a/Simulation/SimuJobTransforms/share/skeleton.HITStoHIST_SIM.py b/Simulation/SimuJobTransforms/share/skeleton.HITStoHIST_SIM.py index 5c3b3eae074..19fafa827cd 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.HITStoHIST_SIM.py +++ b/Simulation/SimuJobTransforms/share/skeleton.HITStoHIST_SIM.py @@ -1,3 +1,5 @@ +from __future__ import print_function + include("SimuJobTransforms/CommonSkeletonJobOptions.py") include( "ParticleBuilderOptions/McAOD_PoolCnv_jobOptions.py") include( "EventAthenaPool/EventAthenaPool_joboptions.py" ) @@ -31,8 +33,8 @@ if hasattr(runArgs,"inputHITSFile"): ## Output HIST File if hasattr(runArgs,"outputHIST_SIMFile"): - print "Output is" - print runArgs.outputHIST_SIMFile + print("Output is") + print(runArgs.outputHIST_SIMFile) from GaudiSvc.GaudiSvcConf import THistSvc ServiceMgr += THistSvc() ServiceMgr.THistSvc.Output +=["HitAnalysis DATAFILE='"+runArgs.outputHIST_SIMFile+"' OPT='RECREATE'"] diff --git a/Simulation/SimuJobTransforms/share/skeleton.HITtoRDO.py b/Simulation/SimuJobTransforms/share/skeleton.HITtoRDO.py index e284c647663..aae02fe11e7 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.HITtoRDO.py +++ b/Simulation/SimuJobTransforms/share/skeleton.HITtoRDO.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from __future__ import division + include("SimuJobTransforms/CommonSkeletonJobOptions.py") if hasattr(runArgs, "jobNumber"): @@ -7,7 +10,7 @@ if hasattr(runArgs, "jobNumber"): from AthenaCommon.GlobalFlags import globalflags if 
hasattr(runArgs,"geometryVersion"): # strip _VALIDATION - print "stripping _VALIDATION" + print("stripping _VALIDATION") if runArgs.geometryVersion.endswith("_VALIDATION"): pos=runArgs.geometryVersion.find("_VALIDATION") globalflags.DetDescrVersion.set_Value_and_Lock( runArgs.geometryVersion[:pos] ) diff --git a/Simulation/SimuJobTransforms/share/skeleton.RDOtoHIST_DIGI.py b/Simulation/SimuJobTransforms/share/skeleton.RDOtoHIST_DIGI.py index 2eaba2b1933..7861821def2 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.RDOtoHIST_DIGI.py +++ b/Simulation/SimuJobTransforms/share/skeleton.RDOtoHIST_DIGI.py @@ -1,3 +1,5 @@ +from __future__ import print_function + include("SimuJobTransforms/CommonSkeletonJobOptions.py") include( "ParticleBuilderOptions/McAOD_PoolCnv_jobOptions.py") include( "EventAthenaPool/EventAthenaPool_joboptions.py" ) @@ -31,8 +33,8 @@ if hasattr(runArgs,"inputRDOFile"): ## Output HIST File if hasattr(runArgs,"outputHIST_DIGIFile"): - print "Output is" - print runArgs.outputHIST_DIGIFile + print("Output is") + print(runArgs.outputHIST_DIGIFile) from GaudiSvc.GaudiSvcConf import THistSvc ServiceMgr += THistSvc() ServiceMgr.THistSvc.Output +=["HitAnalysis DATAFILE='"+runArgs.outputHIST_DIGIFile+"' OPT='RECREATE'"] diff --git a/Simulation/SimuJobTransforms/share/skeleton.TestBeam.py b/Simulation/SimuJobTransforms/share/skeleton.TestBeam.py index f8b5416e4b6..ef7c64446d4 100644 --- a/Simulation/SimuJobTransforms/share/skeleton.TestBeam.py +++ b/Simulation/SimuJobTransforms/share/skeleton.TestBeam.py @@ -1,3 +1,5 @@ +from __future__ import print_function + ## Get the logger from AthenaCommon.Logging import * atlasG4log = logging.getLogger('TestBeam') @@ -225,7 +227,7 @@ if hasattr(runArgs, "enableLooperKiller") and not runArgs.enableLooperKiller: from AthenaCommon.CfgGetter import getAlgorithm topSeq += getAlgorithm("G4AtlasAlg",tryDefaultConfigurable=True) -print topSeq +print(topSeq) ## Add AMITag MetaData to TagInfoMgr if hasattr(runArgs, 'AMITag'): diff --git a/Tools/PyJobTransforms/python/transform.py b/Tools/PyJobTransforms/python/transform.py index ccd1e9cb620..826b76e55cc 100644 --- a/Tools/PyJobTransforms/python/transform.py +++ b/Tools/PyJobTransforms/python/transform.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from future.utils import iteritems +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.transform @@ -117,7 +120,7 @@ class transform(object): @property def exitCode(self): - if self._exitCode == None: + if self._exitCode is None: msg.warning('Transform exit code getter: _exitCode is unset, returning "TRF_UNKNOWN"') return trfExit.nameToCode('TRF_UNKNOWN') else: @@ -125,7 +128,7 @@ class transform(object): @property def exitMsg(self): - if self._exitMsg == None: + if self._exitMsg is None: msg.warning('Transform exit message getter: _exitMsg is unset, returning empty string') return '' else: @@ -252,7 +255,7 @@ class transform(object): # Need to know if any input or output files were set - if so then we suppress the # corresponding parameters from AMI inputFiles = outputFiles = False - for k, v in self._argdict.iteritems(): + for k, v in iteritems(self._argdict): if k.startswith('input') and isinstance(v, argFile): inputFiles = True elif k.startswith('output') and isinstance(v, argFile): @@ -269,7 +272,7 @@ class transform(object): from PyJobTransforms.trfAMI import TagInfo tag=TagInfo(self._argdict['AMIConfig'].value) updateDict = {} - for k, v in 
dict(tag.trfs[0]).iteritems(): + for k, v in iteritems(dict(tag.trfs[0])): # Convert to correct internal key form k = cliToKey(k) if inputFiles and k.startswith('input'): @@ -293,7 +296,7 @@ class transform(object): msg.debug('Read: {0}'.format(jsonParams)) extraParameters.update(convertToStr(jsonParams)) argfile.close() - except Exception, e: + except Exception as e: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'), 'Error when deserialising JSON file {0} ({1})'.format(self._argdict['argJSON'], e)) # Event Service @@ -305,7 +308,7 @@ class transform(object): extraParameters.update(updateDict) # Process anything we found - for k,v in extraParameters.iteritems(): + for k,v in iteritems(extraParameters): msg.debug('Found this extra argument: {0} with value: {1} ({2})'.format(k, v, type(v))) if k not in self.parser._argClass: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'), 'Argument "{0}" not known (try "--help")'.format(k)) @@ -321,7 +324,7 @@ class transform(object): # Set the key name as an argument property - useful to be able to look bask at where this # argument came from - for k, v in self._argdict.iteritems(): + for k, v in iteritems(self._argdict): if isinstance(v, argument): v.name = k @@ -337,7 +340,7 @@ class transform(object): JSONDump(self._argdict) sys.exit(0) - except trfExceptions.TransformArgException, e: + except trfExceptions.TransformArgException as e: msg.critical('Argument parsing failure: {0!s}'.format(e)) self._exitCode = e.errCode self._exitMsg = e.errMsg @@ -345,7 +348,7 @@ class transform(object): self.generateReport() sys.exit(self._exitCode) - except trfExceptions.TransformAMIException, e: + except trfExceptions.TransformAMIException as e: msg.critical('AMI failure: {0!s}'.format(e)) self._exitCode = e.errCode self._exitMsg = e.errMsg @@ -386,13 +389,13 @@ class transform(object): if 'showSteps' in self._argdict: for exe in self._executors: - print "Executor Step: {0} (alias {1})".format(exe.name, exe.substep) + print("Executor Step: {0} (alias {1})".format(exe.name, exe.substep)) if msg.level <= logging.DEBUG: - print " {0} -> {1}".format(exe.inData, exe.outData) + print(" {0} -> {1}".format(exe.inData, exe.outData)) sys.exit(0) if 'showGraph' in self._argdict: - print self._executorGraph + print(self._executorGraph) sys.exit(0) # Graph stuff! @@ -404,9 +407,9 @@ class transform(object): if 'showPath' in self._argdict: msg.debug('Execution path list is: {0}'.format(self._executorPath)) # Now print it nice - print 'Executor path is:' + print('Executor path is:') for node in self._executorPath: - print ' {0}: {1} -> {2}'.format(node['name'], list(node['input']), list(node['output'])) + print(' {0}: {1} -> {2}'.format(node['name'], list(node['input']), list(node['output']))) sys.exit(0) msg.debug('Execution path is {0}'.format(self._executorPath)) @@ -486,7 +489,7 @@ class transform(object): self._inputData = list() self._outputData = list() - for key, value in self._argdict.iteritems(): + for key, value in iteritems(self._argdict): # Note specifier [A-Za-z0-9_]+? makes this match non-greedy (avoid swallowing the optional 'File' suffix) m = re.match(r'(input|output|tmp)([A-Za-z0-9_]+?)(File)?$', key) # N.B. 
Protect against taking argunents which are not type argFile @@ -499,15 +502,15 @@ class transform(object): ## @note If we have no real data then add the pseudo datatype NULL, which allows us to manage # transforms which can run without data - if len(self._inputData) is 0: + if len(self._inputData) == 0: self._inputData.append('inNULL') - if len(self._outputData) is 0: + if len(self._outputData) == 0: self._outputData.append('outNULL') msg.debug('Transform has this input data: {0}; output data {1}'.format(self._inputData, self._outputData)) # Now see if we have any steering - manipulate the substep inputs and outputs before we # setup the graph - if 'steering' in self._argdict.keys(): + if 'steering' in self._argdict: msg.debug('Now applying steering to graph: {0}'.format(self._argdict['steering'].value)) self._doSteering() @@ -524,7 +527,7 @@ class transform(object): self._executorGraph.findExecutionPath() self._executorPath = self._executorGraph.execution - if len(self._executorPath) is 0: + if len(self._executorPath) == 0: raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_SETUP'), 'Execution path finding resulted in no substeps being executed' '(Did you correctly specify input data for this transform?)') @@ -538,7 +541,7 @@ class transform(object): def _doSteering(self, steeringDict = None): if not steeringDict: steeringDict = self._argdict['steering'].value - for substep, steeringValues in steeringDict.iteritems(): + for substep, steeringValues in iteritems(steeringDict): foundSubstep = False for executor in self._executors: if executor.name == substep or executor.substep == substep: @@ -552,7 +555,7 @@ class transform(object): startSet = executor.outData origLen = len(startSet) msg.debug('Data values to be modified are: {0}'.format(startSet)) - if steeringValue[1] is '+': + if steeringValue[1] == '+': startSet.add(steeringValue[2]) if len(startSet) != origLen + 1: raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_GRAPH_STEERING_ERROR'), @@ -573,7 +576,7 @@ class transform(object): @property def lastExecuted(self): # Just make sure we have the path traced - if not hasattr(self, '_executorPath') or len(self._executorPath) is 0: + if not hasattr(self, '_executorPath') or len(self._executorPath) == 0: return None lastExecutor = self._executorDictionary[self._executorPath[0]['name']] @@ -635,7 +638,7 @@ class transform(object): if reportType is None or 'pilotPickle' in reportType: self._report.writePilotPickleReport(filename='{0}Extract.pickle'.format(baseName), fast=fast, fileReport=fileReport) - except trfExceptions.TransformTimeoutException, reportException: + except trfExceptions.TransformTimeoutException as reportException: msg.error('Received timeout when writing report ({0})'.format(reportException)) msg.error('Report writing is aborted - sorry. Transform will exit with TRF_METADATA_CALL_FAIL status.') if ('orphanKiller' in self._argdict): @@ -644,7 +647,7 @@ class transform(object): infanticide(message=True) sys.exit(trfExit.nameToCode('TRF_METADATA_CALL_FAIL')) - except trfExceptions.TransformException, reportException: + except trfExceptions.TransformException as reportException: # This is a bad one! 
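The transform.py hunks above repeatedly apply two syntax migrations: "except SomeError, e" becomes "except SomeError as e", and dict.iteritems() is routed through future.utils.iteritems so the same line runs on Python 2 and 3. A minimal sketch, illustrative only and with placeholder argument names, assuming the future package:

    from __future__ import print_function
    from future.utils import iteritems

    argdict = {'inputFile': 'events.root', 'maxEvents': 10}   # placeholder values

    try:
        print(argdict['outputFile'])             # key not present, raises KeyError
    except KeyError as e:                        # 'as' replaces the removed comma syntax
        print('missing argument: {0}'.format(e))

    for key, value in iteritems(argdict):        # same call works on Python 2 and 3
        print('{0} = {1}'.format(key, value))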
msg.critical('Attempt to write job report failed with exception {0!s}: {1!s}'.format(reportException.__class__.__name__, reportException)) msg.critical('Stack trace now follows:\n{0}'.format(traceback.format_exc())) @@ -718,10 +721,10 @@ class transform(object): def getFiles(self, io = None): res = [] msg.debug('Looking for file arguments matching: io={0}'.format(io)) - for argName, arg in self._argdict.iteritems(): + for argName, arg in iteritems(self._argdict): if isinstance(arg, argFile): msg.debug('Argument {0} is argFile type ({1!s})'.format(argName, arg)) - if io != None and arg.io != io: + if io is not None and arg.io != io: continue msg.debug('Argument {0} matches criteria'.format(argName)) res.append(arg) diff --git a/Tools/PyJobTransforms/python/trfAMI.py b/Tools/PyJobTransforms/python/trfAMI.py index 774b0a7e589..ffd5623dad1 100644 --- a/Tools/PyJobTransforms/python/trfAMI.py +++ b/Tools/PyJobTransforms/python/trfAMI.py @@ -1,3 +1,9 @@ +from future.utils import iteritems +from future.utils import listitems +from builtins import zip + +from builtins import object +from builtins import range # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfAMI @@ -25,7 +31,7 @@ AMIerrorCode=trfExit.nameToCode('TRF_AMI_ERROR') ## @brief Stores the configuration of a transform -class TrfConfig: +class TrfConfig(object): def __init__(self): self.name=None self.release=None @@ -41,7 +47,7 @@ class TrfConfig: theDict=self.inFiles.copy() theDict.update(self.outFiles) theDict.update(self.physics) - for (k,v) in theDict.iteritems(): + for (k,v) in iteritems(theDict): yield k,v def __str__(self): @@ -68,7 +74,7 @@ class TrfConfig: def _argsToString(self, adict): string='' - for (k,v) in adict.iteritems(): + for (k,v) in iteritems(adict): if self.newTransform: if not k.startswith('--'): k = "--"+k @@ -79,11 +85,11 @@ class TrfConfig: # Should be a substep argument if 'Exec' in k: # preExec, postExec string += " " + k - for vk, vv in v.iteritems(): + for vk, vv in iteritems(v): string += " " + _parseExecDict(vk, vv) elif 'Include' in k: # preInclude, postInclude string += " " + k - for vk, vv in v.iteritems(): + for vk, vv in iteritems(v): string += " " + _parseIncludeDict(vk, vv) else: # Misc substep string/number argument...? @@ -93,7 +99,7 @@ class TrfConfig: else: separator=':' string += " " + k - for vk, vv in v.iteritems(): + for vk, vv in iteritems(v): string += " " + vk + separator + vv elif isinstance(v, (list, tuple)): # athenaopts are special - space separated @@ -153,7 +159,7 @@ def isNewAMITag(tag): 'x' : 302, } - if tag[0] in newTagDict.keys(): + if tag[0] in newTagDict: if int(tag[1:]) > newTagDict[tag[0]]: msg.debug('it is a new tag') return True @@ -163,7 +169,7 @@ def isNewAMITag(tag): ## @brief Stores the information about a given tag. -class TagInfo: +class TagInfo(object): def __init__(self, tag, suppressNonJobOptions = True): self._tag=tag self._isNewTag = isNewAMITag(tag) @@ -332,7 +338,7 @@ def getTrfConfigFromPANDA(tag): physics = dict( (k, ReadablePANDA(v) ) for (k,v) in zip(keys, values)) # Hack to correct trigger keys being stored with spaces in panda - for k, v in physics.iteritems(): + for k, v in iteritems(physics): if 'triggerConfig' in k or 'triggerConfigByRun' in k: if ' ' in v: physics[k] = v.replace(' ', ',') @@ -347,7 +353,7 @@ def getTrfConfigFromPANDA(tag): msg.debug("Removed extraParamater=%s from arguments." 
% val) msg.debug("Checking for input/output file arguments...") - for arg in physics.keys(): + for arg in list(physics): if arg.lstrip('-').startswith('input') and arg.endswith('File'): value=physics.pop(arg) msg.debug("Found input file argument %s=%s." % (arg,value) ) @@ -360,7 +366,7 @@ def getTrfConfigFromPANDA(tag): trf.outFiles[arg]=getOutputFileName(fmt) msg.debug("Checking for not set arguments...") - for arg,value in physics.items(): + for arg,value in listitems(physics): if value=="NONE" or value=="none" or value==["NONE"]: val=physics.pop(arg) msg.debug("Removed %s=%s from arguments." % (arg, val) ) @@ -421,14 +427,14 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): try: # import pyAMI.atlas.api import pyAMI.exception - except ImportError, e: + except ImportError as e: raise TransformAMIException(AMIerrorCode, 'Import of pyAMI modules failed ({0})'.format(e)) try: amiclient=getAMIClient() # result = pyAMI.atlas.api.get_ami_tag(amiclient, tag) result = get_ami_tag(amiclient, tag, suppressNonJobOptions) - except pyAMI.exception.Error, e: + except pyAMI.exception.Error as e: msg.warning('An exception occured when connecting to primary AMI: {0}'.format(e)) msg.debug('Exception: {0}'.format(e)) if 'please login' in e.message or 'certificate expired' in e.message: @@ -442,7 +448,7 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): amiclient.config.endpoint = 'atlas-replica' # result = pyAMI.atlas.api.get_ami_tag(amiclient, tag) result = get_ami_tag(amiclient, tag, suppressNonJobOptions) - except pyAMI.exception.Error, e: + except pyAMI.exception.Error as e: msg.error('An exception occured when connecting to the AMI replica catalog: {0}'.format(e)) raise TransformAMIException(AMIerrorCode, 'Getting tag info from AMI failed (tried both primary and replica). ' 'See logfile for exception details.') @@ -454,11 +460,11 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): trf.outputs=result[0].get('outputs', {}) trf.release = result[0]['SWReleaseCache'].replace('_', ',') - if 'phconfig' in result[0].keys(): + if 'phconfig' in result[0]: trf.physics=deserialiseFromAMIString(result[0]['phconfig']) else: physics = {} - for k, v in result[0].iteritems(): + for k, v in iteritems(result[0]): if 'Exec' in k: execStrList = [execStr for execStr in convertToStr(v).replace('" "', '"" ""').split('" "')] physics[convertToStr(k)] = [remove_enclosing_quotes(execStr).replace('\\"', '"') for execStr in execStrList] @@ -473,11 +479,11 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): msg.debug('%s' % dumps(physics, indent = 4)) if suppressNonJobOptions: - for k in physics.keys(): + for k in list(physics): if k in ['productionStep', 'transformation', 'SWReleaseCache']: physics.pop(k) - for k, v in physics.iteritems(): + for k, v in iteritems(physics): if 'triggerConfig' in k or 'triggerConfigByRun' in k: if ' ' in v: physics[k] = v.replace(' ', ',') @@ -489,7 +495,7 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): msg.debug("Removed extraParamater=%s from arguments." % val) msg.debug("Checking for input/output file arguments...") - for arg in physics.keys(): + for arg in list(physics): if arg.lstrip('-').startswith('input') and arg.endswith('File'): value = physics.pop(arg) msg.debug("Found input file argument %s=%s." 
% (arg, value)) @@ -502,7 +508,7 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): trf.outFiles[arg] = getOutputFileName(fmt) msg.debug("Checking for not set arguments...") - for arg, value in physics.items(): + for arg, value in listitems(physics): if value == "NONE" or value == "none" or value == ["NONE"]: val = physics.pop(arg) msg.debug("Removed %s=%s from arguments." % (arg, val)) @@ -513,18 +519,18 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): raise TransformAMIException(AMIerrorCode, "Bad result for tag's phconfig: {0}".format(trf.physics)) if trf.inFiles == {}: - if 'inputs' in result[0].keys(): + if 'inputs' in result[0]: trf.inFiles=deserialiseFromAMIString(result[0]['inputs']) - for inFileType, inFileName in trf.inFiles.iteritems(): + for inFileType, inFileName in iteritems(trf.inFiles): # Not all AMI tags actually have a working filename, so fallback to trfDefaultFiles # if necessary if inFileName == '' or inFileName =={} or inFileName == [] or inFileName == '{}': trf.inFiles[inFileType] = getInputFileName(inFileType, tag) - if 'outputs' in result[0].keys(): + if 'outputs' in result[0]: outputs=deserialiseFromAMIString(result[0]['outputs']) - trf.outFiles=dict( (k, getOutputFileName(k.lstrip('output').rstrip('File')) ) for k in outputs.iterkeys() ) - trf.outfmts=[ outputs[k]['dstype'] for k in outputs.iterkeys() ] + trf.outFiles=dict( (k, getOutputFileName(k.lstrip('output').rstrip('File')) ) for k in outputs ) + trf.outfmts=[ outputs[k]['dstype'] for k in outputs ] except KeyError as e: raise TransformAMIException(AMIerrorCode, "Missing key in AMI data: {0}".format(e)) except Exception as e: @@ -546,11 +552,11 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True): def deserialiseFromAMIString(amistring): try: result = json.loads(amistring) - except ValueError, e_json: + except ValueError as e_json: msg.debug("Failed to decode {0} as JSON: {1}".format(amistring, e_json)) try: result = ast.literal_eval(amistring) - except SyntaxError, e_ast: + except SyntaxError as e_ast: errMsg = "Failed to deserialise AMI string '{0}' using JSON or eval".format(amistring) msg.error(errMsg) raise TransformAMIException(AMIerrorCode, errMsg) diff --git a/Tools/PyJobTransforms/python/trfArgClasses.py b/Tools/PyJobTransforms/python/trfArgClasses.py index 570c6879950..5158c203d01 100644 --- a/Tools/PyJobTransforms/python/trfArgClasses.py +++ b/Tools/PyJobTransforms/python/trfArgClasses.py @@ -1,3 +1,12 @@ +from __future__ import print_function +from future.utils import iteritems +from future.utils import itervalues +from future.utils import listvalues + +from past.builtins import basestring +from builtins import object +from builtins import int + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfArgClasses @@ -50,7 +59,7 @@ class argFactory(object): obj = self._genclass(*self._args, **self._kwargs) else: obj = self._genclass(valueString, *self._args, **self._kwargs) - except Exception, e: + except Exception as e: msg.fatal('Got this exception raised when calling object factory: {0}'.format(e)) raise return obj @@ -152,7 +161,7 @@ class argString(argument): # @details Sets value directly if it's a @c str, otherwise call the @c str() converter @value.setter def value(self, value): - if value == None: + if value is None: # For strings, None maps to '' self._value = '' else: @@ -194,7 +203,7 @@ class argInt(argument): # @throws trfExceptions.TransformArgException if @c int() conversion fails @value.setter def 
value(self, value): - if value == None: + if value is None: # For ints None maps to 0 self._value = 0 else: @@ -204,7 +213,7 @@ class argInt(argument): ## We try hard to convert the value we were given - anything @c int() swallows we accept try: self._value = int(value) - except ValueError, e: + except ValueError as e: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert value {0} to int: {1}'.format(value, e)) @@ -251,7 +260,7 @@ class argFloat(argument): @value.setter def value(self, value=None): # Default value will be 0.0 or self._min (if defined) - if value == None: + if value is None: if self._min is not None: self._value = self._min else: @@ -266,7 +275,7 @@ class argFloat(argument): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert %s to a float' % str(value)) - if (self._min != None and self.value < self._min) or (self._max != None and self._value > self._max): + if (self._min is not None and self.value < self._min) or (self._max is not None and self._value > self._max): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_OUT_OF_RANGE'), 'argFloat value out of range: %g is not between %s and %s' % (self.value, self._min, self._max)) @@ -289,7 +298,7 @@ class argBool(argument): @value.setter def value(self, value): # Default value matches the python bool() constructor - if value == None: + if value is None: self._value = False else: if isinstance(value, bool): @@ -339,13 +348,13 @@ class argList(argument): def value(self, value): if isinstance(value, (list, tuple)): self._value = list(value) - elif value==None: + elif value is None: self._value = [] return else: try: if self._supressEmptyStrings: - self._value = [ v for v in value.split(self._splitter) if v is not '' ] + self._value = [ v for v in value.split(self._splitter) if v != '' ] else: self._value = value.split(self._splitter) except AttributeError: @@ -386,17 +395,17 @@ class argIntList(argList): def value(self, value): if isinstance(value, list): for v in value: - if not isinstance(v, (int, long)): + if not isinstance(v, int): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'), 'Illegal value {0} in list of ints'.format(v)) self._value = value - elif value==None: + elif value is None: self._value = [] return else: try: if self._supressEmptyStrings: - self._value = [ v for v in value.split(self._splitter) if v is not '' ] + self._value = [ v for v in value.split(self._splitter) if v != '' ] else: self._value = value.split(self._splitter) self._value = [ int(el) for el in self._value ] @@ -442,22 +451,22 @@ class argKeyFloatValueList(argList): @value.setter def value(self, value): if isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'), 'Illegal key argument type {0} in dictionary for argKeyFloatValueList'.format(k)) if not isinstance(v, float): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_ERROR'), 'Illegal value argument type {0} in dictionary for argKeyFloatValueList'.format(v)) self._value = value - elif value==None: + elif value is None: self._value = {} return else: self._value = {} try: if self._supressEmptyStrings: - kvlist = [ v for v in value.split(self._splitter) if v is not '' ] + kvlist = [ v for v in value.split(self._splitter) if v != '' ] else: kvlist = 
value.split(self._splitter) for item in kvlist: @@ -533,7 +542,7 @@ class argFile(argList): } self._fileMetadata = {} if multipleOK is None: - if self._io is 'input': + if self._io == 'input': self._multipleOK = True else: self._multipleOK = False @@ -575,18 +584,18 @@ class argFile(argList): ## @brief mergeTargeSize value setter @mergeTargetSize.setter def mergeTargetSize(self, value): - if value==None: + if value is None: self._mergeTargetSize = 0 else: self._mergeTargetSize = value @property def prodsysDescription(self): - if type(self._type) is types.DictType: + if isinstance(self._type, dict): if self._type=={}: desc = {'type' : 'file', 'subtype' : "NONE" } else: - desc = {'type' : 'file', 'subtype' : dict((str(k).upper(), str(v).upper()) for (k,v) in self._type.iteritems())} + desc = {'type' : 'file', 'subtype' : dict((str(k).upper(), str(v).upper()) for (k,v) in iteritems(self._type))} else: desc = {'type' : 'file', 'subtype' : str(self._type).upper()} desc['multiple'] = self._multipleOK @@ -613,7 +622,7 @@ class argFile(argList): except KeyError: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Filename (key "lfn") not found in Tier-0 file dictionary: {0}'.format(myfile)) - for k, v in myfile.iteritems(): + for k, v in iteritems(myfile): if k == 'guid': self._setMetadata([myfile['lfn']], {'file_guid': v}) elif k == 'events': @@ -630,7 +639,7 @@ class argFile(argList): self._value = list(value) self._getDatasetFromFilename(reset = False) self._resetMetadata() - elif value==None: + elif value is None: self._value = [] return else: @@ -675,13 +684,13 @@ class argFile(argList): # Problem is not so much the [] expansion, but the invisible .N attempt number # One can only deal with this with a listdir() functionality # N.B. Current transforms only do globbing on posix fs too (see trfutil.expandStringToList()) - if self._urlType is 'posix': + if self._urlType == 'posix': msg.debug('Found POSIX filesystem input - activating globbing') newValue = [] for filename in self._value: # Simple case globbedFiles = glob.glob(filename) - if len(globbedFiles) is 0: # No files globbed for this 'filename' argument. + if len(globbedFiles) == 0: # No files globbed for this 'filename' argument. raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'), 'Input file argument {0} globbed to NO input files - probably the file(s) are missing'.format(filename)) @@ -691,7 +700,7 @@ class argFile(argList): self._value = newValue msg.debug ('File input is globbed to %s' % self._value) - elif self._urlType is 'root': + elif self._urlType == 'root': msg.debug('Found root filesystem input - activating globbing') newValue = [] for filename in self._value: @@ -735,7 +744,7 @@ class argFile(argList): patt = re.compile(fileMask.replace('*','.*').replace('?','.')) for srmFile in myFiles: - if fileMask is not '': + if fileMask != '': if(patt.search(srmFile)) is not None: #if fnmatch.fnmatch(srmFile, fileMask): msg.debug('match: ',srmFile) @@ -747,14 +756,14 @@ class argFile(argList): except (AttributeError, TypeError, OSError): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_RUNTIME_ERROR'), 'Failed to convert %s to a list' % str(value)) - if len(self._value) > 0 and len(newValue) is 0: + if len(self._value) > 0 and len(newValue) == 0: # Woops - no files! 
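The argFile hunks above replace identity tests against literals ("is 'posix'", "is 0", "is not ''") with value comparisons. The short example below (illustrative only, not from the patch) shows why: "is" checks object identity, which only happens to hold for interned literals, so "==" and "!=" are the reliable tests.

    url_type = ''.join(['pos', 'ix'])   # equal to 'posix' but built at runtime

    print(url_type == 'posix')          # True: equality compares values
    print(url_type is 'posix')          # usually False: identity depends on string
                                        # interning (SyntaxWarning on recent Pythons)

    count = 0
    print(count == 0)                   # correct zero/emptiness test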
raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_INPUT_FILE_ERROR'), 'Input file argument(s) {0!s} globbed to NO input files - ls command failed') self._value = newValue msg.debug ('File input is globbed to %s' % self._value) # Check if multiple outputs are ok for this object - elif self._multipleOK == False and len(self._value) > 1: + elif self._multipleOK is False and len(self._value) > 1: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_OUTPUT_FILE_ERROR'), 'Multiple file arguments are not supported for {0} (was given: {1}'.format(self, self._value)) @@ -853,10 +862,10 @@ class argFile(argList): if events is None: msg.debug('Got events=None for file {0} - returning None for this instance'.format(fname)) return None - if events is 'UNDEFINED': + if events == 'UNDEFINED': msg.debug('Got events=UNDEFINED for file {0} - returning UNDEFINED for this instance'.format(fname)) return 'UNDEFINED' - if not isinstance(events, (int, long)): + if not isinstance(events, int): msg.warning('Got unexpected events metadata for file {0}: {1!s} - returning None for this instance'.format(fname, events)) return None totalEvents += events @@ -885,7 +894,7 @@ class argFile(argList): # If we have the special guid option, then manually try to set GUIDs we find if self._guid is not None: msg.debug('Now trying to set file GUID metadata using {0}'.format(self._guid)) - for fname, guid in self._guid.iteritems(): + for fname, guid in iteritems(self._guid): if fname in self._value: self._fileMetadata[fname]['file_guid'] = guid else: @@ -901,13 +910,13 @@ class argFile(argList): # Normalise the files and keys parameter if files is None: files = self._value - elif isinstance(files, str): + elif isinstance(files, basestring): files = (files,) msg.debug('getMetadata will examine these files: {0!s}'.format(files)) if metadataKeys is None: - metadataKeys = self._metadataKeys.keys() - elif isinstance(metadataKeys, str): + metadataKeys = list(self._metadataKeys) + elif isinstance(metadataKeys, basestring): metadataKeys = (metadataKeys,) if maskMetadataKeys is not None: for key in maskMetadataKeys: @@ -944,7 +953,7 @@ class argFile(argList): # @param populate If missing key should be generated by calling the population subroutines # @param flush If cached data should be flushed and the generator rerun def getSingleMetadata(self, fname, metadataKey, populate = True, flush = False): - if not (isinstance(fname, str) and isinstance(metadataKey, str)): + if not (isinstance(fname, basestring) and isinstance(metadataKey, basestring)): raise trfExceptions.TransformInternalException(trfExit.nameToCode('TRF_INTERNAL'), 'Illegal call to getSingleMetadata function: {0!s} {1!s}'.format(fname, metadataKey)) md = self.getMetadata(files = fname, metadataKeys = metadataKey, populate = populate, flush = flush) @@ -967,7 +976,7 @@ class argFile(argList): if self._fileMetadata[fname]['_exists'] is False: # N.B. A log ERROR message has printed by the existence test, so do not repeat that news here for key in metadataKeys: - if key is not '_exists': + if key != '_exists': self._fileMetadata[fname][key] = None else: # OK, file seems to exist at least... @@ -979,13 +988,13 @@ class argFile(argList): if key in self._fileMetadata[fname]: msg.debug('Found cached value for {0}:{1} = {2!s}'.format(fname, key, self._fileMetadata[fname][key])) else: - msg.debug('No cached value for {0}:{1}. 
Calling generator function {2} ({3})'.format(fname, key, self._metadataKeys[key].func_name, self._metadataKeys[key])) + msg.debug('No cached value for {0}:{1}. Calling generator function {2} ({3})'.format(fname, key, self._metadataKeys[key].__name__, self._metadataKeys[key])) try: # For efficiency call this routine with all files we have msg.info("Metadata generator called to obtain {0} for {1}".format(key, files)) self._metadataKeys[key](files) - except trfExceptions.TransformMetadataException, e: - msg.error('Calling {0!s} raised an exception: {1!s}'.format(self._metadataKeys[key].func_name, e)) + except trfExceptions.TransformMetadataException as e: + msg.error('Calling {0!s} raised an exception: {1!s}'.format(self._metadataKeys[key].__name__, e)) if key not in self._fileMetadata[fname]: msg.warning('Call to function {0} for {1} file {2} failed to populate metadata key {3}'.format(self._metadataKeys[key].__name__, self.__class__.__name__, fname, key)) self._fileMetadata[fname][key] = None @@ -1003,12 +1012,12 @@ class argFile(argList): # @param files Files to set metadata for (@c None means "all") # @param metadataKeys Dictionary with metadata keys and values def _setMetadata(self, files=None, metadataKeys={}): - if files == None: + if files is None: files = self._value for fname in files: if fname not in self._fileMetadata: self._fileMetadata[fname] = {} - for k, v in metadataKeys.iteritems(): + for k, v in iteritems(metadataKeys): msg.debug('Manualy setting {0} for file {1} to {2}'.format(k, fname, v)) self._fileMetadata[fname][k] = v @@ -1023,11 +1032,11 @@ class argFile(argList): msg.debug('Testing for cached values for files {0} and keys {1}'.format(files, metadataKeys)) if files is None: files = self._value - elif isinstance(files, str): + elif isinstance(files, basestring): files = (files,) if metadataKeys is None: - metadataKeys = self._metadataKeys.keys() - elif isinstance(metadataKeys, str): + metadataKeys = list(self._metadataKeys) + elif isinstance(metadataKeys, basestring): metadataKeys = (metadataKeys,) isCachedFlag = True @@ -1036,7 +1045,7 @@ class argFile(argList): if key not in self._fileMetadata[fname]: isCachedFlag = False break - if isCachedFlag == False: + if isCachedFlag is False: break return isCachedFlag @@ -1073,7 +1082,7 @@ class argFile(argList): # @return None (internal @c self._fileMetadata cache is updated) def _getSize(self, files): for fname in files: - if self._urlType is 'posix': + if self._urlType == 'posix': try: self._fileMetadata[fname]['size'] = os.stat(fname).st_size except (IOError, OSError) as e: @@ -1122,7 +1131,7 @@ class argFile(argList): def _exists(self, files): msg.debug('Testing existance for {0}'.format(files)) for fname in files: - if self._urlType is 'posix': + if self._urlType == 'posix': try: size = os.stat(fname).st_size self._fileMetadata[fname]['file_size'] = size @@ -1208,7 +1217,7 @@ class argAthenaFile(argFile): # N.B. 
Could parallelise here for fname in myFiles: athFileMetadata = AthenaLiteFileInfo(fname, aftype, retrieveKeys=retrieveKeys) - if athFileMetadata == None: + if athFileMetadata is None: raise trfExceptions.TransformMetadataException(trfExit.nameToCode('TRF_METADATA_CALL_FAIL'), 'Call to AthenaLiteFileInfo failed') msg.debug('Setting metadata for file {0} to {1}'.format(fname, athFileMetadata[fname])) self._fileMetadata[fname].update(athFileMetadata[fname]) @@ -1823,13 +1832,13 @@ class argSubstep(argument): msg.debug('Attempting to set argSubstep from {0!s} (type {1}'.format(value, type(value))) if value is None: self._value = {} - elif isinstance(value, str): + elif isinstance(value, basestring): self._value = dict(self._parseStringAsSubstep(value)) elif isinstance(value, (list, tuple)): # This is a list of strings to parse, so we go through them one by one self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) self._value.update(dict(self._parseStringAsSubstep(item))) elif isinstance(value, dict): @@ -1871,13 +1880,13 @@ class argSubstep(argument): value = None ## @note First we see if we have an explicit name or substep match, then a special 'first' or 'default' match - if name in self._value.keys(): + if name in self._value: value = self._value[name] - elif substep in self._value.keys(): + elif substep in self._value: value = self._value[substep] - elif first and 'first' in self._value.keys(): + elif first and 'first' in self._value: value = self._value['first'] - elif 'default' in self._value.keys(): + elif 'default' in self._value: value = self._value['default'] ## @note Now see how we should handle an 'all', if it exists. 
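# --- Illustrative sketch (not part of the patch) ---------------------------
# The @note comments above describe the lookup precedence for substep-style
# arguments: explicit executor name, then substep alias, then a special
# 'first', then 'default', with any 'all' entry composed in afterwards. The
# patch also swaps `key in d.keys()` for plain `key in d`, which is
# behaviour-preserving on both Python 2 and 3. A minimal, hypothetical
# stand-alone version of that precedence (function name and the choice to
# prepend 'all' for list values are assumptions, not the transform's code):

def resolve_substep_value(values, name, substep, first=False):
    """Resolve a per-substep value with precedence
    name > substep > 'first' > 'default', then compose an 'all' entry.
    `values` is a dict such as {'default': [...], 'all': [...], 'RAWtoESD': [...]}."""
    value = None
    if name in values:                      # explicit executor name wins
        value = values[name]
    elif substep in values:                 # then the substep alias
        value = values[substep]
    elif first and 'first' in values:       # then the special 'first' entry
        value = values['first']
    elif 'default' in values:               # finally the default
        value = values['default']
    # 'all' only composes with list-type values; a specific non-list value
    # trumps it, as the note above explains.
    if 'all' in values:
        if value is None:
            value = values['all']
        elif isinstance(value, list):
            value = values['all'] + value   # assumption: 'all' is prepended
    return value

# Example:
#   resolve_substep_value({'default': ['-l INFO'], 'all': ['--foo'],
#                          'RAWtoESD': ['-l DEBUG']}, 'RAWtoESD', 'r2e')
# returns ['--foo', '-l DEBUG']: the explicit name match wins and the 'all'
# entry is combined with it because the value is a list.
# ---------------------------------------------------------------------------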
@@ -1888,7 +1897,7 @@ class argSubstep(argument): ## @note Defining all: for a key which is not composable (like a list) # doesn't make much sense and, in this case, the specific value is allowed # to trump the all: - if 'all' in self._value.keys(): + if 'all' in self._value: if value is None: value = self._value['all'] elif isinstance(value, list): @@ -1936,13 +1945,13 @@ class argSubstepList(argSubstep): msg.debug('Attempting to set argSubstep from {0!s} (type {1}'.format(value, type(value))) if value is None: self._value = {} - elif isinstance(value, str): + elif isinstance(value, basestring): self._value = dict(self._parseStringAsSubstep(value)) elif isinstance(value, (list, tuple)): # This is a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) subStepList = self._parseStringAsSubstep(item) for subStep in subStepList: @@ -1951,8 +1960,8 @@ class argSubstepList(argSubstep): else: self._value[subStep[0]] = subStep[1] elif isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary key {0!s} for substep is not a string'.format(k)) if not isinstance(v, list): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary value {0!s} for substep is not a list'.format(v)) @@ -1989,23 +1998,23 @@ class argSubstepString(argSubstep): msg.debug('Attempting to set argSubstep from {0!s} (type {1}'.format(value, type(value))) if value is None: self._value = {} - elif isinstance(value, str): + elif isinstance(value, basestring): subStepList = self._parseStringAsSubstep(value) self._value = dict([(subStep[0], subStep[1]) for subStep in subStepList]) elif isinstance(value, (list, tuple)): # This is a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) subStepList = self._parseStringAsSubstep(item) for subStep in subStepList: self._value[subStep[0]] = subStep[1] elif isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary key {0!s} for substep is not a string'.format(k)) - if not isinstance(v, str): + if not isinstance(v, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary value {0!s} for substep is not a string'.format(v)) self._value = value else: @@ -2032,21 +2041,21 @@ class argSubstepBool(argSubstep): self._value = {} elif isinstance(value, bool): self._value = {self._defaultSubstep: value} - elif isinstance(value, str): + elif isinstance(value, basestring): subStepList = self._parseStringAsSubstep(value) self._value = dict([(subStep[0], strToBool(subStep[1])) for subStep in subStepList]) elif isinstance(value, (list, tuple)): # This is a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not 
isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) subStepList = self._parseStringAsSubstep(item) for subStep in subStepList: self._value[subStep[0]] = strToBool(subStep[1]) elif isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary key {0!s} for substep is not a string'.format(k)) if not isinstance(v, bool): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary value {0!s} for substep is not a bool'.format(v)) @@ -2077,28 +2086,28 @@ class argSubstepInt(argSubstep): self._value = {} elif isinstance(value, int): self._value = {self._defaultSubstep: value} - elif isinstance(value, str): + elif isinstance(value, basestring): subStepList = self._parseStringAsSubstep(value) self._value = dict([(subStep[0], int(subStep[1])) for subStep in subStepList]) elif isinstance(value, (list, tuple)): # This is a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) subStepList = self._parseStringAsSubstep(item) for subStep in subStepList: self._value[subStep[0]] = int(subStep[1]) elif isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary key {0!s} for substep is not a string'.format(k)) - if not isinstance(v, (int, long)): + if not isinstance(v, int): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary value {0!s} for substep is not an int'.format(v)) self._value = value else: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Setter value {0!s} (type {1}) for substep argument cannot be parsed'.format(value, type(value))) - except ValueError, e: + except ValueError as e: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert substep value {0} to int'.format(value)) @@ -2136,22 +2145,22 @@ class argSubstepFloat(argSubstep): self._value = {} elif isinstance(value, float): self._value = {self._defaultSubstep: value} - elif isinstance(value, str): + elif isinstance(value, basestring): subStepList = self._parseStringAsSubstep(value) self._value = dict([(subStep[0], float(subStep[1])) for subStep in subStepList]) elif isinstance(value, (list, tuple)): # This is a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) subStepList = self._parseStringAsSubstep(item) for subStep in subStepList: self._value[subStep[0]] = float(subStep[1]) elif isinstance(value, dict): - for k, v in value.iteritems(): - if not isinstance(k, str): + for k, v in iteritems(value): + if not isinstance(k, basestring): raise 
trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Dictionary key {0!s} for substep is not a string'.format(k)) if not isinstance(v, float): @@ -2162,11 +2171,11 @@ class argSubstepFloat(argSubstep): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Setter value {0!s} (type {1}) for substep argument cannot be parsed'.format(value, type(value))) # Now do min/max checks - for my_float in self._value.values(): - if (self._min != None and my_float < self._min) or (self._max != None and my_float > self._max): + for my_float in itervalues(self._value): + if (self._min is not None and my_float < self._min) or (self._max is not None and my_float > self._max): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_OUT_OF_RANGE'), 'argFloat value out of range: {0} is not between {1} and {2}'.format(my_float, self._min, self._max)) - except ValueError, e: + except ValueError as e: raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert substep value {0} to float: {1}'.format(value, e)) @@ -2222,8 +2231,8 @@ class argSubstepSteering(argSubstep): self._dumpvalue = [""] elif isinstance(value, dict): # OK, this should be the direct setable dictionary - but do a check of that - for k, v in value.iteritems(): - if not isinstance(k, str) or not isinstance(v, list): + for k, v in iteritems(value): + if not isinstance(k, basestring) or not isinstance(v, list): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert dict {0!s} to argSubstepSteering'.format(value)) for subv in v: @@ -2236,19 +2245,19 @@ class argSubstepSteering(argSubstep): # multi-valued argument we re-call value() with an expanded diectionary and # one can nievely reset dumpvalue by mistake self._dumpvalue = getattr(self, "_dumpvalue", value) - elif isinstance(value, (str, list, tuple)): - if isinstance(value, str): + elif isinstance(value, (basestring, list, tuple)): + if isinstance(value, basestring): value = [value,] self._dumpvalue = getattr(self, "_dumpvalue", value) # Now we have a list of strings to parse self._value = {} for item in value: - if not isinstance(item, str): + if not isinstance(item, basestring): raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert list item {0!s} to substep (should be a string)'.format(item)) if item in argSubstepSteering.steeringAlises: msg.debug("Found value {0} in steeringAlises ({1})".format(item, argSubstepSteering.steeringAlises[item])) - for substep, steerlist in argSubstepSteering.steeringAlises[item].iteritems(): + for substep, steerlist in iteritems(argSubstepSteering.steeringAlises[item]): if substep in self._value: self._value[substep].extend(steerlist) else: @@ -2288,9 +2297,9 @@ class argSubstepConditions(argSubstep): super(self.__class__, self.__class__).value.fset(self, value) current = None - for k, v in self._value.iteritems(): + for k, v in iteritems(self._value): if "CurrentMC" == v: - if current == None: + if current is None: current = self._amiLookUp(getAMIClient()) self._value[k] = current @@ -2368,9 +2377,9 @@ class trfArgParser(argparse.ArgumentParser): @property def getProdsysDesc(self): desc = {} - for name, argClass in self._argClass.iteritems(): + for name, argClass in iteritems(self._argClass): msg.debug('Detected the local variable {0}'.format(name)) - if type(argClass)!=type(None): + if argClass is not None: desc[name] = argClass().prodsysDescription if name 
in self._helpString: desc[name].update({'help': self._helpString[name]}) @@ -2397,14 +2406,14 @@ class trfArgParser(argparse.ArgumentParser): ## @brief Return a list of all arguments understood by this transform in prodsys style # @details Arguments which are irrelevant for production are removed and the '--' is added back on def dumpArgs(self): - keyArray = [ '--' + str(key) for key in self._helpString.keys() if key not in ('h', 'verbose', 'loglevel', 'dumpargs', 'argdict') ] + keyArray = [ '--' + str(key) for key in self._helpString if key not in ('h', 'verbose', 'loglevel', 'dumpargs', 'argdict') ] keyArray.sort() - print 'ListOfDefaultPositionalKeys={0}'.format(keyArray) + print('ListOfDefaultPositionalKeys={0}'.format(keyArray)) ## Getter for argument list @property def allArgs(self): - return self._helpString.keys() + return list(self._helpString) ## @brief Call argument_parser parse_args, then concatenate values @@ -2417,7 +2426,7 @@ class trfArgParser(argparse.ArgumentParser): super(trfArgParser, self).parse_args(args = args, namespace = namespace) else: namespace = super(trfArgParser, self).parse_args(args = args) - for k, v in namespace.__dict__.iteritems(): + for k, v in iteritems(namespace.__dict__): msg.debug('Treating key %s (%s)' % (k, v)) if isinstance(v, list): # We build on the v[0] instance as this contains the correct metadata @@ -2478,14 +2487,14 @@ def strToBool(string): # are not the same. def dictSubstepMerge(dict1, dict2): mergeDict = {} - allKeys = set(dict1.keys()) | set(dict2.keys()) + allKeys = set(dict1) | set(dict2) # Find the value type - lists are special... listType = False if len(dict1) > 0: - if isinstance(dict1.values()[0], list): + if isinstance(listvalues(dict1)[0], list): listType = True elif len(dict2) > 0: - if isinstance(dict2.values()[0], list): + if isinstance(listvalues(dict2)[0], list): listType = True if listType: for key in allKeys: diff --git a/Tools/PyJobTransforms/python/trfArgs.py b/Tools/PyJobTransforms/python/trfArgs.py index e35e3614193..ccab1ba9d92 100644 --- a/Tools/PyJobTransforms/python/trfArgs.py +++ b/Tools/PyJobTransforms/python/trfArgs.py @@ -1,3 +1,4 @@ +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @Package PyJobTransforms.trfArgs @@ -16,7 +17,7 @@ from PyJobTransforms.trfLogger import stdLogLevels ## Add standard transform arguments to an argparse ArgumentParser def addStandardTrfArgs(parser): parser.add_argument('--verbose', '--debug', action='store_true', help='Set transform loglevel to DEBUG') - parser.add_argument('--loglevel', choices=stdLogLevels.keys(), help='Set transform logging level') + parser.add_argument('--loglevel', choices=list(stdLogLevels), help='Set transform logging level') parser.add_argument('--argJSON', '--argjson', metavar='FILE', help='File containing JSON serialised argument dictionary') parser.add_argument('--dumpargs', action='store_true', help='Dump transform arguments and exit') parser.add_argument('--showGraph', action='store_true', help='Show multi-step transform graph, then exit') @@ -205,7 +206,7 @@ def addPrimaryDPDArguments(parser, pick = None, transform = None, multipleOK=Fal for substep, dpdList in matchedOutputList: for dpdName in [ dpd.replace('Stream', '') for dpd in dpdList ]: msg.debug('Handling {0}'.format(dpdName)) - if pick == None or dpdName in pick: + if pick is None or dpdName in pick: # Need to decide which file type we actually have here dpdType = dpdName.split('_')[0] if 'RAW' in dpdType: @@ -248,7 +249,7 @@ def 
addTopPhysDAODArguments(parser, pick = None): from TopPhysD2PDMaker.TopPhysD2PDFlags import TopPhysAllDAODs for dpdWriter in TopPhysAllDAODs: dpdName = dpdWriter.StreamName.replace('Stream', '') - if pick == None or dpdName in pick: + if pick is None or dpdName in pick: parser.add_argument('--output' + dpdName + 'File', type=argFactory(trfArgClasses.argFile, substep=['a2d']), group='Top DAODs', metavar=dpdName.upper(), help='Top ADOD output %s file (substep [a2d])' % (dpdName,)) @@ -273,7 +274,7 @@ def addD3PDArguments(parser, pick = None, transform = None, multipleOK=False, ad for dpdWriter in listAllKnownD3PD: dpdName = dpdWriter.StreamName.replace('Stream', '') - if pick == None or dpdName in pick: + if pick is None or dpdName in pick: if addD3PDMRGtypes: parser.add_argument('--input' + dpdName + 'File', type=argFactory(trfArgClasses.argNTUPFile, treeNames=dpdWriter.TreeNames, io='input'), @@ -457,7 +458,7 @@ def addExtraDPDTypes(parser, pick=None, transform=None, multipleOK=False, NTUPMe if NTUPMergerArgs: for dpd in extraDPDs: - if pick == None or dpd.name in pick: + if pick is None or dpd.name in pick: if dpd.name.startswith('NTUP'): parser.add_argument('--input' + dpd.name + 'File', type=argFactory(dpd.argclass, multipleOK=True, io='input', type=dpd.type, treeNames=dpd.treeNames), @@ -471,7 +472,7 @@ def addExtraDPDTypes(parser, pick=None, transform=None, multipleOK=False, NTUPMe pass else: for dpd in extraDPDs: - if pick == None or dpd.name in pick: + if pick is None or dpd.name in pick: msg.debug('Adding DPD {0} ({1}, {2}, {3}, {4})'.format(dpd.name, dpd.type, dpd.substeps, dpd.treeNames, dpd.argclass)) # NTUPs are a bit special as they can take a treeName to count events if issubclass(dpd.argclass, trfArgClasses.argNTUPFile): diff --git a/Tools/PyJobTransforms/python/trfDecorators.py b/Tools/PyJobTransforms/python/trfDecorators.py index e46a5deedfc..9261a2f7622 100644 --- a/Tools/PyJobTransforms/python/trfDecorators.py +++ b/Tools/PyJobTransforms/python/trfDecorators.py @@ -1,3 +1,5 @@ +from future import standard_library +standard_library.install_aliases() # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @Package PyJobTrasforms.trfDecorators @@ -67,7 +69,7 @@ def stdTrfExceptionHandler(func): # This subclass is treated as a 'normal' exit condition # but it should never happen in production as it's a transform definition error - except trfExceptions.TransformSetupException, e: + except trfExceptions.TransformSetupException as e: msg.critical('Transform setup failed: {0}'.format(e.errMsg)) msg.critical('To help you debug here is the stack trace:') msg.critical(traceback.format_exc(None)) @@ -75,7 +77,7 @@ def stdTrfExceptionHandler(func): trfUtils.infanticide(message=True) sys.exit(e.errCode) - except trfExceptions.TransformException, e: + except trfExceptions.TransformException as e: msg.critical('Got a transform exception in the outer exception handler: {0!s}'.format(e)) msg.critical('Stack trace is...') msg.critical(traceback.format_exc(None)) @@ -84,7 +86,7 @@ def stdTrfExceptionHandler(func): trfUtils.infanticide(message=True) sys.exit(trfExit.nameToCode('TRF_UNEXPECTED_TRF_EXCEPTION')) - except Exception, e: + except Exception as e: msg.critical('Got a general exception in the outer exception handler: {0!s}'.format(e)) msg.critical('Stack trace is...') msg.critical(traceback.format_exc(None)) @@ -132,7 +134,7 @@ def sigUsrStackTrace(func): def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=None): import traceback - 
import Queue + import queue import multiprocessing as mp from sys import exc_info @@ -155,7 +157,7 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N exc0=exc_info()[0] exc1=exc_info()[1] exc2=traceback.format_exc() - msg.warning('In time limited function %s an exception occurred' % (func.func_name)) + msg.warning('In time limited function %s an exception occurred' % (func.__name__)) msg.warning('Original traceback:') msg.warning(exc2) queue.put((False,(exc0, exc1, exc2))) @@ -185,7 +187,7 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N n=0 while n<=lretry: - msg.info('Try %i out of %i (time limit %s s) to call %s.' % (n+1, retry+1, ltimeout, func.func_name)) + msg.info('Try %i out of %i (time limit %s s) to call %s.' % (n+1, retry+1, ltimeout, func.__name__)) starttime = time.time() q=mp.Queue(maxsize=1) nargs = (q,) + args @@ -199,10 +201,10 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N if flag: return result else: - msg.warning('But an exception occurred in function %s.' % (func.func_name)) + msg.warning('But an exception occurred in function %s.' % (func.__name__)) msg.warning('Returning default return code %s.' % ldefaultrc) return ldefaultrc - except Queue.Empty: + except queue.Empty: # Our function did not run in time - kill increase timeout msg.warning('Timeout limit of %d s reached. Kill subprocess and its children.' % ltimeout) parent=proc.pid @@ -222,7 +224,7 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N raise TransformInternalException(trfExit.nameToCode("TRF_EXTERNAL"), errMsg) msg.warning('All %i tries failed!' % n) - raise TransformTimeoutException(trfExit.nameToCode('TRF_EXEC_TIMEOUT'), 'Timeout in function %s' % (func.func_name)) + raise TransformTimeoutException(trfExit.nameToCode('TRF_EXEC_TIMEOUT'), 'Timeout in function %s' % (func.__name__)) return funcWithTimeout diff --git a/Tools/PyJobTransforms/python/trfEnv.py b/Tools/PyJobTransforms/python/trfEnv.py index 432cf61b963..f3fa4eb7b88 100644 --- a/Tools/PyJobTransforms/python/trfEnv.py +++ b/Tools/PyJobTransforms/python/trfEnv.py @@ -1,3 +1,6 @@ +from future.utils import iteritems + +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @Package PyJobTransforms.trfEnv @@ -32,7 +35,7 @@ class environmentUpdate(object): # If imf=True/False then follow its lead, but otherwise try to detect the release # and enable if we have a release >= 17.7 if 'imf' in argdict: - if argdict['imf'].returnMyValue(name=name, substep=substep) == False: + if argdict['imf'].returnMyValue(name=name, substep=substep) is False: msg.info('Skipping inclusion of imf libraries: --imf is set to False') else: msg.info('Enabling inclusion of imf libraries: --imf is set to True') @@ -96,7 +99,7 @@ class environmentUpdate(object): ## @brief Return a list of KEY=VALUE pairs for this environment @property def values(self): - return [ "{0}={1}".format(k, v) for k, v in self._envdict.iteritems() ] + return [ "{0}={1}".format(k, v) for k, v in iteritems(self._envdict) ] ## @brief Count the number of environment items that need to be updated @property diff --git a/Tools/PyJobTransforms/python/trfExe.py b/Tools/PyJobTransforms/python/trfExe.py index a6b1d284ba5..e3b8ca97a09 100755 --- a/Tools/PyJobTransforms/python/trfExe.py +++ b/Tools/PyJobTransforms/python/trfExe.py @@ -1,3 +1,14 @@ +from __future__ import print_function +from future.utils import iteritems + +from 
past.builtins import basestring + +from builtins import zip +from builtins import next +from builtins import object +from builtins import range +from builtins import int + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfExe @@ -533,8 +544,8 @@ class echoExecutor(transformExecutor): msg.debug('exeStart time is {0}'.format(self._exeStart)) msg.info('Starting execution of %s' % self._name) msg.info('Transform argument dictionary now follows:') - for k, v in self.conf.argdict.iteritems(): - print "%s = %s" % (k, v) + for k, v in iteritems(self.conf.argdict): + print("%s = %s" % (k, v)) self._hasExecuted = True self._rc = 0 self._errMsg = '' @@ -612,7 +623,7 @@ class scriptExecutor(transformExecutor): elif 'TZHOME' in os.environ: msg.info('Tier-0 environment detected - enabling command echoing to stdout') self._echoOutput = True - if self._echoOutput == False: + if self._echoOutput is False: msg.info('Batch/grid running - command outputs will not be echoed. Logs for {0} are in {1}'.format(self._name, self._logFileName)) # Now setup special loggers for logging execution messages to stdout and file @@ -652,7 +663,7 @@ class scriptExecutor(transformExecutor): self._exeStart = os.times() msg.debug('exeStart time is {0}'.format(self._exeStart)) - if ('execOnly' in self.conf.argdict and self.conf.argdict['execOnly'] == True): + if ('execOnly' in self.conf.argdict and self.conf.argdict['execOnly'] is True): msg.info('execOnly flag is set - execution will now switch, replacing the transform') os.execvp(self._cmd[0], self._cmd) @@ -665,7 +676,7 @@ class scriptExecutor(transformExecutor): '--json-summary', self._memSummaryFile, '--interval', '30'] mem_proc = subprocess.Popen(memMonitorCommand, shell = False, close_fds=True) # TODO - link mem.full.current to mem.full.SUBSTEP - except Exception, e: + except Exception as e: msg.warning('Failed to spawn memory monitor for {0}: {1}'.format(self._name, e)) self._memMonitor = False @@ -693,7 +704,7 @@ class scriptExecutor(transformExecutor): while (not mem_proc.poll()) and countWait < 10: time.sleep(0.1) countWait += 1 - except OSError, UnboundLocalError: + except OSError as UnboundLocalError: pass @@ -704,7 +715,7 @@ class scriptExecutor(transformExecutor): try: memFile = open(self._memSummaryFile) self._memStats = json.load(memFile) - except Exception, e: + except Exception as e: msg.warning('Failed to load JSON memory summmary file {0}: {1}'.format(self._memSummaryFile, e)) self._memMonitor = False self._memStats = {} @@ -737,7 +748,7 @@ class scriptExecutor(transformExecutor): ## Check event counts (always do this by default) # Do this here so that all script executors have this by default (covers most use cases with events) - if 'checkEventCount' in self.conf.argdict.keys() and self.conf.argdict['checkEventCount'].returnMyValue(exe=self) is False: + if 'checkEventCount' in self.conf.argdict and self.conf.argdict['checkEventCount'].returnMyValue(exe=self) is False: msg.info('Event counting for substep {0} is skipped'.format(self.name)) else: checkcount=trfValidation.eventMatch(self) @@ -808,7 +819,7 @@ class athenaExecutor(scriptExecutor): msg.warning("Resource monitoring from PerfMon is now deprecated") # SkeletonFile can be None (disable) or a string or a list of strings - normalise it here - if type(skeletonFile) is str: + if isinstance(skeletonFile, basestring): self._skeleton = [skeletonFile] else: self._skeleton = skeletonFile @@ -930,7 +941,7 @@ class athenaExecutor(scriptExecutor): else: # Use 
a globbing strategy matchedViaGlob = False - for mtsType, mtsSize in self.conf.argdict['athenaMPMergeTargetSize'].value.iteritems(): + for mtsType, mtsSize in iteritems(self.conf.argdict['athenaMPMergeTargetSize'].value): if fnmatch(dataType, mtsType): self.conf._dataDictionary[dataType].mergeTargetSize = mtsSize * 1000000 # Convert from MB to B msg.info('Set target merge size for {0} to {1} from "{2}" glob'.format(dataType, self.conf._dataDictionary[dataType].mergeTargetSize, mtsType)) @@ -945,7 +956,7 @@ class athenaExecutor(scriptExecutor): # as soft linking can lead to problems in the PoolFileCatalog (see ATLASJT-317) for dataType in output: self.conf._dataDictionary[dataType].originalName = self.conf._dataDictionary[dataType].value[0] - if 'eventService' not in self.conf.argdict or 'eventService' in self.conf.argdict and self.conf.argdict['eventService'].value==False: + if 'eventService' not in self.conf.argdict or 'eventService' in self.conf.argdict and self.conf.argdict['eventService'].value is False: self.conf._dataDictionary[dataType].value[0] += "_000" msg.info("Updated athena output filename for {0} to {1}".format(dataType, self.conf._dataDictionary[dataType].value[0])) else: @@ -962,7 +973,7 @@ class athenaExecutor(scriptExecutor): outputFiles[dataType] = self.conf.dataDictionary[dataType] # See if we have any 'extra' file arguments - for dataType, dataArg in self.conf.dataDictionary.iteritems(): + for dataType, dataArg in iteritems(self.conf.dataDictionary): if dataArg.io == 'input' and self._name in dataArg.executor: inputFiles[dataArg.subtype] = dataArg @@ -1056,7 +1067,7 @@ class athenaExecutor(scriptExecutor): ## Our parent will check the RC for us try: super(athenaExecutor, self).validate() - except trfExceptions.TransformValidationException, e: + except trfExceptions.TransformValidationException as e: # In this case we hold this exception until the logfile has been scanned msg.error('Validation of return code failed: {0!s}'.format(e)) deferredException = e @@ -1165,7 +1176,7 @@ class athenaExecutor(scriptExecutor): msg.info('Updating athena --preloadlib option for substep {1} with: {0}'.format(self._envUpdate.value('LD_PRELOAD'), self.name)) newPreloads = ":".join(set(v.split(":")) | set(self._envUpdate.value('LD_PRELOAD').split(":"))) self.conf.argdict['athenaopts']._value[currentSubstep][i] = '--preloadlib={0}'.format(newPreloads) - except Exception, e: + except Exception as e: msg.warning('Failed to interpret athena option: {0} ({1})'.format(athArg, e)) preLoadUpdated[currentSubstep] = True break @@ -1238,36 +1249,36 @@ class athenaExecutor(scriptExecutor): ) try: with open(self._wrapperFile, 'w') as wrapper: - print >>wrapper, '#! /bin/sh' + print('#! 
/bin/sh', file=wrapper) if asetup: - print >>wrapper, "# asetup" - print >>wrapper, 'echo Sourcing {AtlasSetupDirectory}/scripts/asetup.sh {asetupStatus}'.format( + print("# asetup", file=wrapper) + print('echo Sourcing {AtlasSetupDirectory}/scripts/asetup.sh {asetupStatus}'.format( AtlasSetupDirectory = os.environ['AtlasSetup'], asetupStatus = asetup - ) - print >>wrapper, 'source {AtlasSetupDirectory}/scripts/asetup.sh {asetupStatus}'.format( + ), file=wrapper) + print('source {AtlasSetupDirectory}/scripts/asetup.sh {asetupStatus}'.format( AtlasSetupDirectory = os.environ['AtlasSetup'], asetupStatus = asetup - ) - print >>wrapper, 'if [ ${?} != "0" ]; then exit 255; fi' + ), file=wrapper) + print('if [ ${?} != "0" ]; then exit 255; fi', file=wrapper) if dbsetup: dbroot = path.dirname(dbsetup) dbversion = path.basename(dbroot) - print >>wrapper, "# DBRelease setup" - print >>wrapper, 'echo Setting up DBRelease {dbroot} environment'.format(dbroot = dbroot) - print >>wrapper, 'export DBRELEASE={dbversion}'.format(dbversion = dbversion) - print >>wrapper, 'export CORAL_AUTH_PATH={directory}'.format(directory = path.join(dbroot, 'XMLConfig')) - print >>wrapper, 'export CORAL_DBLOOKUP_PATH={directory}'.format(directory = path.join(dbroot, 'XMLConfig')) - print >>wrapper, 'export TNS_ADMIN={directory}'.format(directory = path.join(dbroot, 'oracle-admin')) - print >>wrapper, 'DATAPATH={dbroot}:$DATAPATH'.format(dbroot = dbroot) + print("# DBRelease setup", file=wrapper) + print('echo Setting up DBRelease {dbroot} environment'.format(dbroot = dbroot), file=wrapper) + print('export DBRELEASE={dbversion}'.format(dbversion = dbversion), file=wrapper) + print('export CORAL_AUTH_PATH={directory}'.format(directory = path.join(dbroot, 'XMLConfig')), file=wrapper) + print('export CORAL_DBLOOKUP_PATH={directory}'.format(directory = path.join(dbroot, 'XMLConfig')), file=wrapper) + print('export TNS_ADMIN={directory}'.format(directory = path.join(dbroot, 'oracle-admin')), file=wrapper) + print('DATAPATH={dbroot}:$DATAPATH'.format(dbroot = dbroot), file=wrapper) if self._disableMP: - print >>wrapper, "# AthenaMP explicitly disabled for this executor" - print >>wrapper, "export ATHENA_PROC_NUMBER=0" + print("# AthenaMP explicitly disabled for this executor", file=wrapper) + print("export ATHENA_PROC_NUMBER=0", file=wrapper) if self._envUpdate.len > 0: - print >>wrapper, "# Customised environment" + print("# Customised environment", file=wrapper) for envSetting in self._envUpdate.values: if not envSetting.startswith('LD_PRELOAD'): - print >>wrapper, "export", envSetting + print("export", envSetting, file=wrapper) # If Valgrind is engaged, a serialised Athena configuration file # is generated for use with a subsequent run of Athena with # Valgrind. @@ -1279,8 +1290,8 @@ class athenaExecutor(scriptExecutor): name = self._name ) # Run Athena for generation of its serialised configuration. - print >>wrapper, ' '.join(self._cmd), "--config-only={0}".format(AthenaSerialisedConfigurationFile) - print >>wrapper, 'if [ $? != "0" ]; then exit 255; fi' + print(' '.join(self._cmd), "--config-only={0}".format(AthenaSerialisedConfigurationFile), file=wrapper) + print('if [ $? != "0" ]; then exit 255; fi', file=wrapper) # Generate a Valgrind command, suppressing or ussing default # options as requested and extra options as requested. 
if 'valgrindDefaultOpts' in self.conf._argdict: @@ -1300,12 +1311,12 @@ class athenaExecutor(scriptExecutor): AthenaSerialisedConfigurationFile ) msg.debug("Valgrind command: {command}".format(command = command)) - print >>wrapper, command + print(command, file=wrapper) else: msg.info('Valgrind not engaged') # run Athena command - print >>wrapper, ' '.join(self._cmd) - os.chmod(self._wrapperFile, 0755) + print(' '.join(self._cmd), file=wrapper) + os.chmod(self._wrapperFile, 0o755) except (IOError, OSError) as e: errMsg = 'error writing athena wrapper {fileName}: {error}'.format( fileName = self._wrapperFile, @@ -1336,7 +1347,7 @@ class athenaExecutor(scriptExecutor): currentMergeSize = 0 for fname in fileArg.value: size = fileArg.getSingleMetadata(fname, 'file_size') - if type(size) not in (int, long): + if not isinstance(size, int): msg.warning('File size metadata for {0} was not correct, found type {1}. Aborting merge attempts.'.format(fileArg, type(size))) return # if there is no file in the job, then we must add it @@ -1376,7 +1387,7 @@ class athenaExecutor(scriptExecutor): mergeNames.append(mergeName) counter += 1 # Now actually do the merges - for targetName, mergeGroup, counter in zip(mergeNames, mergeCandidates, range(len(mergeNames))): + for targetName, mergeGroup, counter in zip(mergeNames, mergeCandidates, list(range(len(mergeNames)))): msg.info('Want to merge files {0} to {1}'.format(mergeGroup, targetName)) if len(mergeGroup) <= 1: msg.info('Skip merging for single file') @@ -1415,7 +1426,7 @@ class optionalAthenaExecutor(athenaExecutor): self.setValStart() try: super(optionalAthenaExecutor, self).validate() - except trfExceptions.TransformValidationException, e: + except trfExceptions.TransformValidationException as e: # In this case we hold this exception until the logfile has been scanned msg.warning('Validation failed for {0}: {1}'.format(self._name, e)) self._isValidated = False @@ -1636,7 +1647,7 @@ class DQMergeExecutor(scriptExecutor): for dataType in input: for fname in self.conf.dataDictionary[dataType].value: self.conf.dataDictionary[dataType]._getNumberOfEvents([fname]) - print >>DQMergeFile, fname + print(fname, file=DQMergeFile) self._cmd.append(self._histMergeList) @@ -1753,7 +1764,7 @@ class bsMergeExecutor(scriptExecutor): with open(self._mergeBSFileList, 'w') as BSFileList: for fname in self.conf.dataDictionary[self._inputBS].value: if fname not in self._maskedFiles: - print >>BSFileList, fname + print(fname, file=BSFileList) except (IOError, OSError) as e: errMsg = 'Got an error when writing list of BS files to {0}: {1}'.format(self._mergeBSFileList, e) msg.error(errMsg) @@ -1764,7 +1775,7 @@ class bsMergeExecutor(scriptExecutor): if self._outputFilename.endswith('._0001.data'): self._doRename = False self._outputFilename = self._outputFilename.split('._0001.data')[0] - elif self.conf.argdict['allowRename'].value == True: + elif self.conf.argdict['allowRename'].value is True: # OK, non-fatal, we go for a renaming msg.info('Output filename does not end in "._0001.data" will proceed, but be aware that the internal filename metadata will be wrong') self._doRename = True @@ -1802,7 +1813,7 @@ class bsMergeExecutor(scriptExecutor): msg.info('Renaming {0} to {1}'.format(self._expectedOutput, self.conf.dataDictionary[self._outputBS].value[0])) try: os.rename(self._outputFilename + '._0001.data', self.conf.dataDictionary[self._outputBS].value[0]) - except OSError, e: + except OSError as e: raise 
trfExceptions.TransformExecutionException(trfExit.nameToCode('TRF_OUTPUT_FILE_ERROR'), 'Exception raised when renaming {0} to {1}: {2}'.format(self._outputFilename, self.conf.dataDictionary[self._outputBS].value[0], e)) super(bsMergeExecutor, self).postExecute() @@ -1867,39 +1878,39 @@ class archiveExecutor(scriptExecutor): self._cmd = ['python'] try: with open('zip_wrapper.py', 'w') as zip_wrapper: - print >> zip_wrapper, "import zipfile, os, shutil" + print("import zipfile, os, shutil", file=zip_wrapper) if os.path.exists(self.conf.argdict['outputArchFile'].value[0]): #appending input file(s) to existing archive - print >> zip_wrapper, "zf = zipfile.ZipFile('{}', mode='a', allowZip64=True)".format(self.conf.argdict['outputArchFile'].value[0]) + print("zf = zipfile.ZipFile('{}', mode='a', allowZip64=True)".format(self.conf.argdict['outputArchFile'].value[0]), file=zip_wrapper) else: #creating new archive - print >> zip_wrapper, "zf = zipfile.ZipFile('{}', mode='w', allowZip64=True)".format(self.conf.argdict['outputArchFile'].value[0]) - print >> zip_wrapper, "for f in {}:".format(self.conf.argdict['inputDataFile'].value) + print("zf = zipfile.ZipFile('{}', mode='w', allowZip64=True)".format(self.conf.argdict['outputArchFile'].value[0]), file=zip_wrapper) + print("for f in {}:".format(self.conf.argdict['inputDataFile'].value), file=zip_wrapper) #This module gives false positives (as of python 3.7.0). Will also check the name for ".zip" #print >> zip_wrapper, " if zipfile.is_zipfile(f):" - print >> zip_wrapper, " if zipfile.is_zipfile(f) and '.zip' in f:" - print >> zip_wrapper, " archive = zipfile.ZipFile(f, mode='r')" - print >> zip_wrapper, " print 'Extracting input zip file {0} to temporary directory {1}'.format(f,'tmp')" - print >> zip_wrapper, " archive.extractall('tmp')" - print >> zip_wrapper, " archive.close()" + print(" if zipfile.is_zipfile(f) and '.zip' in f:", file=zip_wrapper) + print(" archive = zipfile.ZipFile(f, mode='r')", file=zip_wrapper) + print(" print 'Extracting input zip file {0} to temporary directory {1}'.format(f,'tmp')", file=zip_wrapper) + print(" archive.extractall('tmp')", file=zip_wrapper) + print(" archive.close()", file=zip_wrapper) # remove stuff as soon as it is saved to output in order to save disk space at worker node - print >> zip_wrapper, " if os.access(f, os.F_OK):" - print >> zip_wrapper, " print 'Removing input zip file {}'.format(f)" - print >> zip_wrapper, " os.unlink(f)" - print >> zip_wrapper, " if os.path.isdir('tmp'):" - print >> zip_wrapper, " for root, dirs, files in os.walk('tmp'):" - print >> zip_wrapper, " for name in files:" - print >> zip_wrapper, " print 'Zipping {}'.format(name)" - print >> zip_wrapper, " zf.write(os.path.join(root, name), name, compress_type=zipfile.ZIP_STORED)" - print >> zip_wrapper, " shutil.rmtree('tmp')" - print >> zip_wrapper, " else:" - print >> zip_wrapper, " print 'Zipping {}'.format(os.path.basename(f))" - print >> zip_wrapper, " zf.write(f, arcname=os.path.basename(f), compress_type=zipfile.ZIP_STORED)" - print >> zip_wrapper, " if os.access(f, os.F_OK):" - print >> zip_wrapper, " print 'Removing input file {}'.format(f)" - print >> zip_wrapper, " os.unlink(f)" - print >> zip_wrapper, "zf.close()" - os.chmod('zip_wrapper.py', 0755) + print(" if os.access(f, os.F_OK):", file=zip_wrapper) + print(" print 'Removing input zip file {}'.format(f)", file=zip_wrapper) + print(" os.unlink(f)", file=zip_wrapper) + print(" if os.path.isdir('tmp'):", file=zip_wrapper) + print(" for root, dirs, files in 
os.walk('tmp'):", file=zip_wrapper) + print(" for name in files:", file=zip_wrapper) + print(" print 'Zipping {}'.format(name)", file=zip_wrapper) + print(" zf.write(os.path.join(root, name), name, compress_type=zipfile.ZIP_STORED)", file=zip_wrapper) + print(" shutil.rmtree('tmp')", file=zip_wrapper) + print(" else:", file=zip_wrapper) + print(" print 'Zipping {}'.format(os.path.basename(f))", file=zip_wrapper) + print(" zf.write(f, arcname=os.path.basename(f), compress_type=zipfile.ZIP_STORED)", file=zip_wrapper) + print(" if os.access(f, os.F_OK):", file=zip_wrapper) + print(" print 'Removing input file {}'.format(f)", file=zip_wrapper) + print(" os.unlink(f)", file=zip_wrapper) + print("zf.close()", file=zip_wrapper) + os.chmod('zip_wrapper.py', 0o755) except (IOError, OSError) as e: errMsg = 'error writing zip wrapper {fileName}: {error}'.format(fileName = 'zip_wrapper.py', error = e @@ -1920,14 +1931,14 @@ class archiveExecutor(scriptExecutor): self._cmd = ['python'] try: with open('unarchive_wrapper.py', 'w') as unarchive_wrapper: - print >> unarchive_wrapper, "import zipfile" - print >> unarchive_wrapper, "for f in {}:".format(self.conf.argdict['inputArchFile'].value) - print >> unarchive_wrapper, " archive = zipfile.ZipFile(f, mode='r')" - print >> unarchive_wrapper, " path = '{}'".format(self.conf.argdict['path']) - print >> unarchive_wrapper, " print 'Extracting archive {0} to {1}'.format(f,path)" - print >> unarchive_wrapper, " archive.extractall(path)" - print >> unarchive_wrapper, " archive.close()" - os.chmod('unarchive_wrapper.py', 0755) + print("import zipfile", file=unarchive_wrapper) + print("for f in {}:".format(self.conf.argdict['inputArchFile'].value), file=unarchive_wrapper) + print(" archive = zipfile.ZipFile(f, mode='r')", file=unarchive_wrapper) + print(" path = '{}'".format(self.conf.argdict['path']), file=unarchive_wrapper) + print(" print 'Extracting archive {0} to {1}'.format(f,path)", file=unarchive_wrapper) + print(" archive.extractall(path)", file=unarchive_wrapper) + print(" archive.close()", file=unarchive_wrapper) + os.chmod('unarchive_wrapper.py', 0o755) except (IOError, OSError) as e: errMsg = 'error writing unarchive wrapper {fileName}: {error}'.format(fileName = 'unarchive_wrapper.py', error = e diff --git a/Tools/PyJobTransforms/python/trfExitCodes.py b/Tools/PyJobTransforms/python/trfExitCodes.py index 95238a2cd8a..a21a19497b4 100644 --- a/Tools/PyJobTransforms/python/trfExitCodes.py +++ b/Tools/PyJobTransforms/python/trfExitCodes.py @@ -1,3 +1,4 @@ +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfExitCodes diff --git a/Tools/PyJobTransforms/python/trfFileUtils-lite.py b/Tools/PyJobTransforms/python/trfFileUtils-lite.py index e65ab93f53a..e0b38527041 100644 --- a/Tools/PyJobTransforms/python/trfFileUtils-lite.py +++ b/Tools/PyJobTransforms/python/trfFileUtils-lite.py @@ -1,3 +1,7 @@ +from past.builtins import basestring + +from builtins import zip +from builtins import range # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfFileUtils @@ -40,7 +44,7 @@ def AthenaFileInfo(fileNames, retrieveKeys = athFileInterestingKeys): AthFile.server.flush_cache() AthFile.server.disable_pers_cache() - if isinstance(fileNames, str): + if isinstance(fileNames, basestring): fileNames = [fileNames,] metaDict = {} @@ -63,7 +67,7 @@ def AthenaFileInfo(fileNames, retrieveKeys = athFileInterestingKeys): msg.debug('Looking for key 
{0}'.format(key)) try: # AODFix is tricky... it is absent in many files, but this is not an error - if key is 'AODFixVersion': + if key == 'AODFixVersion': if 'tag_info' in meta.infos and isinstance('tag_info', dict) and 'AODFixVersion' in meta.infos['tag_info']: metaDict[fname][key] = meta.infos['tag_info'][key] else: @@ -72,15 +76,15 @@ def AthenaFileInfo(fileNames, retrieveKeys = athFileInterestingKeys): # So we use the same scheme as AutoConfiguration does, mapping project names to known values # It would be nice to import this all from AutoConfiguration, but there is no suitable method at the moment. # N.B. This is under discussion so this code is temporary fix (Captain's Log, Stardate 2012-11-28) - elif key is 'beam_type': + elif key == 'beam_type': try: if isinstance(meta.infos[key], list) and len(meta.infos[key]) > 0 and meta.infos[key][0] in ('cosmics' ,'singlebeam','collisions'): metaDict[fname][key] = meta.infos[key] else: from RecExConfig.AutoConfiguration import KnownCosmicsProjects, Known1BeamProjects, KnownCollisionsProjects, KnownHeavyIonProjects - if 'bs_metadata' in meta.infos.keys() and isinstance(meta.infos['bs_metadata'], dict) and 'Project' in meta.infos['bs_metadata'].keys(): + if 'bs_metadata' in meta.infos and isinstance(meta.infos['bs_metadata'], dict) and 'Project' in meta.infos['bs_metadata']: project = meta.infos['bs_metadata']['Project'] - elif 'tag_info' in meta.infos.keys() and isinstance(meta.infos['tag_info'], dict) and 'project_name' in meta.infos['tag_info'].keys(): + elif 'tag_info' in meta.infos and isinstance(meta.infos['tag_info'], dict) and 'project_name' in meta.infos['tag_info']: project = meta.infos['tag_info']['project_name'] else: msg.info('AthFile beam_type was not a known value ({0}) and no project could be found for this file'.format(meta.infos[key])) @@ -98,16 +102,16 @@ def AthenaFileInfo(fileNames, retrieveKeys = athFileInterestingKeys): # Erm, so we don't know msg.info('AthFile beam_type was not a known value ({0}) and the file\'s project ({1}) did not map to a known beam type using AutoConfiguration'.format(meta.infos[key], project)) metaDict[fname][key] = meta.infos[key] - except Exception, e: + except Exception as e: msg.error('Got an exception while trying to determine beam_type: {0}'.format(e)) metaDict[fname][key] = meta.infos[key] else: metaDict[fname][key] = meta.infos[key] except KeyError: msg.warning('Missing key in athFile info: {0}'.format(key)) - msg.debug('Found these metadata for {0}: {1}'.format(fname, metaDict[fname].keys())) + msg.debug('Found these metadata for {0}: {1}'.format(fname, list(metaDict[fname]))) return metaDict - except ValueError, e: + except ValueError as e: msg.error('Problem in getting AthFile metadata for {0}'.format(fileNames)) return None @@ -140,7 +144,7 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys msg.debug('Looking for key {0}'.format(key)) try: # AODFix is tricky... it is absent in many files, but this is not an error - if key is 'AODFixVersion': + if key == 'AODFixVersion': if 'tag_info' in meta and isinstance('tag_info', dict) and 'AODFixVersion' in meta['tag_info']: metaDict[filename][key] = meta['tag_info'][key] else: @@ -149,15 +153,15 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys # So we use the same scheme as AutoConfiguration does, mapping project names to known values # It would be nice to import this all from AutoConfiguration, but there is no suitable method at the moment. # N.B. 
This is under discussion so this code is temporary fix (Captain's Log, Stardate 2012.11.28) - elif key is 'beam_type': + elif key == 'beam_type': try: if isinstance(meta[key], list) and len(meta[key]) > 0 and meta[key][0] in ('cosmics' ,'singlebeam','collisions'): metaDict[filename][key] = meta[key] else: from RecExConfig.AutoConfiguration import KnownCosmicsProjects, Known1BeamProjects, KnownCollisionsProjects, KnownHeavyIonProjects - if 'bs_metadata' in meta.keys() and isinstance(meta['bs_metadata'], dict) and 'Project' in meta['bs_metadata'].keys(): + if 'bs_metadata' in meta and isinstance(meta['bs_metadata'], dict) and 'Project' in meta['bs_metadata']: project = meta['bs_metadata']['Project'] - elif 'tag_info' in meta.keys() and isinstance(meta['tag_info'], dict) and 'project_name' in meta['tag_info'].keys(): + elif 'tag_info' in meta and isinstance(meta['tag_info'], dict) and 'project_name' in meta['tag_info']: project = meta['tag_info']['project_name'] else: msg.info('AthFile beam_type was not a known value ({0}) and no project could be found for this file'.format(meta[key])) @@ -175,10 +179,10 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys # Erm, so we don't know msg.info('AthFile beam_type was not a known value ({0}) and the file\'s project ({1}) did not map to a known beam type using AutoConfiguration'.format(meta[key], project)) metaDict[filename][key] = meta[key] - except Exception, e: + except Exception as e: msg.error('Got an exception while trying to determine beam_type: {0}'.format(e)) metaDict[filename][key] = meta[key] - elif key is 'G4Version': + elif key == 'G4Version': msg.debug('Searching for G4Version in metadata') try: metaDict[filename][key] = meta['metadata']['/Simulation/Parameters']['G4Version'] @@ -221,7 +225,7 @@ def HISTEntries(fileName): name=key.GetName() - if name.startswith('run_') and name is not 'run_multiple': + if name.startswith('run_') and name != 'run_multiple': if rundir is not None: msg.warning('Found two run_ directories in HIST file %s: %s and %s' % ( fileName, rundir, name) ) @@ -267,7 +271,7 @@ def HISTEntries(fileName): nBinsX = h.GetNbinsX() nevLoc = 0 - for i in xrange(1, nBinsX): + for i in range(1, nBinsX): if h[i] < 0: msg.warning( 'Negative number of events for step %s in HIST file %s.' 
%( h.GetXaxis().GetBinLabel(i), fileName ) ) diff --git a/Tools/PyJobTransforms/python/trfFileUtils.py b/Tools/PyJobTransforms/python/trfFileUtils.py index b76fba42e41..92304f0e94c 100644 --- a/Tools/PyJobTransforms/python/trfFileUtils.py +++ b/Tools/PyJobTransforms/python/trfFileUtils.py @@ -1,3 +1,5 @@ + +from builtins import range # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfFileUtils @@ -45,7 +47,7 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys for key in retrieveKeys: msg.debug('Looking for key {0}'.format(key)) try: - if key is 'G4Version': + if key == 'G4Version': msg.debug('Searching for G4Version in metadata') try: metaDict[filename][key] = meta['metadata']['/Simulation/Parameters']['G4Version'] @@ -88,7 +90,7 @@ def HISTEntries(fileName): name=key.GetName() - if name.startswith('run_') and name is not 'run_multiple': + if name.startswith('run_') and name != 'run_multiple': if rundir is not None: msg.warning('Found two run_ directories in HIST file %s: %s and %s' % ( fileName, rundir, name) ) @@ -134,7 +136,7 @@ def HISTEntries(fileName): nBinsX = h.GetNbinsX() nevLoc = 0 - for i in xrange(1, nBinsX): + for i in range(1, nBinsX): if h[i] < 0: msg.warning( 'Negative number of events for step %s in HIST file %s.' %( h.GetXaxis().GetBinLabel(i), fileName ) ) diff --git a/Tools/PyJobTransforms/python/trfFileValidationFunctions.py b/Tools/PyJobTransforms/python/trfFileValidationFunctions.py index 1e9626a0bc0..ef33bdce375 100644 --- a/Tools/PyJobTransforms/python/trfFileValidationFunctions.py +++ b/Tools/PyJobTransforms/python/trfFileValidationFunctions.py @@ -1,3 +1,4 @@ + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfFileValidationFunctions diff --git a/Tools/PyJobTransforms/python/trfGraph.py b/Tools/PyJobTransforms/python/trfGraph.py index 45bb0326cc0..d09029facb6 100644 --- a/Tools/PyJobTransforms/python/trfGraph.py +++ b/Tools/PyJobTransforms/python/trfGraph.py @@ -1,3 +1,10 @@ +from future.utils import iteritems +from future.utils import itervalues + + +from builtins import int +from builtins import object + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfGraph @@ -57,7 +64,7 @@ class executorGraph(object): # Single executor - in this case inData/outData is not mandatory, so we set them to the # input/output data of the transform executor = list(executorSet)[0] - if len(executor._inData) is 0 and len(executor._outData) is 0: + if len(executor._inData) == 0 and len(executor._outData) == 0: executor.inData = inputData executor.outData = outputData @@ -80,7 +87,7 @@ class executorGraph(object): # nodes for any intermediate data end nodes as well pseudoNodes = dict() pseudoNodes['_start'] = graphNode(name='_start', inData=[], outData=self._inputData, weight = 0) - for node in self._nodeDict.itervalues(): + for node in itervalues(self._nodeDict): for dataType in node.outputDataTypes: endNodeName = '_end_{0}'.format(dataType) pseudoNodes[endNodeName] = graphNode(name=endNodeName, inData=[dataType], outData=[], weight = 0) @@ -147,16 +154,16 @@ class executorGraph(object): def _resetConnections(self): - for node in self._nodeDict.itervalues(): + for node in itervalues(self._nodeDict): node.resetConnections() ## @brief Look at executor nodes and work out how they are connected # @note Anything better than n^2? 
Should be ok for our low numbers of nodes, but could be optimised def findConnections(self): self._resetConnections() - for nodeNameA, nodeA in self._nodeDict.iteritems(): - for nodeNameB, nodeB in self._nodeDict.iteritems(): - if nodeNameA is nodeNameB: + for nodeNameA, nodeA in iteritems(self._nodeDict): + for nodeNameB, nodeB in iteritems(self._nodeDict): + if nodeNameA == nodeNameB: continue dataIntersection = list(set(nodeA.outputDataTypes) & set(nodeB.inputDataTypes)) msg.debug('Data connections between {0} and {1}: {2}'.format(nodeNameA, nodeNameB, dataIntersection)) @@ -174,11 +181,11 @@ class executorGraph(object): graphCopy = copy.deepcopy(self._nodeDict) # Find all valid start nodes in this graph - ones with no data dependencies themselves startNodeNames = [] - for nodeName, node in graphCopy.iteritems(): + for nodeName, node in iteritems(graphCopy): if len(node.connections['in']) == 0: startNodeNames.append(nodeName) - if len(startNodeNames) is 0: + if len(startNodeNames) == 0: raise trfExceptions.TransformGraphException(trfExit.nameToCode('TRF_GRAPH_ERROR'), 'There are no starting nodes in this graph - non-DAG graphs are not supported') @@ -203,7 +210,7 @@ class executorGraph(object): # If there are nodes left then the graph has cycles, which means it's not a DAG if len(graphCopy) > 0: raise trfExceptions.TransformGraphException(trfExit.nameToCode('TRF_GRAPH_ERROR'), - 'Graph topological sort had no more start nodes, but nodes were left {0} - non-DAG graphs are not supported'.format(graphCopy.keys())) + 'Graph topological sort had no more start nodes, but nodes were left {0} - non-DAG graphs are not supported'.format(list(graphCopy))) msg.debug('Topologically sorted node order: {0}'.format(self._toposort)) @@ -228,7 +235,7 @@ class executorGraph(object): def findExecutionPath(self): # Switch off all nodes, except if we have a single node which is not data driven... 
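Note: the trfGraph hunks above bundle three distinct Python 3 issues: dict.iteritems()/itervalues() no longer exist, dict.keys() returns a view that cannot be indexed, and identity tests ('is') on strings only appear to work because of CPython interning. A minimal sketch of the portable forms the patch adopts (names are illustrative, not taken from the code):

    # Python 3; comments note the Python 2 idioms being replaced.
    connections = {'nodeA': 1, 'nodeB': 2}

    # Python 2 wrote connections.keys()[0]; a dict view is not subscriptable,
    # hence the patch's list(connections)[0].
    first_node = list(connections)[0]

    # Python 2 wrote connections.iteritems(); future.utils.iteritems() gives
    # the same effect on both versions, and plain items() also works here.
    for name, weight in connections.items():
        pass

    # 'is' compares object identity, not value; it can pass for short interned
    # literals and then fail for equal strings built at runtime, which is why
    # "nodeNameA is nodeNameB" becomes "==".
    built = ''.join(['node', 'A'])
    assert built == 'nodeA'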
self._execution = {} - for nodeName, node in self._nodeDict.iteritems(): + for nodeName, node in iteritems(self._nodeDict): if len(self._nodeDict) == 1 and node.inputDataTypes == set() and node.inputDataTypes == set(): self._execution[nodeName] = {'enabled' : True, 'input' : set(), 'output' : set()} else: @@ -273,13 +280,13 @@ class executorGraph(object): if nextNodeName in bestPath.extraData: self._execution[nextNodeName]['input'].update(bestPath.extraData[nodeName]) # Add any extra data we need (from multi-exit nodes) to the data to produce list - for extraNodeData in bestPath.extraData.itervalues(): + for extraNodeData in itervalues(bestPath.extraData): for extra in extraNodeData: if extra not in dataAvailable: dataToProduce.update([extra]) # Now remove the fake data objects from activated nodes - for node, props in self._execution.iteritems(): + for node, props in iteritems(self._execution): msg.debug('Removing fake data from node {0}'.format(node)) props['input'] -= set(['inNULL', 'outNULL']) props['output'] -= set(['inNULL', 'outNULL']) @@ -314,41 +321,41 @@ class executorGraph(object): msg.debug('Started path finding with seed path {0}'.format(pathSet[0])) # Halting condition - only one path and its first element is startNodeName - while len(pathSet) > 1 or pathSet[0].path[0] is not startNodeName: + while len(pathSet) > 1 or pathSet[0].path[0] != startNodeName: msg.debug('Starting best path iteration with {0} paths in {1}'.format(len(pathSet), pathSet)) # Copy the pathSet to do this, as we will update it for path in pathSet[:]: msg.debug('Continuing path finding with path {0}'.format(path)) currentNodeName = path.path[0] - if currentNodeName is startNodeName: + if currentNodeName == startNodeName: msg.debug('Path {0} has reached the start node - finished'.format(path)) continue # If there are no paths out of this node then it's a dead end - kill it - if len(self._nodeDict[currentNodeName].connections['in']) is 0: + if len(self._nodeDict[currentNodeName].connections['in']) == 0: msg.debug('Path {0} is a dead end - removing'.format(path)) pathSet.remove(path) continue # If there is only one path out of this node, we extend it - if len(self._nodeDict[currentNodeName].connections['in']) is 1: - msg.debug('Single exit from path {0} - adding connection to {1}'.format(path, self._nodeDict[currentNodeName].connections['in'].keys()[0])) - self._extendPath(path, currentNodeName, self._nodeDict[currentNodeName].connections['in'].keys()[0]) + if len(self._nodeDict[currentNodeName].connections['in']) == 1: + msg.debug('Single exit from path {0} - adding connection to {1}'.format(path, list(self._nodeDict[currentNodeName].connections['in'])[0])) + self._extendPath(path, currentNodeName, list(self._nodeDict[currentNodeName].connections['in'])[0]) continue # Else we need to clone the path for each possible exit msg.debug('Multiple exits from path {0} - will clone for each extra exit'.format([path])) - for nextNodeName in self._nodeDict[currentNodeName].connections['in'].keys()[1:]: + for nextNodeName in list(self._nodeDict[currentNodeName].connections['in'])[1:]: newPath = copy.deepcopy(path) msg.debug('Cloned exit from path {0} to {1}'.format(newPath, nextNodeName)) self._extendPath(newPath, currentNodeName, nextNodeName) pathSet.append(newPath) # Finally, use the original path to extend along the first node exit - msg.debug('Adding exit from original path {0} to {1}'.format(path, self._nodeDict[currentNodeName].connections['in'].keys()[0])) - self._extendPath(path, currentNodeName, 
self._nodeDict[currentNodeName].connections['in'].keys()[0]) + msg.debug('Adding exit from original path {0} to {1}'.format(path, list(self._nodeDict[currentNodeName].connections['in'])[0])) + self._extendPath(path, currentNodeName, list(self._nodeDict[currentNodeName].connections['in'])[0]) # Now compare paths which made it to the end - only keep the shortest lowestCostPath = None for path in pathSet[:]: currentNodeName = path.path[0] - if currentNodeName is startNodeName: + if currentNodeName == startNodeName: if lowestCostPath is None: lowestCostPath = path continue @@ -405,7 +412,7 @@ class executorGraph(object): if len(self._toposort) > 0: nodeNames = self._toposort else: - nodeNames = self._nodeDict.keys() + nodeNames = list(self._nodeDict) nodeNames.sort() for nodeName in nodeNames: if not nodeName.startswith('_'): @@ -419,7 +426,7 @@ class executorGraph(object): if len(self._toposort) > 0: nodeNames = self._toposort else: - nodeNames = self._nodeDict.keys() + nodeNames = list(self._nodeDict) nodeNames.sort() for nodeName in nodeNames: nodeStrList.append(repr(self._nodeDict[nodeName])) diff --git a/Tools/PyJobTransforms/python/trfJobOptions.py b/Tools/PyJobTransforms/python/trfJobOptions.py index d947e4dbce4..85002ca81f3 100644 --- a/Tools/PyJobTransforms/python/trfJobOptions.py +++ b/Tools/PyJobTransforms/python/trfJobOptions.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from future.utils import iteritems +from builtins import object # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfJobOptions @@ -57,24 +60,24 @@ class JobOptionsTemplate(object): with open(self._runArgsFile, 'w') as runargsFile: try: # First write a little header - print >>runargsFile, os.linesep.join(("# Run arguments file auto-generated on {0} by:".format(time.asctime()), + print(os.linesep.join(("# Run arguments file auto-generated on {0} by:".format(time.asctime()), "# JobTransform: {0}".format(self._exe.name), "# Version: {0}".format(self._version) - )) + )), file=runargsFile) # Now make sure we import the runArgs class for out job options - print >>runargsFile, os.linesep.join(("# Import runArgs class", + print(os.linesep.join(("# Import runArgs class", "from PyJobTransforms.trfJobOptions import RunArguments", "{0} = RunArguments()".format(self._runArgsName) - )) + )), file=runargsFile) # Handy to write the substep name here as it can be used as (part of) a random seed # in some cases - print >>runargsFile, '{0}.trfSubstepName = {1!r}'.format(self._runArgsName, self._exe.name), os.linesep + print('{0}.trfSubstepName = {1!r}'.format(self._runArgsName, self._exe.name), os.linesep, file=runargsFile) # Now loop over the core argdict and see what needs to be given as a runArg declaredRunargs = [] - for k, v in self._exe.conf.argdict.iteritems(): + for k, v in iteritems(self._exe.conf.argdict): # Check if this arg is supposed to be in runArgs if isinstance(v, trfArgClasses.argument) and v.isRunarg: # Files handled later @@ -89,11 +92,11 @@ class JobOptionsTemplate(object): if isinstance(v, trfArgClasses.argSubstep): myValue = v.returnMyValue(exe = self._exe) if myValue is not None: - print >>runargsFile, "{0}.{1!s} = {2!r}".format(self._runArgsName, k, myValue) + print("{0}.{1!s} = {2!r}".format(self._runArgsName, k, myValue), file=runargsFile) msg.debug('Added substep type argument {0} as: {1}'.format(k, myValue)) declaredRunargs.append(k) else: - print >>runargsFile, "{0}.{1!s} = {2!r}".format(self._runArgsName, k, v.value) + print("{0}.{1!s} = 
{2!r}".format(self._runArgsName, k, v.value), file=runargsFile) declaredRunargs.append(k) else: msg.debug('Argument {0} is not a runarg - ignored'.format(k)) @@ -101,93 +104,93 @@ class JobOptionsTemplate(object): # Now make sure that if we did not add maxEvents then we set this to -1, which # avoids some strange defaults that only allow 5 events to be processed if 'maxEvents' not in declaredRunargs: - print >>runargsFile, os.linesep.join(("", "# Explicitly added to process all events in this step", + print(os.linesep.join(("", "# Explicitly added to process all events in this step", "{0}.maxEvents = -1".format(self._runArgsName), - )) + )), file=runargsFile) # Now deal with our input and output files - print >>runargsFile, os.linesep, "# Input data" - for dataType, dataArg in input.iteritems(): - print >>runargsFile, '{0}.input{1}File = {2!r}'.format(self._runArgsName, dataType, dataArg.value) - print >>runargsFile, '{0}.input{1}FileType = {2!r}'.format(self._runArgsName, dataType, dataArg.type) + print(os.linesep, "# Input data", file=runargsFile) + for dataType, dataArg in iteritems(input): + print('{0}.input{1}File = {2!r}'.format(self._runArgsName, dataType, dataArg.value), file=runargsFile) + print('{0}.input{1}FileType = {2!r}'.format(self._runArgsName, dataType, dataArg.type), file=runargsFile) # Add the input event count, if we know it if dataArg.isCached(metadataKeys = ['nentries']): - print >>runargsFile, '{0}.input{1}FileNentries = {2!r}'.format(self._runArgsName, dataType, dataArg.nentries) - print >>runargsFile, "{0}.{1}FileIO = {2!r}".format(self._runArgsName, dataType, self._exe.conf.dataDictionary[dataType].io) + print('{0}.input{1}FileNentries = {2!r}'.format(self._runArgsName, dataType, dataArg.nentries), file=runargsFile) + print("{0}.{1}FileIO = {2!r}".format(self._runArgsName, dataType, self._exe.conf.dataDictionary[dataType].io), file=runargsFile) - print >>runargsFile, os.linesep, "# Output data" - for dataType, dataArg in output.iteritems(): + print(os.linesep, "# Output data", file=runargsFile) + for dataType, dataArg in iteritems(output): # Need to be careful to convert _output_ filename as a strings, not a list - print >>runargsFile, '{0}.output{1}File = {2!r}'.format(self._runArgsName, dataType, dataArg.value[0]) - print >>runargsFile, '{0}.output{1}FileType = {2!r}'.format(self._runArgsName, dataType, dataArg.type) + print('{0}.output{1}File = {2!r}'.format(self._runArgsName, dataType, dataArg.value[0]), file=runargsFile) + print('{0}.output{1}FileType = {2!r}'.format(self._runArgsName, dataType, dataArg.type), file=runargsFile) # Process all of the tweaky special runtime arguments - print >>runargsFile, os.linesep, "# Extra runargs" + print(os.linesep, "# Extra runargs", file=runargsFile) ## @note extraRunargs are passed using repr, i.e., they should be constants - for k, v in self._exe._extraRunargs.iteritems(): + for k, v in iteritems(self._exe._extraRunargs): ## @note: What to do if this is a CLI argument as well, in particular # for arguments like preExec we want to add to the list, not replace it if k in declaredRunargs: if isinstance(self._exe.conf.argdict[k].value, list): msg.debug('Extending runarg {0!s}={1!r}'.format(k, v)) - print >>runargsFile, '{0}.{1!s}.extend({2!r})'.format(self._runArgsName, k, v) + print('{0}.{1!s}.extend({2!r})'.format(self._runArgsName, k, v), file=runargsFile) else: msg.debug('Adding runarg {0!s}={1!r}'.format(k, v)) - print >>runargsFile, '{0}.{1!s} = {2!r}'.format(self._runArgsName, k, v) + print('{0}.{1!s} = 
{2!r}'.format(self._runArgsName, k, v), file=runargsFile) ## @note runtime runargs are passed as strings, i.e., they can be evaluated - print >>runargsFile, os.linesep, '# Extra runtime runargs' - for k, v in self._exe._runtimeRunargs.iteritems(): + print(os.linesep, '# Extra runtime runargs', file=runargsFile) + for k, v in iteritems(self._exe._runtimeRunargs): # These options are string converted, not repred, so they can write an option # which is evaluated at runtime # Protect this with try: except: for the Embedding use case msg.debug('Adding runarg {0!s}={1!r}'.format(k, v)) - print >>runargsFile, os.linesep.join(('try:', + print(os.linesep.join(('try:', ' {0}.{1!s} = {2!s}'.format(self._runArgsName, k, v), 'except AttributeError:', - ' print "WARNING - AttributeError for {0}"'.format(k))) + ' print "WARNING - AttributeError for {0}"'.format(k))), file=runargsFile) ## @note Now write the literals into the runargs file if self._exe._literalRunargs is not None: - print >>runargsFile, os.linesep, '# Literal runargs snippets' + print(os.linesep, '# Literal runargs snippets', file=runargsFile) for line in self._exe._literalRunargs: - print >>runargsFile, line + print(line, file=runargsFile) ## Another special option - dataArgs are always written to the runargs file for dataType in self._exe._dataArgs: - print >>runargsFile, os.linesep, '# Forced data value arguments' + print(os.linesep, '# Forced data value arguments', file=runargsFile) if dataType in self._exe.conf.dataDictionary: - print >>runargsFile, '{0}.data{1}arg = {2!r}'.format(self._runArgsName, dataType, - self._exe.conf.dataDictionary[dataType].value) + print('{0}.data{1}arg = {2!r}'.format(self._runArgsName, dataType, + self._exe.conf.dataDictionary[dataType].value), file=runargsFile) else: - print >>runargsFile, '# Warning: data type "{0}" is not part of this transform'.format(dataType) + print('# Warning: data type "{0}" is not part of this transform'.format(dataType), file=runargsFile) # This adds the correct JO fragment for AthenaMP job, where we need to ask # the FileMgr to produce the requested log and report files # Also, aggregating the workers' logfiles into the mother's makes life # easier for debugging if self._exe._athenaMP: - print >>runargsFile, os.linesep, '# AthenaMP Options. nprocs = %d' % self._exe._athenaMP + print(os.linesep, '# AthenaMP Options. 
nprocs = %d' % self._exe._athenaMP, file=runargsFile) # Proxy for both options - print >>runargsFile, os.linesep.join((os.linesep, + print(os.linesep.join((os.linesep, 'from AthenaMP.AthenaMPFlags import jobproperties as AthenaMPJobProps', 'AthenaMPJobProps.AthenaMPFlags.WorkerTopDir="{0}"'.format(self._exe._athenaMPWorkerTopDir), 'AthenaMPJobProps.AthenaMPFlags.OutputReportFile="{0}"'.format(self._exe._athenaMPFileReport), 'AthenaMPJobProps.AthenaMPFlags.EventOrdersFile="{0}"'.format(self._exe._athenaMPEventOrdersFile), 'AthenaMPJobProps.AthenaMPFlags.CollectSubprocessLogs=True' - )) + )), file=runargsFile) if self._exe._athenaMPStrategy: # Beware of clobbering a non default value (a feature used by EventService) - print >>runargsFile, 'if AthenaMPJobProps.AthenaMPFlags.Strategy.isDefault():' - print >>runargsFile, '\tAthenaMPJobProps.AthenaMPFlags.Strategy="{0}"'.format(self._exe._athenaMPStrategy) + print('if AthenaMPJobProps.AthenaMPFlags.Strategy.isDefault():', file=runargsFile) + print('\tAthenaMPJobProps.AthenaMPFlags.Strategy="{0}"'.format(self._exe._athenaMPStrategy), file=runargsFile) if self._exe._athenaMPReadEventOrders: if os.path.isfile(self._exe._athenaMPEventOrdersFile): - print >>runargsFile, 'AthenaMPJobProps.AthenaMPFlags.ReadEventOrders=True' + print('AthenaMPJobProps.AthenaMPFlags.ReadEventOrders=True', file=runargsFile) else: raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_EXEC_RUNARGS_ERROR"), "Failed to find file: {0} required by athenaMP option: --athenaMPUseEventOrders true".format(self._exe._athenaMPEventOrdersFile)) if 'athenaMPEventsBeforeFork' in self._exe.conf.argdict: - print >>runargsFile, 'AthenaMPJobProps.AthenaMPFlags.EventsBeforeFork={0}'.format(self._exe.conf.argdict['athenaMPEventsBeforeFork'].value) + print('AthenaMPJobProps.AthenaMPFlags.EventsBeforeFork={0}'.format(self._exe.conf.argdict['athenaMPEventsBeforeFork'].value), file=runargsFile) msg.info('Successfully wrote runargs file {0}'.format(self._runArgsFile)) diff --git a/Tools/PyJobTransforms/python/trfMPTools.py b/Tools/PyJobTransforms/python/trfMPTools.py index 9e641eafb3c..795b3f47e52 100644 --- a/Tools/PyJobTransforms/python/trfMPTools.py +++ b/Tools/PyJobTransforms/python/trfMPTools.py @@ -1,3 +1,5 @@ +from future.utils import iteritems +from builtins import zip # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfMPTools @@ -47,7 +49,7 @@ def detectAthenaMPProcs(argdict = {}): else: raise ValueError("--nprocs was set more than once in 'athenaopts'") msg.info('AthenaMP detected from "nprocs" setting with {0} workers for substep {1}'.format(athenaMPProcs,substep)) - except ValueError, errMsg: + except ValueError as errMsg: myError = 'Problem discovering AthenaMP setup: {0}'.format(errMsg) raise trfExceptions.TransformExecutionException(trfExit.nameToCode('TRF_EXEC_SETUP_FAIL'), myError) @@ -62,8 +64,8 @@ def detectAthenaMPProcs(argdict = {}): # @param skipFileChecks Switches off checks on output files # @return @c None; side effect is the update of the @c dataDictionary def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictionary, athenaMPworkers, skipFileChecks = False, argdict = {}): - msg.debug("MP output handler called for report {0} and workers in {1}, data types {2}".format(athenaMPFileReport, athenaMPWorkerTopDir, dataDictionary.keys())) - outputHasBeenHandled = dict([ (dataType, False) for dataType in dataDictionary.keys() if dataDictionary[dataType] ]) + msg.debug("MP output handler 
called for report {0} and workers in {1}, data types {2}".format(athenaMPFileReport, athenaMPWorkerTopDir, list(dataDictionary))) + outputHasBeenHandled = dict([ (dataType, False) for dataType in dataDictionary if dataDictionary[dataType] ]) # if sharedWriter mode is active ignore athenaMPFileReport sharedWriter=False @@ -82,7 +84,7 @@ def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictiona msg.debug('Examining element {0} with attributes {1}'.format(filesElement, filesElement.attrib)) originalArg = None startName = filesElement.attrib['OriginalName'] - for dataType, fileArg in dataDictionary.iteritems(): + for dataType, fileArg in iteritems(dataDictionary): if fileArg.value[0] == startName: originalArg = fileArg outputHasBeenHandled[dataType] = True @@ -104,10 +106,10 @@ def athenaMPOutputHandler(athenaMPFileReport, athenaMPWorkerTopDir, dataDictiona # OK, we have something we need to search for; cache the dirwalk here MPdirWalk = [ dirEntry for dirEntry in os.walk(athenaMPWorkerTopDir) ] - for dataType, fileArg in dataDictionary.iteritems(): + for dataType, fileArg in iteritems(dataDictionary): if outputHasBeenHandled[dataType]: continue - if fileArg.io is "input": + if fileArg.io == "input": continue msg.info("Searching MP worker directories for {0}".format(dataType)) startName = fileArg.value[0] @@ -160,14 +162,14 @@ def athenaMPoutputsLinkAndUpdate(newFullFilenames, fileArg): try: os.rename(fname,fileArg.originalName) newFilenameValue[0]=fileArg.originalName - except OSError, e: + except OSError as e: raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_OUTPUT_FILE_ERROR"), "Failed to move {0} to {1}: {2}".format(fname, linkname, e)) else: try: if path.lexists(linkname): os.unlink(linkname) os.symlink(fname, linkname) - except OSError, e: + except OSError as e: raise trfExceptions.TransformExecutionException(trfExit.nameToCode("TRF_OUTPUT_FILE_ERROR"), "Failed to link {0} to {1}: {2}".format(fname, linkname, e)) fileArg.multipleOK = True diff --git a/Tools/PyJobTransforms/python/trfReports.py b/Tools/PyJobTransforms/python/trfReports.py index a564fd2568d..7c3de35b1d9 100644 --- a/Tools/PyJobTransforms/python/trfReports.py +++ b/Tools/PyJobTransforms/python/trfReports.py @@ -1,3 +1,15 @@ +from __future__ import print_function +from __future__ import division +from future.utils import iteritems +from future.utils import itervalues + + +from builtins import object +from future import standard_library +standard_library.install_aliases() + +from builtins import int + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfReports @@ -11,7 +23,7 @@ __version__ = '$Revision: 784023 $' -import cPickle as pickle +import pickle as pickle import json import os.path import platform @@ -83,16 +95,16 @@ class trfReport(object): if not self._dataDictionary: self._dataDictionary = self.python(fast = fast, fileReport = fileReport) - print >> report, '# {0} file generated on'.format(self.__class__.__name__), isodate() - print >> report, pprint.pformat(self._dataDictionary) + print('# {0} file generated on'.format(self.__class__.__name__), isodate(), file=report) + print(pprint.pformat(self._dataDictionary), file=report) if dumpEnv: - print >> report, '# Environment dump' - eKeys = os.environ.keys() + print('# Environment dump', file=report) + eKeys = list(os.environ) eKeys.sort() for k in eKeys: - print >> report, '%s=%s' % (k, os.environ[k]) - print >> report, '# Machine report' - print >> report, 
pprint.pformat(machineReport().python(fast = fast)) + print('%s=%s' % (k, os.environ[k]), file=report) + print('# Machine report', file=report) + print(pprint.pformat(machineReport().python(fast = fast)), file=report) def writeGPickleReport(self, filename, fast = False): with open(filename, 'w') as report: @@ -100,7 +112,7 @@ class trfReport(object): def writeClassicXMLReport(self, filename, fast = False): with open(filename, 'w') as report: - print >> report, prettyXML(self.classicEltree(fast = fast), poolFileCatalogFormat = True) + print(prettyXML(self.classicEltree(fast = fast), poolFileCatalogFormat = True), file=report) def writePilotPickleReport(self, filename, fast = False, fileReport = defaultFileReport): with open(filename, 'w') as report: @@ -156,7 +168,7 @@ class trfJobReport(trfReport): if fileReport[fileType]: myDict['files'][fileType] = [] # Should have a dataDictionary, unless something went wrong very early... - for dataType, dataArg in self._trf._dataDictionary.iteritems(): + for dataType, dataArg in iteritems(self._trf._dataDictionary): if dataArg.auxiliaryFile: # Always skip auxilliary files from the report continue if fileReport[dataArg.io]: @@ -195,7 +207,7 @@ class trfJobReport(trfReport): maxWorkers = 1 msg.debug('Raw cpu resource consumption: transform {0}, children {1}'.format(myCpuTime, childCpuTime)) # Reduce childCpuTime by times reported in the executors (broken for MP...?) - for exeName, exeReport in myDict['resource']['executor'].iteritems(): + for exeName, exeReport in iteritems(myDict['resource']['executor']): if 'mpworkers' in exeReport: if exeReport['mpworkers'] > maxWorkers : maxWorkers = exeReport['mpworkers'] try: @@ -250,8 +262,8 @@ class trfJobReport(trfReport): # Extract some executor parameters here for exeKey in ('preExec', 'postExec', 'preInclude', 'postInclude'): if exeKey in self._trf.argdict: - for substep, pyfrag in self._trf.argdict[exeKey].value.iteritems(): - if substep is 'all': + for substep, pyfrag in iteritems(self._trf.argdict[exeKey].value): + if substep == 'all': ElementTree.SubElement(trfTree, 'META', type = 'string', name = exeKey, value = str(pyfrag)) else: ElementTree.SubElement(trfTree, 'META', type = 'string', name = exeKey + '_' + substep, value = str(pyfrag)) @@ -265,7 +277,7 @@ class trfJobReport(trfReport): value = str(self._trf.argdict[exeKey].value)) # Now add information about output files - for dataArg in self._trf._dataDictionary.itervalues(): + for dataArg in itervalues(self._trf._dataDictionary): if dataArg.io == 'output': for fileEltree in trfFileReport(dataArg).classicEltreeList(fast = fast): trfTree.append(fileEltree) @@ -293,7 +305,7 @@ class trfJobReport(trfReport): # Emulate the NEEDCHECK behaviour if hasattr(self._trf, '_executorPath'): for executor in self._trf._executorPath: - if hasattr(executor, '_logScan') and self._trf.exitCode is 0: + if hasattr(executor, '_logScan') and self._trf.exitCode == 0: if executor._logScan._levelCounter['FATAL'] > 0 or executor._logScan._levelCounter['CRITICAL'] > 0: # This should not happen! 
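Note: the report-writing hunks above convert the Python 2 "print >> fileobj, ..." form into the print() function with a file= keyword; together with "from __future__ import print_function" the same line is accepted by both interpreters. A minimal sketch (the file name is illustrative):

    from __future__ import print_function   # no-op on Python 3, enables print() on Python 2
    import pprint

    with open('report.txt', 'w') as report:              # illustrative output file
        # Python 2 only:  print >> report, '# header'
        print('# header', file=report)                    # portable spelling used by the patch
        print(pprint.pformat({'exitCode': 0}), file=report)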
msg.warning('Found FATAL/CRITICAL errors and exit code 0 - reseting to TRF_LOGFILE_FAIL') @@ -317,8 +329,8 @@ class trfJobReport(trfReport): # Mangle substep argumemts back to the old format for substepKey in ('preExec', 'postExec', 'preInclude', 'postInclude'): if substepKey in self._trf.argdict: - for substep, values in self._trf.argdict[substepKey].value.iteritems(): - if substep is 'all': + for substep, values in iteritems(self._trf.argdict[substepKey].value): + if substep == 'all': trfDict['jobOutputs'][-1]['more']['metadata'][substepKey] = values else: trfDict['jobOutputs'][-1]['more']['metadata'][substepKey + '_' + substep] = values @@ -327,8 +339,8 @@ class trfJobReport(trfReport): nentries = 'UNKNOWN' for fileArg in self._trf.getFiles(io = 'input'): thisArgNentries = fileArg.nentries - if isinstance(thisArgNentries, (int, long)): - if nentries is 'UNKNOWN': + if isinstance(thisArgNentries, int): + if nentries == 'UNKNOWN': nentries = thisArgNentries elif thisArgNentries != nentries: msg.warning('Found a file with different event count than others: {0} != {1} for {2}'.format(thisArgNentries, nentries, fileArg)) @@ -363,7 +375,7 @@ class trfExecutorReport(object): 'exeConfig' : {} } # Add executor config information - for k, v in self._exe.extraMetadata.iteritems(): + for k, v in iteritems(self._exe.extraMetadata): reportDict['exeConfig'][k] = v # Do we have a logscan to add? @@ -371,7 +383,7 @@ class trfExecutorReport(object): try: json.dumps(self._exe._logScan.python) reportDict['logfileReport'] = self._exe._logScan.python - except UnicodeDecodeError, e: + except UnicodeDecodeError as e: msg.error('Problem with serialising logfile report as JSON - this will be skipped from the report ({0})'.format(e)) reportDict['metaData'] = self._exe._logScan._metaData @@ -416,11 +428,11 @@ class trfFileReport(object): # @param base How extensive to make the report: name or full def python(self, fast = False, type = 'full'): # First entity contains shared properties - same for all files in this argFile - if type is 'name': + if type == 'name': fileArgProps = {'dataset': self._fileArg.dataset, 'nentries': self._fileArg.getnentries(fast), 'subFiles' : []} - elif type is 'full': + elif type == 'full': fileArgProps = {'dataset' : self._fileArg.dataset, 'type' : self._fileArg.type, 'subFiles' : [], @@ -473,10 +485,10 @@ class trfFileReport(object): entry = {'name': os.path.basename(filename)} else: entry = {'name': os.path.relpath(os.path.normpath(filename))} - if type is 'name': + if type == 'name': # For 'name' we return only the GUID entry.update(self._fileArg.getMetadata(files = filename, populate = not fast, metadataKeys = ['file_guid'])[filename]) - elif type is 'full': + elif type == 'full': # Suppress io because it's the key at a higher level and _exists because it's internal entry.update(self._fileArg.getMetadata(files = filename, populate = not fast, maskMetadataKeys = ['io', '_exists', 'integrity', 'file_type'])[filename]) else: @@ -509,13 +521,13 @@ class trfFileReport(object): tree = ElementTree.Element('File', ID = str(self._fileArg.getSingleMetadata(fname = filename, metadataKey = 'file_guid', populate = not fast))) logical = ElementTree.SubElement(tree, 'logical') lfn = ElementTree.SubElement(logical, 'lfn', name = filename) - for myKey, classicKey in self._internalToClassicMap.iteritems(): + for myKey, classicKey in iteritems(self._internalToClassicMap): # beam_type is tricky - we return only the first list value, # (but remember, protect against funny stuff!) 
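Note: the nentries hunk above relies on Python 3 having a single arbitrary-precision integer type; the separate 'long' name is gone, so isinstance(x, (int, long)) raises NameError there. The added 'from builtins import int' appears intended to keep the plain isinstance(x, int) test working for large values under Python 2 as well. A minimal sketch (Python 3):

    nentries = 10**20                  # would have been a 'long' under Python 2
    # Python 2 idiom being replaced:  isinstance(nentries, (int, long))
    assert isinstance(nentries, int)   # sufficient on Python 3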
- if myKey is 'beam_type': + if myKey == 'beam_type': beamType = self._fileArg.getSingleMetadata(fname = filename, metadataKey = myKey, populate = not fast) if isinstance(beamType, list): - if len(beamType) is 0: + if len(beamType) == 0: ElementTree.SubElement(tree, 'metadata', att_name = classicKey, att_value = '') else: ElementTree.SubElement(tree, 'metadata', att_name = classicKey, att_value = str(beamType[0])) @@ -553,19 +565,19 @@ class trfFileReport(object): 'dataset' : self._fileArg.dataset, } # Fill in the mapped 'primary' keys - for myKey, classicKey in self._internalToGpickleMap.iteritems(): + for myKey, classicKey in iteritems(self._internalToGpickleMap): fileDict[classicKey] = self._fileArg.getSingleMetadata(fname = filename, metadataKey = myKey, populate = not fast) - if classicKey is 'checkSum' and fileDict[classicKey] is 'UNDEFINED': + if classicKey == 'checkSum' and fileDict[classicKey] == 'UNDEFINED': # Old style is that we give back None when we don't know fileDict[classicKey] = None - elif fileDict[classicKey] is 'UNDEFINED': + elif fileDict[classicKey] == 'UNDEFINED': # Suppress things we don't generally expect to know del fileDict[classicKey] # Base 'more' stuff which is known by the argFile itself fileDict['more'] = {'metadata' : {'fileType' : self._fileArg.type}} - for myKey, classicKey in self._internalToGpickleMoreMap.iteritems(): + for myKey, classicKey in iteritems(self._internalToGpickleMoreMap): value = self._fileArg.getSingleMetadata(fname = filename, metadataKey = myKey, populate = not fast) - if value is not 'UNDEFINED': + if value != 'UNDEFINED': fileDict['more']['metadata'][classicKey] = value return fileDict @@ -581,7 +593,7 @@ class machineReport(object): for attr in attrs: try: machine[attr] = getattr(platform, attr).__call__() - except AttributeError, e: + except AttributeError as e: msg.error('Failed to get "{0}" attribute from platform module: {1}'.format(attr, e)) # Now try to get processor information from /proc/cpuinfo @@ -598,12 +610,12 @@ class machineReport(object): machine['model_name'] = v except ValueError: pass - except Exception, e: + except Exception as e: msg.warning('Unexpected error while parsing /proc/cpuinfo: {0}'.format(e)) try: with open('/etc/machinefeatures/hs06') as hs: machine['hepspec'] = hs.readlines()[0].strip() - except IOError, e: + except IOError as e: pass return machine @@ -615,7 +627,7 @@ def pyJobReportToFileDict(jobReport, io = 'all'): msg.warning('Job report has no "files" section') return dataDict for iotype in jobReport['files']: - if io is 'all' or io == iotype: + if io == 'all' or io == iotype: for filedata in jobReport['files'][iotype]: dataDict[filedata['type']] = filedata return dataDict diff --git a/Tools/PyJobTransforms/python/trfSignal.py b/Tools/PyJobTransforms/python/trfSignal.py index 701763b04ca..dba8a09fbc7 100755 --- a/Tools/PyJobTransforms/python/trfSignal.py +++ b/Tools/PyJobTransforms/python/trfSignal.py @@ -30,7 +30,7 @@ def setTrfSignalHandlers(handler): try: msg.debug("Setting signalhandler for %s to %s" % (s, handler)) _savedSignalHandlerDict[s] = signal.signal(getattr(signal, s), handler) - except Exception, e: + except Exception as e: msg.error("Unable to attach custom signal handler to %s: %s" % (s, e)) continue @@ -41,6 +41,6 @@ def resetTrfSignalHandlers(): for s in _defaultSignalList: try: signal.signal(getattr(signal, s), _savedSignalHandlerDict.get(s, signal.SIG_DFL)) - except Exception, e: + except Exception as e: msg.error("Unable to attach custom signal handler to %s: %s" % (s, e)) 
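Note: the trfSignal hunks are purely syntactic: 'except Exception, e' is a SyntaxError under Python 3, while 'except Exception as e' parses on Python 2.6+ and Python 3 alike. A minimal sketch:

    try:
        raise OSError("demo")
    # Python 2 only spelling:  except OSError, e:
    except OSError as e:            # accepted by Python 2.6+ and Python 3
        message = str(e)
    assert message == "demo"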
continue diff --git a/Tools/PyJobTransforms/python/trfUtils.py b/Tools/PyJobTransforms/python/trfUtils.py index d96dffb8fe7..f3228421bf1 100644 --- a/Tools/PyJobTransforms/python/trfUtils.py +++ b/Tools/PyJobTransforms/python/trfUtils.py @@ -1,3 +1,13 @@ +from __future__ import print_function +from future.utils import iteritems + +from past.builtins import basestring +from builtins import object +from future import standard_library +standard_library.install_aliases() +from builtins import map + + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfUtils @@ -28,6 +38,7 @@ from PyJobTransforms.trfExitCodes import trfExit import PyJobTransforms.trfExceptions as trfExceptions import logging +from functools import reduce msg = logging.getLogger(__name__) @@ -67,7 +78,7 @@ def getAncestry(listMyOrphans = False): p = Popen(psCmd, stdout=PIPE, stderr=PIPE) stdout = p.communicate()[0] psPID = p.pid - except OSError, e: + except OSError as e: msg.error('Failed to execute "ps" to get process ancestry: %s' % repr(e)) raise @@ -108,7 +119,7 @@ def getAncestry(listMyOrphans = False): # @return @c children List of child PIDs def listChildren(psTree = None, parent = os.getpid(), listOrphans = False): '''Take a psTree dictionary and list all children''' - if psTree == None: + if psTree is None: psTree = getAncestry(listMyOrphans = listOrphans) msg.debug("List children of %d (%s)" % (parent, psTree.get(parent, []))) @@ -159,7 +170,7 @@ def call(args, bufsize=0, executable=None, stdin=None, preexec_fn=None, close_fd if line: line="%s%s" % (message, line.rstrip()) if logger is None: - print line + print(line) else: logger.log(loglevel, line) @@ -168,7 +179,7 @@ def call(args, bufsize=0, executable=None, stdin=None, preexec_fn=None, close_fd while line: line="%s%s" % (message, line.strip()) if logger is None: - print line + print(line) else: logger.log(loglevel, line) line=p.stdout.readline() @@ -238,7 +249,7 @@ def asetupReport(): cmd = ['lstags'] lstagsOut = Popen(cmd, shell = False, stdout = PIPE, stderr = STDOUT, bufsize = 1).communicate()[0] setupMsg += "\n".join([ "\t\t{0}".format(pkg) for pkg in lstagsOut.split("\n") ]) - except (CalledProcessError, OSError), e: + except (CalledProcessError, OSError) as e: setupMsg += 'Execution of lstags failed: {0}'.format(e) else: setupMsg+= "No readable patch area found" @@ -287,7 +298,7 @@ def releaseIsOlderThan(major, minor=None): return False return True - except Exception, e: + except Exception as e: msg.warning('Exception thrown when attempting to detect athena version ({0}). No release check possible'.format(e)) return False @@ -311,7 +322,7 @@ def lineByLine(filename, strip=True, removeTimestamp=True, substepName=None): f = open(filename, 'r') for line in f: linecounter += 1 - if substepName and isinstance(substepName, str): # Remove substepName only if caller provides that string. + if substepName and isinstance(substepName, basestring): # Remove substepName only if caller provides that string. line = line.lstrip(substepName) if removeTimestamp: line = line.lstrip('0123456789:-, ') # Remove timestamps in both serial and MP mode. @@ -377,7 +388,7 @@ def isodate(): # None is still allowed as this is the default for "unset" in # some cases. 
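Note: several trfUtils.py hunks swap '== None' for 'is None'. None is a singleton, so the identity test is the idiomatic spelling and is immune to objects whose __eq__ answers True too eagerly. A minimal sketch (the class is illustrative):

    class AlwaysEqual(object):      # illustrative: an over-permissive __eq__
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    assert (obj == None) is True    # equality is hijacked by __eq__
    assert (obj is None) is False   # identity gives the honest answer
    assert None is None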
def forceToAlphaNum(string): - if string == None or string.isalnum(): + if string is None or string.isalnum(): return string newstring = '' for piece in string: @@ -399,26 +410,26 @@ def forceToAlphaNum(string): # @return True if metadata is the same, otherwise False def cmpMetadata(metadata1, metadata2, guidCheck = 'valid'): # First check we have the same files - allFiles = set(metadata1.keys()) | set(metadata2.keys()) - if len(allFiles) > len(metadata1.keys()) or len(allFiles) > len(metadata2.keys()): + allFiles = set(metadata1) | set(metadata2) + if len(allFiles) > len(metadata1) or len(allFiles) > len(metadata2): msg.warning('In metadata comparison file lists are not equal - fails ({0} != {1}'.format(metadata1, metadata2)) return False for fname in allFiles: - allKeys = set(metadata1[fname].keys()) | set(metadata2[fname].keys()) - if len(allKeys) > len(metadata1[fname].keys()) or len(allFiles) > len(metadata2[fname].keys()): + allKeys = set(metadata1[fname]) | set(metadata2[fname]) + if len(allKeys) > len(metadata1[fname]) or len(allFiles) > len(metadata2[fname]): msg.warning('In metadata comparison key lists are not equal - fails') return False for key in allKeys: - if key is 'file_guid': - if guidCheck is 'ignore': + if key == 'file_guid': + if guidCheck == 'ignore': continue - elif guidCheck is 'equal': + elif guidCheck == 'equal': if metadata1[fname]['file_guid'].upper() == metadata2[fname]['file_guid'].upper(): continue else: msg.warning('In metadata comparison strict GUID comparison failed.') return False - elif guidCheck is 'valid': + elif guidCheck == 'valid': try: uuid.UUID(metadata1[fname]['file_guid']) uuid.UUID(metadata2[fname]['file_guid']) @@ -439,7 +450,7 @@ def unpackTarFile(filename, directory="."): tar = tarfile.open(filename) tar.extractall(path=directory) tar.close() - except Exception, e: + except Exception as e: errMsg = 'Error encountered while unpacking {0} to {1}: {2}'.format(filename, directory, e) msg.error(errMsg) raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_SETUP'), errMsg) @@ -454,9 +465,9 @@ def unpackTarFile(filename, directory="."): # @throws trfExceptions.TransformSetupException If the DBRelease tarball is unreadable or the version is not understood # @return Two element tuple: (@c True if release was unpacked or @c False if release was already unpacked, dbsetup path) def unpackDBRelease(tarball, dbversion=None): - if dbversion == None: + if dbversion is None: dbdMatch = re.match(r'DBRelease-([\d\.]+)\.tar\.gz', path.basename(tarball)) - if dbdMatch == None: + if dbdMatch is None: raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_DBRELEASE_PROBLEM'), 'Could not find a valid version in the DBRelease tarball: {0}'.format(tarball)) dbversion = dbdMatch.group(1) @@ -488,11 +499,11 @@ def setupDBRelease(setup): setupObj = Setup(dbdir) sys.path = opath msg.debug('DBRelease setup module was initialised successfully') - except ImportError, e: + except ImportError as e: errMsg = 'Import error while trying to load DB Setup module: {0}'.format(e) msg.error(errMsg) raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_DBRELEASE_PROBLEM'), errMsg) - except Exception, e: + except Exception as e: errMsg = 'Unexpected error while trying to load DB Setup module: {0}'.format(e) msg.error(errMsg) raise trfExceptions.TransformSetupException(trfExit.nameToCode('TRF_DBRELEASE_PROBLEM'), errMsg) @@ -537,15 +548,15 @@ def pickledDump(argdict): from PyJobTransforms.trfArgClasses import argument theArgumentDictionary = {} - 
for k, v in argdict.iteritems(): - if k is 'dumpPickle': + for k, v in iteritems(argdict): + if k == 'dumpPickle': continue if isinstance(v, argument): theArgumentDictionary[k] = getattr(v, "dumpvalue", v.value) else: theArgumentDictionary[k] = v with open(argdict['dumpPickle'], 'w') as pickleFile: - import cPickle as pickle + import pickle as pickle pickle.dump(theArgumentDictionary, pickleFile) @@ -556,8 +567,8 @@ def JSONDump(argdict): from PyJobTransforms.trfArgClasses import argument theArgumentDictionary = {} - for k, v in argdict.iteritems(): - if k is 'dumpJSON': + for k, v in iteritems(argdict): + if k == 'dumpJSON': continue if isinstance(v, argument): theArgumentDictionary[k] = getattr(v, "dumpvalue", v.value) @@ -571,11 +582,15 @@ def JSONDump(argdict): # from json (TODO: make the transforms happy with unicode as well as plain str!) def convertToStr(in_string): if isinstance(in_string, dict): - return dict([(convertToStr(key), convertToStr(value)) for key, value in in_string.iteritems()]) + return dict([(convertToStr(key), convertToStr(value)) for key, value in iteritems(in_string)]) elif isinstance(in_string, list): return [convertToStr(element) for element in in_string] - elif isinstance(in_string, unicode): + # Unicode is always str in Python3, but bytes are not + # TODO: remove unicode comparison after Python 3 migration + elif in_string.__class__.__name__ == 'unicode': return in_string.encode('utf-8') + elif in_string.__class__.__name__ == 'bytes': + return in_string.decode('utf-8') else: return in_string @@ -592,15 +607,15 @@ def cliToKey(option): def printHR(the_object): # dictionary if isinstance(the_object, dict): - for key, value in sorted(the_object.items()): - print u'{key}: {value}'.format(key = key, value = value) + for key, value in sorted(iteritems(the_object)): + print(u'{key}: {value}'.format(key = key, value = value)) # list or tuple elif isinstance(the_object, list) or isinstance(the_object, tuple): for element in the_object: - print element + print(element) # other else: - print the_object + print(the_object) ## @brief return a URL-safe, base 64-encoded pseudorandom UUID @@ -667,11 +682,11 @@ class Job(object): self.workFunctionTimeout = workFunctionTimeout self.className = self.__class__.__name__ self.resultGetter = None - if name == None: + if name is None: self._name = uniqueIdentifier() else: self._name = name - if self.workFunction == None: + if self.workFunction is None: exceptionMessage = "work function not specified" msg.error("{notifier}: exception message: {exceptionMessage}".format( notifier = self.className, @@ -692,7 +707,7 @@ class Job(object): # @return object description string def __str__(self): descriptionString = "" - for key, value in sorted(vars(self).items()): + for key, value in sorted(iteritems(vars(self))): descriptionString += str("{key}:{value} ".format( key = key, value = value) @@ -729,12 +744,12 @@ class JobGroup(object): self.className = self.__class__.__name__ self.completeStatus = False self.timeStampSubmission = None - if name == None: + if name is None: self._name = uniqueIdentifier() else: self._name = name #self.timeStampSubmissionComplete = None #delete - if timeout == None: + if timeout is None: self.timeout = 0 for job in self.jobs: self.timeout += job.workFunctionTimeout @@ -750,7 +765,7 @@ class JobGroup(object): # @return object description string def __str__(self): descriptionString = "" - for key, value in sorted(vars(self).items()): + for key, value in sorted(iteritems(vars(self))): descriptionString += 
str("{key}:{value} ".format( key = key, value = value) @@ -839,7 +854,7 @@ class ParallelJobProcessor(object): # @return object description string def __str__(self): descriptionString = "" - for key, value in sorted(vars(self).items()): + for key, value in sorted(iteritems(vars(self))): descriptionString += str("{key}:{value} ".format( key = key, value = value @@ -863,7 +878,7 @@ class ParallelJobProcessor(object): ): # If the input submission is not None, then update the jobSubmission # data attribute to that specified for this method. - if jobSubmission != None: + if jobSubmission is not None: self.jobSubmission = jobSubmission self.status = "submitted" msg.debug("{notifier}: status: {status}".format( @@ -1222,7 +1237,7 @@ def ValgrindCommand( for option in extraOptionsList: optionsList.append(option) # Add suppression files and athena commands - for suppressionFile, pathEnvironmentVariable in suppressionFilesAndCorrespondingPathEnvironmentVariables.iteritems(): + for suppressionFile, pathEnvironmentVariable in iteritems(suppressionFilesAndCorrespondingPathEnvironmentVariables): suppFile = findFile(os.environ[pathEnvironmentVariable], suppressionFile) if suppFile: optionsList.append("--suppressions=" + suppFile) @@ -1251,7 +1266,7 @@ def ValgrindCommand( def calcCpuTime(start, stop): cpuTime = None if start and stop: - cpuTime = reduce(lambda x1, x2: x1+x2, map(lambda x1, x2: x2-x1, start[2:4], stop[2:4])) + cpuTime = reduce(lambda x1, x2: x1+x2, list(map(lambda x1, x2: x2-x1, start[2:4], stop[2:4]))) return cpuTime diff --git a/Tools/PyJobTransforms/python/trfValidateRootFile.py b/Tools/PyJobTransforms/python/trfValidateRootFile.py index 2e162e2479c..05562df6273 100755 --- a/Tools/PyJobTransforms/python/trfValidateRootFile.py +++ b/Tools/PyJobTransforms/python/trfValidateRootFile.py @@ -9,6 +9,8 @@ +from __future__ import print_function +from builtins import range import sys import logging @@ -148,10 +150,10 @@ def checkFile(fileName, the_type, requireTree): def usage(): - print "Usage: validate filename type requireTree verbosity" - print "'type' must be either 'event' or 'basket'" - print "'requireTree' must be either 'true' or 'false'" - print "'verbosity' must be either 'on' or 'off'" + print("Usage: validate filename type requireTree verbosity") + print("'type' must be either 'event' or 'basket'") + print("'requireTree' must be either 'true' or 'false'") + print("'verbosity' must be either 'on' or 'off'") return 2 diff --git a/Tools/PyJobTransforms/python/trfValidation.py b/Tools/PyJobTransforms/python/trfValidation.py index 33bce379a54..1b37fdd9dda 100644 --- a/Tools/PyJobTransforms/python/trfValidation.py +++ b/Tools/PyJobTransforms/python/trfValidation.py @@ -1,3 +1,12 @@ +from future.utils import iteritems +from future.utils import listitems + +from past.builtins import basestring +from builtins import zip +from builtins import object +from builtins import range +from builtins import int + # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration ## @package PyJobTransforms.trfValidation @@ -150,7 +159,7 @@ class ignorePatterns(object): except ValueError: msg.warning('Could not parse this line as a valid error pattern: {0}'.format(line)) continue - except re.error, e: + except re.error as e: msg.warning('Could not parse valid regexp from {0}: {1}'.format(message, e)) continue @@ -158,7 +167,8 @@ class ignorePatterns(object): self._structuredPatterns.append({'service': reWho, 'level': level, 'message': reMessage}) - except (IOError, OSError) as (errno, errMsg): + 
except (IOError, OSError) as xxx_todo_changeme: + (errno, errMsg) = xxx_todo_changeme.args msg.warning('Failed to open error pattern file {0}: {1} ({2})'.format(fullName, errMsg, errno)) @@ -167,7 +177,7 @@ class ignorePatterns(object): try: self._searchPatterns.append(re.compile(string)) msg.debug('Successfully parsed additional logfile search string: {0}'.format(string)) - except re.error, e: + except re.error as e: msg.warning('Could not parse valid regexp from {0}: {1}'.format(string, e)) @@ -179,7 +189,7 @@ class logFileReport(object): def __init__(self, logfile=None, msgLimit=10, msgDetailLevel=stdLogLevels['ERROR']): # We can have one logfile or a set - if isinstance(logfile, str): + if isinstance(logfile, basestring): self._logfile = [logfile, ] else: self._logfile = logfile @@ -242,7 +252,7 @@ class athenaLogFileReport(logFileReport): @property def python(self): errorDict = {'countSummary': {}, 'details': {}} - for level, count in self._levelCounter.iteritems(): + for level, count in iteritems(self._levelCounter): errorDict['countSummary'][level] = count if self._levelCounter[level] > 0 and len(self._errorDetails[level]) > 0: errorDict['details'][level] = [] @@ -252,11 +262,11 @@ class athenaLogFileReport(logFileReport): def resetReport(self): self._levelCounter = {} - for level in stdLogLevels.keys() + ['UNKNOWN', 'IGNORED']: + for level in list(stdLogLevels) + ['UNKNOWN', 'IGNORED']: self._levelCounter[level] = 0 self._errorDetails = {} - for level in self._levelCounter.keys(): + for level in self._levelCounter: self._errorDetails[level] = [] # Format: # List of dicts {'message': errMsg, 'firstLine': lineNo, 'count': N} @@ -272,7 +282,7 @@ class athenaLogFileReport(logFileReport): # N.B. Use the generator so that lines can be grabbed by subroutines, e.g., core dump svc reporter try: myGen = trfUtils.lineByLine(log, substepName=self._substepName) - except IOError, e: + except IOError as e: msg.error('Failed to open transform logfile {0}: {1:s}'.format(log, e)) # Return this as a small report self._levelCounter['ERROR'] = 1 @@ -285,7 +295,7 @@ class athenaLogFileReport(logFileReport): self._metaData[key] = value m = self._regExp.match(line) - if m == None: + if m is None: # We didn't manage to get a recognised standard line from the file # But we can check for certain other interesting things, like core dumps if 'Core dump from CoreDumpSvc' in line > -1: @@ -362,7 +372,7 @@ class athenaLogFileReport(logFileReport): # Record some error details # N.B. 
We record 'IGNORED' errors as these really should be flagged for fixing - if fields['level'] is 'IGNORED' or stdLogLevels[fields['level']] >= self._msgDetails: + if fields['level'] == 'IGNORED' or stdLogLevels[fields['level']] >= self._msgDetails: if self._levelCounter[fields['level']] <= self._msgLimit: detailsHandled = False for seenError in self._errorDetails[fields['level']]: @@ -370,7 +380,7 @@ class athenaLogFileReport(logFileReport): seenError['count'] += 1 detailsHandled = True break - if detailsHandled == False: + if detailsHandled is False: self._errorDetails[fields['level']].append({'message': line, 'firstLine': lineCounter, 'count': 1}) elif self._levelCounter[fields['level']] == self._msgLimit + 1: msg.warning("Found message number {0} at level {1} - this and further messages will be supressed from the report".format(self._levelCounter[fields['level']], fields['level'])) @@ -392,7 +402,7 @@ class athenaLogFileReport(logFileReport): def worstError(self): worst = stdLogLevels['DEBUG'] worstName = 'DEBUG' - for lvl, count in self._levelCounter.iteritems(): + for lvl, count in iteritems(self._levelCounter): if count > 0 and stdLogLevels.get(lvl, 0) > worst: worstName = lvl worst = stdLogLevels[lvl] @@ -409,9 +419,9 @@ class athenaLogFileReport(logFileReport): firstLine = firstError = None firstLevel = stdLogLevels[floor] firstName = floor - for lvl, count in self._levelCounter.iteritems(): + for lvl, count in iteritems(self._levelCounter): if (count > 0 and stdLogLevels.get(lvl, 0) >= stdLogLevels[floor] and - (firstError == None or self._errorDetails[lvl][0]['firstLine'] < firstLine)): + (firstError is None or self._errorDetails[lvl][0]['firstLine'] < firstLine)): firstLine = self._errorDetails[lvl][0]['firstLine'] firstLevel = stdLogLevels[lvl] firstName = lvl @@ -428,7 +438,7 @@ class athenaLogFileReport(logFileReport): coreDumpReport = 'Core dump from CoreDumpSvc' for line, linecounter in lineGenerator: m = self._regExp.match(line) - if m == None: + if m is None: if 'Caught signal 11(Segmentation fault)' in line: coreDumpReport = 'Segmentation fault' if 'Event counter' in line: @@ -572,11 +582,11 @@ class scriptLogFileReport(logFileReport): def resetReport(self): self._levelCounter.clear() - for level in stdLogLevels.keys() + ['UNKNOWN', 'IGNORED']: + for level in list(stdLogLevels) + ['UNKNOWN', 'IGNORED']: self._levelCounter[level] = 0 self._errorDetails.clear() - for level in self._levelCounter.keys(): # List of dicts {'message': errMsg, 'firstLine': lineNo, 'count': N} + for level in self._levelCounter: # List of dicts {'message': errMsg, 'firstLine': lineNo, 'count': N} self._errorDetails[level] = [] def scanLogFile(self, resetReport=False): @@ -587,7 +597,7 @@ class scriptLogFileReport(logFileReport): msg.info('Scanning logfile {0}'.format(log)) try: myGen = trfUtils.lineByLine(log) - except IOError, e: + except IOError as e: msg.error('Failed to open transform logfile {0}: {1:s}'.format(log, e)) # Return this as a small report self._levelCounter['ERROR'] = 1 @@ -606,7 +616,7 @@ class scriptLogFileReport(logFileReport): def worstError(self): worstlevelName = 'DEBUG' worstLevel = stdLogLevels[worstlevelName] - for levelName, count in self._levelCounter.iteritems(): + for levelName, count in iteritems(self._levelCounter): if count > 0 and stdLogLevels.get(levelName, 0) > worstLevel: worstlevelName = levelName worstLevel = stdLogLevels[levelName] @@ -643,9 +653,9 @@ def returnIntegrityOfFile(file, functionName): # @ detail This method performs standard file validation in 
either serial or # @ parallel and updates file integrity metadata. def performStandardFileValidation(dictionary, io, parallelMode = False): - if parallelMode == False: + if parallelMode is False: msg.info('Starting legacy (serial) file validation') - for (key, arg) in dictionary.items(): + for (key, arg) in iteritems(dictionary): if not isinstance(arg, argFile): continue if not arg.io == io: @@ -692,7 +702,7 @@ def performStandardFileValidation(dictionary, io, parallelMode = False): else: msg.info('Guid is %s' % arg.getSingleMetadata(fname, 'file_guid')) msg.info('Stopping legacy (serial) file validation') - if parallelMode == True: + if parallelMode is True: msg.info('Starting parallel file validation') # Create lists of files and args. These lists are to be used with zip in # order to check and update file integrity metadata as appropriate. @@ -703,7 +713,7 @@ def performStandardFileValidation(dictionary, io, parallelMode = False): # Create a list for collation of file validation jobs for submission to # the parallel job processor. jobs = [] - for (key, arg) in dictionary.items(): + for (key, arg) in iteritems(dictionary): if not isinstance(arg, argFile): continue if not arg.io == io: @@ -764,7 +774,7 @@ def performStandardFileValidation(dictionary, io, parallelMode = False): # If the first (Boolean) element of the result tuple for the current # file is True, update the integrity metadata. If it is False, raise # an exception. - if currentResult[0] == True: + if currentResult[0] is True: msg.info('Updating integrity metadata for file {fileName}'.format(fileName = str(currentFile))) currentArg._setMetadata(files=[currentFile,], metadataKeys={'integrity': currentResult[0]}) else: @@ -841,7 +851,7 @@ class eventMatch(object): if eventCountConf: - if eventCountConfOverwrite == True: + if eventCountConfOverwrite is True: self._eventCountConf = eventCountConf else: self._eventCountConf.update(eventCountConf) @@ -889,13 +899,13 @@ class eventMatch(object): msg.warning('Found no dataDictionary entry for output data type {0}'.format(dataTypeName)) # Find if we have a skipEvents applied - if self._executor.conf.argdict.has_key("skipEvents"): + if "skipEvents" in self._executor.conf.argdict: self._skipEvents = self._executor.conf.argdict['skipEvents'].returnMyValue(exe=self._executor) else: self._skipEvents = None # Find if we have a maxEvents applied - if self._executor.conf.argdict.has_key("maxEvents"): + if "maxEvents" in self._executor.conf.argdict: self._maxEvents = self._executor.conf.argdict['maxEvents'].returnMyValue(exe=self._executor) if self._maxEvents == -1: self._maxEvents = None @@ -903,9 +913,9 @@ class eventMatch(object): self._maxEvents = None # Global eventAcceptanceEfficiency set? 
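Note: the eventMatch hunks around this point replace dict.has_key(), which was removed in Python 3, with the 'in' operator, which behaves identically on both versions. A minimal sketch (the argument names are illustrative):

    argdict = {'skipEvents': 100, 'maxEvents': -1}    # illustrative argument dictionary

    # Python 2 only:  argdict.has_key('skipEvents')   (AttributeError on Python 3)
    if 'skipEvents' in argdict:                       # portable membership test
        skip = argdict['skipEvents']
    else:
        skip = None
    assert skip == 100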
- if self._executor.conf.argdict.has_key("eventAcceptanceEfficiency"): + if "eventAcceptanceEfficiency" in self._executor.conf.argdict: self._evAccEff = self._executor.conf.argdict['eventAcceptanceEfficiency'].returnMyValue(exe=self._executor) - if (self._evAccEff == None): + if (self._evAccEff is None): self._evAccEff = 0.99 else: self._evAccEff = 0.99 @@ -918,8 +928,8 @@ class eventMatch(object): def decide(self): # We have all that we need to proceed: input and output data, skip and max events plus any efficiency factor # So loop over the input and output data and make our checks - for inData, neventsInData in self._inEventDict.iteritems(): - if type(neventsInData) not in (int, long): + for inData, neventsInData in iteritems(self._inEventDict): + if not isinstance(neventsInData, int): msg.warning('File size metadata for {inData} was not countable, found {neventsInData}. No event checks possible for this input data.'.format(inData=inData, neventsInData=neventsInData)) continue if inData in self._eventCountConf: @@ -927,13 +937,13 @@ class eventMatch(object): else: # OK, try a glob match in this case (YMMV) matchedInData = False - for inDataKey in self._eventCountConf.keys(): + for inDataKey in self._eventCountConf: if fnmatch.fnmatch(inData, inDataKey): msg.info("Matched input data type {inData} to {inDataKey} by globbing".format(inData=inData, inDataKey=inDataKey)) matchedInData = True break if not matchedInData: - msg.warning('No defined event count match for {inData} -> {outData}, so no check(s) possible in this case.'.format(inData=inData, outData=self._outEventDict.keys())) + msg.warning('No defined event count match for {inData} -> {outData}, so no check(s) possible in this case.'.format(inData=inData, outData=list(self._outEventDict))) continue # Now calculate the expected number of processed events for this input @@ -954,8 +964,8 @@ class eventMatch(object): msg.debug('Expected number of processed events for {0} is {1}'.format(inData, expectedEvents)) # Loop over output data - first find event count configuration - for outData, neventsOutData in self._outEventDict.iteritems(): - if type(neventsOutData) not in (int, long): + for outData, neventsOutData in iteritems(self._outEventDict): + if not isinstance(neventsOutData, int): msg.warning('File size metadata for {outData} was not countable, found "{neventsOutData}". 
No event checks possible for this output data.'.format(outData=outData, neventsOutData=neventsOutData)) continue if outData in self._eventCountConf[inDataKey]: @@ -964,7 +974,7 @@ class eventMatch(object): else: # Look for glob matches checkConf = None - for outDataKey, outDataConf in self._eventCountConf[inDataKey].iteritems(): + for outDataKey, outDataConf in iteritems(self._eventCountConf[inDataKey]): if fnmatch.fnmatch(outData, outDataKey): msg.info('Matched output data type {outData} to {outDatakey} by globbing'.format(outData=outData, outDatakey=outDataKey)) outDataKey = outData @@ -976,27 +986,27 @@ class eventMatch(object): msg.debug('Event count check for {inData} to {outData} is {checkConf}'.format(inData=inData, outData=outData, checkConf=checkConf)) # Do the check for thsi input/output combination - if checkConf is 'match': + if checkConf == 'match': # We need an exact match if neventsOutData == expectedEvents: msg.info("Event count check for {inData} to {outData} passed: all processed events found ({neventsOutData} output events)".format(inData=inData, outData=outData, neventsOutData=neventsOutData)) else: raise trfExceptions.TransformValidationException(trfExit.nameToCode('TRF_EXEC_VALIDATION_EVENTCOUNT'), 'Event count check for {inData} to {outData} failed: found {neventsOutData} events, expected {expectedEvents}'.format(inData=inData, outData=outData, neventsOutData=neventsOutData, expectedEvents=expectedEvents)) - elif checkConf is 'filter': + elif checkConf == 'filter': if neventsOutData <= expectedEvents and neventsOutData >= 0: msg.info("Event count check for {inData} to {outData} passed: found ({neventsOutData} output events selected from {expectedEvents} processed events)".format(inData=inData, outData=outData, neventsOutData=neventsOutData, expectedEvents=expectedEvents)) else: raise trfExceptions.TransformValidationException(trfExit.nameToCode('TRF_EXEC_VALIDATION_EVENTCOUNT'), 'Event count check for {inData} to {outData} failed: found {neventsOutData} events, expected from 0 to {expectedEvents}'.format(inData=inData, outData=outData, neventsOutData=neventsOutData, expectedEvents=expectedEvents)) - elif checkConf is 'minEff': + elif checkConf == 'minEff': if neventsOutData >= int(expectedEvents * self._evAccEff) and neventsOutData <= expectedEvents: msg.info("Event count check for {inData} to {outData} passed: found ({neventsOutData} output events selected from {expectedEvents} processed events)".format(inData=inData, outData=outData, neventsOutData=neventsOutData, expectedEvents=expectedEvents)) else: raise trfExceptions.TransformValidationException(trfExit.nameToCode('TRF_EXEC_VALIDATION_EVENTCOUNT'), 'Event count check for {inData} to {outData} failed: found {neventsOutData} events, expected from {minEvents} to {expectedEvents}'.format(inData=inData, outData=outData, neventsOutData=neventsOutData, minEvents=int(expectedEvents * self._evAccEff), expectedEvents=expectedEvents)) - elif isinstance(checkConf, (float, int, long)): + elif isinstance(checkConf, (float, int)): checkConf = float(checkConf) if checkConf < 0.0 or checkConf > 1.0: raise trfExceptions.TransformValidationException(trfExit.nameToCode('TRF_EXEC_VALIDATION_EVENTCOUNT'), diff --git a/Tools/PyJobTransforms/scripts/Asetup_report.py b/Tools/PyJobTransforms/scripts/Asetup_report.py index d8ca6c233ce..e67d8f7b399 100755 --- a/Tools/PyJobTransforms/scripts/Asetup_report.py +++ b/Tools/PyJobTransforms/scripts/Asetup_report.py @@ -2,5 +2,6 @@ # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS 
collaboration # +from __future__ import print_function from PyJobTransforms.trfUtils import asetupReport -print asetupReport() +print(asetupReport()) diff --git a/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py b/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py index 1558a9a4723..20154437b37 100755 --- a/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py +++ b/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py @@ -6,6 +6,7 @@ # N.B. Do need clarification as to if AODtoDPD is ever run in parallel with AOD merging # @version $Id: AODMerge_tf.py 530225 2012-12-12 18:16:17Z graemes $ +from __future__ import print_function import sys import time @@ -38,7 +39,7 @@ def main(): def getTransform(): executorSet = set() - print type(executorSet) + print(type(executorSet)) executorSet.add(athenaExecutor(name = 'EVNTMerge', skeletonFile = 'PyJobTransforms/skeleton.EVNTMerge.py',inData = ['EVNT'], outData = ['EVNT_MRG'])) trf = transform(executor = executorSet) diff --git a/Tools/PyJobTransforms/scripts/GetTfCommand.py b/Tools/PyJobTransforms/scripts/GetTfCommand.py index 36c6bb41009..4f4e0690af8 100755 --- a/Tools/PyJobTransforms/scripts/GetTfCommand.py +++ b/Tools/PyJobTransforms/scripts/GetTfCommand.py @@ -5,6 +5,7 @@ ## GetTfCommand.py - prints the job transform command accociated with an AMI tag. # $Id$ +from __future__ import print_function import sys import argparse @@ -33,14 +34,14 @@ def main(): try: tag = TagInfo(args['AMI'], suppressNonJobOptions) - except TransformAMIException, e: - print 'An AMI exception was raised when trying to resolve the tag {0}.'.format(args['AMI']) - print 'Exception message was: {0}'.format(e.errMsg) - print 'Note that you need both suitable credentials to access AMI and access to the panda database (only works from inside CERN) for GetTfCommand.py to work.' + except TransformAMIException as e: + print('An AMI exception was raised when trying to resolve the tag {0}.'.format(args['AMI'])) + print('Exception message was: {0}'.format(e.errMsg)) + print('Note that you need both suitable credentials to access AMI and access to the panda database (only works from inside CERN) for GetTfCommand.py to work.') sys.exit(1) if not 'printOnlyCmdLine' in args: - print tag + print(tag) if 'argdict' in args: tag.dump(args['argdict']) @@ -48,7 +49,7 @@ def main(): # only print the command line, allows stuff like # pathena --trf "`GetTfCommand --AMI q1234 --printOnlyCmdLine` --inputFile bla.input --maxEvents 42" trfCmdLine = tag.trfs[0].name + " " + tag.trfs[0]._argsToString(tag.trfs[0].physics) - print trfCmdLine.replace('"', '\\' + '"') + print(trfCmdLine.replace('"', '\\' + '"')) if __name__ == '__main__': diff --git a/Tools/PyJobTransforms/scripts/Merge_tf.py b/Tools/PyJobTransforms/scripts/Merge_tf.py index 3ea748ae888..f93a3ad79ce 100755 --- a/Tools/PyJobTransforms/scripts/Merge_tf.py +++ b/Tools/PyJobTransforms/scripts/Merge_tf.py @@ -82,7 +82,7 @@ def getTransform(): simStepSet.add(athenaExecutor(name = 'HITSMerge', substep="hitsmerge", skeletonFile = 'SimuJobTransforms/skeleton.HITSMerge.py', tryDropAndReload = False, inData = ['HITS'], outData = ['HITS_MRG'])) trf.appendToExecutorSet(list(simStepSet)[0]) - except ImportError, e: + except ImportError as e: msg.warning('Failed to import simulation arguments ({0}). 
HITSMerge will not be available.'.format(e)) diff --git a/Tools/PyJobTransforms/scripts/ScanLog.py b/Tools/PyJobTransforms/scripts/ScanLog.py index ccc6b75d7f9..58b7ca7eddb 100755 --- a/Tools/PyJobTransforms/scripts/ScanLog.py +++ b/Tools/PyJobTransforms/scripts/ScanLog.py @@ -2,13 +2,14 @@ # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +from __future__ import print_function import sys -print >>sys.stderr, ''' +print(''' This script is deprecated. Please use ValidateFiles_tf.py --logfile MYLOGFILE instead. -''' +''', file=sys.stderr) sys.exit(1) diff --git a/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py b/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py index 5f7bba1268c..d7d28be496a 100755 --- a/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py +++ b/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py @@ -56,7 +56,7 @@ def _getTransformsFromPATH(): def ___patchParams(d, target1, target2): - if d.has_key('listtype'): + if 'listtype' in d: listtype = d['listtype'] del d['listtype'] @@ -68,9 +68,9 @@ def ___patchParams(d, target1, target2): def __patchParams(d): ########################################################################## - if d.has_key('type') and d['type'].lower() == 'substep': + if 'type' in d and d['type'].lower() == 'substep': - if d.has_key('substeptype'): + if 'substeptype' in d: substeptype = d['substeptype'] del d['substeptype'] @@ -88,13 +88,13 @@ def __patchParams(d): ########################################################################## - if d.has_key('type') and (not d['type'] or d['type'].lower() == 'none'): + if 'type' in d and (not d['type'] or d['type'].lower() == 'none'): del d['type'] - if d.has_key('subtype') and (not d['subtype'] or d['subtype'].lower() == 'none'): + if 'subtype' in d and (not d['subtype'] or d['subtype'].lower() == 'none'): del d['subtype'] - if d.has_key('subsubtype') and (not d['subsubtype'] or d['subsubtype'].lower() == 'none'): + if 'subsubtype' in d and (not d['subsubtype'] or d['subsubtype'].lower() == 'none'): del d['subsubtype'] ############################################################################## diff --git a/Tools/PyJobTransforms/share/UseFrontier.py b/Tools/PyJobTransforms/share/UseFrontier.py index a9179877102..c5b1e2659d7 100644 --- a/Tools/PyJobTransforms/share/UseFrontier.py +++ b/Tools/PyJobTransforms/share/UseFrontier.py @@ -1,3 +1,4 @@ +from __future__ import print_function ## @brief Switch database to using FRONTIER, but with a fallback # to DBRelease if FRONTIER_SERVER is undefined (e.g., on HPC) @@ -6,8 +7,8 @@ # $Id: UseFrontier.py 605683 2014-07-09 17:22:17Z graemes $ if(os.environ.get('FRONTIER_SERVER')): - print 'UseFrontier.py: Enabling FRONTIER DB access' + print('UseFrontier.py: Enabling FRONTIER DB access') from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc ServiceMgr+=DBReplicaSvc(COOLSQLiteVetoPattern="DBRelease") else: - print 'UseFrontier.py: Using default DB access' \ No newline at end of file + print('UseFrontier.py: Using default DB access') \ No newline at end of file diff --git a/Tools/PyJobTransforms/test/test_AtlasG4_SimTTBar_tf.py b/Tools/PyJobTransforms/test/test_AtlasG4_SimTTBar_tf.py index 8d497f9402c..d8d67b6933f 100755 --- a/Tools/PyJobTransforms/test/test_AtlasG4_SimTTBar_tf.py +++ b/Tools/PyJobTransforms/test/test_AtlasG4_SimTTBar_tf.py @@ -44,9 +44,9 @@ class SimTTBar_tftest(unittest.TestCase): dataDict = pyJobReportToFileDict(md) # Change in SimuJobTransforms, but be compatible with type = hits and HITS dataKey = None - if 
'hits' in dataDict.keys(): + if 'hits' in dataDict: dataKey = 'hits' - elif 'HITS' in dataDict.keys(): + elif 'HITS' in dataDict: dataKey = 'HITS' self.assertNotEqual(dataKey, None) self.assertEqual(dataDict[dataKey]['subFiles'][0]['nentries'], 2) diff --git a/Tools/PyJobTransforms/test/test_Reco_AthenaMP_tf.py b/Tools/PyJobTransforms/test/test_Reco_AthenaMP_tf.py index 775f68bbee8..737e7939f20 100755 --- a/Tools/PyJobTransforms/test/test_Reco_AthenaMP_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_AthenaMP_tf.py @@ -42,9 +42,9 @@ class Reco_tfAthenaMPtest(unittest.TestCase): md = json.load(jr) self.assertEqual(isinstance(md, dict), True) dataDict = pyJobReportToFileDict(md) - self.assertTrue('ESD' in dataDict.keys()) - self.assertTrue('AOD' in dataDict.keys()) - self.assertTrue('HIST' in dataDict.keys()) + self.assertTrue('ESD' in dataDict) + self.assertTrue('AOD' in dataDict) + self.assertTrue('HIST' in dataDict) self.assertTrue(len(dataDict['ESD']['subFiles']), 4) self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 24) self.assertEqual(dataDict['HIST']['subFiles'][0]['nentries'], 24) diff --git a/Tools/PyJobTransforms/test/test_Reco_EOS_tf.py b/Tools/PyJobTransforms/test/test_Reco_EOS_tf.py index 2fc0ecb0fc3..d81a1b05e98 100755 --- a/Tools/PyJobTransforms/test/test_Reco_EOS_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_EOS_tf.py @@ -45,7 +45,7 @@ class Reco_tftest(unittest.TestCase): md = json.load(jr) self.assertEqual(isinstance(md, dict), True) dataDict = pyJobReportToFileDict(md) - self.assertTrue('ESD' in dataDict.keys()) + self.assertTrue('ESD' in dataDict) self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10) self.assertEqual(dataDict['ESD']['subFiles'][0]['geometry'], 'ATLAS-GEO-20-00-01') self.assertEqual(dataDict['ESD']['subFiles'][0]['conditions_tag'], 'COMCOND-BLKPA-006-01') diff --git a/Tools/PyJobTransforms/test/test_Reco_MC_tf.py b/Tools/PyJobTransforms/test/test_Reco_MC_tf.py index 8f9a379b63f..15a055c4810 100755 --- a/Tools/PyJobTransforms/test/test_Reco_MC_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_MC_tf.py @@ -43,9 +43,9 @@ class Reco_tftest(unittest.TestCase): md = json.load(jr) self.assertEqual(isinstance(md, dict), True) dataDict = pyJobReportToFileDict(md) - self.assertTrue('ESD' in dataDict.keys()) - self.assertTrue('AOD' in dataDict.keys()) - self.assertTrue('HIST' in dataDict.keys()) + self.assertTrue('ESD' in dataDict) + self.assertTrue('AOD' in dataDict) + self.assertTrue('HIST' in dataDict) self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10) self.assertEqual(dataDict['ESD']['subFiles'][0]['geometry'], 'ATLAS-GEO-20-00-01') self.assertEqual(dataDict['ESD']['subFiles'][0]['conditions_tag'], 'COMCOND-BLKPA-006-01') diff --git a/Tools/PyJobTransforms/test/test_Reco_Tier0_tf.py b/Tools/PyJobTransforms/test/test_Reco_Tier0_tf.py index f4b7c25ea48..33d8f7327fe 100755 --- a/Tools/PyJobTransforms/test/test_Reco_Tier0_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_Tier0_tf.py @@ -1,3 +1,5 @@ +from future import standard_library +standard_library.install_aliases() #! 
/usr/bin/env python # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration @@ -14,7 +16,7 @@ import json import subprocess import os import os.path -import cPickle as pickle +import pickle as pickle import sys import unittest @@ -63,7 +65,7 @@ class RecoTier0test(unittest.TestCase): md = json.load(jr) self.assertEqual(isinstance(md, dict), True) dataDict = pyJobReportToFileDict(md) - self.assertTrue('ESD' in dataDict.keys()) + self.assertTrue('ESD' in dataDict) self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 5) self.assertEqual(dataDict['ESD']['subFiles'][0]['name'], 'newESD.pool.root') diff --git a/Tools/PyJobTransforms/test/test_Reco_q222_tf.py b/Tools/PyJobTransforms/test/test_Reco_q222_tf.py index f77828d1727..9de492879b3 100755 --- a/Tools/PyJobTransforms/test/test_Reco_q222_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_q222_tf.py @@ -36,7 +36,7 @@ class Reco_tftest(unittest.TestCase): with open('jobReport.json') as jr: md = json.load(jr) self.assertEqual(isinstance(md, dict), True) - self.assertTrue('resource' in md.keys()) + self.assertTrue('resource' in md) self.assertEqual(md['resource']['executor']['AODtoTAG']['nevents'], 2) self.assertEqual(md['resource']['executor']['ESDtoAOD']['nevents'], 2) self.assertEqual(md['resource']['executor']['RAWtoESD']['nevents'], 2) diff --git a/Tools/PyJobTransforms/test/test_Reco_tf.py b/Tools/PyJobTransforms/test/test_Reco_tf.py index 95efbe3a630..0a025ba7b6b 100755 --- a/Tools/PyJobTransforms/test/test_Reco_tf.py +++ b/Tools/PyJobTransforms/test/test_Reco_tf.py @@ -48,10 +48,10 @@ class Reco_tftest(unittest.TestCase): md = json.load(jr) self.assertEqual(isinstance(md, dict), True) dataDict = pyJobReportToFileDict(md) - self.assertTrue('ESD' in dataDict.keys()) - self.assertTrue('AOD' in dataDict.keys()) - self.assertTrue('HIST' in dataDict.keys()) - self.assertTrue('TAG' in dataDict.keys()) + self.assertTrue('ESD' in dataDict) + self.assertTrue('AOD' in dataDict) + self.assertTrue('HIST' in dataDict) + self.assertTrue('TAG' in dataDict) self.assertEqual(dataDict['ESD']['subFiles'][0]['nentries'], 10) self.assertEqual(dataDict['ESD']['subFiles'][0]['name'], 'my.ESD.pool.root') self.assertEqual(dataDict['AOD']['subFiles'][0]['nentries'], 10) diff --git a/Tools/PyJobTransforms/test/test_trfArgClasses.py b/Tools/PyJobTransforms/test/test_trfArgClasses.py index ce514e123aa..eb8800140c4 100755 --- a/Tools/PyJobTransforms/test/test_trfArgClasses.py +++ b/Tools/PyJobTransforms/test/test_trfArgClasses.py @@ -8,6 +8,9 @@ # @version $Id: test_trfArgClasses.py 770616 2016-08-29 14:17:19Z uworlika $ # @note Tests of ATLAS specific file formats moved to test_trfArgClassesATLAS.py +from __future__ import print_function +from builtins import str +from builtins import object import unittest import logging @@ -391,8 +394,8 @@ class argConditionsTests(unittest.TestCase): return [{'globalTag': 'TEST'}] def getFakeClient(): return client - amiClient = argSubstepConditions.value.fset.func_globals['getAMIClient'] - argSubstepConditions.value.fset.func_globals['getAMIClient'] = getFakeClient() + amiClient = argSubstepConditions.value.fset.__globals__['getAMIClient'] + argSubstepConditions.value.fset.__globals__['getAMIClient'] = getFakeClient() return amiClient def test_condStr(self): @@ -419,7 +422,7 @@ class argConditionsTests(unittest.TestCase): self.assertEqual(cond2.value, {'one': 'apples', 'two': 'bananas'}) self.tear_down(client) def tear_down(self, client): - argSubstepConditions.value.fset.func_globals['getAMIClient'] = 
client + argSubstepConditions.value.fset.__globals__['getAMIClient'] = client @@ -427,19 +430,19 @@ class argFileTests(unittest.TestCase): def setUp(self): # In python 2.7 support for multiple 'with' expressions becomes available with open('file1', 'w') as f1: - print >>f1, 'This is test file 1' + print('This is test file 1', file=f1) with open('file2', 'w') as f2: - print >>f2, 'Short file 2' + print('Short file 2', file=f2) with open('file3', 'w') as f3: - print >>f3, 80*'-', 'Long file 3', 80*'-' + print(80*'-', 'Long file 3', 80*'-', file=f3) with open('file4', 'w') as f4: - print >>f4, 'Short file number 4' + print('Short file number 4', file=f4) with open('prefix.prodsysfile._001.suffix.1', 'w') as f1: - print >>f1, 'This is prodsys test file 1' + print('This is prodsys test file 1', file=f1) with open('prefix.prodsysfile._002.suffix.4', 'w') as f2: - print >>f2, 'Short prodsys file 2' + print('Short prodsys file 2', file=f2) with open('prefix.prodsysfile._003.suffix.7', 'w') as f3: - print >>f3, 80*'-', 'Long prodsys file 3', 80*'-' + print(80*'-', 'Long prodsys file 3', 80*'-', file=f3) self.mySingleFile = argFile(['file1'], io='output') self.myMultiFile = argFile(['file1', 'file2', 'file3'], io='input') diff --git a/Tools/PyJobTransforms/test/test_trfArgClassesATLAS.py b/Tools/PyJobTransforms/test/test_trfArgClassesATLAS.py index 3263ce4978e..d143a8ef4a4 100755 --- a/Tools/PyJobTransforms/test/test_trfArgClassesATLAS.py +++ b/Tools/PyJobTransforms/test/test_trfArgClassesATLAS.py @@ -9,6 +9,7 @@ # @note Tests of ATLAS specific file formats (that thus rely on other # parts of Athena) live here +from __future__ import print_function import sys import unittest @@ -47,29 +48,29 @@ class argPOOLFiles(unittest.TestCase): testFile = '/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root' os.stat(testFile) esdFile = argPOOLFile(testFile, io = 'input', type='esd') - self.assertEqual(esdFile.getMetadata(metadataKeys = tuple(athFileInterestingKeys)), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'file_type': 'pool', 'file_guid': '0CABA22E-9096-E011-AE25-0030487C8CE6', 'nentries': 10L, 'file_size': 17033381}}) + self.assertEqual(esdFile.getMetadata(metadataKeys = tuple(athFileInterestingKeys)), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'file_type': 'pool', 'file_guid': '0CABA22E-9096-E011-AE25-0030487C8CE6', 'nentries': 10, 'file_size': 17033381}}) esdFile = argPOOLFile(testFile, io = 'output', type='esd') - self.assertEqual(esdFile.getMetadata(), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'_exists': True, 'file_type': 'pool', 'file_guid': '0CABA22E-9096-E011-AE25-0030487C8CE6', 'file_size': 17033381, 'integrity': True, 'nentries': 10L}}) + self.assertEqual(esdFile.getMetadata(), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'_exists': True, 'file_type': 'pool', 'file_guid': '0CABA22E-9096-E011-AE25-0030487C8CE6', 'file_size': 17033381, 'integrity': True, 'nentries': 10}}) self.assertEqual(esdFile.getMetadata(metadataKeys = ('nentries',)), 
{'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.ESD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'nentries': 10}}) self.assertEqual(esdFile.prodsysDescription['type'],'file') except OSError: # With python 2.7 this should call the self.skip() method - print >>sys.stderr, 'WARNING Skipping test_argPOOLFileMetadata_ESD - stat on AFS test file failed' + print('WARNING Skipping test_argPOOLFileMetadata_ESD - stat on AFS test file failed', file=sys.stderr) def test_argPOOLFileMetadata_AOD(self): try: testFile = '/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root' os.stat(testFile) aodFile = argPOOLFile(testFile, io = 'input', type='aod') - self.assertEqual(aodFile.getMetadata(metadataKeys = tuple(athFileInterestingKeys)), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'file_type': 'pool', 'file_guid': '6E1FE6F0-9096-E011-9DDA-0030487C8CE6', 'nentries': 10L, 'file_size': 4673269}}) + self.assertEqual(aodFile.getMetadata(metadataKeys = tuple(athFileInterestingKeys)), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'file_type': 'pool', 'file_guid': '6E1FE6F0-9096-E011-9DDA-0030487C8CE6', 'nentries': 10, 'file_size': 4673269}}) aodFile = argPOOLFile(testFile, io = 'output', type='aod') - self.assertEqual(aodFile.getMetadata(),{'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'_exists': True, 'file_type': 'pool', 'file_guid': '6E1FE6F0-9096-E011-9DDA-0030487C8CE6', 'file_size': 4673269, 'integrity': True, 'nentries': 10L}}) + self.assertEqual(aodFile.getMetadata(),{'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'_exists': True, 'file_type': 'pool', 'file_guid': '6E1FE6F0-9096-E011-9DDA-0030487C8CE6', 'file_size': 4673269, 'integrity': True, 'nentries': 10}}) self.assertEqual(aodFile.getMetadata(metadataKeys = ('nentries',)), {'/afs/cern.ch/atlas/offline/test/data11_7TeV.00182796.physics_JetTauEtmiss.merge.AOD._lb0300._SFO-10._0001.1.10evts.16.6.6.4.pool.root': {'nentries': 10}}) self.assertEqual(aodFile.prodsysDescription['type'],'file') self.assertTrue(aodFile.prodsysDescription['subtype']=='AOD') except OSError: # With python 2.7 this should call the self.skip() method - print >>sys.stderr, 'WARNING Skipping test_argPOOLFileMetadata_AOD - stat on AFS test file failed' + print('WARNING Skipping test_argPOOLFileMetadata_AOD - stat on AFS test file failed', file=sys.stderr) class argTAGFiles(unittest.TestCase): def test_argTAGFileMetadata(self): @@ -77,12 +78,12 @@ class argTAGFiles(unittest.TestCase): testFile = '/afs/cern.ch/work/g/graemes/ddm/data12_8TeV.00207865.physics_JetTauEtmiss.merge.TAG.r4065_p1278_tid01030417_00/TAG.01030417._000001.pool.root.1' os.stat(testFile) tagFile = argTAGFile(testFile, io = 'input', type='tag') - self.assertEqual(tagFile.getMetadata(), {'/afs/cern.ch/work/g/graemes/ddm/data12_8TeV.00207865.physics_JetTauEtmiss.merge.TAG.r4065_p1278_tid01030417_00/TAG.01030417._000001.pool.root.1': {'_exists': True, 'file_type': 'tag', 'file_guid': '3CCAD8D2-9195-5845-857B-550D616962F9', 'file_size': 12222088, 'integrity': True, 'nentries': 38112L}}) - self.assertEqual(tagFile.getMetadata(metadataKeys = 
('nentries',)), {'/afs/cern.ch/work/g/graemes/ddm/data12_8TeV.00207865.physics_JetTauEtmiss.merge.TAG.r4065_p1278_tid01030417_00/TAG.01030417._000001.pool.root.1': {'nentries': 38112L}}) + self.assertEqual(tagFile.getMetadata(), {'/afs/cern.ch/work/g/graemes/ddm/data12_8TeV.00207865.physics_JetTauEtmiss.merge.TAG.r4065_p1278_tid01030417_00/TAG.01030417._000001.pool.root.1': {'_exists': True, 'file_type': 'tag', 'file_guid': '3CCAD8D2-9195-5845-857B-550D616962F9', 'file_size': 12222088, 'integrity': True, 'nentries': 38112}}) + self.assertEqual(tagFile.getMetadata(metadataKeys = ('nentries',)), {'/afs/cern.ch/work/g/graemes/ddm/data12_8TeV.00207865.physics_JetTauEtmiss.merge.TAG.r4065_p1278_tid01030417_00/TAG.01030417._000001.pool.root.1': {'nentries': 38112}}) self.assertEqual(tagFile.prodsysDescription['type'],'file') except OSError: # With python 2.7 this should call the self.skip() method - print >>sys.stderr, 'WARNING Skipping test_argTAGFileMetadata - stat on AFS test file failed' + print('WARNING Skipping test_argTAGFileMetadata - stat on AFS test file failed', file=sys.stderr) class argBSFiles(unittest.TestCase): def tearDown(self): @@ -103,7 +104,7 @@ class argBSFiles(unittest.TestCase): self.assertEqual(rawFile.prodsysDescription['type'],'file') except OSError: # With python 2.7 this should call the self.skip() method - print >>sys.stderr, 'WARNING Skipping test_argAthenaFileMetadata - stat on AFS test file failed' + print('WARNING Skipping test_argAthenaFileMetadata - stat on AFS test file failed', file=sys.stderr) def test_argBSMultiFileMetadata(self): try: @@ -121,7 +122,7 @@ class argBSFiles(unittest.TestCase): self.assertEqual(rawFile.prodsysDescription['type'], 'file') except OSError: # With python 2.7 this should call the self.skip() method - print >>sys.stderr, 'WARNING Skipping test_argAthenaMultiFileMetadata - stat on AFS test file failed' + print('WARNING Skipping test_argAthenaMultiFileMetadata - stat on AFS test file failed', file=sys.stderr) if __name__ == '__main__': diff --git a/Tools/PyJobTransforms/test/test_trfEnv.py b/Tools/PyJobTransforms/test/test_trfEnv.py index c9adf5050e2..2218597c656 100755 --- a/Tools/PyJobTransforms/test/test_trfEnv.py +++ b/Tools/PyJobTransforms/test/test_trfEnv.py @@ -7,6 +7,8 @@ # @author atlas-comp-transforms-dev@cern.ch # @version $Id: test_trfEnv.py 588222 2014-03-18 14:37:06Z graemes $ +from __future__ import print_function +from builtins import str import sys import unittest @@ -46,8 +48,8 @@ class trfEnvTests(unittest.TestCase): argDict = {'env': argSubstepList(["KEY1=VALUE1","KEY2=VALUE2","KEY3=VALUE3"]), 'imf': argSubstepBool('True')} envUp.setStandardEnvironment(argDict) - print envUp.values - print envUp._envdict + print(envUp.values) + print(envUp._envdict) self.assertTrue("KEY1" in envUp._envdict) self.assertTrue("LD_PRELOAD" in envUp._envdict) diff --git a/Tools/PyJobTransforms/test/test_trfMPTools.py b/Tools/PyJobTransforms/test/test_trfMPTools.py index b610de6ef0e..aa585d4bb7b 100755 --- a/Tools/PyJobTransforms/test/test_trfMPTools.py +++ b/Tools/PyJobTransforms/test/test_trfMPTools.py @@ -7,6 +7,7 @@ # @author graeme.andrew.stewart@cern.ch # @version $Id: test_trfMPTools.py 772406 2016-09-09 12:10:12Z mavogel $ +from __future__ import print_function import os import subprocess import unittest @@ -90,7 +91,7 @@ class AthenaMPOutputParseTests(unittest.TestCase): open(os.path.join(delement[0], fname), "w") with open("athenaMP-outputs-RAWtoESD-r2e", "w") as mpoutput: - print >>mpoutput, """<?xml version="1.0" 
encoding="utf-8"?> + print("""<?xml version="1.0" encoding="utf-8"?> <athenaFileReport> <Files OriginalName="data15_13TeV.00267167.physics_Main.recon.ESD.f594._lb0176._SFO-1._0002"> <File description="POOL" mode="WRITE|CREATE" name="{CWD}/athenaMP-workers-RAWtoESD-r2e/worker_0/data15_13TeV.00267167.physics_Main.recon.ESD.f594._lb0176._SFO-1._0002" shared="True" technology="ROOT"/> @@ -113,7 +114,7 @@ class AthenaMPOutputParseTests(unittest.TestCase): <File description="HIST" mode="WRITE" name="{CWD}/athenaMP-workers-RAWtoESD-r2e/worker_7/tmp.HIST_ESD_INT" shared="False" technology="ROOT"/> </Files> </athenaFileReport> -""".format(CWD=os.getcwd()) +""".format(CWD=os.getcwd()), file=mpoutput) def tearDown(self): subprocess.call(['rm -fr athenaMP* data15* tmp.*'], shell=True) diff --git a/Tools/PyJobTransforms/test/test_trfReports.py b/Tools/PyJobTransforms/test/test_trfReports.py index 5470de4642f..d709c2dd5ad 100755 --- a/Tools/PyJobTransforms/test/test_trfReports.py +++ b/Tools/PyJobTransforms/test/test_trfReports.py @@ -7,6 +7,7 @@ # @author graeme.andrew.stewart@cern.ch # @version $Id: test_trfReports.py 745237 2016-05-06 02:33:15Z ssnyder $ +from __future__ import print_function import unittest import logging @@ -30,11 +31,11 @@ class trfFileReportUnitTests(unittest.TestCase): def setUp(self): # In python 2.7 support for multiple 'with' expressions becomes available with open('file1', 'w') as f1: - print >> f1, 'This is test file 1' + print('This is test file 1', file=f1) with open('file2', 'w') as f2: - print >> f2, 'Short file 2' + print('Short file 2', file=f2) with open('file3', 'w') as f3: - print >> f3, 80 * '-', 'Long file 3', 80 * '-' + print(80 * '-', 'Long file 3', 80 * '-', file=f3) # For test reports, use manually set GUIDs self.mySingleFile = trfArgClasses.argFile(['file1'], name = 'inputTEST_SINGLEFile', io = 'input', guid = {'file1': '05ACBDD0-5F5F-4E2E-974A-BBF4F4FE6F0B'}) diff --git a/Tools/PyJobTransforms/test/test_trfSubstepIntegration.py b/Tools/PyJobTransforms/test/test_trfSubstepIntegration.py index 8bf6ac52a58..051557d040b 100755 --- a/Tools/PyJobTransforms/test/test_trfSubstepIntegration.py +++ b/Tools/PyJobTransforms/test/test_trfSubstepIntegration.py @@ -7,6 +7,7 @@ # @author atlas-comp-transforms-dev@cern.ch # @version $Id: test_trfSubstepIntegration.py 649424 2015-02-24 22:06:20Z graemes $ +from __future__ import print_function import re import subprocess import unittest @@ -26,7 +27,7 @@ class trfExitTests(unittest.TestCase): p = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) p.wait() for line in p.stdout: - print line, + print(line, end=' ') m = re.match(r"Executor Step: (\w+)", line) if m: steps.append(m.group(1)) @@ -41,7 +42,7 @@ class trfExitTests(unittest.TestCase): p = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = 1) p.wait() for line in p.stdout: - print line, + print(line, end=' ') m = re.match(r"Executor Step: (\w+)", line) if m: steps.append(m.group(1)) diff --git a/Tools/PyJobTransforms/test/test_trfUtils.py b/Tools/PyJobTransforms/test/test_trfUtils.py index 4be6920d863..1292fea6578 100755 --- a/Tools/PyJobTransforms/test/test_trfUtils.py +++ b/Tools/PyJobTransforms/test/test_trfUtils.py @@ -7,6 +7,7 @@ # @author graeme.andrew.stewart@cern.ch # @version $Id: test_trfUtils.py 711194 2015-11-27 14:44:03Z mavogel $ +from __future__ import print_function import unittest import os import pwd @@ -29,16 +30,16 @@ class 
trfUtilsInfanticide(unittest.TestCase): self.exitWrapper = "exit.sh" waitFile = open(self.waitWrapper, "w") - print >>waitFile, "#!/bin/sh" - print >>waitFile, "sleep 100" + print("#!/bin/sh", file=waitFile) + print("sleep 100", file=waitFile) waitFile.close() - os.chmod(self.waitWrapper, 00755) + os.chmod(self.waitWrapper, 0o0755) exitFile = open(self.exitWrapper, "w") - print >>exitFile, "#!/bin/sh" - print >>exitFile, "sleep 100 &" + print("#!/bin/sh", file=exitFile) + print("sleep 100 &", file=exitFile) exitFile.close() - os.chmod(self.exitWrapper, 00755) + os.chmod(self.exitWrapper, 0o0755) def teardown(self): diff --git a/Tools/PyJobTransforms/test/test_trfUtilsDBRelease.py b/Tools/PyJobTransforms/test/test_trfUtilsDBRelease.py index e275acaeb17..46d3a4762a9 100755 --- a/Tools/PyJobTransforms/test/test_trfUtilsDBRelease.py +++ b/Tools/PyJobTransforms/test/test_trfUtilsDBRelease.py @@ -50,7 +50,9 @@ class DBReleasetest(unittest.TestCase): def test_tarball(self): try: os.symlink('/afs/cern.ch/work/g/graemes/ddm/ddo.000001.Atlas.Ideal.DBRelease.v220701/DBRelease-22.7.1.tar.gz', 'DBRelease-22.7.1.tar.gz') - except (IOError, OSError) as (errno, errMsg): + except (IOError, OSError) as xxx_todo_changeme: + # Ignore file exists - if that happens the link was already there + (errno, errMsg) = xxx_todo_changeme.args # Ignore file exists - if that happens the link was already there if errno == 17: pass diff --git a/Tools/PyJobTransforms/test/test_trfValidation.py b/Tools/PyJobTransforms/test/test_trfValidation.py index 76364aad554..5703916b360 100755 --- a/Tools/PyJobTransforms/test/test_trfValidation.py +++ b/Tools/PyJobTransforms/test/test_trfValidation.py @@ -7,6 +7,8 @@ # @author graeme.andrew.stewart@cern.ch # @version $Id: test_trfValidation.py 763940 2016-07-24 13:46:01Z uworlika $ +from __future__ import print_function +from builtins import range import unittest import logging @@ -506,27 +508,27 @@ class athenaLogFileReportTests(unittest.TestCase): 16:32:39 IOVDbSvc INFO Total payload read from COOL: 456 bytes in (( 7.89 ))s''' with open('file1', 'w') as f1: - print >> f1, 'This is test file 1 w/o meaning' + print('This is test file 1 w/o meaning', file=f1) with open('file2', 'w') as f2: - print >> f2, testLogExcerpt + print(testLogExcerpt, file=f2) with open('file3', 'w') as f3: - print >> f3, testLogExcerpt - print >> f3, testErrorExcerpt + print(testLogExcerpt, file=f3) + print(testErrorExcerpt, file=f3) with open('file4', 'w') as f4: - print >> f4, testBadAlloc + print(testBadAlloc, file=f4) with open('file5', 'w') as f5: - print >> f5, testSuppressExcerpt + print(testSuppressExcerpt, file=f5) with open('file6', 'w') as f6: - print >> f6, testCoreDumpCurAlg + print(testCoreDumpCurAlg, file=f6) with open('file7', 'w') as f7: - print >> f7, testCoreDumpNoCurAlg + print(testCoreDumpNoCurAlg, file=f7) with open('file8', 'w') as f8: - print >> f8, testMissedBadAlloc + print(testMissedBadAlloc, file=f8) with open('file9', 'w') as f9: - print >> f9, testDbMonitor + print(testDbMonitor, file=f9) with open('file10', 'w') as f10: - print >> f10, testLogExcerptMP - print >> f10, testErrorExcerptMP + print(testLogExcerptMP, file=f10) + print(testErrorExcerptMP, file=f10) self.myFileReport1 = athenaLogFileReport('file1') self.myFileReport2 = athenaLogFileReport('file2') @@ -580,7 +582,7 @@ ERROR, but returned a StatusCode "SUCCESS"''' logFileName = 'logWithSubstepNameSerial' with open(logFileName, 'w') as logFile: - print >> logFile, testLogERRORwithSubstepNameSerial + 
print(testLogERRORwithSubstepNameSerial, file=logFile) logFileReportSerial = athenaLogFileReport(logfile=logFileName, substepName='RAWtoALL') expectedError = dict(level='ERROR', nLevel=logging.ERROR, @@ -600,7 +602,7 @@ ManagedAthenaTileMon reported an ERROR, but returned a StatusCode "SUCCESS"''' logFileName = 'logWithSubstepNameMP' with open(logFileName, 'w') as logFile: - print >> logFile, testLogERRORwithSubstepNameMP + print(testLogERRORwithSubstepNameMP, file=logFile) logFileReportMP = athenaLogFileReport(logfile=logFileName, substepName='RAWtoALL') expectedError = dict(level='ERROR', nLevel=logging.ERROR, @@ -617,7 +619,7 @@ ManagedAthenaTileMon reported an ERROR, but returned a StatusCode "SUCCESS"''' 'message': 'terminate after \'std::bad_alloc\'.'}}) def test_suppress(self): - print self.myFileReport5 + print(self.myFileReport5) self.assertEqual(self.myFileReport5._levelCounter['ERROR'], 100) self.assertEqual(len(self.myFileReport5._errorDetails['ERROR']), 10) pass @@ -631,7 +633,7 @@ ManagedAthenaTileMon reported an ERROR, but returned a StatusCode "SUCCESS"''' 'message': 'Segmentation fault: Event counter: unknown; Run: unknown; Evt: unknown; Current algorithm: unknown; Current Function: unknown'}}) def test_dbMonitor(self): - print self.myFileReport9 + print(self.myFileReport9) self.assertEqual(self.myFileReport9.dbMonitor(), {'bytes': 579, 'time': 12.45}) self.assertEqual(self.myFileReport8.dbMonitor(), None) -- GitLab
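Notes on the recurring futurize patterns applied above (illustrative sketches only, not part of the patch). The trfValidation.py and makeTrfJSONSignatures.py hunks replace dict.has_key(key) with "key in dict" and dict.iteritems() with iteritems(dict); a minimal sketch of that idiom, assuming iteritems comes from python-future's future.utils module (the corresponding import sits outside this excerpt), is:

from __future__ import print_function
from future.utils import iteritems  # assumed import; not shown in the hunks above

argdict = {'skipEvents': 10, 'maxEvents': -1}

# Python 2 only:  argdict.has_key('skipEvents')
# Python 2 and 3: plain membership test
if 'skipEvents' in argdict:
    print(argdict['skipEvents'])

# Python 2 only:  argdict.iteritems()
# Python 2 and 3: iteritems() from python-future, lazy on both interpreters
for key, value in iteritems(argdict):
    print(key, value)

# keys() only needs an explicit list() where an actual list is required,
# e.g. list(self._outEventDict) in the warning message above.
print(list(argdict))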
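The test files are likewise moved from the print statement to the print function and from the comma form of exception handling to the "as" form; the shape of those conversions (hypothetical messages, real syntax) is:

from __future__ import print_function
import os
import sys

# print >>sys.stderr, 'message'   ->   print('message', file=sys.stderr)
# print line,                     ->   print(line, end=' ')
print('WARNING Skipping test - input file unavailable', file=sys.stderr)

# except OSError, e:              ->   except OSError as e:
try:
    os.stat('/no/such/file')
except OSError as e:
    print('stat failed: {0}'.format(e), file=sys.stderr)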
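Finally, the hunks drop the Python 2 long type and old-style octal literals, and route the removed cPickle and commands modules through python-future's standard_library aliases; a compact sketch of those conversions, assuming the future package is installed, is:

from __future__ import print_function
from future import standard_library
standard_library.install_aliases()  # on Python 2 this also backports e.g. subprocess.getstatusoutput

import pickle      # replaces 'import cPickle as pickle'
import subprocess  # replaces the removed 'commands' module

# type(n) in (int, long)  ->  isinstance(n, int)
n = 38112
assert isinstance(n, int)

# 00755 (old octal literal)  ->  0o755 (the patch writes 0o0755, the same value)
permissions = 0o755

# cPickle round trip via the plain pickle module
data = pickle.dumps({'nentries': n, 'mode': permissions})
assert pickle.loads(data)['nentries'] == n

status, output = subprocess.getstatusoutput('echo hello')
print(status, output)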