diff --git a/Tools/PyJobTransformsCore/CMakeLists.txt b/Tools/PyJobTransformsCore/CMakeLists.txt
index be59de15822bebb774e74c82c26b8c1911960ee8..e20b18ac707acb926b04b839010b2507da049c60 100644
--- a/Tools/PyJobTransformsCore/CMakeLists.txt
+++ b/Tools/PyJobTransformsCore/CMakeLists.txt
@@ -6,8 +6,9 @@ atlas_subdir( PyJobTransformsCore )

 # Install files from the package:
-atlas_install_python_modules( python/*.py )
-atlas_install_scripts( share/slimmetadata share/checklog.py share/find_*.py share/trf_ls python/envutil.py )
+atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
+atlas_install_scripts( share/checklog.py share/find_*.py share/trf_ls POST_BUILD_CMD ${ATLAS_FLAKE8} )
+atlas_install_scripts( share/slimmetadata )

 atlas_install_generic( share/*.db DESTINATION share EXECUTABLE )
diff --git a/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py b/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py
index 18621634a2a2832d81ab5eada5fc6072e95e068a..47aadf82ffb18f06e60d68a182b98aa2c0b1e25b 100755
--- a/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py
+++ b/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py
@@ -268,7 +268,7 @@ class ErrorInfo( object ):
         if not tb: return None
         short_tb = []
         for frame_info in tb:
-            if not 'AthenaCommon/Include.py' in frame_info[ 0 ]:
+            if 'AthenaCommon/Include.py' not in frame_info[ 0 ]:
                 short_tb.append( frame_info )
         return short_tb

@@ -285,7 +285,7 @@ class ErrorInfo( object ):
         if self.code == 0:
             self.backtrace = None
             self.severity = None
-        elif self.severity == None:
+        elif self.severity is None:
             self.severity = FATAL

 ## Errors occur when certain events are being processed.
@@ -331,19 +331,18 @@ class ErrorInfo( object ):
 # @return List of ErrorPattern instances or @c None
 def getErrorPatterns( release ):
     global __errorPatterns
-    releaseType = type( release )
-    if releaseType.__name__ == 'str':
+    if isinstance(release, str):
         total_pats = [] # list of recognised error patterns
         for rel, pats in __errorPatterns.items():
             if rel.match( release ):
                 total_pats += pats
         return total_pats
-    elif releaseType == type( re.compile( 'test' ) ):
+    elif isinstance(release, re.Pattern):
         for rel, pats in __errorPatterns.items():
             if rel.pattern == release.pattern:
                 return pats
     else:
-        raise KeyError( 'getErrorPatterns() takes either a string or a compiled regular expression. Got an %s instead.' % releaseType.__name__ )
+        raise KeyError( 'getErrorPatterns() takes either a string or a compiled regular expression. Got an %s instead.' % type(release).__name__ )
     return None

 ## Retrieve error filter patterns for a given release.
@@ -352,19 +351,18 @@ def getErrorPatterns( release ):
 # @return List of compiled regular expression isntances or @c None
 def getIgnorePatterns( release ):
     global __ignorePatterns
-    releaseType = type( release )
-    if releaseType == str:
+    if isinstance(release, str):
         total_pats = [] # list of possible patterns to ignore
         for rel, pats in __ignorePatterns.items():
             if rel.match( release ):
                 total_pats += pats
         return total_pats
-    elif releaseType == type( re.compile( 'test' ) ):
+    elif isinstance(release, re.Pattern):
         for rel, pats in __ignorePatterns.items():
             if rel.pattern == release.pattern:
                 return pats
     else:
-        raise KeyError( 'getIgnorePatterns() takes either a string or a compiled regular expression. Got an %s instead.' % releaseType.__name__ )
+        raise KeyError( 'getIgnorePatterns() takes either a string or a compiled regular expression. Got an %s instead.' % type(release).__name__ )
     return None

 ## Add an ErrorPattern instance to a specific release.
@@ -659,7 +657,7 @@ def matchErrorPattern(line,release):
     if release:
         try:
             release3 = '.'.join(release.split('.')[:3])
-        except:
+        except Exception:
             release3 = release
         rels.insert(0,release3)
     for rel in rels:
@@ -690,7 +688,7 @@ def matchIgnorePattern(line,release):
         try:
             # reduce 4-digit to 3-digit release
             release3 = '.'.join(release.split('.')[:3])
-        except:
+        except Exception:
             release3 = release
         rels.insert(0,release3)
     for rel in rels:
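
Side note (illustration, not part of the patch): `isinstance(release, re.Pattern)` is the modern spelling of the old `type(re.compile('test'))` comparison, but `re.Pattern` only became a public name in the Python 3.7/3.8 era; on older interpreters the type still has to be obtained from a compiled pattern. A self-contained sketch of the dispatch used above (release strings are made up):

```python
import re

try:
    Pattern = re.Pattern            # public name on modern Python 3
except AttributeError:
    Pattern = type(re.compile(''))  # the spelling the patch replaces

def classify(release):
    """Mirror of the dispatch in getErrorPatterns()/getIgnorePatterns()."""
    if isinstance(release, str):
        return 'match-by-string'
    elif isinstance(release, Pattern):
        return 'match-by-compiled-regex'
    raise KeyError('takes either a string or a compiled regular expression. '
                   'Got a %s instead.' % type(release).__name__)

assert classify('15.6.3') == 'match-by-string'
assert classify(re.compile(r'15\..*')) == 'match-by-compiled-regex'
```
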
diff --git a/Tools/PyJobTransformsCore/python/CVSutil.py b/Tools/PyJobTransformsCore/python/CVSutil.py
deleted file mode 100755
index c9ad8eefc24227f7597438601b3dcba233f912f0..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/CVSutil.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
-
-def CVSkeywords( listOfKeyWords ):
-    """Take as input a list of strings containing CVS keywords of the form: \"$<name>:<value>$\"
-    It will return a dictionary with <name>,<value> as key,value pairs.
-    See cvs manual for possible keywords and their meaning."""
-    kwDict = {}
-    for kw in listOfKeyWords:
-        # CVS keywords are embedded between 2 '$' signs
-        dollar1 = kw.find('$')
-        dollar2 = kw.find('$',dollar1+1)
-        if dollar1 == -1 or dollar2 == -1:
-            print ("WARNING: %s is not a CVS keyword (it should have 2 '$' signs)" % kw)
-            continue
-        # get part in between the 2 '$' signs
-        cvskw = kw[dollar1+1:dollar2]
-        # split name and value
-        value = ''
-        colon = cvskw.find(':')
-        if colon == -1:
-            # no value. Full string is name
-            name = cvskw.strip()
-        else:
-            # name is part before colon
-            name = cvskw[:colon].strip()
-            # value is part after colon
-            if colon + 1 < len(cvskw):
-                value = cvskw[colon+1:].strip()
-        if not name:
-            print ("WARNING: \"%s\" is not a CVS keyword (it should have a name after the first $" % kw)
-            continue
-        kwDict[name] = value
-
-    return kwDict
-
-
-# Apply it to this module
-CVSkeywordsMap = CVSkeywords( ["$Id: CVSutil.py,v 1.2 2009-01-29 12:04:16 ctan Exp $" ,
-                               "$Name: not supported by cvs2svn $" ,
-                               "$Revision: 285339 $"] )
-
-__version__ = CVSkeywordsMap["Revision"]
-__author__ = "clat@hep.ph.bham.ac.uk"
-
-del CVSkeywordsMap
diff --git a/Tools/PyJobTransformsCore/python/CastorPreStager.py b/Tools/PyJobTransformsCore/python/CastorPreStager.py
deleted file mode 100755
index 9581aac2fc23fc86a1b57414f54da2dbf91dab2a..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/CastorPreStager.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
-
-__doc__ = """FilePreStager for the castor tape system"""
-
-__all__ = []
-
-from FilePreStager import *
-import os,re,time,commands
-import rfio
-from envutil import *
-
-# mapping of Castor stager_qry output to FilePreStager stati.
-_stager_qry_status_words = { 'STAGED'         : FilePreStager.STAGED ,
-                             'CANBEMIGR'      : FilePreStager.STAGED ,
-                             'STAGEIN'        : FilePreStager.STAGEIN ,
-                             'INVALID_STATUS' : FilePreStager.INVALID ,
-                             'not in stager'  : FilePreStager.ONTAPE }
-
-_stager_qry_statusRE = '|'.join( _stager_qry_status_words.keys() )
-
-_stager_qry_output_patterns = [ \
-    r'^%%s\s+\S+\s+(?P<status>%s)$' % (_stager_qry_statusRE) ,
-    r'^Error [0-9]+.*?\(File\s+%%s\s+(?P<status>%s)\s*\)$' % (_stager_qry_statusRE) ]
-
-class CastorPreStager(FilePreStager):
-    def __init__(self,name=None,filenamePattern=r'^/castor/|^rfio:/castor/|^castor:', maxWaitingTime=120*60, pollingInterval=5*60):
-        FilePreStager.__init__(self,filenamePattern,maxWaitingTime, pollingInterval, name)
-
-    def _writeOutFileList(self, listOfFiles, filename='castorFiles.list'):
-        '''Write list of files for castor to operate on to a file - prevents problems with excessive command line length'''
-        try:
-            f = open(filename, 'w')
-            for file in listOfFiles:
-                print (self.removePrefix(file), file=f)
-            f.close()
-        except IOError as errMsg:
-            raise FilePreStageError('Got IOError writing out list of files to stage as %s: %s' % (filename, errMsg))
-        return filename
-
-
-    def getFilesStatus(self,listOfFiles):
-        """Return a dictionary containing the status of each file in <listOfFiles>.
-        The key is the filename and the value is the status."""
-        if not listOfFiles: return {}
-        if isinstance(listOfFiles, str):
-            listOfFiles = [ listOfFiles ]
-        fileList = self._writeOutFileList(listOfFiles)
-        # compatibility with single filename
-        # Write list of files to a file, so prevent
-        castor_cmd = 'stager_qry'
-        if not find_executable(castor_cmd):
-            raise FilePreStageError( "%s not found in PATH" % (castor_cmd) )
-        cmd = '%s -f %s' % (castor_cmd, fileList)
-        # execute command
-        self.printDebug(cmd)
-        stat,output = commands.getstatusoutput( cmd )
-        self.printVerbose(output)
-        if stat:
-            raise FilePreStageError( "Error executing %s (output=%s)" % (cmd,output) )
-        # analyse output
-        statusDict = {}
-        for f in listOfFiles:
-            for pat in _stager_qry_output_patterns:
-                patF = pat % self.removePrefix(f)
-                match = re.search( patF, output, re.MULTILINE )
-                if match:
-                    status = _stager_qry_status_words[match.group('status')]
-                    break
-            else:
-                status = FilePreStager.UNKNOWN
-            statusDict[f] = status
-
-        return statusDict
-
-
-    def preStageFiles(self,listOfFiles):
-        if not listOfFiles: return
-        if isinstance(listOfFiles, str):
-            listOfFiles = [ listOfFiles ]
-        # compatibility with single filename
-        fileList = self._writeOutFileList(listOfFiles)
-        castor_cmd = 'stager_get'
-        if not find_executable(castor_cmd):
-            raise FilePreStageError( "%s not found in PATH" % (castor_cmd) )
-        cmd = '%s -f %s' % (castor_cmd, fileList)
-        # execute command
-        self.printDebug(cmd)
-        stat,output = commands.getstatusoutput( cmd )
-        self.printVerbose(output)
-        if stat:
-            raise FilePreStageError( "Error executing %s (output=%s)" % (cmd,output) )
-
-#
-# setup for castor2 for atlas using the env values for STAGE_HOST and STAGE_SVCCLASS
-#
-print ("Setting up castor 2 for ATLAS ...")
-castorEnv = {}
-castorEnv['RFIO_USE_CASTOR_V2']='YES'
-if os.getenv('STAGE_HOST')!= None:
-    castorEnv['STAGE_HOST']=os.getenv('STAGE_HOST')
-else:
-    castorEnv['STAGE_HOST']='castoratlas'
-if os.getenv('STAGE_SVCCLASS')!= None:
-    castorEnv['STAGE_SVCCLASS']=os.getenv('STAGE_SVCCLASS')
-else:
-    castorEnv['STAGE_SVCCLASS']='default'
-for n,v in castorEnv.items():
-    print ("%s=%s" % (n,v))
-
-os.environ.update(castorEnv)
-
-theCastorPreStager = CastorPreStager()
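
Side note (illustration, not part of the patch): the deleted CastorPreStager relied on the Python 2-only `commands` module (plus the CASTOR-specific `rfio` bindings), which is presumably why it was dropped rather than ported. Were equivalent shell-out behaviour ever needed again, `subprocess` offers a drop-in replacement in Python 3 — the `stager_qry` command and file-list name below come from the deleted code:

```python
import subprocess

# Python 3 equivalent of commands.getstatusoutput()
stat, output = subprocess.getstatusoutput('stager_qry -f castorFiles.list')
if stat:
    raise RuntimeError('Error executing stager_qry (output=%s)' % output)
```
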
diff --git a/Tools/PyJobTransformsCore/python/FakeAthena.py b/Tools/PyJobTransformsCore/python/FakeAthena.py
index 5772ba21ca573a74adf9de86cb80edcada92c310..58ed730c4daba066fcf64764519eb639a30295df 100755
--- a/Tools/PyJobTransformsCore/python/FakeAthena.py
+++ b/Tools/PyJobTransformsCore/python/FakeAthena.py
@@ -1,9 +1,10 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

-import os,sys, inspect
-from PyJobTransformsCore.trfutil import *
-from PyJobTransformsCore.trferr import *
-#from AthenaCommon.AppMgr import NO_EVSEL,MONTECARLO
+import os, inspect
+from past.builtins import execfile
+from PyJobTransformsCore.trfutil import find_joboptions
+from PyJobTransformsCore.trferr import JobOptionsNotFoundError
+from PyJobTransformsCore.TransformLogger import TransformLogger

 NO_EVSEL = -99
 MONTECARLO = 1
diff --git a/Tools/PyJobTransformsCore/python/FilePreStager.py b/Tools/PyJobTransformsCore/python/FilePreStager.py
index e408b75e12d1ed7deb09aaf76bf68d13d0189e7e..0df95929cd563cdcddacc794df10b61647c1c24a 100755
--- a/Tools/PyJobTransformsCore/python/FilePreStager.py
+++ b/Tools/PyJobTransformsCore/python/FilePreStager.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 __doc__ = """A base class for pre-staging files from tape to disk. Specific tape staging
@@ -219,9 +219,9 @@ class FileStagerRobot(TransformLogger):
         name = stager.name()
         oldStager = self.getStager(name)
         if oldStager is not None:
-            self.logger().warning("File pre-stager %s already in %s. Keeping old one." % (name,self.__name) )
+            self.logger().warning("File pre-stager %s already in %s. Keeping old one.", name,self.__name)
         else:
-            self.logger().debug("Adding file pre-stager %s to %s" % (name,self.__name) )
+            self.logger().debug("Adding file pre-stager %s to %s", name, self.__name)
             stager.setLoggerParentName(self.name())
             self.__stagerList.append( stager )
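
Side note (illustration, not part of the patch): the logging rewrites in this hunk — `logger().debug("... %s" % x)` becoming `logger().debug("... %s", x)` — defer string interpolation to the logging module, so the formatting cost is only paid when the record is actually emitted:

```python
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger('FileStagerRobot')

class Expensive:
    def __str__(self):
        print('formatting happened')
        return 'stager-1'

# Eager: Expensive.__str__ runs even though DEBUG records are discarded.
log.debug('Adding file pre-stager %s' % Expensive())
# Lazy: logging only formats when the record is emitted, so nothing runs here.
log.debug('Adding file pre-stager %s', Expensive())
```
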
diff --git a/Tools/PyJobTransformsCore/python/JobReport.py b/Tools/PyJobTransformsCore/python/JobReport.py
index f2efa9afdf2839f194b9afce5aa01710f818b0b8..edcbf33879481e79db36da516666981856184bbd 100755
--- a/Tools/PyJobTransformsCore/python/JobReport.py
+++ b/Tools/PyJobTransformsCore/python/JobReport.py
@@ -3,12 +3,9 @@
 ## @package JobReport
 #
 # @brief Main module containing the @em JobReport class and other ancillary classes @em JobInfo, @em FileInfo and @em TaskInfo.
-# @author $LastChangedBy: graemes $
-# @version $Rev: 501327 $
-# @date $Date: 2012-05-18 16:24:55 +0200 (Fri, 18 May 2012) $

 from __future__ import with_statement, print_function
-import os,sys,re,shutil,traceback, subprocess, itertools
+import os, sys, shutil, subprocess, itertools
 import pickle
 from copy import copy
 from PyJobTransformsCore import AtlasErrorCodes, extraMetadata
@@ -265,7 +262,7 @@ class TaskInfo( XMLNode ):
                 trfName = None
             taskFound = True
             yield self
-        elif trfName == False: # omit top level task
+        elif trfName is False: # omit top level task
             trfName = None
             for subtask in self.subTasks:
                 for st in subtask.tasks( trfName, False ):
@@ -400,7 +397,7 @@ class JobReport( object ):
     # stored e.g. in composite transforms.
     def __init__( self ):
         # Can't be at class scope due to py3 scoping rules for comprehensions.
-        JobReport.defaultFiles = [ defaultFilenameBase + ext for ext in fileExtensions ] + [ metadata_xml, jobinfo_xml ]
+        JobReport.defaultFiles = [ self.defaultFilenameBase + ext for ext in self.fileExtensions ] + [ self.metadata_xml, self.jobinfo_xml ]
         self.reset()
         self.setCommand()

@@ -915,7 +912,7 @@ class JobReport( object ):
         try:
             if athenaOK and self.info( 'athCode' ).contents() != '0':
                 athenaOK = False
-        except:
+        except Exception:
             athenaOK = False
         for eInfo in self.errors():
             if self.__ignoreUnknown and eInfo.code == 69999 and athenaOK:
@@ -956,7 +953,7 @@ class JobReport( object ):
         if self.__ignoreErrors:
             try:
                 athCode = self.info( 'athCode' ).contents()
-            except:
+            except Exception:
                 pass
             else:
                 # Return success if Athena returns success regardless of any errors detected
@@ -993,7 +990,7 @@ class JobReport( object ):
         temp2 = 'tempfile2_TOBEREMOVED_'
         try:
             # the actual execution
-            slimmetadataProcess = subprocess.check_call( [ "slimmetadata", filename, temp1, temp2 ] )
+            subprocess.check_call( [ "slimmetadata", filename, temp1, temp2 ] )
         except subprocess.CalledProcessError as cpe:
             print ("Error slimming %s [%s]: %s" % ( filename, cpe.returncode, cpe.message ))
         else:
@@ -1002,7 +999,7 @@ class JobReport( object ):
         for f in [ temp1, temp2 ]:
             try:
                 os.remove( f )
-            except:
+            except Exception:
                 pass

     ## A wrapper method to allow for metadata to be written in two different formats
@@ -1220,12 +1217,12 @@ class JobReport( object ):
         for info in info_try_block.keys():
             try:
                 info_try_block[info]=self.info( info ).getContents()[ info ]
-            except:
+            except Exception:
                 pass
         for info in info_try_block_odict.keys():
             try:
                 info_try_block_odict[info]=self.info( info ).getContents()
-            except:
+            except Exception:
                 pass
         nevents = 0
         for oFile in self.outputFiles():
@@ -1290,7 +1287,7 @@ class JobReport( object ):
                 ifiles.append( { 'lfn' : os.path.basename( i.filename() ),
                                  'GUID' : i.guid(),
                                  'dataset' : i.metaData( 'dataset' ) } )
-            except:
+            except Exception:
                 print ('JobReport collecting info input files: problems with ', i)
         # collect info about outputfiles,
         # (metadata should be by file because of the combined trfs)
@@ -1322,12 +1319,12 @@ class JobReport( object ):
         for info in info_try_block.keys():
             try:
                 info_try_block[info]=self.info( info ).getContents()[ info ]
-            except:
+            except Exception:
                 pass
         for info in info_try_block_odict.keys():
             try:
                 info_try_block_odict[info]=self.info( info ).getContents()
-            except:
+            except Exception:
                 pass
         nevents = 0
         for oFile in self.outputFiles():
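
Side note (illustration, not part of the patch): the bare `except:` → `except Exception:` rewrites that run through this whole patch are more than style. A bare except also swallows `SystemExit` and `KeyboardInterrupt`, which derive from `BaseException`, so Ctrl-C or `sys.exit()` inside the try block would be silently eaten:

```python
import sys

def bare_except():
    try:
        sys.exit(1)          # raises SystemExit
    except:                  # noqa: E722 - swallows it; the process keeps running
        return 'exit swallowed'

def except_exception():
    try:
        sys.exit(1)
    except Exception:        # SystemExit is not an Exception: it propagates
        return 'never reached'

print(bare_except())         # exit swallowed
try:
    except_exception()
except SystemExit:
    print('SystemExit propagated as intended')
```
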
diff --git a/Tools/PyJobTransformsCore/python/TransformConfig.py b/Tools/PyJobTransformsCore/python/TransformConfig.py
index 36aff25438c2c9e27cf52e1f65c67ab2246e44f3..842254e6abbc0c13e893b25fcb248facb0954533 100755
--- a/Tools/PyJobTransformsCore/python/TransformConfig.py
+++ b/Tools/PyJobTransformsCore/python/TransformConfig.py
@@ -100,7 +100,7 @@ class Descriptor(object):
         <variableName> is the name of the variable that is being set and is typically
         only used for error messages."""
         if self.__allowed and value not in self.__allowed:
-            raise TransformConfigError( '%s value %r is not in %s' % \
+            raise TransformConfigError( '%s value %r is not in %s' %
                                         (variableName, value, self.__allowed) )
         return value

@@ -156,7 +156,7 @@ class Float(Descriptor):
     def _checkValue(self,variableName,value):
         try:
             value = float(value)
-        except:
+        except Exception:
             raise TransformConfigError( '%s value %s is not a float' %
                                         (variableName, value) )
         # check the value against list of possible values
@@ -173,7 +173,7 @@ class Integer(Descriptor):
         inValue = value
         try:
             value = int(value)
-        except:
+        except Exception:
             raise TransformConfigError( '%s value %s is not an int' %
                                         (variableName, value) )
         if value != float(inValue):
@@ -206,8 +206,8 @@ class UniqueList(Descriptor):
         try:
             value.__iter__
             return list( value )
-        except:
-            raise TransformConfigError( '%s should be a list or tuple. Got %s instead.' % \
+        except Exception:
+            raise TransformConfigError( '%s should be a list or tuple. Got %s instead.' %
                                         ( variableName, type( value ).__name__ ) )

@@ -217,7 +217,7 @@ class UniqueList(Descriptor):
         if allowed:
             for v in value:
                 if v not in allowed:
-                    raise TransformConfigError( '%s value %r is not one of %s' % \
+                    raise TransformConfigError( '%s value %r is not one of %s' %
                                                 (variableName, value, allowed) )
         # make entries unique
         newValue = set() #[]
@@ -248,7 +248,7 @@ class ListOfStrings(UniqueList):
         # check that all entries are strings
         for v in value:
             if not isinstance( v, str ):
-                raise TransformConfigError("Entry %r in %s is not a string (but an %s)" % \
+                raise TransformConfigError("Entry %r in %s is not a string (but an %s)" %
                                            ( v, variableName, type( v ).__name__ ) )
         return value

@@ -290,7 +290,7 @@ class JobConfigMetaClass(type):
 ##        print "JobConfigMetaClass( self=%r, name=%r, bases=%r, dict=%r )" % (self,name,bases,dict)
         if '__slots__' not in dict:
             raise TransformConfigError('Class %s does not have member __slots__. Please add __slots__ = ()'
-                                       ' to the class definition' % (name,) )
+                                        ' to the class definition' % (name,) )
         #
         # add list of properties
         #
diff --git a/Tools/PyJobTransformsCore/python/VTimer.py b/Tools/PyJobTransformsCore/python/VTimer.py
index 1956cbe21a093104a837d812aa485c41cde64067..4ddcb524ab904e006d7be45da0aca2209dcf67cb 100644
--- a/Tools/PyJobTransformsCore/python/VTimer.py
+++ b/Tools/PyJobTransformsCore/python/VTimer.py
@@ -84,7 +84,7 @@ class VTimer( object ):
             x = '%s %s' % ( tName, x )
         try:
             getattr( self.logger, severity )( x )
-        except:
+        except Exception:
             print (x)

     def toHMS( self, seconds = 0 ):
@@ -143,7 +143,7 @@ class VTimer( object ):
             self._print( 'not started [%s]' % name )
             # not re-inserting back onto _resultsStack as should not be on the stack in the first place.
             return
-        elif tContent.started == False:
+        elif tContent.started is False:
             self._print( 'already stopped [%s]' % name )
             # not re-inserting back onto _resultsStack as should not be on the stack in the first place.
             return
@@ -200,7 +200,7 @@ class VTimer( object ):
             return
         try:
             fileObj = open( fileName, 'w' )
-        except:
+        except Exception:
             self._print( ' : Could not open %s for writing.' % fileName )
             return
         pickle.dump( self._resultsDict, fileObj, pickle.HIGHEST_PROTOCOL )
diff --git a/Tools/PyJobTransformsCore/python/ValidateRootFile.py b/Tools/PyJobTransformsCore/python/ValidateRootFile.py
index 90d7c71bc55a7786531b53249b9b1e758c90048b..ee8590381d840718b00a78deb14227d664fcddde 100755
--- a/Tools/PyJobTransformsCore/python/ValidateRootFile.py
+++ b/Tools/PyJobTransformsCore/python/ValidateRootFile.py
@@ -1,6 +1,6 @@
 #!/bin/env python

-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 from __future__ import print_function

@@ -17,16 +17,16 @@ def checkPFCorruption(filename,verbose=False):
         print ("ERROR can't access file",filename)
         return -1

-    ROOT = RootUtils.import_root()
+    ROOT = RootUtils.import_root()  # noqa: F841
     from ROOT import TFile,TTree

     try:
         f=TFile.Open(filename)
-    except:
+    except Exception:
         print ("Can't open file",filename)
         return -1

-    nEvents=n=None
+    n=None

     keys=f.GetListOfKeys()
     for k in keys:
@@ -34,7 +34,7 @@ def checkPFCorruption(filename,verbose=False):
             tn=k.GetName()
             t=f.Get(tn)
             if not isinstance(t,TTree): return
-        except:
+        except Exception:
             print ("Can't get tree %s from file %s",tn,fn)
             f.Close()
             return -1
@@ -52,9 +52,6 @@ def checkPFCorruption(filename,verbose=False):
                 print ("Checking event",i)
         print ("Tree %s: %i event(s) ok" % (tn,n))

-        # Use CollectionTree determine the number of events
-        if tn=='CollectionTree':
-            nEvents=n
         pass #end of loop over trees

     f.Close()
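
Side note (illustration, not part of the patch): `checkPFCorruption()` above wraps `TFile.Open` in try/except, but PyROOT usually signals an unreadable file by returning a null or "zombie" `TFile` rather than raising, so a robustness check typically also wants the explicit tests below (sketch, assumes a PyROOT installation):

```python
import ROOT

def open_root_file(filename):
    f = ROOT.TFile.Open(filename)
    if not f or f.IsZombie():             # null handle or unreadable header
        raise IOError("Can't open file %s" % filename)
    if f.TestBit(ROOT.TFile.kRecovered):  # file was not closed properly
        print('Warning: %s had to be recovered' % filename)
    return f
```
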
diff --git a/Tools/PyJobTransformsCore/python/athena_wrapper.py b/Tools/PyJobTransformsCore/python/athena_wrapper.py
index 2623a89bc535a61769433d0d5f49f9a71ae26627..659a1f3fa086d91e46f072029b466d25f67a4fa5 100755
--- a/Tools/PyJobTransformsCore/python/athena_wrapper.py
+++ b/Tools/PyJobTransformsCore/python/athena_wrapper.py
@@ -1,6 +1,6 @@
 #!/bin/sh

-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 ## @package athena_wrapper
 #
@@ -11,9 +11,6 @@
 # @li Handle any exceptions that arise from the execution of @c athena.py and create the
 # appropriate AtlasErrorCodes.ErrorInfo object and a corresponding job report file.
 #
-# @author $LastChangedBy: graemes $
-# @version $Rev: 518863 $
-# @date $Date: 2012-09-25 11:45:05 +0200 (Tue, 25 Sep 2012) $

 """date" # executable for the shell, but a string block to python
 # First part (shell part) copied from athena.py
@@ -146,12 +143,15 @@ fi
 import builtins
 printfunc = getattr(builtins,'print')

-import os, sys
+import sys
+from past.builtins import execfile
 from AthenaCommon.Include import IncludeError
 from PyJobTransformsCore import trferr, trfconsts
-from PyJobTransformsCore.JobReport import *
+from PyJobTransformsCore.JobReport import JobReport
 from PyJobTransformsCore import AtlasErrorCodes

+# flake8: noqa
+
 ## The err variable will be used to contain an ErrorInfo instance
 # after the trferr.errorHandler handles an exception.
 err = None
@@ -161,7 +161,7 @@ err = None
 try:
     ## Position of the '-c' option given at the command line.
     cPos = sys.argv.index( '-c' )
-except:
+except Exception:
     pass
 else:
     sys.argv[ cPos + 1 ] = ' '.join( sys.argv[ cPos + 1 : ] )
@@ -220,7 +220,7 @@ except str as e:
                                      severity = AtlasErrorCodes.FATAL,
                                      message = e )
# Catch all other exceptions
-except:
+except Exception:
     err = AtlasErrorCodes.ErrorInfo( acronym = 'ATH_EXC_PYT',
                                      severity = AtlasErrorCodes.FATAL )
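
Side note (illustration, not part of the patch): `from past.builtins import execfile` — used here and in trf.py, FakeAthena.py and basic_trfarg.py — pulls in the third-party `future` package. If that dependency were ever unwanted, the Python 2 builtin has a well-known dependency-free replacement; a sketch, not what the patch does:

```python
def execfile(filename, globals=None, locals=None):
    """Execute a Python file in the given namespaces, like the py2 builtin."""
    with open(filename, 'rb') as f:
        source = f.read()
    code = compile(source, filename, 'exec')  # keeps the filename in tracebacks
    exec(code, globals, locals)
```
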
diff --git a/Tools/PyJobTransformsCore/python/basic_trfarg.py b/Tools/PyJobTransformsCore/python/basic_trfarg.py
index 42683c000aef8e332840c49d6cabd1ed985cf4a3..4ec3008082d06aac32d2ef6b32840043151fcf51 100755
--- a/Tools/PyJobTransformsCore/python/basic_trfarg.py
+++ b/Tools/PyJobTransformsCore/python/basic_trfarg.py
@@ -1,23 +1,19 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 ## @package basic_trfarg
 #
 # @brief Package contains the basic argument types for JobTransforms.
 # @details Classes defined in this package are not intended to be used directly.
 # @see Argument classes designed to be used can be found in the full_trfarg package.
-# @author $LastChangedBy: ivukotic $
-# @version $Rev: 451072 $
-# @date $Date: 2011-07-28 19:22:06 +0200 (Thu, 28 Jul 2011) $

 from __future__ import print_function

-import os, shutil, time, fnmatch, subprocess, copy
-import stat as statconsts
-from PyJobTransformsCore import fileutil, AtlasErrorCodes
-from PyJobTransformsCore.trfutil import *
-from PyJobTransformsCore.trferr import *
+import os, sys, time, fnmatch, subprocess, copy, re
+from PyJobTransformsCore import fileutil, envutil
+from PyJobTransformsCore.trfutil import AODFile, BSFile, CommentLine, DPDFile, ESDFile, EvgenFile, FileType, HitsFile, PoolDataFile, expandStringToList, strip_suffix
+from PyJobTransformsCore.trferr import TransformDefinitionError, TransformArgumentError, InputFileError, OutputFileError
 from PyJobTransformsCore.JobReport import FileInfo
-from PyJobTransformsCore.TransformLogger import TransformLogger,logging
+from PyJobTransformsCore.TransformLogger import TransformLogger

 ## @brief Base class of all transform argument classes.
@@ -442,7 +438,7 @@ class FloatArg( Argument ):
         """Turn a command line argument string into an float python object"""
         try: return float(val)
         except ValueError :
-            raise TransformArgumentError( '%s=%s is not of type %s' % \
+            raise TransformArgumentError( '%s=%s is not of type %s' %
                                           (self.name(), repr(val), self.basicType()) )

@@ -468,7 +464,7 @@ class StringArg( Argument ):
     def toPython(self,val):
         try: return str(val+'')
         except TypeError :
-            raise TransformArgumentError( '%s=%s is not of type %s' % \
+            raise TransformArgumentError( '%s=%s is not of type %s' %
                                           (self.name(), repr(val), self.basicType()) )

@@ -487,10 +483,10 @@ class StringChoicesArg( ArgChoices, StringArg ):
         if default is not None:
             try: default = self.toPython(default)
             except TransformDefinitionError :
-                raise TransformDefinitionError( 'Default value %s=%s is not of type %s' % \
+                raise TransformDefinitionError( 'Default value %s=%s is not of type %s' %
                                                 (self._name, repr(default), self.basicType()) )
             if not self.checkChoices(default):
-                raise TransformDefinitionError( 'Default value %s=%s is not one of %s' % \
+                raise TransformDefinitionError( 'Default value %s=%s is not one of %s' %
                                                 (self._name, repr(default), self.choices()) )
         Argument.setDefault(self,default)

@@ -503,7 +499,7 @@ class StringChoicesArg( ArgChoices, StringArg ):
             valUpper = val.upper()
             for c in self.choices():
                 if valUpper == c.upper() and val != c:
-                    self.logger().warning( 'Changing case of %s to %s' % (val,c) )
+                    self.logger().warning( 'Changing case of %s to %s', val, c )
                     val = c
                     break
         Argument.setValue(self,val)
@@ -517,7 +513,7 @@ class StringChoicesArg( ArgChoices, StringArg ):
         if not ArgChoices.checkChoices(self,val):
             raise TransformArgumentError( '%s=%r is not one of %s' %
                                           (name, val, ','.join(choices)) )
-        self.logger().debug( '%s is in list %s -> OK' % (repr(val), repr(choices)) )
+        self.logger().debug( '%s is in list %s -> OK', repr(val), repr(choices) )

 #
@@ -546,14 +542,14 @@ class BoolArg( ArgChoices, Argument ):

     def toPython(self,val):
-        if type(val) == type(True): return val != False
-        if type(val) == type(''):
+        if isinstance(val, bool): return val is not False
+        if isinstance(val, str):
             if val.lower() == 'true': return True
             if val.lower() == 'false': return False
-            raise TransformArgumentError( '%s=%r is not one of %s' % \
+            raise TransformArgumentError( '%s=%r is not one of %s' %
                                           (self.name(), val, ','.join(self.choices())) )
         else:
-            raise TransformArgumentError( '%s=%r is not of type %s' % \
+            raise TransformArgumentError( '%s=%r is not of type %s' %
                                           (self.name(), val, self.basicType()) )

@@ -591,7 +587,7 @@ class FileArg( StringArg ):
         #convert value to the correct case
         valUpper = value.upper()
         if valUpper == 'NONE' and value != 'NONE':
-            self.logger().info( 'Changing case of %s to %s' % (value,valUpper) )
+            self.logger().info( 'Changing case of %s to %s', value, valUpper )
             value = valUpper
         if value != self.originalValue():
             self.__eventCount = None
@@ -613,7 +609,7 @@ class FileArg( StringArg ):
         """check that file exists (possibly with attempt number) and is non-empty"""
         val = self.value()
         if val.startswith( 'LFN:' ):
-            self.logger().info( '%s is an LFN. Omitting any local file checks.' % val )
+            self.logger().info( '%s is an LFN. Omitting any local file checks.', val )
             return
##         if not fileutil.exists(val):
##             found = fileutil.exists_suffix_number(val + '.')
@@ -700,7 +696,7 @@ class DataFileArg( FileArg ):
                 singleHash = False
             except IndexError: # problem with format of filename. filename ends with a single '#'!
                 hashPos = -1
-                self.logger().warning( "Error trying to extract dataset from %s." % filename )
+                print( "Error trying to extract dataset from %s." % filename )
             dataset = filename[ 0 : hashPos ]
             if omitFromName:
                 dsPrefix = ''
@@ -718,7 +714,7 @@ class DataFileArg( FileArg ):
             try:
                 fname = dsPrefix + dataset + filename[ hashPos + 2 : ]
             except IndexError: # problem with format of filename. filename ends with a double '#'!
-                self.logger().warning( "Error trying to extract filename from %s." % filename )
+                print( "Error trying to extract filename from %s." % filename )
             else:
                 if dirName:
                     fname = os.sep.join( [ dirName, fname ] )
@@ -737,7 +733,7 @@ class DataFileArg( FileArg ):
     #
     def eventCount(self):
         if self.__eventCount is None and self:
-            self.logger().info("Counting events of %s..." % self.originalValue())
+            self.logger().info("Counting events of %s...", self.originalValue())
             start = time.time()
             self.__eventCount = self._fileType.eventCount( self )
             if self.__eventCount is not None:
@@ -770,7 +766,7 @@ class InputFileArg( FileArg ):
         """Check that the file exists, and is readable"""
         if not self: return
         self.checkFile()
-        self.logger().debug( 'Inputfile %s is usable -> OK' % (self.originalValue()) )
+        self.logger().debug( 'Inputfile %s is usable -> OK', self.originalValue() )

     def eventCount(self):
@@ -913,7 +909,7 @@ class InputDataFileArg( DataFileArg ):
         if self:
             try:
                 val = self.value()[0]
-            except:
+            except Exception:
                 pass
             else:
                 return self._fileType.type( val )
@@ -925,7 +921,7 @@ class InputDataFileArg( DataFileArg ):
         if self:
             try:
                 val = self.value()[0]
-            except:
+            except Exception:
                 pass
             else:
                 return self._fileType.contents( val )
@@ -952,14 +948,14 @@ class InputDataFileArg( DataFileArg ):
         """First filename without the path and type"""
         try:
             return self._fileType.baseFilename( self.value()[0] )
-        except:
+        except Exception:
             return ""

     def bareFilename(self):
         """First filename without the path, the contents and the type."""
         try:
             return self._fileType.bareFilename( self.value()[0] )
-        except:
+        except Exception:
             return ""

@@ -992,7 +988,7 @@ class InputTarFileArg( InputFileArg ):
             if not line: continue
             self._filelist.append(line)
         # hack for slving bug on poll (Python 2.4?)
-        ret=p.wait()
+        p.wait()
         while True:
             line = p.stdout.readline().strip()
             if not line: break
@@ -1107,11 +1103,12 @@ class InputTarFileAndSetupArg( InputTarFileArg ):
             pwd = os.getcwd()
             if setupdir and setupdir != os.curdir: os.chdir(setupdir)
             if fullsetup.endswith('.py'):
+                from past.builtins import execfile
                 # avoid pollution of global namespace
                 env = {}
                 execfile( setupbase,env )
             else:
-                source_setup( setupbase )
+                envutil.source_setup( setupbase )
             # go back to original directory
             os.chdir(pwd)

@@ -1168,7 +1165,6 @@ class OutputFileArg( DataFileArg ):
         DataFileArg.preRunAction(self)
         val = self.value()
         self._fileInfo = None
-        mess = ''
         if not self:
             self.logger().debug( 'No output file expected. Nothing to be done.' )
         elif fileutil.exists(val):
@@ -1182,7 +1178,7 @@ class OutputFileArg( DataFileArg ):

     def validateFile( self ):
         if self._temporary or self._intermediate:
-            self.logger().info( '%s is a temporary/intermediate file. It will not be validated.' % self.name() )
+            self.logger().info( '%s is a temporary/intermediate file. It will not be validated.', self.name() )
             return
         try:
             self._fileType.validateFile( self, **self.__validationDict )
@@ -1214,12 +1210,12 @@ class OutputFileArg( DataFileArg ):
                 if FT is None: FT='unset'
                 necoll=self._fileType.writeSize(self)
                 if necoll is not None:
-                   # returns filetype, number_of_events and tuple_of_sizes
+                    # returns filetype, number_of_events and tuple_of_sizes
                     alltheData.append(FT.upper())
                     alltheData.append(necoll[0])
                     alltheData.append(necoll[1])
             return
-        except:
+        except Exception:
             print ("basic_trfarg.py exception caught:", sys.exc_type, ":", sys.exc_value)
     else:
         print ('basic_trfarg.py Not checking object sizes for this file type')
diff --git a/Tools/PyJobTransformsCore/python/envutil.py b/Tools/PyJobTransformsCore/python/envutil.py
index 156c076e7937811dea5c0414c3cede16b93f83f2..387f799c88855f819ca60431498413a9a86954e2 100755
--- a/Tools/PyJobTransformsCore/python/envutil.py
+++ b/Tools/PyJobTransformsCore/python/envutil.py
@@ -355,8 +355,7 @@ def find_files_split( filename, dirlist, access, depth ):
         for f in filelist:
##            print ("Trying %s..." % f)
             if not os.path.isfile(f) or not fileutil.access(f, access): continue
-            if not f in filenameList:
-                fullfile = os.path.join( dir, f )
+            if f not in filenameList:
##                print ("==> Adding %s to list from %s" % (f,dir))
                 dirnameList.append(dir)
                 filenameList.append(f)
@@ -637,47 +636,3 @@ def source_setup(setupshell,options='',envRE=None):
     newenv = getenv_from_output( source_cmd + '; printenv', envRE )
     update_env(newenv)
     return newenv
-
-
-def setup_cmt(cmtroot):
-    """setup cmt itself, located at root directory cmtroot"""
-    setupsh = os.path.join(cmtroot,'mgr','setup.sh')
-    envRE = "CMTROOT|CMTBIN|CMTCONFIG|PATH|CLASSPATH"
-    return source_setup(setupsh, envRE=envRE)
-
-
-def setup_cmt_from_package(packageRoot):
-    setupsh = os.path.join(packageRoot,'cmt','setup.sh')
-    cmtroot = getenv_from_output('cat ' + setupsh,'CMTROOT').get('CMTROOT')
-    if not cmtroot:
-        raise EnvironmentError( 'Failed to setup cmt from %s' % (setupsh) )
-    setup_cmt(cmtroot)
-
-
-def setup_cmt_package(packageRoot,envRE=None):
-    setupsh = os.path.join(packageRoot,'cmt','setup.sh')
-    cmtroot = os.environ.get('CMTROOT')
-    if not cmtroot: setup_cmt_from_package(packageRoot)
-    cmtsetup = os.path.join( cmtroot, 'mgr', 'setup.sh' )
-    cmtcmd = os.path.join( cmtroot, 'mgr', 'cmt' )
-    cmd = 'source %s 1>/dev/null 2>/dev/null && %s setup -sh -no_cleanup' % (cmtsetup, cmtcmd)
-    newenv = getenv_from_output(cmd,envRE)
-    update_env(newenv)
-    return newenv
-
-
-def cmt_package_version(packageRoot,withVersionDirectory=True):
-    """Return the version of the cmt package at given package Root directory"""
-    version = None
-    # first try the version.cmt file
-    versioncmtfilename = os.path.join(packageRoot,'cmt','version.cmt')
-    if os.path.exists(versioncmtfilename):
-        cmtversionfile = open(versioncmtfilename)
-        for line in cmtversionfile:
-            if line: version = line.strip()
-        cmtversionfile.close()
-    # if failed, assume with-version-directory and take last directory of packageRoot
-    if not version and withVersionDirectory:
-        version = os.path.basename( packageRoot )
-
-    return version
diff --git a/Tools/PyJobTransformsCore/python/extraMetadata.py b/Tools/PyJobTransformsCore/python/extraMetadata.py
index 982760cce3f2894b169869c1dcda495293ee6f70..ec41a5980677aee2fb1be9ea218131ad7ddb4735 100644
--- a/Tools/PyJobTransformsCore/python/extraMetadata.py
+++ b/Tools/PyJobTransformsCore/python/extraMetadata.py
@@ -1,6 +1,4 @@
-#
-# extraMetadata.py
-#
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 #
 # Created by Alvin on 01/09/2009.
 #
diff --git a/Tools/PyJobTransformsCore/python/fileutil.py b/Tools/PyJobTransformsCore/python/fileutil.py
index db3cbbd1e919d8f51f008cf64514c000c5812043..e249361a0b47623e854deb145da0828115aea5e4 100755
--- a/Tools/PyJobTransformsCore/python/fileutil.py
+++ b/Tools/PyJobTransformsCore/python/fileutil.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 from __future__ import print_function

@@ -62,7 +62,7 @@ class AccessType:

     def matches(self,filename):
-        return re.search( self.matchPat, filename ) != None
+        return re.search( self.matchPat, filename ) is not None

     def cleanUp(self,filename):
@@ -104,7 +104,7 @@ class Tee:

     def flush(self):
         self.screen.flush()
-        file.flush(self)
+        self.f.flush()

@@ -155,8 +155,8 @@ def remove(filename):
     if at == IO_LOCAL:
         if exists(filename): retry_file_access( os.remove, filename )
     else:
-        print ("WARNING: file %s file %s can not be removed" % \
-               (at.name, filename))
+        print ("WARNING: file %s file %s can not be removed" %
               (at.name, filename))

 def exists_suffix(filename,suffixRE):
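
Side note (illustration, not part of the patch): the `Tee.flush` fix above is a Python 3 correctness fix, not just style — the py2 `file` builtin that `file.flush(self)` relied on no longer exists. A minimal sketch of such a delegating Tee; the attribute names `screen` and `f` follow the diff, the rest is assumed and is not the actual fileutil implementation:

```python
import sys

class Tee:
    """Duplicate writes to the screen and to a log file."""
    def __init__(self, filename, screen=sys.stdout):
        self.screen = screen
        self.f = open(filename, 'a')

    def write(self, text):
        self.screen.write(text)
        self.f.write(text)

    def flush(self):
        self.screen.flush()
        self.f.flush()   # delegate to the wrapped file object, not file.flush(self)

    def close(self):
        self.f.close()
```
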
diff --git a/Tools/PyJobTransformsCore/python/full_trfarg.py b/Tools/PyJobTransformsCore/python/full_trfarg.py
index f0247beb4315c462d4751b9b5cfc48cc2bd0c877..9bb9daf79fa8bb47ae34c54449a36a477847454a 100755
--- a/Tools/PyJobTransformsCore/python/full_trfarg.py
+++ b/Tools/PyJobTransformsCore/python/full_trfarg.py
@@ -1,20 +1,13 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
-
-import CVSutil
-CVSkeywords = CVSutil.CVSkeywords( ["$Id: full_trfarg.py,v 1.61 2009-04-17 06:40:35 cote Exp $" ,
-                                    "$Name: not supported by cvs2svn $" ,
-                                    "$Revision: 525634 $"] )
-
-__version__ = CVSkeywords["Revision"]
-__author__ = "clat@hep.ph.bham.ac.uk"
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

 __doc__ = """End-user Fully specialised arguments that can be used in JobTransform implemenations."""

+import os
 import copy
-from PyJobTransformsCore.basic_trfarg import *
-from PyJobTransformsCore.envutil import *
-from PyJobTransformsCore.trfutil import *
-from PyJobTransformsCore.trferr import *
+from PyJobTransformsCore.basic_trfarg import Argument, BoolArg, InputDataFileArg, InputTarFileAndSetupArg, InputTarFileArg, IntegerArg, OutputDataFileArg, OutputFileArg, StringArg
+from PyJobTransformsCore.envutil import find_joboptions
+from PyJobTransformsCore.trfutil import AODFile, BSFile, DPDFile, ESDFile, EvgenFile, FTKIPFile, HistogramFile, HitsFile, JiveXMLTGZFile, MonitorHistFile, NtupleFile, RDOFile, SANFile, TAGFile, expandStringToList, strip_suffix
+from PyJobTransformsCore.trferr import JobOptionsNotFoundError, TransformArgumentError, TransformDefinitionError
 from PyJobTransformsCore import fileutil

@@ -122,17 +115,17 @@ class OutputSkimmedBSFileArg(OutputDataFileArg):
         #print "FilenameTDAQ",filenameTDAQ

         if fileutil.exists(filename):
-            self.logger().info("Found skimmed bystream file called %s" % filename)
+            self.logger().info("Found skimmed bytestream file called %s", filename)
             OutputDataFileArg.prepareFileInfo( self )
             return
         elif fileutil.exists(filenameTDAQ):
-            self.logger().info("Found skimmed bystream file called %s, renaming back to %s" % (filenameTDAQ, filename))
-            os.rename(filenameTDAQ,filename); #try - except?
+            self.logger().info("Found skimmed bytestream file called %s, renaming back to %s", filenameTDAQ, filename)
+            os.rename(filenameTDAQ,filename) #try - except?
             #That's of course a hack that will work only in local file system.
             OutputDataFileArg.prepareFileInfo( self )
             return
         else:
-            self.logger().info("No skimmed bystream file corresponding to %s found." % filename)
+            self.logger().info("No skimmed bytestream file corresponding to %s found.", filename)
             return

 class InputESDFileArg(InputDataFileArg):
@@ -337,7 +330,7 @@ class BasicExec(Argument):
         try:
             valOut=StringToExec(valIn)
             return valOut
-        except:
+        except Exception:
             raise TransformArgumentError( '%s=%s: syntax error in BasicExec' % (self.name(),valIn) )
         return None

@@ -370,7 +363,7 @@ class ListOfStringsArg(StringArg):
         try:
             valTmp=valIn.replace(',,',',')
             valList=valTmp.split(',')
-        except:
+        except Exception:
             raise TransformArgumentError( '%s=%s: syntax error in list of strings' % (self.name(),valIn) )
         return valList

@@ -392,10 +385,10 @@ class JobOptionsArg(StringArg):
         elif pacType == list:
             self._packages = package
         else:
-            raise TypeError("%s constructor argument \'package\' is not of type str or list (got type %s)" % \
-                            (self.__class__.__name__, pacType.__name__) )
+            raise TypeError("%s constructor argument \'package\' is not of type str or list (got type %s)" %
+                            (self.__class__.__name__, pacType.__name__) )
         # prepend current directory if not already present
-        if not os.curdir in self._packages: self._packages.insert(0,os.curdir)
+        if os.curdir not in self._packages: self._packages.insert(0,os.curdir)
         self._fullFiles = []
         self._exeEnv = {} # copy of python environment after last execution of joboptions
         StringArg.__init__(self,help,name)
@@ -442,7 +435,7 @@ class JobOptionsArg(StringArg):
         val = Argument.value(self)
         newVal = []
         for v in val:
-            if self._packages and not os.sep in v:
+            if self._packages and os.sep not in v:
                 for p in self._packages:
                     full = os.path.join(p,v)
                     if find_joboptions( full ):
@@ -462,7 +455,7 @@ class JobOptionsArg(StringArg):
         #convert value to the correct case
         valUpper = value.upper()
         if valUpper == 'NONE' and value != 'NONE':
-            self.logger().info( 'Changing case of %s to %s' % (value,valUpper) )
+            self.logger().info( 'Changing case of %s to %s', value,valUpper )
             value = valUpper
         # treat empty string as NONE
         if value == '': value = 'NONE'
@@ -485,7 +478,7 @@ class JobOptionsArg(StringArg):
         for i in range(len(shortList)):
             short = shortList[i]
             full = fullList[i]
-            self.logger().info( 'Found %s in %s' % (short, strip_suffix(full,short)) )
+            self.logger().info( 'Found %s in %s', short, strip_suffix(full,short) )

     def exeJobOptions( self, env = {} ):
         """Execute the lines in the jobOptions file in environment <env>. The runtime
@@ -499,7 +492,7 @@ class JobOptionsArg(StringArg):
         for i in range( len( shortList ) ):
             short = shortList[ i ]
             full = fullList[ i ]
-            self.logger().info( "Executing jobOptions file %s." % full )
+            self.logger().info( "Executing jobOptions file %s.", full )
             FakeAthena.theApp.exeJobOptions( short, env )
         self._exeEnv.update( copy.copy( env ) )

@@ -706,7 +699,7 @@ class DBReleaseArg( InputTarFileAndSetupArg ):
     """Tarball containing the DBRelease to use"""
     def __init__( self, name = 'DBRelease', destdir = os.curdir ):
         InputTarFileAndSetupArg.__init__( self, help = 'default', name = name,
-                                          setupfile = 'setup.py', envVars = None, destdir = destdir, temporary = True )
+                                           setupfile = 'setup.py', envVars = None, destdir = destdir, temporary = True )

     def isFullArgument(self):
         return True
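
Side note (illustration, not part of the patch): unlike the `logging` calls elsewhere in this patch, exception constructors do not support lazy `%s` interpolation, so in the `raise TypeError` above the `%` operator has to stay attached to the format string — passing the values as extra constructor arguments would compile but never format the message:

```python
name, got = 'JobOptionsArg', 'int'   # hypothetical values

wrong = TypeError("%s constructor argument 'package' is not of type str or list (got type %s)",
                  name, got)
print(wrong.args)   # the unformatted template plus the two values, as a tuple

right = TypeError("%s constructor argument 'package' is not of type str or list (got type %s)"
                  % (name, got))
print(right)        # JobOptionsArg constructor argument 'package' is not ... (got type int)
```
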
diff --git a/Tools/PyJobTransformsCore/python/pool.py b/Tools/PyJobTransformsCore/python/pool.py
index 5e058ff8459785729dbb2dd020dd05aaba3da930..4330a6ef78177a566335cae07479e664b8be5e00 100755
--- a/Tools/PyJobTransformsCore/python/pool.py
+++ b/Tools/PyJobTransformsCore/python/pool.py
@@ -1,7 +1,7 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration

-import os,re
-from envutil import *
+import os
+from envutil import find_path_env, append_path_env_if

 POOL_HOME = 'POOL_HOME'

@@ -18,7 +18,6 @@ def setup_environment():
     pool_home = os.path.dirname(pool_path[0])
     os.environ[POOL_HOME] = pool_home

-    pool_lib = os.path.join( pool_home, 'lib' )
     pool_bin = os.path.join( pool_home, 'bin' )

     append_path_env_if('PYTHONPATH',pool_bin)
@@ -26,6 +25,4 @@ def setup_environment():

 setup_environment()

-from PyFCAction import *
-
-
+from PyFCAction import * # noqa: F401 F403
diff --git a/Tools/PyJobTransformsCore/python/runargs.py b/Tools/PyJobTransformsCore/python/runargs.py
index 525564c0209c7a97e57d705b08972283c6a4d285..cf97bbb42f9e1099e9b450ccd345e798a5f46144 100755
--- a/Tools/PyJobTransformsCore/python/runargs.py
+++ b/Tools/PyJobTransformsCore/python/runargs.py
@@ -5,8 +5,7 @@ from __future__ import print_function
 __author__ = "clat@hep.ph.bham.ac.uk"

 import os
-from PyJobTransformsCore.TransformConfig import *
-from PyJobTransformsCore.TransformLogger import TransformLogger
+from PyJobTransformsCore.TransformConfig import Descriptor, JobConfig
 from PyJobTransformsCore.trfutil import VALIDATION_DICT
 from PyJobTransformsCore.trferr import TransformArgumentError

@@ -81,7 +80,7 @@ class CommandLineOption( Descriptor ):
         if not func: return
         if self.__takesArgument:
             func(value)
-        elif value == True:
+        elif value is True:
             func()

@@ -98,7 +97,7 @@ class CommandLineOption( Descriptor ):
             # convert to boolean if possible.
             try:
                 value = { 'TRUE' : True, 'FALSE' : False, '' : True }[ value.upper() ]
-            except:
+            except Exception:
                 raise TransformArgumentError( '%s value of %s not recognised.' % ( variableName, value ) )
         elif not isinstance( value, bool ):
             raise TransformArgumentError( '%s should be boolean. Got %s (%s) instead.'
                                           % (variableName, value, valType) )
@@ -113,7 +112,7 @@ class CommandLineOption( Descriptor ):
         choices = self.allowedValues()
         try:
             valueList = value.split( ',' )
-        except:
+        except Exception:
             valueList = [ value ]
         # Convert value to the correct case, if there is a list of string choices
         if choices:
@@ -136,7 +135,7 @@ class CommandLineOption( Descriptor ):
                     raise TransformArgumentError( '%s value %r is not in %s' % ( variableName, v, choices ) )
             try: # string list
                 return ','.join( newValueList )
-            except: # assume boolean
+            except Exception: # assume boolean
                 return newValueList[0]
         # check against list of possible values
         return Descriptor._checkValue(self,variableName,value)
@@ -185,7 +184,7 @@ class CommandLineOption( Descriptor ):
         # Check all allowed values
         if not self.__takesArgument:
             for val in allowedValues:
-                if val != True and val != False:
+                if val is not True and val is not False:
                     raise TransformArgumentError( '%s should be True or False. Got %s (%s) instead.' % (variableName, val, type(val).__name__) )
         else:
             for val in allowedValues:
@@ -369,7 +368,7 @@ class RunOptions(JobConfig):
                 # convert to boolean if possible.
                 try:
                     value = { 'TRUE' : True, 'FALSE' : False }[ value.upper() ]
-                except:
+                except Exception:
                     pass
             else:
                 value=''
@@ -396,7 +395,7 @@ class RunOptions(JobConfig):
                 try:
                     # test if it is a negative number
                     float(name) # covers both float and int
-                except:
+                except Exception:
                     raise TransformArgumentError('option %s not supported' % name)
                 else:
                     # it is a negative number: add it to the list, and go to next
@@ -416,7 +415,7 @@ class RunOptions(JobConfig):
                     # convert to boolean if possible.
                     try:
                         value = { 'TRUE' : True, 'FALSE' : False }[ value.upper() ]
-                    except:
+                    except Exception:
                         pass
                 setattr(self,longName,value)
             else:
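
Side note (illustration, not part of the patch): flake8's E712 drives the `== True`/`== False` → `is True`/`is False` rewrites in this file. The two spellings are not interchangeable for non-bool values, so the identity form is only safe where the value is known to be a real bool, as in the option handling above:

```python
value = 1
print(value == True)    # True  - numeric equality (1 == True)
print(value is True)    # False - 1 is not the singleton True

flag = False
print(flag is False)    # True  - identity against the singleton
```
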
-# @author $LastChangedBy: graemes $ -# @version $Rev: 576050 $ -# @date $Date: 2013-12-18 09:14:07 +0100 (Wed, 18 Dec 2013) $ from __future__ import with_statement, print_function -import os, sys, time, getopt, stat, re, math, subprocess, signal, threading -import traceback +from past.builtins import execfile +import os, sys, time, stat, re, math, subprocess, signal, inspect import stat as statconsts from copy import copy try: import cPickle as pickle -except: +except Exception: import pickle import fileutil from PyJobTransformsCore import trfenv, trferr, trfconsts, AtlasErrorCodes -from PyJobTransformsCore.trferr import * -from PyJobTransformsCore.trfutil import * -from PyJobTransformsCore.JobReport import * -from PyJobTransformsCore.TransformLogger import * +from PyJobTransformsCore.trferr import AthenaLogChecker, InputFileError, JobOptionsNotFoundError, TransformArgumentError, TransformDefinitionError, TransformEnvironmentError, TransformErrorDiagnoser, TransformValidationError +from PyJobTransformsCore.trfutil import Author, CommentLine, PostJobOptionsFile, PreJobOptionsFile, SQLiteSupport, TRF_SETTING, VersionString, find_joboptions, get_atlas_release, get_files, setDefaultSignalHandlers, setTrfSignalHandler, strip_suffix +from PyJobTransformsCore.JobReport import JobReport, JobInfo, _extraMetadataDict +from PyJobTransformsCore.TransformLogger import TransformLogger from PyJobTransformsCore.TransformConfig import TransformConfig -from PyJobTransformsCore.runargs import * +from PyJobTransformsCore.runargs import RunArguments, RunOptions, VALIDATION_DICT from PyJobTransformsCore.VTimer import vTimer -from PyJobTransformsCore.basic_trfarg import Argument,OutputFileArg -from PyJobTransformsCore.full_trfarg import * +from PyJobTransformsCore.basic_trfarg import Argument +from PyJobTransformsCore.full_trfarg import OptionArg, JobConfigArg from AthenaCommon import ExitCodes ## @class JobTransform @@ -132,7 +129,7 @@ class JobTransform(TransformLogger): if _v: version = _v break - except: + except Exception: pass ## Specify if this is the last trf in a chained series of trfs. # This allows certain processes to be omitted/added depending on it's position in the trf chain. @@ -352,11 +349,11 @@ class JobTransform(TransformLogger): self._logFile.write(line) try: self._logFile.close() - except: + except Exception: pass try: rc = self._runJobProcess.returncode - except: + except Exception: rc = None if rc < 0 or rc is None: os.system( 'dmesg > dmesg_trf.txt' ) @@ -562,13 +559,13 @@ class JobTransform(TransformLogger): try: if minEvents < 0 or maxEvents < 0 or maxEvents < minEvents: raise Exception - except: - self.logger().info( "%s should be greater or equal to %s" % ( maxEvents, minEvents ) ) + except Exception: + self.logger().info( "%s should be greater or equal to %s", maxEvents, minEvents ) return self._minEvents = minEvents - self.logger().info("Setting minimum number of output events to %d" % minEvents) + self.logger().info("Setting minimum number of output events to %d", minEvents) self._maxEvents = maxEvents - self.logger().info("Setting maximum number of output events to %d" % maxEvents) + self.logger().info("Setting maximum number of output events to %d", maxEvents) ## @brief Ensure that the exit code is not affected by the presence of unknown errors. 
# @details Aligns the transform exit code with that of @c athena.py (@em only if the latter was successful) @@ -634,7 +631,7 @@ class JobTransform(TransformLogger): try: execfile( fname ) except IOError: - self.logger().warning( "Error reading file %s containing extra metadata." % fname ) + self.logger().warning( "Error reading file %s containing extra metadata.", fname ) else: _extraMetadataDict.update( locals()['extraMetadataDict'] ) @@ -704,7 +701,7 @@ class JobTransform(TransformLogger): def enableMaskAllErrors( self, val ): try: val = val.upper() - except: + except Exception: pass else: if val == 'ALL': @@ -755,7 +752,7 @@ class JobTransform(TransformLogger): for n in self._sharedRunOpts: try: setattr(self._runOpts,n,getattr(parentOpts,n)) - except: + except Exception: pass ## Add or replace options at the command line for @c athena.py @@ -773,7 +770,7 @@ class JobTransform(TransformLogger): except ValueError: pass else: - self.logger().warning( '%s cannot be used with the default --tcmalloc option. --tcmalloc option removed.' % option ) + self.logger().warning( '%s cannot be used with the default --tcmalloc option. --tcmalloc option removed.', option ) # remove old option if needed firstWord = option.split()[0] for opt in self._athenaOptions: @@ -819,13 +816,12 @@ class JobTransform(TransformLogger): if isinstance(authorList,Author): authorsAdd = [ authorList ] else: - authType = type(authorList) - if authType == type(''): + if isinstance(authorList, str): authorsAdd = [ Author(a) for a in authorList.split(',') ] - elif authType == type(list()): + elif isinstance(authorList, list): pass else: - raise TransformDefinitionError('Author type %s not supported' % authType) + raise TransformDefinitionError('Author type %s not supported' % type(authorList)) self._authors += authorsAdd # update authors template authStrings = [ str(a) for a in authorsAdd ] @@ -1067,7 +1063,7 @@ class JobTransform(TransformLogger): # @return None def ls(self): for arg in self._positionalArgs: - self.logger().info( '%s=%s # %s' % ( arg.name(), arg.value(), arg.getHelp() ) ) + self.logger().info( '%s=%s # %s', arg.name(), arg.value(), arg.getHelp() ) ## Getter function for the job transform name. # @see _name attribute. @@ -1139,7 +1135,7 @@ class JobTransform(TransformLogger): def getArgumentOfType(self,typename): for arg in self._positionalArgs: if arg.argumentType() == typename: return arg - self.logger().warning( "Argument of %s type not found. Returning None." % typename ) + self.logger().warning( "Argument of %s type not found. Returning None.", typename ) return None ## Getter function for the list of arguments in positional order. @@ -1170,7 +1166,7 @@ class JobTransform(TransformLogger): # @return None def writeRunArgs(self): filename = self.runArgsFilename() - self.logger().info( 'Writing runArgs to file \"%s\"' % filename ) + self.logger().info( 'Writing runArgs to file \"%s\"', filename ) with open( filename, 'w' ) as f: f.write( self.getRunArgsTemplate() % self.argumentValueDict() ) filename = os.path.splitext(filename)[0] + '.gpickle' @@ -1182,7 +1178,7 @@ class JobTransform(TransformLogger): raise TransformDefinitionError( "Auto-generation of skeleton jobOptions file not yet supported." 
+ " It must be specified in the constructor of %s" % self.__class__.__name__ ) filename = self.skeletonFilename() - self.logger().info( 'Writing skeleton to file \"%s\"' % filename ) + self.logger().info( 'Writing skeleton to file \"%s\"', filename ) with open( filename, 'w' ) as f: f.write( self.getSkeletonTemplate() % self.argumentValueDict() ) @@ -1318,7 +1314,6 @@ class JobTransform(TransformLogger): args = self._runOpts.extractOptions(argList) # process arguments posArgs = self._positionalArgs - reqArgs = self._requiredArgs nArgs = len(args) # fill the dictionary with all given arguments lastNamed='' @@ -1329,15 +1324,15 @@ class JobTransform(TransformLogger): if equal == -1: #positional argument. Not allowed after named argument if lastNamed: - raise TransformArgumentError(\ + raise TransformArgumentError( 'No positional arguments allowed after named arguments.' + - ' Positional argument %d (%s) after named argument %s=%r' % \ + ' Positional argument %d (%s) after named argument %s=%r' % (i+1,val,lastNamed,self.getArgument(lastNamed).value()) ) try: name = posArgs[pos].name() except IndexError: - raise TransformArgumentError ('too many arguments: %d (max=%d)' % \ - (nArgs, len(posArgs))) + raise TransformArgumentError ('too many arguments: %d (max=%d)' % + (nArgs, len(posArgs))) pos += 1 else: #named argument or option @@ -1382,7 +1377,7 @@ class JobTransform(TransformLogger): if oldEvents > 0: newEvents = int( math.ceil(oldEvents/eff) ) maxEvts.setValue(newEvents) - self.logger().info("Processing %d events instead of %d to account for filtering efficiency %f" % (newEvents, oldEvents, eff ) ) + self.logger().info("Processing %d events instead of %d to account for filtering efficiency %f", newEvents, oldEvents, eff ) ## Dump to file the complete shell command to facilitate the re-execution of the transform. # @remarks The arguments of the transform are written to a separate file as well. 
@@ -1421,7 +1416,7 @@ class JobTransform(TransformLogger): # try environment variable DBRELEASE self._dbrelease = os.environ.get('DBRELEASE') if self._dbrelease: - self.logger().info( "Got Database Release number %s from environment variable DBRELEASE" % self._dbrelease ) + self.logger().info( "Got Database Release number %s from environment variable DBRELEASE", self._dbrelease ) else: self.logger().info( "Database Release no longer needed for r19 and beyond" ) return @@ -1452,8 +1447,8 @@ class JobTransform(TransformLogger): machineInfo = JobInfo('Machine') # Platform information import platform - joinList = lambda x : '-'.join(x) - asIs = lambda x: x + joinList = lambda x : '-'.join(x) # noqa: E731 + asIs = lambda x: x # noqa: E731 platformEnv = { 'architecture' : joinList , 'dist' : joinList , 'machine' : asIs , @@ -1471,8 +1466,8 @@ class JobTransform(TransformLogger): cpucache='' modelstring='UNKNOWN' fname='/proc/cpuinfo' - modelRE=re.compile('^model name\s+:\s+(\w.+)') - cacheRE=re.compile('^cache size\s+:\s+(\d+ KB)') + modelRE=re.compile(r'^model name\s+:\s+(\w.+)') + cacheRE=re.compile(r'^cache size\s+:\s+(\d+ KB)') try: with open( fname ) as f: for line in f: @@ -1507,7 +1502,7 @@ class JobTransform(TransformLogger): 'X?PRINT.*', 'INPUTRC', 'LESS.*', 'AFSHOME', 'USERPATH', 'IFS', 'LAMHELPFILE', 'CLUSTER_DIR', 'ENVIRONMENT', 'GS_LIB', 'ROOTPATH', 'XAUTHORITY' '.*_DCOP', 'DCOP_.*', 'DOTFONTPATH', 'INITIALISED', - 'SAVEHIST', 'HISTSIZE', + 'SAVEHIST', 'HISTSIZE', 'cmt', 'jcmt', 'CVS.*', 'CMTCVSOFFSET', os.sep ) excludeEnvRE = re.compile( '^%s$' % '|'.join(excludeEnv) ) for n,v in os.environ.items(): @@ -1579,7 +1574,7 @@ class JobTransform(TransformLogger): # gather metadata from logfile logfile = self._logFilename if os.path.exists(logfile): - self.logger().info( "Scanning logfile %s for metadata..." % logfile ) + self.logger().info( "Scanning logfile %s for metadata...", logfile ) # pattern in logfile: # MetaData: <name> [unit]=<value> metaPat = re.compile( r"^MetaData:\s+(.*?)\s*=\s*(.*)$" ) @@ -1590,7 +1585,7 @@ class JobTransform(TransformLogger): if match: name=match.group(1).split()[0] # take first word (second word is optional unit) value=match.group(2) - self.logger().info( "Found MetaData: %s=%s" % (name,value) ) + self.logger().info( "Found MetaData: %s=%s", name,value ) addMeta[name] = value continue # gather extra metadata from extraMetadata @@ -1659,7 +1654,7 @@ class JobTransform(TransformLogger): # gather metadata from logfile logfile = self._logFilename if os.path.exists(logfile): - self.logger().info( "Scanning logfile %s for metadata..." 
% (logfile) ) + self.logger().info( "Scanning logfile %s for metadata...", logfile ) # pattern in logfile: # MetaData: <name> [unit]=<value> metaPat = re.compile( r"^MetaData:\s+(.*?)\s*=\s*(.*)$" ) @@ -1670,7 +1665,7 @@ class JobTransform(TransformLogger): if match: name=match.group(1).split()[0] # take first word (second word is optional unit) value=match.group(2) - self.logger().info( "Found MetaData: %s=%s" % (name,value) ) + self.logger().info( "Found MetaData: %s=%s", name, value ) addMeta[name] = value continue # gather extra metadata from extraMetadata @@ -1709,7 +1704,6 @@ class JobTransform(TransformLogger): # @return None def preStageInputFiles(self): from PyJobTransformsCore.FilePreStager import theFileStagerRobot - from PyJobTransformsCore import CastorPreStager self._addLogger( theFileStagerRobot ) fileList = [] for f in self._inputFiles: @@ -1758,9 +1752,9 @@ class JobTransform(TransformLogger): if len(self._inputFiles) > 1: self.logger().info('Checking maxEvents against {0} only ({1} are assumed to have the same event count)'.format(self._inputFiles[0].name(), [f.name() for f in self._inputFiles[1:]])) inputFile = self._inputFiles[0] - if VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testCountEvents' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False: raise Exception - except: + except Exception: self.logger().info( "Skipping input file max event check." ) return # Do nothing if all events are to be used (-1) rather than set it to the actual number. @@ -1779,12 +1773,12 @@ class JobTransform(TransformLogger): self.logger().warning("Found 0 events in %s, but total filesize %s. Ignoring event count.", inputFile.name(), totalSize ) else: - raise InputFileError(inputFile.originalValue(),' empty file(s). Argument %s' % \ + raise InputFileError(inputFile.originalValue(),' empty file(s). Argument %s' % (inputFile.name(),) , error='TRF_INFILE_EMPTY') elif events < maxEvents: if self._maxEventsStrategy =='ABORT': - raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' % \ + raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' % (events,maxEvents), error='TRF_INFILE_TOOFEW') elif self._maxEventsStrategy =='INPUTEVENTS': @@ -1798,10 +1792,10 @@ class JobTransform(TransformLogger): self.logger().warning("Unknown maxEventsStrategy (%s). Ignoring that %s=%d is larger than number of events (%d) in input file(s) %s", self._maxEventsStrategy,maxEventsArg.name(),maxEvents,events,inputFile.originalValue()) # Do check on minimum number of input events - if VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testEventMinMax' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False: self.logger().info( "Input file event min/max test omitted." ) elif self._minEvents and events < self._minEvents: - raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' % \ + raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' % (events,self._minEvents), error='TRF_INFILE_TOOFEW') else: @@ -1830,7 +1824,7 @@ class JobTransform(TransformLogger): # SQLiteSupport is enabled. if self._useSQLite is None: self._useSQLite = self._mcInput - self.logger().info( "%s use of SQLite." 
% { True : 'Enabling', False : 'Disabling', None : 'Disabling' }[ self._useSQLite ] ) + self.logger().info( "%s use of SQLite.", { True : 'Enabling', False : 'Disabling', None : 'Disabling' }[ self._useSQLite ] ) if not self._useSQLite: continue cmd.preRunAction() @@ -1849,12 +1843,12 @@ class JobTransform(TransformLogger): for l in lines: # print (l.strip()) if l.startswith('/atlas/Role=production'): isProd=True - except OSError as e: + except OSError: print ("trf.py - Not a prodSys environment.") - if isProd==False and os.getenv('TZAMIPW') is None: print ('Performance data will not get stored in the AMI db.' ) - elif self._exportToAmi==True and self.name()!='Digi_trf': # digi is off as it has no AMItag and is fast + if isProd is False and os.getenv('TZAMIPW') is None: print ('Performance data will not get stored in the AMI db.' ) + elif self._exportToAmi is True and self.name()!='Digi_trf': # digi is off as it has no AMItag and is fast #print ('-------', self.name(), '-------') @@ -1862,8 +1856,7 @@ class JobTransform(TransformLogger): isStream='' isAMItag='' isRun=-1 - isFormat='' - + import PyUtils.AthFile as athFile # this loop just tries to find runnumber, stream, amitag. should not look at NTUP files as these have no metadata embedded for arg in self._positionalArgs: @@ -1900,39 +1893,29 @@ class JobTransform(TransformLogger): isRun = inputFileSummary['run_number'][0] print ('isRun ', isRun) - # if 'stream_names' in inputFileSummary: - # isFormat = inputFileSummary['stream_names'][0].replace('Stream','') - # print ('isFormat ',isFormat) - - # if arg.name().startswith('outputRDOFile'): - # print ('This is RDO. Changing format to proper one.') - # isFormat='RDO' - - if isMC==True: + if isMC is True: print ('this is MC. Changing stream->procstep and runnumber -> pandaid') isStream=self.name() - fromFN = inFile.split('.'); if inFile[1].isdigit(): isRun = inFile[1] else: isRun = 0 - except Exception as e: + except Exception: print ("Problem in decoding variables.") print (sys.exc_info()[0]) print (sys.exc_info()[1]) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) print ('=====================') if isAMItag!='': - - print ('trf.py STARTING UPLOAD the final values -> stream:',isStream,'\trunnumber:',isRun,'\tamitag:',isAMItag)#, '\tformat:',isFormat + print ('trf.py STARTING UPLOAD the final values -> stream:',isStream,'\trunnumber:',isRun,'\tamitag:',isAMItag) import PyJobTransforms.performanceDataUploader as pu uploader = pu.PerformanceUploader(isProd) uploader.establishConnection() - # this loop finds sizes and formats and uploads to AMI for all the files. + # this loop finds sizes and formats and uploads to AMI for all the files. 
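The hunks above also tighten exception handling: bare `except:` becomes `except Exception:`, and `== True/False/None` comparisons become identity checks (`is True`, `is None`), which pycodestyle flags as E712/E711. A small sketch of why the narrower clause matters; `risky_upload` is invented as a stand-in for the AMI upload calls:

```python
import sys

def risky_upload():
    # Invented stand-in for the upload calls above.
    raise RuntimeError('connection lost')

try:
    risky_upload()
except Exception:
    # Ordinary failures still land here and get reported...
    print('Unexpected error:', sys.exc_info()[0])

# ...but SystemExit and KeyboardInterrupt do not derive from Exception,
# so Ctrl-C or a deliberate sys.exit() now propagates instead of being
# swallowed the way a bare "except:" would swallow it.
```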
for cmd in self._postRunActions: try: # print ('trf.py _postRunAction ',cmd) @@ -1946,13 +1929,13 @@ class JobTransform(TransformLogger): print ('trf.py object size data upload DONE') except Exception as exc: print (exc) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) except Exception as e: print ('trf.py WARNING: Could not send size data to AMI ' , e) print (sys.exc_info()[0]) print (sys.exc_info()[1]) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) if self._name=='AtlasG4_trf' or self._name=='Evgen_trf' or self._name=='Digi_trf': @@ -1968,7 +1951,7 @@ class JobTransform(TransformLogger): uploader.uploadPerfMonSD(isAMItag, self._name, isStream, int(isRun), perffile) except Exception as exc: print (exc) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) print ('trf.py upload of job performance data done!') @@ -1976,7 +1959,7 @@ class JobTransform(TransformLogger): print ('trf.py WARNING: Could not send job info to AMI ' , e) print (sys.exc_info()[0]) print (sys.exc_info()[1]) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) else: print ('there is no perfmon file: ', perffile) @@ -2001,11 +1984,11 @@ class JobTransform(TransformLogger): timelimited(120, self.doUpload) except Exception as exc: print (exc) - except: + except Exception: print ("Unexpected error:", sys.exc_info()[0]) - if not ( VALIDATION_DICT['ALL'] == False or VALIDATION_DICT[ 'testMatchEvents' ] == False ): + if not ( VALIDATION_DICT['ALL'] is False or VALIDATION_DICT[ 'testMatchEvents' ] is False ): self.matchEvents() else: self.logger().info( "Skipping event number matching." ) @@ -2031,13 +2014,13 @@ class JobTransform(TransformLogger): self.gatherEnvironmentInfo() # Do check on minimum and maximum number of requested output events (only if not running in test mode) maxEvents = None - if VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testEventMinMax' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False: self.logger().info( "Event min/max test omitted." ) else: maxEventsArg = self.getArgumentOfType("MaxEvents") try: maxEvents = maxEventsArg.value() - except: + except Exception: pass if maxEvents and maxEvents != -1: # undo any efficiency correction @@ -2074,7 +2057,7 @@ class JobTransform(TransformLogger): fullJo = find_joboptions( jo ) if not fullJo: raise JobOptionsNotFoundError(jo,'Top jobOptions file not found') - self.logger().info( 'Found top jobOptions %s in %s' % (jo, strip_suffix(fullJo,jo)) ) + self.logger().info( 'Found top jobOptions %s in %s', jo, strip_suffix(fullJo,jo) ) with open( fullJo ) as joFile: for line in joFile: self._logFile.write(line) @@ -2097,7 +2080,7 @@ class JobTransform(TransformLogger): with open( athenaScript, 'w' ) as athenaFile: # If we have an asetup to add, add it here.... if 'asetup' in self._namedArgs: - self.logger().info('Found asetup arg: %s' % self._namedArgs['asetup'].value()) + self.logger().info('Found asetup arg: %s', self._namedArgs['asetup'].value()) athenaFile.write('#! /bin/sh' + os.linesep) athenaFile.write('%s/scripts/asetup.sh %s' % (os.environ['AtlasSetup'], self._namedArgs['asetup'].value()) + os.linesep) runViaScript = True @@ -2107,14 +2090,13 @@ class JobTransform(TransformLogger): statconsts.S_IRUSR | statconsts.S_IRGRP | statconsts.S_IROTH | statconsts.S_IWUSR ) except Exception as e: - self.logger().warning( 'Encountered an error while trying to create %s. 
%s' % ( athenaScript, e ) ) - logStartAthena = self._logFile.tell() + self.logger().warning( 'Encountered an error while trying to create %s. %s', athenaScript, e ) # the actual execution if runViaScript: - self.logger().info( 'Executing %s: %s' % ( self.name(), athenaScript ) ) + self.logger().info( 'Executing %s: %s', self.name(), athenaScript ) self._runJobProcess = subprocess.Popen( args = athenaScript, bufsize = 1, shell = False,stdout = subprocess.PIPE, stderr = subprocess.STDOUT ) else: - self.logger().info( 'Executing %s: %s' % ( self.name(), ' '.join( athenaScriptArgs ) ) ) + self.logger().info( 'Executing %s: %s', self.name(), ' '.join( athenaScriptArgs ) ) self._runJobProcess = subprocess.Popen( args = athenaScriptArgs, bufsize = 1, shell = False,stdout = subprocess.PIPE, stderr = subprocess.STDOUT ) # Poll stdout of the process and write to log file while self._runJobProcess.poll() is None: @@ -2123,18 +2105,18 @@ class JobTransform(TransformLogger): self._logFile.write(line) # adding the exit status from athena rc = self._runJobProcess.returncode - self.logger().info( '%s has completed running of Athena with exit code %s.' % ( self.name(), rc ) ) + self.logger().info( '%s has completed running of Athena with exit code %s.', self.name(), rc ) if rc < 0: # dump dmesg to file when the athena job receives a signal. os.system( 'dmesg > dmesg_athena.txt' ) if rc == -signal.SIGKILL: - self.logger().error( 'Athena received signal %s = SIGKILL. Athena was killed, job will terminate. ' % rc ) + self.logger().error( 'Athena received signal %s = SIGKILL. Athena was killed, job will terminate. ', rc ) elif rc == -signal.SIGTERM: - self.logger().error( 'Athena received signal %s = SIGTERM. Athena was terminated, job will terminate. ' % rc ) + self.logger().error( 'Athena received signal %s = SIGTERM. Athena was terminated, job will terminate. ', rc ) else: # After discussion with athena core people, we decided it's best to encode the # signal exit code in a 'shell like' way, adding 128 to it - self.logger().error( 'Athena received signal %s. Exit code reset to Athena exit code %d.' % (-rc, 128 + abs(rc) ) ) + self.logger().error( 'Athena received signal %s. Exit code reset to Athena exit code %d.', -rc, 128 + abs(rc) ) rc = 128 + abs(rc) # Add the athena exit codes and acronym directly into the main job report before # it is lost due to some exception occurring between now and the end of this function. @@ -2163,7 +2145,7 @@ class JobTransform(TransformLogger): if status == 0: self.doPostRunActions() # do check on number of events in output files - if VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testEventMinMax' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False: self.logger().info( "Output file event min/max test omitted." ) else: for f in self._outputFiles: @@ -2182,7 +2164,7 @@ class JobTransform(TransformLogger): # @warning Derived transforms must not override this function. 
# @return JobReport.JobReport instance def execute(self): - self.logger().info( 'Using %s' % ( trfenv.trfPath) ) + self.logger().info( 'Using %s', trfenv.trfPath ) try: #clean up old stuff fileutil.remove(self._logFilename) @@ -2199,7 +2181,7 @@ class JobTransform(TransformLogger): self._jobReport.addReport( self.runJob(), 'MERGE' ) # Catch all exceptions except Exception as e: - self.logger().error( "During execution of %s, exception caught: %s" % ( self.name(), e ) ) + self.logger().error( "During execution of %s, exception caught: %s", self.name(), e ) self._jobReport.addError( self._handleException(e) ) # run the error diagnoser on all errors errorDocter = TransformErrorDiagnoser() @@ -2211,7 +2193,7 @@ class JobTransform(TransformLogger): self._jobReport.setIgnoreErrors( self._ignoreErrors ) errorcode = self._jobReport.errorCode() exitcode = self._jobReport.exitCode() - self.logger().info( "JobTransform completed for %s with error code %s (exit code %d)" % (self.name(),errorcode,exitcode) ) + self.logger().info( "JobTransform completed for %s with error code %s (exit code %d)", self.name(),errorcode,exitcode ) dirInfo = self._getRunDirInfo() # in case of ERROR, add workdir contents printListing = errorcode and 'KEY_INTERRUPT' not in self._jobReport.errorAcronym() diff --git a/Tools/PyJobTransformsCore/python/trfValidateRootFile.py b/Tools/PyJobTransformsCore/python/trfValidateRootFile.py index 8e7157276f5ea78872f55bf4ec4e8ee80d06c3b8..8340ae856c3bcfe6000849233f155311984e44be 100755 --- a/Tools/PyJobTransformsCore/python/trfValidateRootFile.py +++ b/Tools/PyJobTransformsCore/python/trfValidateRootFile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # @Package PyJobTransforms.trfValidateRootFile # @brief Functionality to test a Root file for corruption @@ -15,31 +15,31 @@ import sys from PyUtils import RootUtils ROOT = RootUtils.import_root() -from ROOT import TFile, TTree, TKey, TDirectory, TClass, TList, TObjArray, TStopwatch, TBasket, TDirectoryFile +from ROOT import TFile, TTree, TDirectory, TStopwatch, TDirectoryFile def checkBranch(branch, msg): - msg.debug('Checking branch %s...' % branch.GetName()) + msg.debug('Checking branch %s...', branch.GetName()) - nBaskets=branch.GetWriteBasket(); + nBaskets=branch.GetWriteBasket() - msg.debug('Checking %s baskets...' % nBaskets) + msg.debug('Checking %s baskets...', nBaskets) for iBasket in range(nBaskets): - basket=branch.GetBasket(iBasket); + basket=branch.GetBasket(iBasket) if not basket: - msg.warning('Basket %s of branch %s is corrupted.' % (iBasket, branch.GetName() )) + msg.warning('Basket %s of branch %s is corrupted.', iBasket, branch.GetName()) return 1 - listOfSubBranches=branch.GetListOfBranches(); - msg.debug('Checking %s subbranches...' % listOfSubBranches.GetEntries()) + listOfSubBranches=branch.GetListOfBranches() + msg.debug('Checking %s subbranches...', listOfSubBranches.GetEntries()) for subBranch in listOfSubBranches: if checkBranch(subBranch,msg)==1: - return 1; + return 1 - msg.debug('Branch %s looks ok.' % branch.GetName()) + msg.debug('Branch %s looks ok.', branch.GetName()) return 0 @@ -47,11 +47,11 @@ def checkTreeBasketWise(tree, msg): listOfBranches=tree.GetListOfBranches() - msg.debug('Checking %s branches ...' 
% listOfBranches.GetEntries()) + msg.debug('Checking %s branches ...', listOfBranches.GetEntries()) for branch in listOfBranches: if checkBranch(branch,msg)==1: - msg.warning('Tree %s is corrupted (branch %s ).' % (tree.GetName(), branch.GetName())) + msg.warning('Tree %s is corrupted (branch %s ).', tree.GetName(), branch.GetName()) return 1 return 0 @@ -61,11 +61,11 @@ def checkTreeEventWise(tree, msg): nEntries=tree.GetEntries() - msg.debug('Checking %s entries...' % nEntries) + msg.debug('Checking %s entries...', nEntries) for i in range(nEntries): if tree.GetEntry(i)<0: - msg.warning('Event %s of tree %s is corrupted.' % (i, tree.GetName())) + msg.warning('Event %s of tree %s is corrupted.', i, tree.GetName()) return 1 return 0 @@ -73,33 +73,33 @@ def checkTreeEventWise(tree, msg): def checkDirectory(directory, type, requireTree, msg): - msg.debug('Checking directory %s...' % directory.GetName()) + msg.debug('Checking directory %s...', directory.GetName()) listOfKeys=directory.GetListOfKeys() - msg.debug('Checking %s keys... ' % listOfKeys.GetEntries()) + msg.debug('Checking %s keys... ', listOfKeys.GetEntries()) for key in listOfKeys: - msg.debug('Looking at key %s...' % key.GetName()) - msg.debug('Key is of class %s.' % key.GetClassName()) + msg.debug('Looking at key %s...', key.GetName()) + msg.debug('Key is of class %s.', key.GetClassName()) object=directory.Get(key.GetName()) if not object: - msg.warning("Can't get object of key %s." % key.GetName()) + msg.warning("Can't get object of key %s.", key.GetName()) return 1 - if ( the_object.GetName().find('Meta') > -1 ) and isinstance(the_object,TDirectoryFile): - msg.warning("Will ignore Meta TDirectoryFile %s!" % the_object.GetName() ) + if ( object.GetName().find('Meta') > -1 ) and isinstance(object,TDirectoryFile): + msg.warning("Will ignore Meta TDirectoryFile %s!", object.GetName() ) continue if requireTree and not isinstance(object, TTree): - msg.warning("Object %s is not of class TTree!" % object.GetName()) + msg.warning("Object %s is not of class TTree!", object.GetName()) return 1 if isinstance(object,TTree): - msg.debug('Checking tree %s ...' % object.GetName()) + msg.debug('Checking tree %s ...', object.GetName()) if type=='event': if checkTreeEventWise(object, msg)==1: @@ -108,47 +108,47 @@ def checkDirectory(directory, type, requireTree, msg): if checkTreeBasketWise(object, msg)==1: return 1 - msg.debug('Tree %s looks ok.' % object.GetName()) + msg.debug('Tree %s looks ok.', object.GetName()) if isinstance(object, TDirectory): if checkDirectory(object, type, requireTree, msg)==1: return 1 - msg.debug('Directory %s looks ok.' % directory.GetName()) + msg.debug('Directory %s looks ok.', directory.GetName()) return 0 def checkFile(fileName, type, requireTree, msg): - msg.info('Checking file %s.' % fileName) + msg.info('Checking file %s.', fileName) file=TFile.Open(fileName) if not file: - msg.warning("Can't access file %s." % fileName) + msg.warning("Can't access file %s.", fileName) return 1 if not file.IsOpen(): - msg.warning("Can't open file %s." % fileName) + msg.warning("Can't open file %s.", fileName) return 1 if file.IsZombie(): - msg.warning("File %s is a zombie." % fileName) + msg.warning("File %s is a zombie.", fileName) file.Close() return 1 if file.TestBit(TFile.kRecovered): - msg.warning("File %s needed to be recovered." % fileName) + msg.warning("File %s needed to be recovered.", fileName) file.Close() return 1 if checkDirectory(file, type, requireTree, msg)==1: - msg.warning("File %s is corrupted." 
% fileName) + msg.warning("File %s is corrupted.", fileName) file.Close() return 1 - file.Close(); - msg.info("File %s looks ok." % fileName) + file.Close() + msg.info("File %s looks ok.", fileName) return 0 @@ -203,10 +203,10 @@ def main(argv): return usage() rc=checkFile(fileName,type, requireTree, msg) - msg.debug('Returning %s' % rc) + msg.debug('Returning %s', rc) - clock.Stop(); - clock.Print(); + clock.Stop() + clock.Print() return rc diff --git a/Tools/PyJobTransformsCore/python/trfenv.py b/Tools/PyJobTransformsCore/python/trfenv.py index 77446f055264b144b38bc8f0732cc31e127e84ae..aaee0c4b66324fc1c42045449dc82dc8db65095a 100755 --- a/Tools/PyJobTransformsCore/python/trfenv.py +++ b/Tools/PyJobTransformsCore/python/trfenv.py @@ -1,4 +1,4 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration # prepare the runtime environment for the transformations import os,re,sys @@ -9,7 +9,7 @@ if sys.hexversion < minPyVersion: minVersion = "%d.%d.%d" % ( (minPyVersion >> 24) & 0xFF, (minPyVersion >> 16) & 0xFF, (minPyVersion >> 8 ) & 0xFF ) raise EnvironmentError( "Used python version (%s) too old. Requiring at least version %s" % (gotVersion,minVersion) ) -from PyJobTransformsCore.trferr import * +from PyJobTransformsCore.trferr import TransformEnvironmentError from PyJobTransformsCore import trfconsts,envutil # no imports out of scope! diff --git a/Tools/PyJobTransformsCore/python/trferr.py b/Tools/PyJobTransformsCore/python/trferr.py index 56dff3387adffcc52537264308dfda7170d7085f..894e926910622b431e8835504625e397de866b4c 100755 --- a/Tools/PyJobTransformsCore/python/trferr.py +++ b/Tools/PyJobTransformsCore/python/trferr.py @@ -11,14 +11,12 @@ __all__ = [ 'TransformError', 'TransformDefinitionError', 'TransformArgumentErro 'JobOptionsNotFoundError', 'TransformErrorHandler', 'AthenaLogChecker', 'TransformThreadTimeout', 'TransformThreadError' ] -import sys,re,os,copy +import re +import os from PyJobTransformsCore import fileutil, trfconsts, AtlasErrorCodes, VTimer -from PyJobTransformsCore.xmlutil import XMLNode -from PyJobTransformsCore.envutil import * +from PyJobTransformsCore.envutil import find_library, examine_library, find_in_stack from PyJobTransformsCore.TransformLogger import TransformLogger -#from AthenaCommon.Logging import logging from AthenaCommon.Include import IncludeError -from PyJobTransformsCore.JobReport import * from future import standard_library standard_library.install_aliases() @@ -248,8 +246,8 @@ class AthenaLogChecker: releaseName = 'ALL' if self.release: releaseName += ',' + self.release - logger.info("Scanning athena logfile %s assuming ATLAS release %s ..." % \ - (filename,releaseName) ) + logger.info("Scanning athena logfile %s assuming ATLAS release %s ...", + filename, releaseName) logger.info("Athena initialise()...") logFile = open(filename) nLines = 0 @@ -259,7 +257,7 @@ class AthenaLogChecker: report.addError( self.processLine( line, logger ) ) logFile.close() if logger: - logger.info("Done scanning %d lines of file %s. Summary:" % (nLines,filename) ) + logger.info("Done scanning %d lines of file %s. 
Summary:", nLines, filename) logger.info(" Ignored : %d", self.ignoreCount ) logger.info(" Warnings: %d", self.warningCount ) logger.info(" Errors : %d", self.errorCount ) @@ -301,27 +299,27 @@ class AthenaLogChecker: # match ignore patterns ignore = AtlasErrorCodes.matchIgnorePattern(line,self.release) if ignore: - if ignore.re.pattern == '.*?\s+?INFO .+': + if ignore.re.pattern == r'.*?\s+?INFO .+': return None self.ignoreCount += 1 if logger: - logger.debug("ignoring error in line: \"%s\"" % line) - logger.debug(" because it matched: \"%s\"" % ignore.re.pattern) + logger.debug("ignoring error in line: \"%s\"", line) + logger.debug(" because it matched: \"%s\"", ignore.re.pattern) return None # then match known error patterns match, err = AtlasErrorCodes.matchErrorPattern(line,self.release) if err: self.processError(err) if logger: - logger.debug("matched error category %s in line: %s" % (err.category.acronym,line)) - logger.debug(" because it matched: \"%s\"" % match.re.pattern) + logger.debug("matched error category %s in line: %s", err.category.acronym, line) + logger.debug(" because it matched: \"%s\"", match.re.pattern) return err # finally, perform generic error match err = self.extractError(line) if err: self.processError(err) if logger: - logger.verbose("non-matched error in line: %s" % line) + logger.verbose("non-matched error in line: %s", line) return err return None @@ -339,7 +337,7 @@ class AthenaLogChecker: with who, severity and message field filled. For all other messages return None""" line=line.rstrip() - lineREC = re.compile("(^\S*\s*(?=WARNING|ERROR|FATAL))(WARNING|ERROR|FATAL)\:?\s+(.+$)") + lineREC = re.compile(r"(^\S*\s*(?=WARNING|ERROR|FATAL))(WARNING|ERROR|FATAL)\:?\s+(.+$)") match = lineREC.search(line) if match: who = match.group(1).strip() @@ -485,7 +483,7 @@ class TransformErrorHandler(TransformLogger): # add filename to EnvironmentError for printout if isinstance(e,EnvironmentError): fn = e.filename - if fn != None and fn not in e.args: e.args += (fn,) + if fn is not None and fn not in e.args: e.args += (fn,) # # specific processing # @@ -499,9 +497,8 @@ class TransformErrorHandler(TransformLogger): elif isinstance(e,Exception): if hasattr(e,'args') and type(e.args) == list and e.args: args0 = e.args[0] - argType0 = type(args0) # test for some known strings - if argType0 == type(''): + if isinstance(args0, str): if args0.find('Failed to load DLL') != -1: return self.handleDllLoadError(e) # error was not handled @@ -529,7 +526,7 @@ class TransformErrorHandler(TransformLogger): def handleSystemExit(self,e): try: status = e.args[ 0 ] - except: + except Exception: status = 0 if status == 0: return AtlasErrorCodes.ErrorInfo( acronym = 'OK' ) @@ -556,13 +553,16 @@ class TransformErrorHandler(TransformLogger): def handleDllLoadError(self,e): # try to find the guilty one + import subprocess + from PyJobTransformsCore.trfutil import TRACEBACK_TEXT + mess = None diag = None - dllRE = "^theApp.Dlls\s*[+]?=" + dllRE = r"^theApp.Dlls\s*[+]?=" stack = find_in_stack( dllRE ) if stack: text = stack[TRACEBACK_TEXT] - dllNameRE = "([\w\.\-]+)" + dllNameRE = r"([\w\.\-]+)" subRE = "%s%s%s%s" % (dllRE,r"\s*\[\s*\"", dllNameRE, r"\"\s*\]") dll = re.sub( subRE, r"\1", text ) lib = 'lib%s.so' % (dll) @@ -572,7 +572,7 @@ class TransformErrorHandler(TransformLogger): if not full_lib: diag += '%s not found.' % (lib) else: - self.logger().debug( "Found %s. Checking dependencies..." % full_lib ) + self.logger().debug( "Found %s. 
Checking dependencies...", full_lib ) lddOut = subprocess.getoutput( 'ldd %s' % (full_lib) ) missLibs = [ ] subRE = "%s%s%s" % (r"^\s*",dllNameRE,r"\s+.*not found\s*.*$") diff --git a/Tools/PyJobTransformsCore/python/trfutil.py b/Tools/PyJobTransformsCore/python/trfutil.py index bc7a55248e091cdd70d388b71c1d7144376fc2e2..ec1ff87d6b4cc74b94c09ca8e2dbca1b520ae871 100755 --- a/Tools/PyJobTransformsCore/python/trfutil.py +++ b/Tools/PyJobTransformsCore/python/trfutil.py @@ -2,12 +2,12 @@ from __future__ import print_function -import os, sys, re, shutil, inspect, glob, time, signal, pickle +import os, sys, re, shutil, glob, time, signal, pickle import bz2 import functools import tarfile -import traceback import uuid +from builtins import range from subprocess import Popen, PIPE, STDOUT from threading import Thread @@ -16,14 +16,12 @@ try: except AttributeError: # __stop does not exist in Python 3.0 TRF_Thread_stop = Thread._stop -from .xmlutil import XMLNode -from .envutil import * +from .envutil import find_executable, find_file_env, find_files_env from PyJobTransformsCore import trfconsts, trfenv, fileutil -from PyJobTransformsCore.trferr import * -from PyJobTransformsCore import AtlasErrorCodes +from PyJobTransformsCore.trferr import TransformValidationError, TransformThreadError, TransformThreadTimeout, JobOptionsNotFoundError, FileError from PyJobTransformsCore.TransformLogger import TransformLogger from PyJobTransformsCore.VTimer import vTimer -from PyUtils import AthFile, RootUtils +from PyUtils import RootUtils try: import PyDumper.SgDumpLib as sdl @@ -37,7 +35,7 @@ TRF_SETTING = { 'testrun' : False, 'validationTimeout' : 600, 'validationRetry' LFN_VER_DICT = {} -_PFNPat = re.compile( '^(?P<url>\S*?)(?P<lfn>[A-Za-z0-9\.\-\_]+?)(?P<ver>\.\d+)?$' ) +_PFNPat = re.compile( r'^(?P<url>\S*?)(?P<lfn>[A-Za-z0-9\.\-\_]+?)(?P<ver>\.\d+)?$' ) _defaultSignalHandlerDict = {} @@ -151,7 +149,7 @@ def getAncestry(): # so signaling left to right is correct def listChildren(psTree = None, parent = os.getpid()): '''Take a psTree dictionary and list all children''' - if psTree == None: + if psTree is None: psTree = getAncestry() children = [] @@ -183,7 +181,6 @@ def infanticide(childPIDs, sleepTime = 3): ## @brief Decorator to dump a stack trace when hit by SIGUSR def sigUsrStackTrace(func): - import os import signal import traceback @@ -241,7 +238,7 @@ def timelimited_exec1( tl_func, tl_timeout = TRF_SETTING[ 'TRFTimeout' ], tl_ret myChildren = listChildren() infanticide(myChildren) p.poll() - if p.returncode == None: + if p.returncode is None: # Error - set some fallback value for rc rc = -signal.SIGALRM else: @@ -368,7 +365,7 @@ def getGUIDfromPFC(filename): if p.returncode != 0: print ("GUID retrieval failed: %s" % stderr) return (1, None) - if guid == None: + if guid is None: print ('Did not find GUID in catalog %s (usually harmless)' % catalog) return (0, None) print ("GUID retrieval: %s (%s) found in %s" % ( guid, filename, catalog )) @@ -401,13 +398,13 @@ def StringToList(cmd): else: try: valList=cmd.split(',,') - except: + except Exception: raise ValueError("StringToList cannot interpret '%s' as a list."%str(cmd)) return valList def ntup_entries(fname, tree_names): """Count events in ROOT-based files.""" - if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testCountEvents' ] == False ): + if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ): print ('Test run in progress. 
Event count (ROOT-based) disabled.') return None #work with string or list of strings @@ -494,7 +491,7 @@ def strip_prefix( aString, aPrefix ): def remove_filename_extension( aString ): """Remove everything from <aString> starting from the last . (dot) after - the last path separator (/ or \) if any. If <aString> does not have a dot + the last path separator ('/' or '\\') if any. If <aString> does not have a dot or if it ends with a path separator, then nothing is removed.""" slash = aString.rfind(os.sep) dot = aString.rfind(os.extsep,slash+1) @@ -737,7 +734,7 @@ def get_files( listOfFiles, fromWhere='data', doCopy='ifNotLocal', errorIfNotFou # Check existence of targetFile (side effect of an exception when running os.path.samefile). try: isSameFile = os.path.samefile( srcFile, targetFile ) - except OSError as x: # good. targetFile does not exist. + except OSError: # good. targetFile does not exist. # print ("%s does not exist. %s" % ( targetFile, x )) if os.path.islink( targetFile ): # broken symlink try: @@ -767,7 +764,7 @@ def get_files( listOfFiles, fromWhere='data', doCopy='ifNotLocal', errorIfNotFou try: print ("**Attempting to remove %s" % targetFile) os.remove( targetFile ) # remove files and symlinks - except: # dst file is a directory + except Exception: # dst file is a directory for _root, _dirs, _files in os.walk( targetFile , topdown = False ): for name in _files: os.remove( os.path.join( _root, name ) ) @@ -916,10 +913,10 @@ def expandParallelVectorNotation( valIn ): return [ valIn ] if valIn.count( '[' ) != valIn.count( ']' ): raise Exception( 'Mismatched brackets.') - pieces = re.findall( '\[[\S]+\]', valIn ) # get the bracket sections + pieces = re.findall( r'\[[\S]+\]', valIn ) # get the bracket sections if not pieces: return [ valIn ] - if False in [ not re.findall('\[|\]', p[ 1:-1 ] ) for p in pieces ]: + if False in [ not re.findall( r'\[|\]', p[ 1:-1 ] ) for p in pieces ]: raise Exception( 'Nested brackets detected.' ) ppieces = [ [ i.strip() for i in p[ 1:-1 ].split( ',' ) ] for p in pieces ] for i in map( None, *ppieces ): @@ -1036,7 +1033,7 @@ def getCachedFileInfo( filename, infoKey ): return resultList try: return resultList[0] - except: + except Exception: pass print (stdout) return None @@ -1056,13 +1053,13 @@ def corruptionTestBS( filename, file_type,logger): while p.poll() is None: line = p.stdout.readline() if line: - logger.info("AtlListBSEvents Report: %s" % line.strip()) + logger.info("AtlListBSEvents Report: %s", line.strip()) rc = p.returncode if rc == 0: return rc #AltListBSEvents.exe failed, fall back to PyDumper else: - logger.info("AtlListBSEvents failed to validate %s, Using the (slower) PyDumper method " %filename) + logger.info("AtlListBSEvents failed to validate %s, using the (slower) PyDumper method ", filename) cmdSnippet = os.linesep.join( [ "from sys import exit", "import os", @@ -1216,7 +1213,7 @@ class BSFile( FileType ): except Exception as e: print ("Event count failed for %s: %s" % ( arg, e )) return None - if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testCountEvents' ] == False ): + if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ): logger.info( 'Test run in progress. Event count (AthFile-based) disabled.' 
) return None resultList = getCachedFileInfo( fileList, 'nentries' ) @@ -1240,7 +1237,7 @@ class BSFile( FileType ): except Exception as e: print ("Could not validate file associated with %s: %s" % ( arg, e )) return - if VALIDATION_DICT[ 'ALL' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False: logger.info( "Skipping all validation routines." ) return # Defined default validation values @@ -1258,28 +1255,28 @@ class BSFile( FileType ): if vDict[ 'testIfExists' ]: raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' ) else: - logger.info( "Ignoring missing %s." % fName ) + logger.info( "Ignoring missing %s.", fName ) return if fileutil.getsize( fName ) == 0: if vDict[ 'testIfEmpty' ]: raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' ) else: - logger.info( "Ignoring empty %s." % fName ) + logger.info( "Ignoring empty %s.", fName ) return # Check if sdl can cope with the file type if self.validationType() == 'any': vDict[ 'testIfCorrupt' ] = False if vDict[ 'testIfCorrupt' ] and sdl is not None: - logger.info( "Checking %s for corruption." % fName ) + logger.info( "Checking %s for corruption.", fName ) vTimer.start( '%s validation' % argName ) sc = corruptionTestBS( fName, self.validationType(),logger ) vTimer.stop( '%s validation' % argName ) if sc < 0: - logger.warning( "Execution of corruption test failed [%s]." % sc ) + logger.warning( "Execution of corruption test failed [%s].", sc ) elif sc > 0: raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) ) if vDict[ 'testCountEvents' ]: - logger.info( "Attempting to validate %s using event count routine." % fName ) + logger.info( "Attempting to validate %s using event count routine.", fName ) vTimer.start( '%s validation' % argName ) eCount = arg.eventCount() vTimer.stop( '%s validation' % argName ) @@ -1288,15 +1285,15 @@ class BSFile( FileType ): if not vDict[ 'continueOnZeroEventCount' ]: raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' ) else: - logger.info(" WARNING - 0 events in %s, proceeding with empty file. " % fName) + logger.info(" WARNING - 0 events in %s, proceeding with empty file. ", fName) else: - logger.info( "Ignoring 0 events in %s." % fName ) + logger.info( "Ignoring 0 events in %s.", fName ) return - elif eCount == None: + elif eCount is None: if vDict[ 'stopOnEventCountNone' ]: raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' ) else: - logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways." % fName ) + logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways.", fName ) if callable( vDict[ 'extraValidation' ] ): vTimer.start() extraValidationResult = None @@ -1305,9 +1302,9 @@ class BSFile( FileType ): except TransformThreadTimeout: logger.warning( 'Extra validation routine timed out.' 
) except TransformThreadError as e: - logger.warning( 'Thread running extra validation routine failed to stop.\n%s' % e ) + logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e ) except Exception as e: - logger.warning( 'Extra validation routine error.\n%s' % e ) + logger.warning( 'Extra validation routine error.\n%s', e ) vTimer.stop() if not extraValidationResult: raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' ) @@ -1322,7 +1319,7 @@ class BSFile( FileType ): # sc, out = sdl.run_sg_dump( files = [ fName ], output = os.devnull, pyalg_cls = 'PyDumper.PyComps:DataProxyLoader', use_recex_links = False, file_type = self.validationType(), msg = logger ) # if sc != 0: # raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) ) - logger.info( "%s successfully validated." % fName ) + logger.info( "%s successfully validated.", fName ) return def getGUID(self,filename): if TRF_SETTING[ 'testrun' ]: return None guid = getCachedFileInfo( filename, 'file_guid' ) if guid is None: raise FileError( filename, "File %s GUID not present in BS file." % filename ) return guid def getMetaData(self,filename): @@ -1361,7 +1358,7 @@ class RootTTreeFile( FileType ): return None # Use FClistGUID rc, guid = getGUIDfromPFC(filename) - if guid != None: + if guid is not None: return guid if rc != 0: print ('Warning: Problem with PFC') @@ -1391,7 +1388,7 @@ class PoolDataFile( RootTTreeFile ): def getGUID(self, filename): # Use FClistGUID rc, guid = getGUIDfromPFC(filename) - if guid != None: + if guid is not None: return guid if rc != 0: print ('Warning: Problem with PFC') @@ -1409,7 +1406,7 @@ if filename in words[1]: guid = words[0] break - except: + except Exception: continue if p.returncode != 0: print ("GUID retrieval failed: %s" % stderr) @@ -1430,7 +1427,7 @@ except Exception as e: print ("Could not validate file associated with %s: %s" % ( arg, e )) return - if VALIDATION_DICT[ 'ALL' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False: logger.info( "Skipping all validation routines." ) return # Defined default validation values @@ -1445,26 +1442,26 @@ if vDict[ 'testIfExists' ]: raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' ) else: - logger.info( "Ignoring missing %s." % fName ) + logger.info( "Ignoring missing %s.", fName ) return if fileutil.getsize( fName ) == 0: if vDict[ 'testIfEmpty' ]: raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' ) else: - logger.info( "Ignoring empty %s." % fName ) + logger.info( "Ignoring empty %s.", fName ) return # Check if sdl can cope with the file type if self.validationType() == 'any': vDict[ 'testIfCorrupt' ] = False if vDict[ 'testIfCorrupt' ]: - logger.info( "Checking %s for corruption." % fName ) + logger.info( "Checking %s for corruption.", fName ) vTimer.start( '%s validation' % argName ) sc = corruptionTestROOT( fName, self.validationType() ) vTimer.stop( '%s validation' % argName ) if sc<0: raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. 
Argument %s' % ( sc, argName ) ) if vDict[ 'testCountEvents' ]: - logger.info( "Attempting to validate %s using event count routine." % fName ) + logger.info( "Attempting to validate %s using event count routine.", fName ) vTimer.start( '%s validation' % argName ) eCount = arg.eventCount() vTimer.stop( '%s validation' % argName ) @@ -1473,15 +1470,15 @@ class PoolDataFile( RootTTreeFile ): if not vDict[ 'continueOnZeroEventCount' ]: raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' ) else: - logger.info(" WARNING - 0 events in %s, proceeding with empty file. " % fName) + logger.info(" WARNING - 0 events in %s, proceeding with empty file. ", fName) else: - logger.info( "Ignoring 0 events in %s." % fName ) + logger.info( "Ignoring 0 events in %s.", fName ) return - elif eCount == None: + elif eCount is None: if vDict[ 'stopOnEventCountNone' ]: raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' ) else: - logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways." % fName ) + logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways.", fName ) if callable( vDict[ 'extraValidation' ] ): vTimer.start() extraValidationResult = None @@ -1490,9 +1487,9 @@ class PoolDataFile( RootTTreeFile ): except TransformThreadTimeout: logger.warning( 'Extra validation routine timed out.' ) except TransformThreadError as e: - logger.warning( 'Thread running extra validation routine failed to stop.\n%s' % e ) + logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e ) except Exception as e: - logger.warning( 'Extra validation routine error.\n%s' % e ) + logger.warning( 'Extra validation routine error.\n%s', e ) vTimer.stop() if not extraValidationResult: raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' ) @@ -1508,7 +1505,7 @@ class PoolDataFile( RootTTreeFile ): # if sc != 0: # raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) ) - logger.info( "%s successfully validated." % fName ) + logger.info( "%s successfully validated.", fName ) return def writeSize(self, arg): @@ -1544,8 +1541,8 @@ class PoolDataFile( RootTTreeFile ): # print (sys.exc_info()[0]) # print (sys.exc_info()[1]) # return - - #returns number_of_events and tuple of sizes + + #returns number_of_events and tuple of sizes return [ne, collectionSize] else: print ('not needed for file of this type') @@ -1560,7 +1557,7 @@ class PoolDataFile( RootTTreeFile ): except Exception as e: print ("Event count failed for %s: %s" % ( arg, e )) return None - if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testCountEvents' ] == False ): + if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ): logger.info( 'Test run in progress. Event count (AthFile-based) disabled.' ) return None resultList = getCachedFileInfo( fileList, 'nentries' ) @@ -1581,7 +1578,7 @@ class PoolDataFile( RootTTreeFile ): if oldLevel != newLevel: os.environ['POOL_OUTMSG_LEVEL'] = newLevel if logger: - logger.info( "Setting POOL message level to %d." 
% level ) + logger.info( "Setting POOL message level to %d.", level ) class EvgenFile( PoolDataFile ): @@ -1622,7 +1619,7 @@ class TAGFile( RootTTreeFile ): def getGUID(self, filename): # Use FClistGUID, then fallback to AthFile (N.B. tag files have funny names in the PFC!) rc, guid = getGUIDfromPFC("RootCollection||PFN:" + filename) - if guid != None: + if guid is not None: return guid if rc != 0: print ('Warning: Problem with PFC') @@ -1638,7 +1635,6 @@ class TAGFile( RootTTreeFile ): """Return number of events in file of argument arg. Return None if event count is not applicable to file type.""" try: - logger = arg.logger() fName = arg.value() except Exception as e: print ("Event count failed for %s: %s" % ( arg, e )) @@ -1726,7 +1722,6 @@ class NtupleFile( RootTTreeFile ): def eventCount( self, arg ): try: - logger = arg.logger() fName = arg.value() except Exception as e: print ("Event count failed for %s: %s" % ( arg, e )) @@ -1750,7 +1745,7 @@ class NtupleFile( RootTTreeFile ): except Exception as e: print ("Could not validate file associated with %s: %s" % ( arg, e )) return - if VALIDATION_DICT[ 'ALL' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False: logger.info( "Skipping all validation routines." ) return # Defined default validation values @@ -1765,17 +1760,17 @@ class NtupleFile( RootTTreeFile ): if vDict[ 'testIfExists' ]: raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' ) else: - logger.info( "Ignoring missing %s." % fName ) + logger.info( "Ignoring missing %s.", fName ) return if fileutil.getsize( fName ) == 0: if vDict[ 'testIfEmpty' ]: raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' ) else: - logger.info( "Ignoring empty %s." % fName ) + logger.info( "Ignoring empty %s.", fName ) return if vDict[ 'testIfCorrupt' ]: - logger.info( "Checking %s for corruption." % fName ) + logger.info( "Checking %s for corruption.", fName ) vTimer.start( '%s validation' % argName ) from PyJobTransformsCore.trfValidateRootFile import checkFile as checkNTUPFile sc = checkNTUPFile(fileName=fName, type='basketWise', requireTree=False, msg=logger) @@ -1785,7 +1780,7 @@ class NtupleFile( RootTTreeFile ): if vDict[ 'testCountEvents' ] and self.tree_names: - logger.info( "Attempting to validate %s using event count routine." % fName ) + logger.info( "Attempting to validate %s using event count routine.", fName ) vTimer.start( '%s validation' % argName ) eCount = arg.eventCount() vTimer.stop( '%s validation' % argName ) @@ -1794,18 +1789,18 @@ class NtupleFile( RootTTreeFile ): if not vDict[ 'continueOnZeroEventCount' ]: raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' ) else: - logger.info(" WARNING - 0 events in %s, proceeding with empty file. " % fName) + logger.info(" WARNING - 0 events in %s, proceeding with empty file. ", fName) else: - logger.info( "Ignoring 0 events in %s." % fName ) + logger.info( "Ignoring 0 events in %s.", fName ) return - elif eCount == None: + elif eCount is None: if vDict[ 'stopOnEventCountNone' ]: raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' ) else: - logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways." % fName ) + logger.info( "No event count for file %s (corrupt or unreachable). 
Proceeding anyways.", fName ) else: - logger.info( "Event counting not tested for %s." % fName ) - logger.info( "%s successfully validated." % fName ) + logger.info( "Event counting not tested for %s.", fName ) + logger.info( "%s successfully validated.", fName ) return @@ -1823,7 +1818,7 @@ class MonitorHistFile( RootTTreeFile ): except Exception as e: print ("Event count failed for %s: %s" % ( arg, e )) return None - if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] == False or VALIDATION_DICT[ 'testCountEvents' ] == False ): + if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ): logger.info( 'Test run in progress. Event count (ROOT-based) disabled.' ) return None ROOT = RootUtils.import_root(batch=True) @@ -1833,9 +1828,9 @@ class MonitorHistFile( RootTTreeFile ): except TransformThreadTimeout: logger.warning( 'ROOT file opening timed out.' ) except TransformThreadError as e: - logger.warning( 'Thread for ROOT file opening failed to stop.\n%s' % e ) + logger.warning( 'Thread for ROOT file opening failed to stop.\n%s', e ) except Exception as e: - logger.warning( 'ROOT file open error.\n%s' % e ) + logger.warning( 'ROOT file open error.\n%s', e ) if not f: logger.warning("Could not open file [%s].", fName) return None @@ -1858,12 +1853,12 @@ class MonitorHistFile( RootTTreeFile ): return None try: nBinsX = h.GetNbinsX() - except: + except Exception: f.Close() logger.warning( 'Unable to retrieve number of events.' ) return None nev = 0 - for i in xrange(1, nBinsX): + for i in range(1, nBinsX): if h[i] < 0: # should not happen logger.warning( 'Negative number of events for step %s.', h.GetXaxis().GetBinLabel(i) ) @@ -1892,7 +1887,7 @@ class MonitorHistFile( RootTTreeFile ): except Exception as e: print ("Could not validate file associated with %s: %s" % ( arg, e )) return - if VALIDATION_DICT[ 'ALL' ] == False: + if VALIDATION_DICT[ 'ALL' ] is False: logger.info( "Skipping all validation routines." ) return # Defined default validation values @@ -1907,23 +1902,23 @@ class MonitorHistFile( RootTTreeFile ): if vDict[ 'testIfExists' ]: raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' ) else: - logger.info( "Ignoring missing %s." % fName ) + logger.info( "Ignoring missing %s.", fName ) return if fileutil.getsize( fName ) == 0: if vDict[ 'testIfEmpty' ]: raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' ) else: - logger.info( "Ignoring empty %s." % fName ) + logger.info( "Ignoring empty %s.", fName ) return # if vDict[ 'testIfCorrupt' ]: -# logger.info( "Checking %s for corruption." % fName ) +# logger.info( "Checking %s for corruption.", fName ) # vTimer.start( '%s validation' % argName ) # sc = corruptionTestROOT( fName, self.validationType() ) # vTimer.stop( '%s validation' % argName ) # if sc<0: # raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) ) if vDict[ 'testCountEvents' ] and 'HIST_' not in fName: - logger.info( "Attempting to validate %s using event count routine." 
% fName ) + logger.info( "Attempting to validate %s using event count routine.", fName ) vTimer.start( '%s validation' % argName ) eCount = arg.eventCount() vTimer.stop( '%s validation' % argName ) @@ -1932,17 +1927,17 @@ class MonitorHistFile( RootTTreeFile ): if not vDict[ 'continueOnZeroEventCount' ]: raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' ) else: - logger.info(" WARNING - 0 events in %s, proceeding with empty file. " % fName) + logger.info(" WARNING - 0 events in %s, proceeding with empty file. ", fName) else: - logger.info( "Ignoring 0 events in %s." % fName ) + logger.info( "Ignoring 0 events in %s.", fName ) return - elif eCount == None: + elif eCount is None: if vDict[ 'stopOnEventCountNone' ]: raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' ) else: - logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways." % fName ) + logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyways.", fName ) elif 'HIST_' in fName: - logger.info('No event counting validation performed because file %s is of HIST_ subtype' % fName) + logger.info('No event counting validation performed because file %s is of HIST_ subtype', fName) if callable( vDict[ 'extraValidation' ] ): vTimer.start() @@ -1952,13 +1947,13 @@ class MonitorHistFile( RootTTreeFile ): except TransformThreadTimeout: logger.warning( 'Extra validation routine timed out.' ) except TransformThreadError as e: - logger.warning( 'Thread running extra validation routine failed to stop.\n%s' % e ) + logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e ) except Exception as e: - logger.warning( 'Extra validation routine error.\n%s' % e ) + logger.warning( 'Extra validation routine error.\n%s', e ) vTimer.stop() if not extraValidationResult: raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' ) - logger.info( "%s successfully validated." 
% fName ) + logger.info( "%s successfully validated.", fName ) return @@ -1999,8 +1994,8 @@ class CommentLine: def bigComment(self,char='-',width=80): line = CommentLine.getLine(char,width) return line + os.linesep + \ - self.smallComment() + os.linesep + \ - line + self.smallComment() + os.linesep + \ + line # @@ -2120,7 +2115,7 @@ class StringNumberList: if self.__numberList is not None: return (self.__numberList,self.__digits) nums = self.getNumbers(openBracket,closeBracket) if nums is None: return (None,None) - if nums is "": return (list(),0) + if nums=="": return (list(),0) numList = [ ] bclose = len(nums) posB = 1 @@ -2293,7 +2288,7 @@ class ServiceOverride(PostJobOptionsFile,TransformLogger): val = members[mem] jo.append( "%s.%s = %r" % (self.__service, mem, val) ) filename += '_%s_%s' % (mem,val) - self.logger().info('Creating jobOptions file %s' % filename) + self.logger().info('Creating jobOptions file %s', filename) joFile = open(filename,'w') joFile.write( os.linesep.join(jo) + os.linesep ) joFile.close() diff --git a/Tools/PyJobTransformsCore/python/xmlutil.py b/Tools/PyJobTransformsCore/python/xmlutil.py index e90dc947d099d6e92ed775e8b1d7922fa1186140..a20761dabb37c877aca1ef0ecc67541682c7801c 100755 --- a/Tools/PyJobTransformsCore/python/xmlutil.py +++ b/Tools/PyJobTransformsCore/python/xmlutil.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration from __future__ import print_function @@ -48,7 +48,7 @@ class XMLNode: def __init__(self,name,contents=None): self.__name = name self.__contents = None - if contents != None: self.setContents(contents) + if contents is not None: self.setContents(contents) self.__attributes = {} diff --git a/Tools/PyJobTransformsCore/share/checklog.py b/Tools/PyJobTransformsCore/share/checklog.py index 18243ace002f042752323c43bc534c48b1d57711..5719aaa9c8d99235ae2e88c934cab7b7213f7802 100755 --- a/Tools/PyJobTransformsCore/share/checklog.py +++ b/Tools/PyJobTransformsCore/share/checklog.py @@ -1,20 +1,23 @@ #!/usr/bin/env python -import os,sys,re +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration + +import os +import sys from getopt import getopt from AthenaCommon.Logging import logging from AthenaCommon import ExitCodes -from PyJobTransformsCore.trferr import * -from PyJobTransformsCore.JobReport import * -from PyJobTransformsCore.trfutil import * +from PyJobTransformsCore.trferr import TransformErrorDiagnoser, AthenaLogChecker +from PyJobTransformsCore.JobReport import JobReport, JobInfo +from PyJobTransformsCore.trfutil import get_atlas_release def usage(): - print "Parse an athena logfile for errors." 
- print "Usage: %s [options] <logfilename>" % os.path.basename(sys.argv[0]) - print "Options:" - print " -h : print short help" - print " -d : print details on the error matching" - print " -x : write jobInfo.xml file" - print " -r <release> : assume atlas release <release>" + print("Parse an athena logfile for errors.") + print("Usage: %s [options] <logfilename>" % os.path.basename(sys.argv[0])) + print("Options:") + print(" -h : print short help") + print(" -d : print details on the error matching") + print(" -x : write jobInfo.xml file") + print(" -r <release> : assume atlas release <release>") if len(sys.argv) <= 1: usage() @@ -75,7 +78,7 @@ errorDocter = TransformErrorDiagnoser() for error in report.errors(): errorDocter.diagnoseError(error) -print report +print(report) if writeXML: report.writeJobInfoXML() # exit with appropriate code diff --git a/Tools/PyJobTransformsCore/share/find_data.py b/Tools/PyJobTransformsCore/share/find_data.py index fb8e106b7d89eec6fe7372d5b4d7b567238977b6..cb3a2f3ee60d9aaddcc599c6a75c7f2aad1d9f5a 100755 --- a/Tools/PyJobTransformsCore/share/find_data.py +++ b/Tools/PyJobTransformsCore/share/find_data.py @@ -1,5 +1,5 @@ #!/usr/bin/env python - +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration from __future__ import print_function import sys,os diff --git a/Tools/PyJobTransformsCore/share/find_library.py b/Tools/PyJobTransformsCore/share/find_library.py index 383dda295e0945d2c7e285d9bb54c1bc029556c7..db739019eeaf9ee3ff6ce6a2c02af4e99696675f 100755 --- a/Tools/PyJobTransformsCore/share/find_library.py +++ b/Tools/PyJobTransformsCore/share/find_library.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration import sys,os __doc__ = """Print the full path of the requested shared library, as found in LD_LIBRARY_PATH. @@ -7,7 +8,7 @@ The 'lib' prefix and 'so' suffix can be omitted. If no match if found, nothing i def usage(): use = "usage: %s <library> [library]" % os.path.basename(sys.argv[0]) - print use + os.linesep*2 + __doc__ + print(use + os.linesep*2 + __doc__) if len(sys.argv) <= 1: usage() @@ -20,8 +21,4 @@ except ImportError: for lib in sys.argv[1:]: full = find_libraries(lib) - for f in full: print f - - - - + for f in full: print(f) diff --git a/Tools/PyJobTransformsCore/share/find_python_module.py b/Tools/PyJobTransformsCore/share/find_python_module.py index aa0ade202eca95a3211d47a2254a139236d6de41..a169e5bd5963da1449366530573cffc36089270b 100755 --- a/Tools/PyJobTransformsCore/share/find_python_module.py +++ b/Tools/PyJobTransformsCore/share/find_python_module.py @@ -1,14 +1,15 @@ #!/usr/bin/env python +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration import sys,os def usage(): use = "usage: %s <module> [module]" % os.path.basename(sys.argv[0]) - print use + """ + print(use + """ Print the full path of the requested python module(s), as found in sys.path (PYTHONPATH + system paths) Wildcards are accepted, in which case all (first) matches are printed, one per line. The '.py' suffix can be omitted, in which case both .py and .pyc extensions are tried. 
-If no match if found, nothing is printed.""" +If no match is found, nothing is printed.""") if len(sys.argv) <= 1: @@ -22,4 +23,4 @@ except ImportError: for mod in sys.argv[1:]: full = find_python_modules(mod) - for f in full: print f + for f in full: print(f) diff --git a/Tools/PyJobTransformsCore/share/trf_ls b/Tools/PyJobTransformsCore/share/trf_ls index 463529e9c8045a124ec256f5c6aad9ae14ba9e4e..068bb9959c6d96b4d000c0f9c20466c9d9c4f9e5 100755 --- a/Tools/PyJobTransformsCore/share/trf_ls +++ b/Tools/PyJobTransformsCore/share/trf_ls @@ -1,8 +1,9 @@ #!/usr/bin/env python +# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration import sys,os,getopt def usage(): use = "usage: %s [-h] [-p] [-f] [[trf_name] [trf_name]]" % os.path.basename(sys.argv[0]) - print use + """ + print(use + """ Print a list of available jobtransforms, or find a specific one. Wildcards allowed. If <trf_name> does not end with _trf.py, this will be added before searching. @@ -12,7 +13,7 @@ Options: -h : Print this help message -f : Print full path name. -p : Look in PYTHONPATH and include package name in front of trf name (for import in python) - If combined with option -f, print full path of python module.""" + If combined with option -f, print full path of python module.""") showPython = False @@ -61,11 +62,10 @@ for f in filelist: for full in found: trf = os.path.basename(full) if showPath: - print full + print(full) elif showPython: dir = os.path.dirname(full) package = os.path.basename(dir) - print '%s ' % os.path.join(package,trf) + print('%s ' % os.path.join(package,trf)) else: - print trf - + print(trf) diff --git a/Tools/PyJobTransformsCore/test/CastorPreStager_test.py b/Tools/PyJobTransformsCore/test/CastorPreStager_test.py deleted file mode 100755 index 88d78a2b0b51a370c73136cb2566f50ba75e39ce..0000000000000000000000000000000000000000 --- a/Tools/PyJobTransformsCore/test/CastorPreStager_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration - - -from PyJobTransformsCore.FilePreStager import theFileStagerRobot -theFileStagerRobot.setLoggerLevel(0) # all - -from PyJobTransformsCore import CastorPreStager -from PyJobTransformsCore.trfutil import FileList - -# use FileArg for expansion of coded filelist -fileList = FileList.expandStringToList( "rfio:/castor/cern.ch/atlas/csc/valiprod/sampleA/mc12/005001.pythia_minbias/simul/v12000003/mc12.005001.pythia_minbias.simul.HITS.v12000003._[00001-4].pool.root" ) -fileList += [ "/castor/cern.ch/atlas/csc/valiprod/sampleA/mc12/005001.pythia_minbias/simul/v12000003/mc12.005001.pythia_minbias.simul.HITS.v12000003._00006.pool.root", -"castor:/castor/cern.ch/atlas/csc/valiprod/sampleA/mc12/005001.pythia_minbias/simul/v12000003/mc12.005001.pythia_minbias.simul.HITS.v12000003._00007.pool.root" ] - -fileList += [ "any_file_not_in_castor" ] - -filesToStage = theFileStagerRobot.addFilesToStagerIfNeeded( fileList ) -print "Added %s files for pre-staging" % len(filesToStage) -for f in filesToStage: print f -theFileStagerRobot.waitUntilAllFilesStaged()
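The script changes above complete the move to the print() function, and the Python 2-only CastorPreStager_test.py is deleted rather than ported. A sketch of the portable form, assuming only the stdlib (the file names are invented sample values):

```python
# print() works on Python 3 and, with the __future__ import, the same
# line also runs under Python 2, which is how the converted scripts
# above stay usable in both interpreters.
from __future__ import print_function

files = ['a.pool.root', 'b.pool.root']  # invented sample values
print("Added %s files for pre-staging" % len(files))
for f in files:
    print(f)
```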