diff --git a/Tools/PyJobTransformsCore/CMakeLists.txt b/Tools/PyJobTransformsCore/CMakeLists.txt
index e20b18ac707acb926b04b839010b2507da049c60..2d1ce20fdf77a68f7742fdf6ef0fbdb36800cb0c 100644
--- a/Tools/PyJobTransformsCore/CMakeLists.txt
+++ b/Tools/PyJobTransformsCore/CMakeLists.txt
@@ -1,15 +1,8 @@
-################################################################################
-# Package: PyJobTransformsCore
-################################################################################
+# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
 # Declare the package name:
 atlas_subdir( PyJobTransformsCore )
 
 # Install files from the package:
 atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
-atlas_install_scripts( share/checklog.py share/find_*.py share/trf_ls POST_BUILD_CMD ${ATLAS_FLAKE8} )
-atlas_install_scripts( share/slimmetadata )
-atlas_install_generic( share/*.db
-                       DESTINATION share
-                       EXECUTABLE )
-
+atlas_install_scripts( share/find_*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
diff --git a/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py b/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py
deleted file mode 100755
index 47aadf82ffb18f06e60d68a182b98aa2c0b1e25b..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/AtlasErrorCodes.py
+++ /dev/null
@@ -1,706 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-## @package AtlasErrorCodes
-#
-#  @brief This module contains all the error-related classes and functions that facilitate
-#  the logistics of error management.
-#  @author $LastChangedBy: jchapman $
-#  @version $Rev: 355335 $
-#  @date $Date: 2011-03-30 13:05:33 +0200 (Wed, 30 Mar 2011) $
-
-from __future__ import with_statement, print_function
-import os,re,sys,traceback
-from .envutil import find_file_env
-from .xmlutil import XMLNode
-
-__all__ = [ 'AtlasErrorCodeException', 'ErrorInfo', 'ErrorCategory' ]
-
-## Standard way to denote an INFO level error severity.
-INFO='INFO'
-## Standard way to denote a WARNING level error severity. 
-WARNING='WARNING'
-## Standard way to denote an ERROR level error severity.
-ERROR='ERROR'
-## Standard way to denote a FATAL level error severity.
-FATAL='FATAL'
-## Standard way to denote the absence of an error.
-NO_ERROR = None
-
-## Default filename containing the error categories to be used if one is not provided.
-defaultCategoryFile = 'atlas_error_categories.db'
-## Default filename containing the error patterns to be used if one is not provided.
-defaultErrorPatternFile = 'atlas_error_patterns.db'
-## Default filename containing the error filters to be used if one is not provided.
-defaultIgnorePatternFile = 'atlas_error_ignore.db'
-
-## Error category dictionary with @em code as key
-__categoryCodeDict = {}
-## Error category dictionary with @em acronym as key
-__categoryAcronymDict = {}
-## Dictionary storing lists of ErrorPattern instances with the Atlas software @em release as key.
-__errorPatterns  = {}
-## Dictionary storing lists of error filter regular expressions with Atlas software @em release as key.
-__ignorePatterns = {}
-
-
-## Simple Exception class extension to highlight the fact that an exception
-#  occurred in the AtlasErrorCodes package.
-class AtlasErrorCodeException( Exception ):
-    ## Constructor for the AtlasErrorCodeException class.
-    #  @param message A description of the nature of the exception.
-    #  @return AtlasErrorCodeException instance
-    def __init__( self, message ):
-        Exception.__init__( self, message )
-        
-
-## Container class for information describing an error category.
-class ErrorCategory( object ):
-    ## Constructor for the ErrorCategory class.
-    #  @param code The code associated with the particular error category.
-    #  @param acronym The acronym associated with the particular error category.
-    #  @param description The textual description of the particular error category.
-    #  @param count The number of errors found that are of this particular error category. Defaults to @c 1.
-    #  @return ErrorCategory instance
-    def __init__( self, code, acronym, description, count = None ):
-        if count is None:
-            count = 1
-        ## The code associated with the particular error category.
-        self.code = code
-        ## The acronym associated with the particular error category.
-        self.acronym = acronym
-        ## The textual description of the particular error category.
-        self.description = description
-        ## The number of errors found that are of this particular error category.
-        self.count = count
-
-    ## Pretty print of the error category.
-    #  @return String
-    def __str__( self ):
-        s = '%s,\t%s,\t%s' % ( self.acronym, self.code, self.description )
-        if self.count > 1:
-            s += ' (%s times)' % self.count
-        return s
-
-    ## Generate an XML node containing all the error category's information.
-    #  @return xmlutil.XMLNode instance 
-    def xmlNode( self ):
-        return XMLNode( "errorcategory", self.description ).setAttributes( code = self.code, acronym = self.acronym, count = str( self.count ) )
-
-    ## Retrieve the string representation of the xmlutil.XMLNode instance associated with this ErrorCategory instance.
-    #  @return String 
-    def getXML( self ):
-        return self.xmlNode().getXML()
-        
-        
-## Special case: The OK category describes a successful transform execution and omits/delays
-#  the need to load other error categories from disk.
-OK_CATEGORY = ErrorCategory( 0, 'OK', 'Finished successfully' )
-## Special case: A category for an error that is not recognised.
-#  The code, acronym and description of this unknown category are used to supplement
-#  the creation of ErrorCategory instances where the code and/or acronym is not recognised.
-UNKNOWN_CATEGORY = ErrorCategory( 999999, 'UNKNOWN', 'Unknown ErrorCode or Acronym' )
-## Special case: This category describes a permissible error.
-#  @see JobReport.GPickleContents_new() and JobReport.GPickleContents_old().
-NEEDCHECK = ErrorCategory( 1, 'NEEDCHECK', 'Successful but warrants further investigation' )
-
-
-## Container class for information describing an error pattern
-class ErrorPattern( object ):
-    ## Constructor for the ErrorPattern class.
-    #  @param pattern The regular expression that is able to match an error.
-    #  @param acronym The acronym associated with the particular error category.
-    #  @param severity The severity level of the error in question. Defaults to @c AtlasErrorCodes.ERROR.
-    #  @return ErrorPattern instance
-    def __init__( self, pattern, acronym, severity = None ):
-        if severity is None:
-            severity = ERROR
-        self.severity = severity
-        self.patternRE = re.compile( pattern )
-        self.acronym = acronym
-
-    ## Check if a given string matches the pattern.
-    #  @return re.match object if a match is successful, and @c None otherwise.
-    def match(self,value):
-        return self.patternRE.match(value)
-
-## Container class for information describing an error.
-class ErrorInfo( object ):
-    ## Properties associated with the class. These properties are automatically set to @c None
-    #  if not explicitly declared in the constructor.
-    __slots__ = ( 'message', 'code', 'acronym', 'category', 'severity', 'who', 'producer', 'stage', 'events', 'diagnosis', 'backtrace', 'count', 'exitstatus', 'stdout', 'stderr' )
-    ## List of properties to omit when ErrorInfo.__str__() is called. 
-    __dontPrint = [ 'count', 'category', 'code', 'acronym' ]
-    ## List of properties to omit when generating a xmlutil.XMLNode instance when ErrorInfo.getXML() is called. 
-    __notInXML  = [ 'count', 'category' ]
-    ## List of properties to omit when making comparisons between ErrorInfo instances.
-    __skipInComparison = [ 'count', 'events' ]
-
-    ## @brief Constructor for the ErrorInfo class.
-    #  @details This constructor does not require any arguments to create a valid ErrorInfo instance.
-    #  However, it will accept the following keyword arguments to supplement its properties:
-    #  @li @b code    : Error code.
-    #  @li @b acronym : Error acronym.
-    #  @li @b severity : @em AtlasErrorCodes.FATAL, @em AtlasErrorCodes.ERROR, @em AtlasErrorCodes.WARNING or
-    #  @c None (if the error code is @em 0).
-    #  @li @b who     : Name of the Algorithm, Tool, Service, JobTransform, etc. that generated the error.
-    #  @li @b producer: Name of the program that produced the error (e.g. @c athena.py, the transform's name).
-    #  @li @b stage   : Running stage/step of the executable (e.g. in @c athena.py: @em initialise,
-    #  @em execute and @em finalise).
-    #  @li @b events  : Where available, it will be the list of event numbers where the error occurred.
-    #  @li @b message : Additional String description of the error.
-    #  @li @b diagnosis : Usually populated by the trferr.errorHandler during the final stages of the transform execution.
-    #  It provides some information on the causes of the error.
-    #  @li @b backtrace : By default, the backtrace is a list of tuples in the same format as
-    #  <tt>traceback.extract_tb(sys.exc_info()[2])</tt>.
-    #  @li @b exitstatus: A 2-tuple containing the exit status code of the sub-process and
-    #  its interpretation (where available).
-    #  @li @b stdout   : The stdout of the sub-process.
-    #  @li @b stderr   : The stderr of the sub-process.
-    #  @return ErrorInfo instance
-    def __init__( self, **kwargs ):
-        # Initialise all property values to the default value of @c None.
-        self.clear()
-        # Overwrite default values with those provided in the constructor.
-        for name,value in kwargs.items():
-            setattr( self, name, value )
-        # set more meaningful defaults for certain properties
-        if not self.backtrace:
-            self.backtrace = self.defaultBacktrace()
-        if self.count is None:
-            self.count = 1
-        cat = kwargs.get( 'acronym' ) or kwargs.get( 'code' )
-        if cat is not None:
-            self.setCategory( cat )
-
-    ## Pretty print information contained in the object. Properties specified in ErrorInfo.__dontPrint will be omitted.
-    #  @return String (multi-line)
-    def __str__( self ):
-        acronym = self.acronym
-        description = None
-        if self.category:
-            if not acronym: acronym = self.category.acronym
-            if acronym != 'TRF_UNKNOWN':
-                description = self.category.description
-        atts = {}
-        count = self.count
-        if count > 1: atts[ 'count' ] = count
-        header = 'ErrorInfo'
-        if atts:
-            header += ' ('
-            for n,v in atts.items():
-                header += '%s=%s ' % (n,v)
-            # overwrite last space
-            header = header[ :-1 ] + ')'
-        me = [ header ]
-        codeLine = ''
-        if acronym:
-            codeLine += 'acronym=%s' % acronym
-        if self.code is not None:
-            codeLine += ' (%s)' % self.code
-            if description: codeLine += ' ' + description
-        if codeLine: me += [ codeLine ]
-        # add the other (non-None) variable arguments
-        for arg in ErrorInfo.__slots__:
-            # don't print some
-            if arg in ErrorInfo.__dontPrint: continue
-            valStr = self.valueString( arg )
-            if valStr:
-                indent = ' ' * ( len( arg ) + 1 )
-                valStr = valStr.replace( os.linesep, os.linesep + indent ).strip()
-                me.append( '%s=%s' % ( arg, valStr ) )
-        sep = os.linesep + '  '
-        return sep.join( me )
-
-    ## Part of ErrorInfo instance comparison mechanism.
-    #  Properties in ErrorInfo.__skipInComparison will not be compared.
-    #  @param other The @em other ErrorInfo instance.
-    #  @return Boolean
-    def __eq__( self, other ):
-        for att in ErrorInfo.__slots__:
-            if att in ErrorInfo.__skipInComparison: continue
-            if getattr( self, att ) != getattr( other, att ): return False
-        return True
-
-    ## Part of ErrorInfo instance comparison mechanism. The negated result of ErrorInfo.__eq__().
-    #  @param other The @em other ErrorInfo instance.
-    #  @return Boolean
-    def __ne__( self, other ):
-        return not self.__eq__( other )
-
-    ## Convenience function to add ErrorInfo instances together. This allows the @c += syntax to be used.
-    #  @param other The @em other ErrorInfo instance.
-    #  @return self
-    def __iadd__( self, other ):
-        self.count += other.count
-        self.addEvents( other.events )
-        return self
-
-    ## Formats the value of a requested property.
-    #  @param name The @em name of the property to retrieve.
-    #  @return String (multi-line)
-    def valueString( self, name ):
-        value = getattr( self, name )
-        if value is None: return ''
-        valType = type( value ).__name__
-        if name == 'backtrace':
-            # make nice backtrace format
-            valStr = ''.join( traceback.format_list( value ) )
-        elif valType == 'list' or valType == 'tuple':
-            valStr = ','.join( [ str( s ) for s in value ] )
-        elif valType == 'dict':
-            valStr = os.linesep.join( [ '%s=%s' % ( n, v ) for n, v in value.items() ] )
-        else:
-            valStr = str( value )
-        return valStr
-
-    ## Initialise all the properties as defined in ErrorInfo.__slots__ to @c None.
-    #  @return None
-    def clear( self ):
-        for arg in ErrorInfo.__slots__:
-            setattr( self, arg, None )
-
-    ## Retrieve the default backtrace which excludes @c AthenaCommon/Include.py from the stack trace.
-    #  @return List of String (or @c None)
-    def defaultBacktrace( self ): 
-        exc_info =  sys.exc_info()[ 2 ]
-        if not exc_info: return None
-        tb = traceback.extract_tb( exc_info )
-        if not tb: return None
-        short_tb = []
-        for frame_info in tb:
-            if 'AthenaCommon/Include.py' not in frame_info[ 0 ]:
-                short_tb.append( frame_info )
-        return short_tb
-
-    ## Set the error category property.
-    #  @param cat A ErrorCategory instance, acronym or code may be used.
-    #  @return None 
-    def setCategory( self, cat ):
-        if isinstance( cat, str ) or isinstance( cat, int ):
-            cat = getCategory( cat )
-        self.category = cat
-        self.code = cat.code
-        self.acronym = cat.acronym
-        # no backtrace needed if error code is 0 (OK)
-        if self.code == 0:
-            self.backtrace = None
-            self.severity = None
-        elif self.severity is None:
-            self.severity = FATAL
-
-    ## Errors occur when certain events are being processed.
-    #  The function adds the given event number to the set of events that gave rise to this error.
-    #  @param events Either a single event number or a list of event numbers is permissible.
-    #  @return None
-    def addEvents( self, events ):
-        if events is None:
-            return
-        try:
-            self.events.update( events )
-        except AttributeError: # self.events is None
-            try:
-                self.events = set( events )
-            except TypeError:
-                self.events = set( [ events ] )
-        except TypeError:
-            self.events.add( events )
-
-    ## Generate an XML node containing all the error's properties.
-    #  Properties listed in ErrorInfo.__notInXML are ignored.
-    #  @return xmlutil.XMLNode instance 
-    def xmlNode( self ):
-        node = XMLNode( 'ErrorInfo' )
-        # set attributes
-        node.setAttribute( 'count', self.count )
-        for att in ErrorInfo.__slots__:
-            # don't print some
-            if att in ErrorInfo.__notInXML: continue
-            valStr = self.valueString( att ).strip()
-            if valStr:
-                node.addContents( XMLNode( att, valStr ) )
-        return node
-
-    ## Retrieve the string representation of the xmlutil.XMLNode instance associated with this ErrorInfo instance.
-    #  @return String 
-    def getXML(self):
-        return self.xmlNode().getXML()
-
-## Retrieve error patterns for a given release.
-#  @param release Atlas release number which is the key used in the AtlasErrorCodes.__errorPatterns dictionary.
-#  @exception KeyError Raised when the given @em release is not a String or a compiled regular expression.
-#  @return List of ErrorPattern instances or @c None
-def getErrorPatterns( release ):
-    global __errorPatterns
-    if isinstance(release, str):
-        total_pats = [] # list of recognised error patterns
-        for rel, pats in __errorPatterns.items():
-            if rel.match( release ):
-                total_pats += pats
-        return total_pats
-    elif isinstance(release, re.Pattern):
-        for rel, pats in __errorPatterns.items():
-            if rel.pattern == release.pattern:
-                return pats
-    else:
-        raise KeyError( 'getErrorPatterns() takes either a string or a compiled regular expression. Got an %s instead.' % type(release).__name__ )
-    return None
-
-## Retrieve error filter patterns for a given release.
-#  @param release Atlas release number which is the key used in the AtlasErrorCodes.__ignorePatterns dictionary.
-#  @exception KeyError Raised when the given @em release is not a String or a compiled regular expression.
-#  @return List of compiled regular expression instances or @c None
-def getIgnorePatterns( release ):
-    global __ignorePatterns
-    if isinstance(release, str):
-        total_pats = [] # list of possible patterns to ignore
-        for rel, pats in __ignorePatterns.items():
-            if rel.match( release ): 
-                total_pats += pats  
-        return total_pats
-    elif isinstance(release, re.Pattern):
-        for rel, pats in __ignorePatterns.items():
-            if rel.pattern == release.pattern:
-                return pats
-    else:
-        raise KeyError( 'getIgnorePatterns() takes either a string or a compiled regular expression. Got an %s instead.' % type(release).__name__ )
-    return None
-
-## Add an ErrorPattern instance to a specific release.
-#  @param release Atlas release number which is the key used in the AtlasErrorCodes.__errorPatterns dictionary.
-#  @param pat The ErrorPattern instance to add.
-#  @return None
-def addErrorPattern( release, pat ):
-    global __errorPatterns
-    releaseRE = re.compile( release )
-    patList = getErrorPatterns( releaseRE )
-    if patList:
-        patList.append( pat )
-    else:
-        __errorPatterns[ releaseRE ] = [ pat ]
-
-## Add a compiled regular expression of an error filter to a specific release.
-#  @param release Atlas release number which is the key used in the AtlasErrorCodes.__ignorePatterns dictionary.
-#  @param pat The compiled regular expression instance to add.
-#  @return None
-def addIgnorePattern( release, pat ):
-    global __ignorePatterns
-    releaseRE = re.compile( release )
-    patList = getIgnorePatterns( releaseRE )
-    if patList:  
-        patList.append( pat )
-    else:
-        __ignorePatterns[ releaseRE ] = [ pat ]
-
-## Add a new ErrorCategory entry to the global dictionaries.
-#  @param code New error category code.
-#  @param acronym New error acronym.
-#  @param description New error description.
-#  @return Boolean
-def addCategory( code, acronym, description ):
-    global __categoryCodeDict, __categoryAcronymDict
-    # check that error code is unique
-    cat = __categoryCodeDict.get( code )
-    if cat:
-        print ("ERROR: Error category code %s already exists with acronym %s (new acronym: %s)" % ( code, cat.acronym, acronym ))
-        return False
-    # check that error acronym is unique
-    cat = __categoryAcronymDict.get( acronym )
-    if cat:
-        print ("ERROR: Error category acronym %s already exists with code %d (new code: %d)" % ( acronym, cat.code, code ))
-        return False
-    # all OK. Now add it.
-    cat = ErrorCategory( code, acronym, description )
-    __categoryCodeDict[ code ] = cat
-    __categoryAcronymDict[ acronym ] = cat
-    return True
-
-## Get the full error category information by providing an error code or an error acronym.
-#  @param codeOrAcronym An Integer error code or a String acronym is accepted.
-#  @exception AtlasErrorCodeException codeOrAcronym is of an unexpected type. 
-#  @return ErrorCategory instance (or @c None)
-def getCategory( codeOrAcronym ):
-    if codeOrAcronym is None: return None
-    global __categoryCodeDict, __categoryAcronymDict
-    argType = type(codeOrAcronym).__name__
-    if argType == 'int': # code
-        code = codeOrAcronym
-        if code == OK_CATEGORY.code: return OK_CATEGORY
-        if not categoriesCount(): readCategories()
-        cat = __categoryCodeDict.get(code)
-        return cat or ErrorCategory( code, UNKNOWN_CATEGORY.acronym, UNKNOWN_CATEGORY.description )
-    if argType == 'str': # acronym
-        acronym = codeOrAcronym
-        if acronym == OK_CATEGORY.acronym: return OK_CATEGORY
-        if not categoriesCount(): readCategories()
-        cat = __categoryAcronymDict.get(acronym)
-        return cat or ErrorCategory( UNKNOWN_CATEGORY.code, acronym, UNKNOWN_CATEGORY.description )
-    # if we get here, there is a problem
-    raise AtlasErrorCodeException("Argument to getCategory() should be an error code (integer) or an error acronym (string)")
-
-## Given an error code, retrieve the associated error acronym.
-#  @param code The Integer error code.
-#  @return String
-def getAcronym( code ):
-    global __categoryCodeDict
-    # no need to load categories if it is OK
-    if code == OK_CATEGORY.code: return OK_CATEGORY.acronym
-    if not categoriesCount(): readCategories()
-    cat = __categoryCodeDict.get( code )
-    return cat and cat.acronym
-
-## Given an error acronym, retrieve the associated error code.
-#  @param acronym The String error acronym.
-#  @return Integer
-def getCode( acronym ):
-    global __categoryAcronymDict
-    # no need to load categories if it is OK
-    if acronym == OK_CATEGORY.acronym: return OK_CATEGORY.code
-    if not categoriesCount(): readCategories()
-    cat = __categoryAcronymDict.get( acronym )
-    return cat and cat.code
-
-## Print the values in the global AtlasErrorCodes.__categoryCodeDict dictionary.
-#  @return None
-def dumpCategories():
-    global __categoryCodeDict
-    for value in __categoryCodeDict.values():
-        print (value)
-
-## Count the number of entries in the global AtlasErrorCodes.__categoryCodeDict dictionary.
-#  @return Integer
-def categoriesCount():
-    global __categoryCodeDict
-    return len( __categoryCodeDict )
-
-## Clear the global AtlasErrorCodes.__categoryCodeDict dictionary.
-#  @return None
-def clearCategories():
-    global __categoryCodeDict
-    __categoryCodeDict.clear()
-
-## @brief Read error categories from file.
-#  @details Expected file format:
-#  One category per line, with 3 items separated by the ',' character:
-#  The first item should be an Integer and is the @c errorCode.
-#  The second item is interpreted as the @c errorAcronym.
-#  The rest of the line is considered the @c errorDescription.
-#  Leading and trailing whitespaces of each item are stripped.
-#  @param filename Name of the text file containing the error categories.
-#  Defaults to @c AtlasErrorCodes.defaultCategoryFile.
-#  @param clearExisting Clear all entries from the global 
-#  AtlasErrorCodes.__categoryCodeDict before populating it again. Defaults to @c True.
-#  @exception AtlasErrorCodeException is raised when the file containing the
-#  error categories is missing or when the file is in the wrong format.
-#  @return None
-def readCategories( filename = defaultCategoryFile, clearExisting = True ):
-    if clearExisting: clearCategories()
-    if os.path.exists( filename ):
-        fullfile = filename
-    else:
-        # The current directory is searched followed by the paths specified in the @c DATAPATH environment variable.
-        fullfile = find_file_env( filename, 'DATAPATH' )
-        if not fullfile:
-            raise AtlasErrorCodeException( 'Could not find error category file %s' % filename )
-    linenum = 0
-    nRead = 0
-    nAmbiguous = 0
-    with open( fullfile ) as catFile:
-        for line in catFile:
-            linenum += 1
-            line = line.strip()
-            # skip empty lines and commented out lines
-            if not line or line.startswith( '#' ): continue
-            parts = line.split( ',' )
-            if len( parts ) < 3:
-                raise AtlasErrorCodeException( 'missing comma in line %s of error category file %s:\n%s' % ( linenum, filename, line ) )
-            code = int( parts[ 0 ].strip() )
-            acronym = parts[ 1 ].strip()
-            description = ','.join( parts[ 2: ]).strip()
-            added = addCategory( code, acronym, description )
-            if added:
-                nRead += 1
-                globals()[ acronym ] = code
-            else:
-                nAmbiguous += 1
-    print ("INFO: read %s error categories from file %s" % ( nRead, fullfile ))
-    if nAmbiguous:
-        print ("ERROR: Ignored %d ambiguous error category definitions" % nAmbiguous)
-
-## Count the number of ErrorPatterns in the global AtlasErrorCodes.__errorPatterns dictionary
-def errorPatternsCount():
-    global __errorPatterns
-    return sum( [ len(val) for val in __errorPatterns.values() ] )
-
-## Clear the global AtlasErrorCodes.__errorPatterns dictionary
-def clearErrorPatterns():
-    global __errorPatterns
-    __errorPatterns.clear()
-
-## @brief Read error patterns from file.
-#  @details Expected file format:
-#  One error pattern entry per line, with 4 items separated by the ',' character:
-#  The first item is the Atlas @c release.
-#  The second item is interpreted as the @c errorAcronym.
-#  The third item is the @c producer of the error. 
-#  The rest of the line is considered the REGEX @c pattern.
-#  Leading and trailing whitespaces of each item are stripped.
-#  @param filename Name of the text file containing the error patterns.
-#  Defaults to @c AtlasErrorCodes.defaultErrorPatternFile.
-#  @param clearExisting Clear all entries from the global 
-#  AtlasErrorCodes.__errorPatterns before populating it again. Defaults to @c True.
-#  @exception AtlasErrorCodeException is raised when the file containing the
-#  error patterns is missing, when the file is in the wrong format or when the
-#  extracted acronym is not recognised.
-#  @return None
-def readErrorPatterns(filename=defaultErrorPatternFile,clearExisting=True):
-    if clearExisting: clearErrorPatterns()
-    if os.path.exists(filename):
-        fullfile = filename
-    else:
-        fullfile = find_file_env(filename,'DATAPATH',depth=10)
-        if not fullfile:
-            raise AtlasErrorCodeException('Could not find error patterns file %s' % filename)
-    linenum = 0
-    nRead = 0
-    severityRE = re.compile(r'\s*(?P<severity>%s|%s|%s|%s)\s+' % ( INFO, WARNING, ERROR, FATAL ) )
-    with open( fullfile ) as patFile:
-        for line in patFile:
-            linenum += 1
-            line = line.strip()
-            # skip empty and commented out lines.
-            if not line or line.startswith('#'): continue
-            parts = line.split(',')
-            if len(parts) < 4:
-                raise AtlasErrorCodeException('missing comma in line %s of error pattern file %s:\n%s'% (linenum,filename,line) )
-            release = parts[0].strip()
-            acronym = parts[1].strip()
-            who = parts[2].strip()
-            if who:
-                pattern = r'(?P<who>%s)\s+?' % who
-            else:
-                pattern = '(?P<who>)'
-            msg = ','.join(parts[3:]).strip()
-            pattern += r'(?P<message>(?:%s).*)' % msg
-            # check that error acronym is known
-            cat = __categoryAcronymDict.get(acronym)
-            if not cat:
-                raise AtlasErrorCodeException('Unknown acronym: %s in line %s of error pattern file %s:\n%s'% (acronym,linenum,filename,line) )
-            # Determine severity of error pattern
-            severityRE_result = severityRE.match( msg )
-            if not severityRE_result:
-                severity = FATAL # if severity cannot be deduced, it is treated as FATAL.
-            else:
-                severity = severityRE_result.group( 'severity' )
-            addErrorPattern( release, ErrorPattern( pattern, acronym, severity ) )
-            nRead += 1
-    print ("INFO: read %d error patterns from file %s" % ( nRead, fullfile ))
-
-## Count the number of regex instances in the global AtlasErrorCodes.__ignorePatterns dictionary.
-def ignorePatternsCount():
-    return sum( [ len(val) for val in __ignorePatterns.values() ] )
-
-## @brief Read error filter patterns from file.
-#  @details Expected file format:
-#  One error pattern entry per line, with 3 items separated by the ',' character:
-#  The first item is the Atlas @c release.
-#  The second item is the @c producer of the error. 
-#  The rest of the line is considered the REGEX @c pattern.
-#  Leading and trailing whitespaces of each item are stripped.
-#  @param filename Name of the text file containing the error filter patterns.
-#  Defaults to @c AtlasErrorCodes.defaultIgnorePatternFile.
-#  @exception AtlasErrorCodeException is raised when the file containing the
-#  error filter patterns is missing or when the file is in the wrong format.
-#  @return None
-def readIgnorePatterns(filename=defaultIgnorePatternFile):
-    if os.path.exists(filename):
-        fullfile = filename
-    else:
-        fullfile = find_file_env(filename,'DATAPATH',depth=10)
-        if not fullfile:
-            raise AtlasErrorCodeException('Could not find ignore patterns file %s' % filename)
-    linenum = 0
-    nRead = 0
-    with open( fullfile ) as patFile:
-        for line in patFile:
-            linenum += 1
-            line = line.strip()
-            # skip empty lines and commented out lines
-            if not line or line.startswith('#'): continue
-            parts = line.split(',')
-            if len(parts) < 3:
-                raise AtlasErrorCodeException('missing comma in line %s of ignore pattern file %s:\n%s'% (linenum,filename,line) )
-            release = parts[0].strip()
-            who = parts[1].strip()
-            if who:
-                pattern = r'%s\s+?' % who
-            else:
-                pattern = ''
-            pattern += ','.join(parts[2:]).strip()
-            addIgnorePattern( release, re.compile(pattern) )
-            nRead += 1
-    print ("INFO: read %d ignore patterns from file %s" % (nRead,fullfile))
-
-## Import the error codes into shell environment
-#  @warning Deprecated. 
-#  @return None
-def import_errorcodes(environment):
-    for code,cat in __categoryCodeDict.items():
-        environment[cat.acronym] = code
-
-## Match a line (from the log file) against all known error patterns for a particular Atlas software release.
-#  @param line A line (String) from the error logs.
-#  @param release The release number of the Atlas software used to execute the transform.
-#  @return a tuple containing the matched regex object, and the associated ErrorInfo object. Returns <tt>( None, None )</tt> if no match is found.
-def matchErrorPattern(line,release):
-    rels = [ 'ALL' ]
-    # add current release
-    if release:
-        try:
-            release3 = '.'.join(release.split('.')[:3])
-        except Exception:
-            release3 = release
-        rels.insert(0,release3)
-    for rel in rels:
-        patList = getErrorPatterns(rel)
-        if patList:
-            for pat in patList:
-                match = pat.match(line)
-                if match:
-                    who = match.group('who')
-                    if who is not None:
-                        who = who.strip()
-                        if not who: who = None
-                    return match,ErrorInfo( acronym = pat.acronym,
-                                            severity = pat.severity,
-                                            who = who,
-                                            message = match.group('message') )
-    # no match
-    return None,None
-
-## Match the line against error patterns to be ignored for a particular Atlas software release.
-#  @param line A line (String) from the error logs.
-#  @param release The release number of the Atlas software used to execute the transform.
-#  @return matched regex object (or @c None if match failed)
-def matchIgnorePattern(line,release):
-    rels = [ 'ALL' ]
-    # add current release
-    if release:
-        try:
-            # reduce 4-digit to 3-digit release
-            release3 = '.'.join(release.split('.')[:3])
-        except Exception:
-            release3 = release
-        rels.insert(0,release3)
-    for rel in rels:
-        patList = getIgnorePatterns(rel)
-        if patList:
-            for pat in patList:
-                match = pat.match(line)
-                if match:
-                    return match
-    # no match
-    return None
-
-
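For the record, the core mechanism the deleted AtlasErrorCodes module implemented can be sketched in a few self-contained lines. Everything below is illustrative: the sample .db entries, the acronym and the log line are invented, but the regex construction follows readErrorPatterns() and matchErrorPattern() above.

import re

# Hypothetical atlas_error_categories.db entry: code, acronym, description.
code, acronym, description = [p.strip() for p in
    "65, TRF_SEGVIO, Segmentation violation".split(',', 2)]
print('category %d (%s): %s' % (int(code), acronym, description))

# Hypothetical atlas_error_patterns.db entry: release, acronym, who, message regex.
# The message field is already a regular expression in the .db files.
release, acr, who, msg = [p.strip() for p in
    "ALL, TRF_SEGVIO, athena.py, FATAL .*segmentation violation".split(',', 3)]

# Combined regex built as in readErrorPatterns(): an optional 'who' group
# for the producer, followed by a 'message' group anchored on the message regex.
pattern = (r'(?P<who>%s)\s+?' % who) if who else '(?P<who>)'
pattern += r'(?P<message>(?:%s).*)' % msg
patternRE = re.compile(pattern)

# matchErrorPattern() tried each compiled pattern against every log line.
match = patternRE.match("athena.py  FATAL caught signal: segmentation violation")
if match:
    print('[%s] %s by %s: %s' % (release, acr,
                                 match.group('who'), match.group('message')))

The 'who' group is deliberately present even when no producer is given, so a match always yields a (possibly empty) who field alongside the message.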
diff --git a/Tools/PyJobTransformsCore/python/FilePreStager.py b/Tools/PyJobTransformsCore/python/FilePreStager.py
deleted file mode 100755
index 0df95929cd563cdcddacc794df10b61647c1c24a..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/FilePreStager.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-
-__doc__ = """A base class for pre-staging files from tape to disk. Specific tape staging
-systems need to derive from this class and implement member functions
-getFilesStatus(), preStageFiles()."""
-
-import time,re,os
-import rfio
-from PyJobTransformsCore.TransformLogger import TransformLogger,logging
-
-
-class FilePreStageError(IOError):
-    """exception thrown in the case of problems"""
-    def __init__(self,*vargs):
-        IOError.__init__(self,*vargs)
-
-
-class FilePreStager(TransformLogger):
-    STAGED      = 'On disk'          # file is on disk
-    STAGEIN     = 'Being staged from tape to disk'     # file is in the process of being put on disk
-    ONTAPE      = 'Not on disk (but on tape)'   # file is not on disk
-    NOTONTAPE   = 'Not on tape'      # file is not on tape
-    INVALID     = 'Invalid'          # invalid status
-    UNKNOWN     = 'Unknown status'   # unknown status
-    CANBEMIGR   = 'Waiting for tape migration (on the stager disk)' 
-    # list of all possible file status
-    fileStatus = ( STAGED, STAGEIN, ONTAPE, NOTONTAPE, INVALID, UNKNOWN, CANBEMIGR)
-    # list of problematic file status
-    problemStatus = ( NOTONTAPE, INVALID, UNKNOWN )
-
-    def __init__(self,filenamePattern,maxWaitingTime,pollingInterval,name=None):
-        """<filenamePattern>: regular expression to match the filenames that are in this stager.
-        <pollingInterval>: default polling interval (seconds) to check file status when files are being staged from tape to disk.
-        <maxWaitingTime>: default maximum time (seconds) to wait for all files to be staged.
-        <name>: name by which this stager will be known"""
-        global theFileStagerRobot
-        if not name: name = self.__class__.__name__
-        TransformLogger.__init__(self,name)
-        self.__name = name
-        self.__filesToPreStage = {} # map with (key,value)=(filename,status)
-        self.setFilenamePattern( filenamePattern )
-        self.__maxWaitingTime = maxWaitingTime
-        self.__pollingInterval = pollingInterval
-        # list of files to pre-stage and their last status (key=filename,value=status)
-        theFileStagerRobot.addStager( self )
-        
-
-    def _notImplemented(self,funcName):
-        raise FilePreStageError('%s has not implemented function %s' % (self.__class__.__name__, funcName) )
-
-    
-    def getFilesStatus(self,listOfFiles):
-        """Return a dictionary containing the status of each file in <listOfFiles>.
-        The key is the filename and the value is the status (one of FilePreStager.fileStatus).
-        <listOfFiles> is either a python list (or tuple) with filenames or a single filename.
-        If <listOfFiles> is empty, return an empty dictionary (and do nothing else).
-        Must be implemented in derived class."""
-        self._notImplemented(FilePreStager.getFilesStatus.__name__)
-
-
-    def preStageFiles(self,listOfFiles):
-        """Initiate staging of files from tape to disk. Function must return immediately
-        and not wait until the files are staged. Raise a FilePreStageError exception in case of errors.
-        <listOfFiles> is either a python list (or tuple) with filenames or a single filename.
-        If <listOfFiles> is empty, do nothing.
-        Must be implemented in derived class."""
-        self._notImplemented(FilePreStager.preStageFiles.__name__)
-
-
-    def name(self):
-        return self.__name
-    
-
-    def fileExists(self,filename):
-        """Check that the file exists in the tape system"""
-        return rfio.exists(self.removePrefix(filename))
-        
-
-    def isFileInStager(self,filename):
-        """Return boolean to indicate whether <filename> is known to the stager (i.e. on tape)."""
-        return self.__filenamePattern.search(filename) is not None
-
-
-    def maxWaitingTime(self):
-        """Default maximum waiting time (seconds) for stageAllFilesAndWait()"""
-        return self.__maxWaitingTime
-
-
-    def pollingInterval(self):
-        """Default interval (seconds) for polling the status of the files being staged"""
-        return self.__pollingInterval
-
-
-    def filenamePattern(self):
-        return self.__filenamePattern.pattern
-
-
-    def setFilenamePattern(self,pattern):
-        self.__filenamePattern = re.compile(pattern)
-        
-
-    def setMaxWaitingTime(self,t):
-        """Set the default maximum waiting time (seconds) for stageAllFilesAndWait()"""
-        self.__maxWaitingTime = t
-
-
-    def setPollingInterval(self,t):
-        """Set the default interval (seconds) for polling the status of the files being staged"""
-        self.__pollingInterval = t
-
-
-    def removePrefix(self,filename):
-        """Utility function. Remove prefix until first : (needed for certain commands)"""
-        colon = filename.find(':')
-        if colon == -1:
-            return filename
-        else:
-            firstChar = colon + 1
-            if firstChar == len(filename):
-                return ''
-            else:
-                return filename[firstChar:]
-
-
-    def addFilesToPreStage(self,listOfFiles):
-        """Add a list (or tuple) of files or a single file to the list of files to be pre-staged.
-        <listOfFiles> is either a python list (or tuple) with filenames or a single filename (string).
-        Each file is tested with self.isFileInStager(), and a FilePreStageError exception
-        is raised if the file is not in the stager."""
-        # compatibility with single filename
-        if not listOfFiles: return
-        if type(listOfFiles).__name__ == 'str': listOfFiles = [ listOfFiles ]
-        for f in listOfFiles:
-            if not self.isFileInStager(f):
-                raise FilePreStageError( 'File %s does not seem to belong in %s' % (f,self.__class__.__name__) )
-            if not self.fileExists(f):
-                raise FilePreStageError( 'File %s does not exist in %s' % (f,self.__class__.__name__) )
-            self.__filesToPreStage[f] = FilePreStager.UNKNOWN
-
-
-    def updateStatus(self,printStatus='none'):
-        """Update the status of all files. Print out status according to the value of <printStatus>:
-        'none'   : Don't print anything.
-        'changed': Only print files with changed state
-        'all'    : Print out all files
-        """
-        fileList = self.__filesToPreStage
-        if not fileList: return
-        statusDict = self.getFilesStatus(fileList.keys())
-        if printStatus == 'changed':
-            self.printInfo( "Status of requested files with changed status on: %s" % time.asctime() )
-        elif printStatus == 'all':
-            self.printInfo( "Status of all requested files on: %s" % time.asctime() )
-        nChanged = 0
-        for filename,status in statusDict.items():
-            if printStatus == 'all': self.printInfo( "  %s: %s" % (filename,status) )
-            if status != fileList[filename]:
-                # print any changes
-                if printStatus == 'changed': self.printInfo( "  %s: %s" % (filename,status) )
-                nChanged += 1
-                # update state
-                fileList[filename] = status
-        if nChanged == 0 and printStatus == 'changed':
-            self.printInfo( "  (No changes since last check)" )
-
-
-    def filesToPreStage(self):
-        return self.__filesToPreStage
-
-        
-    def getFilesWithStatus(self,status,*vargs):
-        """Return the list of files which have one of the status given in the argument list"""
-        statusList = [ status ] + list(vargs)
-        return [ f for f,s in self.__filesToPreStage.items() if s in statusList ]
-
-
-    def getFilesNotWithStatus(self,status,*vargs):
-        """Return a list of files which do NOT have any of the status given in the argument list"""
-        statusList = [ status ] + list(vargs)
-        return [ f for f,s in self.__filesToPreStage.items() if s not in statusList ]
-
-
-    def stageAllFiles(self,needUpdate=True):
-        """Initiate stage-in of files that are not already on disk. Return immediately,
-        and do not wait until all files are staged."""
-        if not self.__filesToPreStage: return
-        if needUpdate: self.updateStatus()
-        toBeStaged =  self.getFilesWithStatus( FilePreStager.ONTAPE )
-        if toBeStaged:
-            self.printInfo("Pre-staging file(s) %s" % ','.join(toBeStaged))
-            self.preStageFiles( toBeStaged )
-
-
-
-class FileStagerRobot(TransformLogger):
-    def __init__(self,name=None):
-        if not name: name = self.__class__.__name__
-        TransformLogger.__init__(self,name)
-        self.setLoggerLevel( logging.INFO )
-        self.__name = name
-        self.__stagerList = []
-
-
-    def name(self):
-        return self.__name
-        
-
-    def setLoggerParentName(self,name):
-        """Override from TransformLogger: propagate to all stagers"""
-        TransformLogger.setLoggerParentName(self,name)
-        for stager in self.__stagerList:
-            stager.setLoggerParentName(name)
-
-
-    def addStager(self,stager):
-        if not isinstance(stager,FilePreStager):
-            raise FilePreStageError('%s is not a FilePreStager' % stager.__class__.__name__)
-        name = stager.name()
-        oldStager = self.getStager(name)
-        if oldStager is not None:
-            self.logger().warning("File pre-stager %s already in %s. Keeping old one.", name,self.__name)
-        else:
-            self.logger().debug("Adding file pre-stager %s to %s", name, self.__name)
-            stager.setLoggerParentName(self.name())
-            self.__stagerList.append( stager )
-        
-
-    def getStager(self,name):
-        for stager in self.__stagerList:
-            if stager.name() == name: return stager
-        # not found
-        return None
-
-         
-    def getStagerForFile(self,filename):
-        """Return the stager object if the filename needs a stager (i.e. matches one of the
-        filenamePatterns). If the filename does not need a stager, None is returned."""
-        for stager in self.__stagerList:
-            if stager.isFileInStager(filename): return stager
-        return None
-    
-
-    def addFilesToStagerIfNeeded(self,listOfFiles):
-        """For all files in <listOfFiles>, if the file is in a tape stager system,
-        add it to the list of files to be pre-staged using that stager.
-        Return the list of files that were added to a stager.
-        <listOfFiles> is either a python list (or tuple) with filenames or a single filename.
-        Static function: must be called as FilePreStager.addFileToStagerIfNeeded()"""
-        if not listOfFiles: return []
-        # compatibility with single filename
-        if type(listOfFiles).__name__ == 'str': listOfFiles = [ listOfFiles ]
-        preStageList = []
-        for f in listOfFiles:
-            stager = self.getStagerForFile(f)
-            if stager:
-                stager.addFilesToPreStage(f)
-                preStageList.append(f)
-                
-        return preStageList
-    
-
-    def updateStatus(self,printStatus='none'):
-        for stager in self.__stagerList:
-            stager.updateStatus(printStatus)
-            
-
-    def filesToPreStage(self):
-        """Full list of files that should be pre-staged"""
-        fileDict = {}
-        for stager in self.__stagerList:
-            fileDict.update( stager.filesToPreStage() )
-        return fileDict
-    
-
-    def getFilesWithStatus(self, status, *vargs):
-        fileList = []
-        for stager in self.__stagerList:
-            fileList += stager.getFilesWithStatus( status, *vargs )
-        return fileList
-        
-
-    def getFilesNotWithStatus(self, status, *vargs):
-        fileList = []
-        for stager in self.__stagerList:
-            fileList += stager.getFilesNotWithStatus( status, *vargs )
-        return fileList
-
-
-    def getFilesNotYetStaged(self):
-        """Full list of files that are not yet pre-staged (and can be)"""
-        return self.getFilesWithStatus( FilePreStager.STAGEIN, FilePreStager.ONTAPE )
-
-
-    def getProblemFiles(self):
-        """return list of files that are in some problem state"""
-        return self.getFilesWithStatus( *list(FilePreStager.problemStatus) )
-
-
-    def stageAllFiles(self,needUpdate=True):
-        """Initiate stage-in of files that are not already on disk. Return immediately,
-        and do not wait until all files are staged."""
-        for stager in self.__stagerList:
-            stager.stageAllFiles(needUpdate)
-            
-
-    def waitUntilAllFilesStaged(self):
-        """Pre-stage all files to disk, and do not return before all files are staged.
-        Return the dictionary of files (key=filename, value=status) that were not staged
-        within the maxWaitingTime. If all goes well, therefore, an empty dictionary is returned.
-        <maxWaitingTime> is the maximum time (seconds) to wait for all files to arrive on disk.
-        If not given (or None), the value set in the constructor is used.
-        <pollingInterval> is the time interval between checking the status of the files.
-        If  <maxWaitingTime> or <pollingInterval> are not given (or None), their value
-        is taken from the values set in the constructor."""
-        # set maxWaitingTime and pollingInterval to the longest of all stagers
-        maxWaitingTime = 0
-        pollingInterval = 0
-        for stager in self.__stagerList:
-            maxWait = stager.maxWaitingTime()
-            if maxWait > maxWaitingTime: maxWaitingTime = maxWait
-            poll = stager.pollingInterval()
-            if poll > pollingInterval: pollingInterval = poll
-        startTime = time.time()
-        self.updateStatus('all')
-        problemFiles = self.getProblemFiles()
-        notYetOnDisk = self.getFilesNotYetStaged()
-        while not problemFiles and notYetOnDisk and time.time() - startTime < maxWaitingTime:
-            self.stageAllFiles(False)
-            time.sleep( pollingInterval )
-            self.updateStatus('changed')
-            problemFiles = self.getProblemFiles()
-            notYetOnDisk = self.getFilesNotYetStaged()
-
-        fileDict = self.filesToPreStage()
-        problemDict = {}
-        if problemFiles:
-            mess = "Problems with the following to-be-staged files:"
-            for filename in problemFiles:
-                status = fileDict[filename]
-                problemDict[filename] = status
-                mess += os.linesep + "%s: %s" % (filename,status)
-            self.printError( mess )
-        # add others not yet staged
-        notYetOnDisk = self.getFilesNotWithStatus( FilePreStager.STAGED )
-        if notYetOnDisk:
-            mess = "Files not staged to disk within the maximum waiting time (%s s):" % maxWaitingTime
-            for filename in notYetOnDisk:
-                status = fileDict[filename]
-                problemDict[filename] = status
-                # only printout if not already printed out above
-                if filename not in problemFiles:
-                    mess += os.linesep + "%s: %s" % (filename,status)
-            self.printError( mess )
-        if not problemDict:
-            self.printInfo( "All requested files are staged to disk" )
-
-        return problemDict
-
-
-# Main entry point: the global file stager robot 
-theFileStagerRobot = FileStagerRobot()
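The stager contract defined above (preStageFiles() must return immediately; getFilesStatus() is polled until every file reports STAGED) can be exercised without any tape backend. A minimal sketch follows, using an invented DummyStager whose files advance one state per poll; it is not the Castor/rfio implementation, and the state values and helper name are made up for illustration.

import time

# Status constants mirroring FilePreStager's (values shortened for the sketch).
STAGED, STAGEIN, ONTAPE = 'On disk', 'Being staged', 'On tape'

class DummyStager:
    """Invented stand-in for a FilePreStager subclass: every poll moves
    each requested file one step along ONTAPE -> STAGEIN -> STAGED."""
    def __init__(self):
        self._status = {}

    def preStageFiles(self, listOfFiles):
        # Initiate staging and return immediately, as the contract requires.
        for f in listOfFiles:
            self._status.setdefault(f, ONTAPE)

    def getFilesStatus(self, listOfFiles):
        order = [ONTAPE, STAGEIN, STAGED]
        for f in listOfFiles:
            self._status[f] = order[min(order.index(self._status[f]) + 1, 2)]
        return dict(self._status)

def wait_until_staged(stager, files, maxWaitingTime=5.0, pollingInterval=0.1):
    """Simplified analogue of FileStagerRobot.waitUntilAllFilesStaged():
    poll until every file is STAGED or time runs out; return the laggards."""
    stager.preStageFiles(files)
    deadline = time.time() + maxWaitingTime
    status = stager.getFilesStatus(files)
    while any(s != STAGED for s in status.values()) and time.time() < deadline:
        time.sleep(pollingInterval)
        status = stager.getFilesStatus(files)
    return {f: s for f, s in status.items() if s != STAGED}

print(wait_until_staged(DummyStager(), ['castor:/atlas/data.root']))  # {} on success

wait_until_staged() condenses the polling loop of waitUntilAllFilesStaged(): stage, sleep, re-poll, and hand back whatever is still not on disk when the deadline passes.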
diff --git a/Tools/PyJobTransformsCore/python/JobReport.py b/Tools/PyJobTransformsCore/python/JobReport.py
deleted file mode 100755
index edcbf33879481e79db36da516666981856184bbd..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/JobReport.py
+++ /dev/null
@@ -1,1427 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-## @package JobReport
-#
-#  @brief Main module containing the @em JobReport class and other ancillary classes @em JobInfo, @em FileInfo and @em TaskInfo.
-
-from __future__ import with_statement, print_function
-import os, sys, shutil, subprocess, itertools
-import pickle
-from copy import copy
-from PyJobTransformsCore import AtlasErrorCodes, extraMetadata
-from .xmlutil import XMLNode
-
-__all__ = [ 'JobInfo', 'FileInfo', 'TaskInfo', 'JobReport', '_extraMetadataDict' ] 
-
-# Get access to the extraMetadata.extraMetadataDict when this package is imported. 
-_extraMetadataDict = extraMetadata.extraMetadataDict
-
-## Container class used to store metadata associated with a job.
-#  @details Metadata associated with a job is added as attributes
-class JobInfo(XMLNode):
-
-    ## Constructor for the JobInfo class.
-    #  @param name Name of the job.
-    #  @param contents Set the contents. Accepted values include @em XMLNode instances, 
-    #  list of strings, plain strings and None. Defaults to @b None.
-    #  @see xmlutil.XMLNode.setContents()
-    def __init__(self,name,contents=None):
-        XMLNode.__init__(self,name,contents)
-
-    ## String representation of the JobInfo instance.
-    #  @see xmlutil.XMLNode.__str__()
-    #  @return String
-    def __str__(self):
-        return XMLNode.__str__(self)
-
-    ## Return a shallow copy of self.
-    #  @return copy of self (JobReport.JobInfo instance)
-    def xmlNode(self):
-        return copy(self)
-
-
-## Container class used to store metadata for input and output files.
-class FileInfo(XMLNode):
-
-    ## Constructor for the FileInfo class.
-    #  @param filename Name of the associated file.
-    #  @param guid GUID associated with the file.
-    #  @param metadata Dictionary of metadata associated with the file. 
-    #  This dictionary is used to update the private @em xmlutil.XMLNode.__attributes dictionary attribute. Defaults to @b None.
-    #  @see xmlutil.XMLNode.setAttributes()
-    def __init__(self,filename,guid, metadata=None):
-        XMLNode.__init__(self,"File")
-        if metadata is None:
-            metadata = {}
-        self.setAttributes(filename=filename,guid=guid,metaData=metadata)
-
-    ## Facility to allow the addition of metadata from another FileInfo instance (retain existing filename & guid).
-    #  @return self (JobReport.FileInfo instance)
-    def __iadd__(self,other):
-        self.addMetaData( other.metaData() )
-        return self
-
-    ## Facility to allow FileInfo instance to be used as a key in a dictionary or similar data structures.
-    #  @details The generated hash value is the identity of the associated file name.
-    #  @return Integer
-    def __hash__( self ):
-        return id( self.filename() )
-    
-    ## Facility to determine equality between self and another FileInfo instance.
-    #  @details Comparison criteria is simply the associated file name.
-    #  @return Boolean
-    def __eq__( self, other ):
-        return self.filename() == other.filename()
-
-    ## Facility to allow FileInfo instances to be compared.
-    #  @details Comparison criteria is simply the associated file name.
-    #  @return Integer (-1,0,1)
-    def __cmp__( self, other ):
-        fName = self.filename()
-        oName = other.filename()
-        if fName == oName:
-            return 0
-        if fName < oName:
-            return -1
-        return 1
-
-    ## @brief String representation of the FileInfo instance.
-    #  @details Pretty printing of information on the FileInfo instance (i.e. name, guid and associated metadata).
-    #  @return String (multi-line)
-    def __str__(self):
-        indent = '  '
-        me = [ "%s %s:" % (self.name(),self.filename()) ,
-               "%sguid=%s" % (indent,self.guid()) ]
-        meta = self.metaData()
-        if meta:
-            me += [ "%smetaData:" % (indent) ]
-            for n,v in meta.items():
-                me += [ "%s%s=%s" % (2*indent,n,v) ]
-        return os.linesep.join(me)
-
-    ## Getter function for the @em guid attribute.
-    #  @warning If the @em guid attribute is missing, the function will return @b None.
-    #  @return String
-    def guid(self):
-        return self.getAttribute("guid")
-
-    ## Getter function for the @em filename attribute.
-    #  @warning If the @em filename attribute is missing, the function will return @b None.
-    #  @return String
-    def filename(self):
-        return self.getAttribute("filename")
-
-    ## Getter function for metadata.
-    #  @param name If @em name is omitted, the entire metadata dictionary is returned. 
-    #  If specified, only the value of the associated metadata is returned. Defaults to @b None.
-    #  @param moreOnly If set to @b True, the metadata dictionary is returned with several entries 
-    #  (i.e. @em dataset, @em size, @em events and @em checkSum) omitted. 
-    #  This is used by the routine that creates the job report gpickle file in the old format. 
-    #  The new format does not require this alteration. Defaults to @b False.
-    #  @return The entire metadata dictionary or a single value (depending on the @em name parameter).
-    def metaData( self, name = None, moreOnly = None ):
-        if moreOnly is None:
-            moreOnly = False
-        tempMetaData = self.getAttribute( "metaData" )
-        if moreOnly:
-            tempMetaData = copy( self.getAttribute( "metaData" ) )
-            for itemToRemove in [ 'dataset', 'size', 'events', 'checkSum' ]:
-                try:
-                    tempMetaData.pop( itemToRemove )
-                except KeyError:
-                    pass
-        if name is None:        
-            return tempMetaData
-        return tempMetaData.get( name )
-
-    ## Update the existing metadata dictionary.
-    #  @param metaDict A dictionary used to update the current metadata dictionary. 
-    #  @return None
-    def addMetaData(self,metaDict):
-        self.metaData().update(metaDict)
-
-    ## Clear the existing metadata dictionary.
-    #  @return None
-    def clearMetadata( self ):
-        self.metaData().clear()
-
-    ## Construction of a new xmlutil.XMLNode instance to represent information contained in self.
-    #  @return xmlutil.XMLNode instance
-    def xmlNode(self):
-        lfn = XMLNode( 'lfn' ).setAttribute( 'name', os.path.basename( self.filename() ) )
-        logical = XMLNode( 'logical', lfn )
-        fileNode = XMLNode( self.name(), logical ).setAttribute( 'ID', self.guid() )
-        meta = self.metaData()
-        for key,val in meta.items():
-            metaNode = XMLNode( 'metadata' ).setAttributes( att_name = key, att_value = val )
-            fileNode.addContents( metaNode )
-        return fileNode
-
-    ## Retrieve the XML representation of the information contained in self.
-    #  @return String (multi-line)
-    def getXML(self):
-        return self.xmlNode().getXML()
-    
-    ## Retrieve the information contained in self as a dictionary.
-    #  @return Dictionary
-    def getDict( self ):
-        _d = { 'name' : os.path.basename( self.filename() ),
-               'guid' : self.guid() }
-        try:
-            _d.update( self.metaData() )
-        finally:
-            return _d
-
-## @brief Container class used to store information pertaining to a (transform) task.
-#  @details The TaskInfo class encapsulates all information pertaining to a particular transformation. 
-#  It is designed to be recursively included to express and retain the relationships in composite transformations. 
-#  Each transformation has an associated TaskInfo instance describing the task it is running. 
-#  Each TaskInfo instance may recursively include other sub-tasks (also TaskInfo instances).
-class TaskInfo( XMLNode ):
-
-    ## Constructor for TaskInfo.
-    #  @param taskname Name of the task. Typically the same as the associated transformation. Defaults to @c DEFAULT
-    #  @param metaData Dictionary of metadata to be associated with the task and stored as attributes of the task. 
-    #  Defaults to an empty Dictionary.
-    def __init__( self, taskname = None, metaData = None ):
-        if metaData is None:
-            metaData = {}
-        if taskname is None:
-            taskname = 'DEFAULT'
-        XMLNode.__init__( self, "Task" )
-        # insert metadata provided as attributes.
-        self.setAttributes( taskname = taskname, metaData = metaData)
-        ## Dictionary of JobInfo objects
-        self.infos     = {}
-        ## Dictionary of output FileInfo objects.
-        self.outFiles  = {}
-        ## Dictionary of input FileInfo objects
-        self.inFiles   = {}
-        ## Dictionary of AtlasErrorCodes.ErrorCategory objects. Stores the error categories of the errors encountered by the task.
-        self.errorCategories = {}
-        ## List of AtlasErrorCodes.ErrorInfo objects representing errors encountered by the task.
-        self.errors    = []
-        ## Dictionary of the number of errors of a particular severity required to be stored.
-        self.errorLimits = { AtlasErrorCodes.FATAL : 10, AtlasErrorCodes.ERROR : 10, AtlasErrorCodes.WARNING : 10 }
-        ## List of AtlasErrorCodes.ErrorInfo objects representing validation-related errors encountered by the task.
-        self.validationErrors = []
-        ## List of sub-tasks (i.e. TaskInfo objects). Sub-tasks only exist for composite tasks.
-        self.subTasks  = []
-        ## The command issued at the command-line to execute the associated transformation.
-        self.command   = None
-
-    ## Facility to allow TaskInfo objects to be @em combined (i.e. the updating of attributes with those from another TaskInfo object).
-    #  @remarks All attributes are updated apart from the @em taskname and any other attributes explicitly added post-construction.
-    #  @return self
-    def __iadd__(self,other):
-        self.infos.update( other.infos )
-        self.outFiles.update( other.outFiles )
-        self.inFiles.update( other.inFiles)
-        self.addMetaData( other.metaData() )
-        for o_err in other.errors:
-            self.addError( o_err )
-        for o_vError in other.validationErrors:
-            self.addValidationError( o_vError )
-        for o_errCat in other.errorCategories.values():
-            self.addErrorCategory( o_errCat )
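-        # Both counters start at 10 and count down as errors are stored; once the
-        # two reports together have recorded 10 or more errors of a given severity,
-        # stop storing any more errors of that severity.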
-        for severity, limit in self.errorLimits.items():
-            if limit + other.errorLimits[ severity ] <= 10:
-                self.errorLimits[ severity ] = 0
-        self.subTasks.extend( other.subTasks )
-        return self
-
-    ## String representation of the metadata associated with the TaskInfo instance and its sub-tasks with the 
-    #  relationship between nested tasks retained.
-    #  @return String (multi-line)
-    def __str__(self):
-        indent = '  '
-        me = [ "\n%s %s:" % (self.name(),self.taskName()) ]
-        meta = self.metaData()
-        if meta:
-            me += [ "%smetaData:" % (indent) ]
-            for n,v in meta.items():
-                me += [ "%s%s=%s" % (2*indent,n,v) ]
-        me.append( "Sub-tasks:" )
-        me.extend( [ st.taskName() for st in self.subTasks ] )
-        return os.linesep.join(me)
-    
-    ## Getter function for the @em taskname attribute.
-    #  @return String
-    def taskName( self ):
-        return self.getAttribute( "taskname" )
-
-    ## Generator function yielding the task named @em trfName and all of its sub-tasks.
-    #  @param trfName Name of the TaskInfo instance at which to begin the preorder/depth-first traversal. 
-    #  Defaults to top-level task.
-    #  @param createIfNotExist Boolean flag to denote if a new TaskInfo instance should be created 
-    #  if a task @em trfName is not found. Defaults to @b False.
-    #  @return TaskInfo instance
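-    #  A minimal usage sketch (assumes @c task is a populated TaskInfo instance):
-    #  @code
-    #  for t in task.tasks():  # preorder walk over the task and all its sub-tasks
-    #      print( t.taskName() )
-    #  @endcode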
-    def tasks( self, trfName = None, createIfNotExist = False ):
-        taskFound = False
-        if trfName in [ None, True, self.taskName() ]:
-            trfName = None
-            taskFound = True
-            yield self
-        elif trfName is False: # omit top level task
-            trfName = None
-        for subtask in self.subTasks:
-            for st in subtask.tasks( trfName, False ):
-                taskFound = True
-                yield st
-        if not taskFound and createIfNotExist:
-            st = TaskInfo( trfName )
-            self.subTasks.append( st )
-            yield st
-
-    ## Getter function for the @em metaData dictionary attribute.
-    #  @param name Name of a particular metadata entry. If provided, the value associated with that particular metadata 
-    #  entry is returned. If omitted, the entire @em metaData dictionary attribute is returned.
-    #  @return String or Dictionary.
-    def metaData(self,name=None):
-        if name is None:
-            return self.getAttribute("metaData")
-        else:
-            return self.getAttribute("metaData").get(name)
-
-    ## Add new metadata entries and/or update existing ones.
-    #  @param metaDict Dictionary containing metadata entries to add/update.
-    #  @return None
-    def addMetaData(self,metaDict):
-        self.metaData().update( metaDict )
-
-    ## Add an AtlasErrorCodes.ErrorCategory to the @em errorCategories Dictionary.
-    #  @remarks The addition of a duplicate AtlasErrorCodes.ErrorCategory will not result in a replacement of the 
-    #  existing entry but an increment in the AtlasErrorCodes.ErrorCategory.count value instead.
-    #  @return None
-    def addErrorCategory( self, category ):
-        try:
-            self.errorCategories[ category.acronym ].count += category.count
-        except KeyError:
-            self.errorCategories[ category.acronym ] = copy( category )
-    
-    ## Associate an AtlasErrorCodes.ErrorInfo object with this task. 
-    #  @param error The AtlasErrorCodes.ErrorInfo object to add.
-    #  @remarks If the error already exists, it will be @em added to the existing one.
-    #  @see AtlasErrorCodes.ErrorInfo.__iadd__() on details of how AtlasErrorCodes.ErrorInfo objects are added together.
-    #  @return None
-    def addError( self, error ):
-        for err in self.errors:
-            if err == error:
-                err += error
-                break
-        else:
-            self.errors.append( copy( error ) )
-
-    ## Associate a validation-related AtlasErrorCodes.ErrorInfo object with this task.
-    #  @param error The AtlasErrorCodes.ErrorInfo object to add.
-    #  @remarks If the validation error already exists, it will be @em added to the existing one.
-    #  @see AtlasErrorCodes.ErrorInfo.__iadd__() on details of how AtlasErrorCodes.ErrorInfo objects are added together.
-    #  @return None
-    def addValidationError( self, error ):
-        for ve in self.validationErrors:
-            if ve == error:
-                ve += error
-                break
-        else:
-            self.validationErrors.append( copy( error ) )
-
-    ## Associate a FileInfo object with the task.
-    #  @param fileType Type of file to add. Accepted values are @c input and @c output.
-    #  @param fileInfo The FileInfo object to associate with the task.
-    #  @remarks If the FileInfo object already exists, it is @em added to the existing one.
-    #  @see FileInfo.__iadd__() on details of how FileInfo objects are added together.
-    #  @return None 
-    def addFile( self, fileType, fileInfo ):
-        fileDict = { 'INPUT' : self.inFiles, 'OUTPUT': self.outFiles }[ fileType.upper() ]
-        try:
-            fInfo = fileDict[ fileInfo.filename() ]
-        except KeyError:
-            fileDict[ fileInfo.filename() ] = copy( fileInfo )
-        else:
-            fInfo += fileInfo
-
-    ## Construction of a new xmlutil.XMLNode instance to represent the information contained in self.
-    #  @remarks JobInfo information is omitted as it is not required when writing out to XML.
-    #  @return xmlutil.XMLNode instance
-    def xmlNode(self):
-        taskNode = XMLNode( self.name() ).setAttribute( 'name', self.taskName() )
-        for metaKey, metaValue in self.metaData().items():
-            metaNode = XMLNode( "metadata" ).setAttributes( name = metaKey, value = metaValue )
-            taskNode.addContents( metaNode )
-#        infoNode = XMLNode( "jobinfo" )
-#        for infoObject in self.infos.values():
-#            infoNode.addContents( infoObject.xmlNode() )
-#        taskNode.addContents( infoNode )
-        inFileNode = XMLNode( "inputfiles" )
-        for inFileObject in self.inFiles.values():
-            inFileNode.addContents( inFileObject.xmlNode() )
-        taskNode.addContents( inFileNode )
-        outFileNode = XMLNode( "outputfiles" )
-        for outFileObject in self.outFiles.values():
-            outFileNode.addContents( outFileObject.xmlNode() )
-        taskNode.addContents( outFileNode )
-        for sTask in self.subTasks:
-            taskNode.addContents( sTask.xmlNode() )
-        return taskNode
-
-    ## Generate the XML string describing this task.
-    #  @return String (multi-line)
-    def getXML( self ):
-        return self.xmlNode().getXML()
-    
-    ## Generate a customised Python dictionary containing the task's @em name, @em metadata, 
-    #  associated @em files and @em sub-tasks.
-    #  @return Dictionary
-    def getDict( self ):
-        _d = { 'name' : self.taskName(),
-               'metadata' : self.metaData(),
-               'inputfiles' : [ inFileObject.getDict() for inFileObject in self.inFiles.values() ],
-               'outputfiles' : [ outFileObject.getDict() for outFileObject in self.outFiles.values() ],
-               'subtasks' : [ sTask.getDict() for sTask in self.subTasks ] }
-        return _d
-
-
-## This class contains information on the results of execution of the associated transform.
-class JobReport( object ):
-    metadata_xml = 'metadata.xml'
-    jobinfo_xml = 'jobInfo.xml'
-    defaultFilenameBase = 'jobReport'
-    fileExtensions = ( '.txt', '.pickle', '.xml' )
-    # List of files created by the transform
-    
-    ## @brief Constructor for the JobReport class.
-    #  @details A newly instantiated JobReport is not immediately usable as it does not yet contain 
-    #  the required information. Although not designed to be 
-    #  used directly, it is automatically created (and appropriately populated) when the transform is executed. 
-    #  A JobReport instance may be explicitly created as a container in which other JobReport instances may be 
-    #  stored e.g. in composite transforms.
-    def __init__( self ):
-        # Can't be at class scope due to py3 scoping rules for comprehensions.
-        JobReport.defaultFiles = [ self.defaultFilenameBase + ext for ext in self.fileExtensions ] + [ self.metadata_xml, self.jobinfo_xml ]
-
-        self.reset()
-        self.setCommand()
-        # print everything by default
-        self.setPrintOptions( [], [] )
-
-    ## Persistify the JobReport if it was modified.
-    def __del__( self ):
-        if self.__modified: self.write()
-
-    ## String representation of the Job Report. 
-    #  @remarks The contents of this string are influenced by the use of the setPrintOptions() function.
-    #  @return String (multi-line)
-    def __str__( self ):
-        if self.__printNone: return "" # return empty string
-        # header
-        header = "===== Job Report"
-        if self._trf:
-            header += " produced by %s" % self._trf
-            if self._trfVersion:
-                header += " version %s" % self._trfVersion
-            if self._trfComment:
-                header += " (%s)" % self._trfComment
-        header += " ====="
-        me = [ header ]
-        # Summary
-        if self._shouldPrintItem( 'Summary' ):
-            me.append( 'Command=%s' % self.command() )
-            me.append( 'ExitCode=%s' % self.exitCode() )
-            ec = self.errorCode()
-            ea = AtlasErrorCodes.getAcronym(ec)
-            me.append( 'ErrorCode=%d (%s)' % (ec,ea) )
-            # error category counts
-            for cat in self.errorCategories():
-                me.append( 'ErrorCategory=%s' % (cat,) )
-            # fatals
-            me.append( 'Fatals=%d' % self.fatalCount() )
-            # errors
-            me.append( 'Errors=%d' % (self.errorCount()) )
-            # warnings
-            me.append( 'Warnings=%d' % (self.warningCount()) )
-            # output files
-            for f in self.outputFiles():
-                me.append( str(f) )
-        # Errors
-        if self._shouldPrintItem( 'Errors' ):
-            for info in itertools.chain( self.validationErrors(), self.errors() ):
-                me.append( str( info ) )
-        # Other info
-        for info in self.infos():
-            if self._shouldPrintItem( info.name() ):
-                me.append( str( info ) )
-        return os.linesep.join( me )
-
-    ## Function to test if a particular item should be included in the print out of the job report.
-    #  @param name The name of the item to be checked.
-    #  @return Boolean
-    def _shouldPrintItem( self, name ):
-        return not self.__printNone and name not in self.__printNotOptions and \
-               ( not self.__printOptions or name in self.__printOptions )
-
-    ## Resets all attributes to their default values.
-    #  @return None
-    def reset( self ):
-        ## Name of the associated transform 
-        self._trf = None
-        ## Version of the associated transform
-        self._trfVersion = None
-        ## Comments pertaining to the associated transform
-        self._trfComment = None
-        ## The internally associated TaskInfo instance.
-        self._task = TaskInfo()
-        ## Boolean flag to indicate if @c self has been modified. 
-        self.__modified = False
-        ## Exit code/error code manipulation Boolean flag. Determines if unrecognised errors are ignored.
-        self.__ignoreUnknown = False
-        ## Exit code/error code manipulation Boolean flag. Ignore all errors.
-        self.__ignoreAll = False
-        ## Exit code/error code manipulation Boolean flag. 
-        #  Ignore errors if the athena.py job and the various validation routines performed were successful.
-        self.__ignoreErrors = False
-        ## Boolean flag to switch between old and new metadata and job report gpickle formats.
-        #  @see writeMetaDataXML_new(), writeMetaDataXML_old(), GPickleContents_new() and GPickleContents_old().
-        self.useNewMetadataFormat = False
-        ## Boolean flag to determine if the validation routines performed on the associated transform were successful.
-        #  @remarks This Boolean flag may be modified when the getExitError() function is run.
-        self.validationSuccess = True
-        ## Information tailored for the production system. This dictionary will typically be written to disk.
-        #  @see writeGPickle().
-        self.gpickle_dict = {}
-        ## Boolean flag to allow printing of job report to be disabled.
-        self.__printNone = False
-        ## List of items to print. @b Note: @em [] denotes all items should be printed.
-        self.__printOptions = []
-        ## List of items not to print.
-        self.__printNotOptions = []
-
-    ## Associate the job report with a transform.
-    #  @param name The name of the transform.
-    #  @param version A String providing the version of the transform. Defaults to @b None.
-    #  @param comment A String providing some description of the transform. Defaults to @b None.
-    #  @remarks The internally associated TaskInfo instance is given the same parameters.
-    #  @return None
-    def setProducer( self, name, version = None, comment = None ):
-        self._task.setAttributes( taskname = name, version = version, comment = comment )
-        self._trf = name
-        self._trfVersion = version
-        self._trfComment = comment
-
-    ## Getter function for retrieving the @em taskname attribute from the internally associated TaskInfo instance.
-    #  @return String
-    def producer( self ):
-        return self._task.getAttribute( 'taskname' )
-
-    ## Getter function for retrieving the @em version attribute from the internally associated TaskInfo instance.
-    #  @return String
-    def producerVersion( self ):
-        return self._task.getAttribute( 'version' )
-
-    ## Getter function for retrieving the @em comment attribute from the internally associated TaskInfo instance.
-    #  @return String
-    def producerComment( self ):
-        return self._task.getAttribute( 'comment' )
-
-    ## Getter function for retrieving the command issued at the command-line to run the transform performing 
-    #  a particular task @em trfName.
-    #  @param trfName The name of the transform to retrieve the command from. Defaults to @b None.
-    #  @remarks If @em trfName is provided, the internally associated TaskInfo instance 
-    #  (including all its sub-tasks, if any) is searched and a task/sub-task with a matching @em trfName is 
-    #  accessed and its associated execution command retrieved. If @em trfName is omitted, the top-level task 
-    #  (i.e. the task directly associated with this job report) is used.
-    #  @return String
-    def command( self, trfName = None ):
-        return self.task( trfName ).command
-
-    ## Record the command used at the command-line to execute the transform.
-    #  @param command The command used at the command-line. Defaults to @c sys.argv.
-    #  @param trfName The name of the task to associate the @em command with. Defaults to @b None.
-    #  @return None
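-    #  For illustration (hypothetical argv): [ 'MyTrf.py', 'maxEvents=10' ] is recorded as
-    #  @code
-    #  MyTrf.py maxEvents='10'
-    #  @endcode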
-    def setCommand( self, command = None, trfName = None ):
-        if command is None:
-            command = sys.argv[0] + ' '
-            for i in sys.argv[1:]:
-                if i.find('=') > -1:
-                    key, value = i.split('=', 1)
-                    command += key + '=' + "'" + value.replace("'", "'\\''") + "'" + ' '
-                else:
-                    command += "'" + i.replace("'", "'\\''") + "'" + ' '
-        self.task( trfName ).command = command
-        self.__modified = True
-
-    ## Customise the information presented when the job report is printed.
-    #  @param options List of names of items to print. A comma-delimited string is also accepted. 
-    #  An empty list denotes @em all items are to be printed. Defaults to the current value i.e. unchanged.
-    #  @param notOptions List of names of items @em not to print. Takes precedence over @em options list. 
-    #  A comma-delimited string is also accepted. Defaults to the current value i.e. unchanged.
-    #  @warning If the string @c 'None' is found in the @em options list, @em nothing will be printed.
-    #  @return None
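-    #  A minimal usage sketch (assumes @c report is a JobReport instance):
-    #  @code
-    #  report.setPrintOptions( 'Summary,Errors' )  # comma-delimited string is accepted
-    #  report.setPrintOptions( [], [] )            # reset: print everything
-    #  @endcode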
-    def setPrintOptions( self, options = None, notOptions = None ):
-        if options is None:
-            options = self.__printOptions
-        if notOptions is None:
-            notOptions = self.__printNotOptions
-        try:
-            self.__printOptions = options.split( ',' )
-        except AttributeError:
-            self.__printOptions = copy( options )
-        self.__printNone = 'None' in self.__printOptions
-        try:
-            self.__printNotOptions = notOptions.split( ',' )
-        except AttributeError:
-            self.__printNotOptions = copy( notOptions )
-
-    ## Write out the current string representation of the job report in a (one-off) customised format without 
-    #  affecting the current formatting.
-    #  @param options List of names of items to print. A comma-delimited string is also accepted. 
-    #  An empty list denotes @em all items are to be printed. Defaults to the current value i.e. unchanged.
-    #  @param notOptions List of names of items @em not to print. Takes precedence over @em options list. 
-    #  A comma-delimited string is also accepted. Defaults to the current value i.e. unchanged.
-    #  @param output An open file(-like) object to write the job report to. Defaults to @c sys.stdout.
-    #  @warning If the string @c 'None' is found in the @em options list, @em nothing will be printed.
-    #  @return None
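-    #  A minimal usage sketch (the file name is hypothetical):
-    #  @code
-    #  with open( 'summary.txt', 'w' ) as out:
-    #      report.dump( options = 'Summary', output = out )
-    #  @endcode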
-    def dump(self,options=None,notOptions=None,output=sys.stdout):
-        # remember current options
-        oldOptions = self.__printOptions
-        oldNotOptions = self.__printNotOptions
-        # set new options (makes a copy)
-        self.setPrintOptions(options,notOptions)
-        # get the printout in a string
-        meString = self.__str__()
-        # restore options
-        self.__printOptions = oldOptions
-        self.__printNotOptions = oldNotOptions
-        if meString:
-            output.write( meString + os.linesep )
-
-    ## Generator function yielding all (relevant) associated JobInfo instances.
-    #  @param trfName Retrieval of all JobInfo instances starting from the task @em trfName.
-    #  (preorder/depth-first traversal of tasks). Defaults to the top-level task.
-    #  @return JobInfo instance
-    def infos( self, trfName = None ):
-        for t in self.tasks( trfName ):
-            for i in t.infos.values():
-                yield i
-
-    ## Getter function for JobInfo instance.
-    #  @param name Name of JobInfo instance to retrieve.
-    #  @param trfName The name of the task to retrieve this JobInfo instance from. Defaults to the top-level task.
-    #  @return JobInfo instance
-    def info( self, name, trfName = None ):
-        return self.task( trfName ).infos[ name ]
-
-    ## Add a JobInfo instance to the selected task's dictionary of JobInfo instances.
-    #  @param info JobInfo instance to be added.
-    #  @param trfName The name of the task to add this JobInfo instance to. Defaults to the top-level task.
-    #  @return None
-    def addInfo(self, info, trfName = None):
-        if info is None: 
-            return
-        self.task( trfName ).infos[ info.name() ] = copy( info )
-        self.__modified = True
-
-    ## Remove a JobInfo instance from the selected task's dictionary of JobInfo instances.
-    #  @param name Name of the JobInfo instance to remove from the selected task's dictionary of JobInfo instances.
-    #  @param trfName The name of the task to remove this JobInfo instance from. Defaults to the top-level task.
-    #  @return None
-    def removeInfo(self, name, trfName = None ):
-        self.task( trfName ).infos.pop( name )
-        self.__modified = True
-
-    ## Generator function yielding all (relevant) associated output FileInfo instances.
-    #  @param trfName Retrieval of all output FileInfo instances starting from the task @em trfName 
-    #  (preorder/depth-first traversal of tasks). Defaults to the top-level task.
-    #  @return FileInfo instance
-    def outputFiles( self, trfName = None ):
-        for t in self.tasks( trfName ):
-            for oFile in t.outFiles.values():
-                yield oFile
-
-    ## Getter function for a particular output FileInfo instance.
-    #  @param filename The name of the file described by the FileInfo instance.
-    #  @param trfName The name of the task to retrieve this FileInfo instance from. Defaults to the top-level task.
-    #  @return FileInfo instance (or @b None if retrieval was unsuccessful)
-    def outputFile( self, filename, trfName = None ):
-        for f in self.outputFiles( trfName ):
-            if f.filename() == filename: return f
-        return None
-
-    ## Add a FileInfo instance to the selected task's dictionary of output FileInfo instances.
-    #  @param info FileInfo instance to be added.
-    #  @param trfName The name of the task to add this FileInfo instance to. Defaults to the top-level task.
-    #  @warning If a similar entry (i.e. FileInfo instance with the same name) exists, it will be overwritten.
-    #  @return None
-    def addOutputFile( self, info, trfName = None ):
-        if info is None: 
-            return
-        self.task( trfName ).addFile( 'OUTPUT', info )
-        self.__modified = True
-
-    ## Remove a FileInfo instance from the selected task's dictionary of output FileInfo instances.
-    #  @param filename Name of the FileInfo instance to remove from the selected task's dictionary of output FileInfo instances.
-    #  @param trfName The name of the task to remove this FileInfo instance from. Defaults to the top-level task.
-    #  @return None
-    def removeOutputFile( self, filename, trfName = None ):
-        self.task( trfName ).outFiles.pop( filename )
-        self.__modified = True
-
-    ## Generator function yielding all (relevant) associated input FileInfo instances.
-    #  @param trfName Retrieval of all input FileInfo instances starting from the task @em trfName 
-    #  (preorder/depth-first traversal of tasks). Defaults to the top-level task.
-    #  @return FileInfo instance
-    def inputFiles( self, trfName = None ):
-        for t in self.tasks( trfName ):
-            for iFile in t.inFiles.values():
-                yield iFile
-
-    ## Getter function for a particular input FileInfo instance.
-    #  @param filename The name of the file described by the FileInfo instance.
-    #  @param trfName The name of the task to retrieve this FileInfo instance from. Defaults to the top-level task.
-    #  @return FileInfo instance (or @b None if retrieval was unsuccessful)
-    def inputFile( self, filename, trfName = None ):
-        for f in self.inputFiles( trfName ):
-            if f.filename() == filename: return f
-        return None
-
-    ## Add a FileInfo instance to the selected task's dictionary of input FileInfo instances.
-    #  @param info FileInfo instance to be added.
-    #  @param trfName The name of the task to add this FileInfo instance to. Defaults to the top-level task.
-    #  @warning If a similar entry (i.e. FileInfo instance with the same name) exists, it will be overwritten.
-    #  @return None
-    def addInputFile( self, info, trfName = None ):
-        if info is None: 
-            return
-        self.task( trfName ).addFile( 'INPUT', info )
-        self.__modified = True
-
-    ## Remove a FileInfo instance from the selected task's dictionary of input FileInfo instances.
-    #  @param filename Name of the FileInfo instance to remove from the selected task's dictionary of
-    #  input FileInfo instances.
-    #  @param trfName The name of the task to remove this FileInfo instance from.
-    #  Defaults to the top-level task.
-    #  @return None
-    def removeInputFile( self, filename, trfName = None ):
-        self.task( trfName ).inFiles.pop( filename )
-        self.__modified = True
-
-    ## Wrapper function to the generator function yielding all (relevant) associated TaskInfo instances.
-    #  @param trfName Retrieval of all TaskInfo instances starting from the task @em trfName
-    #  (preorder/depth-first traversal of tasks). If @c None, all sub-transform tasks including the top-level
-    #  transform are retrieved. Defaults to @c None.
-    #  @param createIfNotExist Boolean flag to denote if a new TaskInfo instance should be created 
-    #  if the task @em trfName is not found. Defaults to @b False.
-    #  @return TaskInfo instance
-    def tasks( self, trfName = None, createIfNotExist = False ):
-        return self._task.tasks( trfName, createIfNotExist )
-
-    ## Getter function for TaskInfo instance.
-    #  @param trfName The name of the task to retrieve. If @c None, the top-level
-    #  task is returned. Defaults to @c None. 
-    #  @param createIfNotExist Boolean variable to denote if a new TaskInfo instance should be created 
-    #  if the task @em trfName is not found. Defaults to @c False.
-    #  @exception KeyError is raised if the required task @em trfName cannot be found.
-    #  @return TaskInfo instance
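-    #  A minimal usage sketch (the sub-task name is hypothetical):
-    #  @code
-    #  top = report.task()  # top-level task
-    #  sub = report.task( 'MySubTrf', createIfNotExist = True )
-    #  @endcode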
-    def task( self, trfName = None, createIfNotExist = False ):
-        try:
-            return next( self.tasks( trfName, createIfNotExist ) )
-        except StopIteration:
-            raise KeyError( "Task %s not found." % trfName )
-
-    ## Add an AtlasErrorCodes.ErrorInfo instance to the current list of errors.
-    #  @param error The AtlasErrorCodes.ErrorInfo instance to add.
-    #  @param add Deprecated. Not in use.
-    #  @remarks The list of error categories (from errors encountered by the associated transform) is updated as well.
-    #  @return None
-    def addError( self, error, add = None ):
-        if error is None:
-            return
-        # set producer if not yet set
-        producer = error.producer or self._trf or 'Unknown'
-        error.producer = producer
-        # Update list of errors for the task concerned.
-        t = self.task( producer, createIfNotExist = True )
-        t.errorLimits[ error.severity ] -= 1
-        for e in t.errors:
-            # error already exists: Merge error
-            if e == error:
-                e += error
-                break
-        else:
-            if t.errorLimits[ error.severity ] < 0:
-                t.errorLimits[ error.severity ] = 0
-            else:
-                t.errors.append( copy( error ) )
-        self.__modified = True
-        if error.code:
-            self.addErrorCategory( error.category )
-
-    ## Add an AtlasErrorCodes.ErrorInfo instance to the current list of validation-related errors.
-    #  @param vError The AtlasErrorCodes.ErrorInfo instance to add.
-    #  @remarks The list of error categories is @em not updated unlike the addError() function.
-    #  @return None
-    def addValidationError( self, vError ):
-        if vError is None:
-            return
-        # set producer if not yet set
-        producer = vError.producer or self._trf or 'Unknown'
-        vError.producer = producer
-        # Update list of vErrors for the task concerned.
-        vErrors = self.task( producer, createIfNotExist = True ).validationErrors
-        for e in vErrors:
-            # vError already exists: Merge vError
-            if e == vError:
-                e += vError
-                break
-        else:
-            vErrors.append( copy( vError ) )
-        self.__modified = True
-
-    ## Add a category of error to the list of error categories to a selected task.
-    #  @param category The AtlasErrorCodes.ErrorCategory instance to add.
-    #  @param trfName The name of the task to add this AtlasErrorCodes.ErrorCategory instance to.
-    #  If @c None, the category is added to the top-level task. Defaults to @c None.
-    #  @return None
-    def addErrorCategory( self, category, trfName = None):
-        self.task( trfName ).addErrorCategory( category )
-        self.__modified = True
-
-    ## Adds the contents of the @em other report in one of three modes.
-    #  @param report Instance of @em other report.
-    #  @param mode 3 different modes are allowed:
-    #  @li @c APPEND_TASK : @em Add the main task from the @em other report to the current list of subtasks.
-    #  @li @c REPLACE     : @em Replace contents of current report with the contents of the @em other report.
-    #  @li @c MERGE       : @em Merge contents of the @em other report to self.
-    #  Defaults to @c APPEND_TASK.
-    #  @exception AttributeError Mode provided not recognised.
-    #  @exception KeyError The @em other report has missing @c athCode/athAcronym @c infos entries.
-    #  @return None
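-    #  A minimal usage sketch (assumes @c subReport is another JobReport instance):
-    #  @code
-    #  report.addReport( subReport )            # default mode: APPEND_TASK
-    #  report.addReport( subReport, 'merge' )   # mode strings are case-insensitive
-    #  @endcode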
-    def addReport( self, report, mode = None ):
-        APPEND_TASK, REPLACE, MERGE = 'APPEND_TASK', 'REPLACE', 'MERGE'
-        if report == self:
-            return
-        if mode is None:
-            mode = APPEND_TASK
-        mode = mode.upper()
-        if mode not in [ REPLACE, APPEND_TASK, MERGE ]:
-            raise AttributeError( "'mode' parameter can only accepts the following values: [ 'REPLACE', 'APPEND_TASK', 'MERGE' ]" )
-        # Add tasks
-        if mode == APPEND_TASK:
-            self._task.subTasks.append( report._task ) # append the top level task
-            try:
-                self._task.infos[ 'athCode' ] = report._task.infos[ 'athCode' ]
-                self._task.infos[ 'athAcronym' ] = report._task.infos[ 'athAcronym' ]
-            except KeyError:
-                self.__modified = True
-                # Commenting out on Nov 17, 2011 because it seems to
-                # produce misleading reports when jobs fail.
-                # Thomas Gadfort (tgadfort@bnl.gov)
-                #raise KeyError( 'Attempting to add a report (task %s) with missing athCode/athAcronym.' % report._task.taskName() )
-        elif mode == REPLACE:
-            self._trf = report._trf
-            self._trfVersion = report._trfVersion
-            self._trfComment = report._trfComment
-            self._task = report._task
-        elif mode == MERGE:
-            self._trf = report._trf
-            self._trfVersion = report._trfVersion
-            self._trfComment = report._trfComment or self._trfComment
-            self._task += report._task
-        self.__modified = True
-
-    ## Access to private boolean variable that keeps track of whether the job report was modified.
-    #  @see __modified
-    #  @return Boolean value
-    def modified(self):
-        return self.__modified
-
-    ## Generator for AtlasErrorCodes.ErrorCategory instances associated with a particular sub-transform task.
-    #  @param trfName The name of the particular sub-transform task to retrieve the 
-    #  AtlasErrorCodes.ErrorCategory instances from. If @c None, AtlasErrorCodes.ErrorCategory instances 
-    #  associated with @em all sub-transform tasks will be retrieved. Defaults to @c None.
-    #  @return AtlasErrorCodes.ErrorCategory instance
-    def errorCategories( self, trfName = None ):        
-        for t in self.tasks( trfName ):
-            for errCat in t.errorCategories.values():
-                yield errCat
-
-    ## Generator for AtlasErrorCodes.ErrorInfo instances associated with a particular sub-transform task 
-    #  and @em severity level.
-    #  @param severity The @em severity level required. Defaults to a list that consists of 
-    #  @em AtlasErrorCodes.FATAL and @em AtlasErrorCodes.ERROR error levels.
-    #  @param trfName The name of the particular sub-transform task to retrieve the AtlasErrorCodes.ErrorInfo
-    #  instance from. If @c None, AtlasErrorCodes.ErrorInfo instances associated with @em all sub-transform tasks
-    #  will be retrieved. Defaults to @c None.
-    #  @return AtlasErrorCodes.ErrorInfo instance
-    def errors( self, severity = None, trfName = None ):
-        if severity is None:
-            severity = [ AtlasErrorCodes.FATAL, AtlasErrorCodes.ERROR ]
-        elif not isinstance( severity, list ):
-            severity = [ severity ]
-        for t in self.tasks( trfName ):
-            for e in t.errors:
-                if e.severity in severity:
-                    yield e
-
-    ## Generator for validation errors associated with a particular sub-transform task.
-    #  @param trfName The name of the particular sub-transform task to retrieve the validation errors from.
-    #  If @c None, validation errors from @em all sub-transform tasks will be retrieved. Defaults to @c None.
-    #  @remark Validation errors are AtlasErrorCodes.ErrorInfo instances.
-    #  They are simply stored in a separate TaskInfo.validationErrors list.
-    #  @return AtlasErrorCodes.ErrorInfo instance
-    def validationErrors( self, trfName = None ):
-        for t in self.tasks( trfName ):
-            for e in t.validationErrors:
-                yield e
-
-    ## Number of AtlasErrorCodes.FATAL errors from a particular producer (or sub-transform task).
-    #  @param producer The name of the particular sub-transform task to retrieve the count from.
-    #  If @c None, the count will include @em all sub-transform tasks. Defaults to @c None.
-    #  @return Integer
-    def fatalCount( self, producer = None ):
-        return len( list( itertools.chain( self.errors( AtlasErrorCodes.FATAL, producer ), self.validationErrors( producer ) ) ) )
-
-    ## Number of AtlasErrorCodes.ERROR errors from a particular producer (or sub-transform task).
-    #  @param producer The name of the particular sub-transform task to retrieve the count from.
-    #  If @c None, the count will include @em all sub-transform tasks. Defaults to @c None.
-    #  @return Integer
-    def errorCount( self, producer = None ):
-        return len( list( self.errors( AtlasErrorCodes.ERROR, producer ) ) )
-
-    ## Number of AtlasErrorCodes.WARNING errors from a particular producer (or sub-transform task).
-    #  @param producer The name of the particular sub-transform task to retrieve the count from.
-    #  If @c None, the count will include @em all sub-transform tasks. Defaults to @c None.
-    #  @return Integer
-    def warningCount( self, producer = None ):
-        return len( list( self.errors( AtlasErrorCodes.WARNING, producer ) ) )
-
-    ## Retrieves the AtlasErrorCodes.ErrorInfo instance that corresponds to the first error of severity 
-    #  AtlasErrorCodes.FATAL or AtlasErrorCodes.ERROR encountered. If the JobReport.__ignoreUnknown flag is set,
-    #  the first AtlasErrorCodes.ErrorInfo instance that does not have an error code of @b TRF_UNKNOWN is returned.
-    #  @remarks If there are @em any validation errors, the @c --ignoreerrors=True option (if used) will be
-    #  ignored and the first validation error (also an AtlasErrorCodes.ErrorInfo instance) is returned instead.
-    #  @return AtlasErrorCodes.ErrorInfo instance
-    def getExitError( self ):
-        for eInfo in self.validationErrors():
-            self.validationSuccess = False
-            return eInfo
-        athenaOK = 'ATH_FAILURE' not in self.errorCategories()
-        try:
-            if athenaOK and self.info( 'athCode' ).contents() != '0':
-                athenaOK = False
-        except Exception:
-            athenaOK = False
-        for eInfo in self.errors():
-            if self.__ignoreUnknown and eInfo.code == 69999 and athenaOK:
-                continue
-            return eInfo
-        if athenaOK:
-            return AtlasErrorCodes.ErrorInfo( acronym = 'OK' )
-        return AtlasErrorCodes.ErrorInfo( acronym = 'ATH_FAILURE', severity = AtlasErrorCodes.FATAL )
-
-    ## Retrieve the code associated with an AtlasErrorCodes.ErrorInfo instance.
-    #  @param exitError The AtlasErrorCodes.ErrorInfo instance to retrieve the error code from. 
-    #  If @c None, the JobReport.getExitError() function is used to retrieve the required
-    #  AtlasErrorCodes.ErrorInfo instance. Defaults to @c None.
-    #  @remarks The errorCode is coded as: 10000 * @b X + 100 * @b Y + 10 * @b Z.
-    #  @return Integer
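-    #  For example, @b X=6, @b Y=52, @b Z=0 encodes to 10000*6 + 100*52 + 10*0 = 65200.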
-    def errorCode(self, exitError = None ):
-        if exitError is None:
-            error = self.getExitError()
-        else:
-            error = exitError
-        return error.code
-
-    ## Retrieve the error acronym associated with the exit error (an AtlasErrorCodes.ErrorInfo instance).
-    #  @return String
-    def errorAcronym(self):
-        return self.getExitError().acronym
-
-    ## A number based on the associated error code.
-    #  @remarks If the JobReport.__ignoreAll boolean value is @c True, the exit code is @c 0.
-    #  If the JobReport.__ignoreErrors boolean value is @c True and the @c athena.py successfully returns 
-    #  without any validation errors, the exit code is @c 0.
-    #  Otherwise, the exit code will simply be the @b Y portion of the error code.
-    #  @return Integer
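-    #  Worked example: for error code 65200 the @b Y portion is (65200 % 10000) // 100 = 52,
-    #  which is the value returned as the exit code.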
-    def exitCode( self ):
-        if self.__ignoreAll:
-            return 0
-        exitError = self.getExitError()
-        if self.__ignoreErrors:
-            try:
-                athCode = self.info( 'athCode' ).contents()
-            except Exception:
-                pass
-            else:
-                # Return success if Athena returns success regardless of any errors detected
-                if athCode == '0' and self.validationSuccess:
-                    return 0
-        err = self.errorCode( exitError )
-        # integer division: the exit code must stay an int (true division would give a float in py3)
-        ex = (err % 10000) // 100
-        # guard against zero exit code while having a non-zero error code
-        if ex == 0 and err != 0:
-            ex = err
-            while ex > 255: ex //= 100
-        return ex
-
-    ## Append the transform/producer name to a given @em filename.
-    #  @param filename The transform name is appended to the @em filename.
-    #  @remarks If the transform name is not defined, the given @em filename is returned unchanged.
-    #  @return String
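-    #  For illustration (hypothetical transform name): with @c self._trf set to 'MyTrf.py',
-    #  producerFilename( 'jobReport.txt' ) returns 'jobReport_MyTrf.txt'.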
-    def producerFilename(self,filename):
-        base,ext = os.path.splitext(filename)
-        if self._trf is None:
-            return filename
-        producer = os.path.splitext( self._trf )[0]
-        return base + '_' + producer + ext
-    
-    ## The current format of the metadata.xml file contains repeated elements (across entries).
-    #  This script (courtesy of Pavel Nevski) extracts repeated metadata to be placed in a common area
-    #  in the file so as to reduce the file size.
-    #  @param filename Name of the file to apply the script to.
-    #  @remarks This function has been made a staticmethod.
-    #  @return None
-    @staticmethod
-    def slimdownMetadata( filename ):
-        temp1 = 'tempfile1_TOBEREMOVED_'
-        temp2 = 'tempfile2_TOBEREMOVED_'
-        try:
-            # the actual execution
-            subprocess.check_call( [ "slimmetadata", filename, temp1, temp2 ] )
-        except subprocess.CalledProcessError as cpe:
-            # CalledProcessError has no 'message' attribute; format the exception itself
-            print ("Error slimming %s [%s]: %s" % ( filename, cpe.returncode, cpe ))
-        else:
-            print ("%s has been slimmed." % filename)
-        # removing the temp files
-        for f in [ temp1, temp2 ]:
-            try:
-                os.remove( f )
-            except Exception:
-                pass
-
-    ## A wrapper method to allow for metadata to be written in two different formats
-    #  depending on the boolean useNewMetadataFormat variable.
-    #  @param writeFinalCopy Boolean value to pass to underlying methods to determine
-    #  if the final version of the metadata.xml file should be written. Defaults to @c True.
-    #  @return None
-    def writeMetaDataXML( self, writeFinalCopy = True ):
-        if self.useNewMetadataFormat:
-            self.writeMetaDataXML_new( writeFinalCopy )
-        else:
-            self.writeMetaDataXML_old( writeFinalCopy )
-        self.__modified = False
-
-    ## Write out metadata in the new format.
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  metadata.xml file should be written. Defaults to @c True.
-    #  @remarks This new format does away with the need for the JobReport.slimdownMetadata() method.
-    #  @return None
-    def writeMetaDataXML_new( self, writeFinalCopy = True ):
-        headerLines = [ '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
-                        '<!-- ATLAS file meta-data catalog -->' ]
-        xmlStr = os.linesep.join( headerLines )
-        xmlStr += os.linesep + self._task.getXML()
-        filename = self.producerFilename( JobReport.metadata_xml )
-        print ("Writing new style %s file..." % filename)
-        try:
-            with open( filename, 'w' ) as metaFile:
-                metaFile.write( xmlStr + os.linesep )
-        except Exception as msg:
-            print ("WARNING: Could not write metadata to file %s: %s" % ( filename, msg ))
-            return
-        if writeFinalCopy and self._trf:
-            shutil.copyfile( filename, JobReport.metadata_xml )
-
-    ## Write out metadata in the current format.
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  metadata.xml file should be written. Defaults to @c True.
-    #  @remarks The current format requires the JobReport.slimdownMetadata() method to be executed.
-    #  @return None
-    def writeMetaDataXML_old(self,writeFinalCopy=True):
-        headerLines = [ '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>' ,
-                        '<!-- ATLAS file meta-data catalog -->' ,
-                        '<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">' ]
-        # gather all metadata names in the header
-        metaNames = set()
-        for f in self.outputFiles():
-            metaNames.update( f.metaData().keys() )
-        # add metadata names as META tags in xml header
-        # add POOLFILECATALOG header
-        xmlStr = os.linesep.join( headerLines )
-        filesNode = XMLNode("POOLFILECATALOG")
-        for m in metaNames:
-            meta = XMLNode("META").setAttributes(name=m,type="string")
-            filesNode.addContents( meta )
-        for f in self.outputFiles():
-            filesNode.addContents(f.xmlNode())
-        xmlStr += os.linesep + filesNode.getXML()
-        filename = self.producerFilename( JobReport.metadata_xml )
-        print ("Writing %s file..." % filename)
-        try:
-            with open( filename, 'w' ) as metaFile:
-                metaFile.write( xmlStr + os.linesep )
-        except Exception as msg:
-            print ("WARNING: Could not write metadata to file %s: %s" % ( filename, msg ))
-            return
-        # Temporary hack to remove redundant metadata.
-        JobReport.slimdownMetadata( filename )
-        if writeFinalCopy and self._trf:
-            shutil.copyfile( filename, JobReport.metadata_xml )
-
-    ## Write out the jobInfo.xml file.
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  jobInfo.xml file should be written. Defaults to @c True.
-    #  @return None
-    def writeJobInfoXML(self,writeFinalCopy=True):
-        # header
-        xmlStr = '<?xml version="1.0" encoding="UTF-8"?>'
-        jobInfo = XMLNode("jtinfo")
-        jobInfo.setAttribute('producer',self._trf)
-        if self._trfVersion:
-            jobInfo.setAttribute('producerversion',self._trfVersion)
-        if self._trfComment:
-            jobInfo.setAttribute('producercomment',self._trfComment)
-        # command
-        jobInfo.addContents( XMLNode("Command",self.command()) )
-        # exit code
-        jobInfo.addContents( XMLNode("ExitCode", self.exitCode()) )
-        # error code
-        err = self.getExitError()
-        cat = err.category
-        jobInfo.addContents( XMLNode("ErrorCode", cat.code) )
-        jobInfo.addContents( XMLNode("ErrorAcronym", cat.acronym) )
-        jobInfo.addContents( XMLNode("ErrorDescription", cat.description) )
-        jobInfo.addContents( XMLNode("ErrorMessage", err.message) )
-        jobInfo.addContents( XMLNode("ErrorSeverity", err.severity) )
-        # add error category counts
-        catFound = False
-        for cat in self.errorCategories():
-            catFound = True
-            jobInfo.addContents( cat.xmlNode() )
-        # Set OK
-        if not catFound:
-            jobInfo.addContents( AtlasErrorCodes.OK_CATEGORY.xmlNode() )
-        # add fatal/error/warning counts
-        jobInfo.addContents( XMLNode("warning",self.warningCount()) )
-        jobInfo.addContents( XMLNode("error",self.errorCount()) )
-        jobInfo.addContents( XMLNode("fatal",self.fatalCount()) )
-        # add errors
-        for error in itertools.chain( self.validationErrors(), self.errors() ):
-            jobInfo.addContents( error.xmlNode() )
-        # add any other info
-        # combine the infos from the current task and all its subtasks
-        tInfo = {}
-        for info in self.infos():
-            tInfo[ info.name() ] = info
-        for info in tInfo.values():
-            jobInfo.addContents( info.xmlNode() )
-        # make XML string
-        xmlStr += os.linesep + jobInfo.getXML()
-        filename=self.producerFilename( JobReport.jobinfo_xml )
-        print ("Writing %s file..." % filename)
-        try:
-            with open( filename, 'w' ) as jobFile:
-                jobFile.write( xmlStr + os.linesep )
-        except Exception as msg:
-            print ("WARNING: Could not write job info to file %s: %s" % ( filename, msg ))
-            return
-        self.__modified = False
-        if writeFinalCopy and self._trf:
-            shutil.copyfile(filename,JobReport.jobinfo_xml)
-
-    ## Write out the jobReport.txt.
-    #  @param filenameBase Name of the file the text version of the Job Report will be based on.
-    #  Defaults to @c JobReport.defaultFilenameBase.
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  jobReport.txt file should be written. Defaults to @c True.
-    #  @return None
-    def writeTXT(self,filenameBase=defaultFilenameBase,writeFinalCopy=True):
-        if not filenameBase.endswith( '.txt' ):
-            filenameBase += '.txt'
-        filename = self.producerFilename( filenameBase )
-        try:
-            with open( filename, 'w' ) as txtFile:
-                # always write full info to file
-                self.dump( options = [], notOptions = [], output = txtFile )
-        except Exception as msg:
-            print ("WARNING: Could not write job report to file %s: %s" % ( filename, msg ))
-            return
-        self.__modified = False
-        if writeFinalCopy and self._trf:
-            shutil.copyfile( filename, filenameBase )
-
-    ## Wrapper function to execute all JobReport persistency functions.
-    #  @param writeFinalCopy Boolean value to be passed to the various JobReport persistency functions.
-    #  Defaults to @c True.
-    #  @return None
-    def writeAll( self, writeFinalCopy = True ):
-        self.writeGPickle( writeFinalCopy = writeFinalCopy )
-        self.write( writeFinalCopy = writeFinalCopy )
-        self.writeTXT( writeFinalCopy = writeFinalCopy )
-        self.writeMetaDataXML( writeFinalCopy = writeFinalCopy )
-        self.writeJobInfoXML( writeFinalCopy = writeFinalCopy )
-
-    ## Dump the JobReport instance to a pickle file.
-    #  @param filenameBase The name of the pickle file created will be based on @em filenameBase.
-    #  Defaults to @c JobReport.defaultFilenameBase.
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  jobReport.pickle file should be written. Defaults to @c True.
-    #  @remarks The boolean return value signifies if the function was completed successfully.
-    #  @return Boolean
-    def write(self,filenameBase=defaultFilenameBase,writeFinalCopy=True):
-        if not filenameBase.endswith('.pickle'):
-            filenameBase += '.pickle'
-        filename = self.producerFilename( filenameBase )
-        try:
-            with open( filename, 'wb' ) as errorFile:
-                pickle.dump( self, errorFile, 2 )
-        except Exception as msg:
-            print ("WARNING: Could not pickle job report to file %s: %s" % ( filename, msg ))
-            return False
-        self.__modified = False
-        if writeFinalCopy and self._trf:
-            shutil.copyfile( filename, filenameBase )
-        return True
-
-    ## A wrapper method to allow the jobReport gpickle dictionary to be generated in two different formats
-    #  depending on the boolean useNewMetadataFormat variable.
-    #  @return Dictionary
-    def GPickleContents( self ):
-        if self.useNewMetadataFormat:
-            return self.GPickleContents_new()
-        return self.GPickleContents_old()
-
-    ## Generate the dictionary in the new format containing information required by
-    #  the Production System. The sequence and hierarchy of sub-transform tasks are preserved.
-    #  @remarks The structure of the dictionary to be returned has been pre-defined by
-    #  the Production System.
-    #  @return Dictionary
-    def GPickleContents_new(self):
-        # collect info about the error lines
-        errorLineInfo={}
-        for eltrf in itertools.chain( self.validationErrors(), self.errors() ):
-            try:
-                errorLineInfo[ eltrf.producer ].append( eltrf.xmlNode().getContents() )
-            except KeyError:
-                errorLineInfo[ eltrf.producer ] = [ eltrf.xmlNode().getContents() ]
-        # the info is not always available
-        info_try_block={'AtlasRelease':'unknown',
-                        'DBRelease':'unknown',
-                        'Workdir':'unknown',
-                        'RunDirUsedDisk':None,'RunDirAvailableDisk':None,
-                        'athCode':None,'athAcronym':'unknown'}
-        info_try_block_odict={'Machine':'unknown','Environment':'unknown'}
-        for info in info_try_block.keys():
-            try:
-                info_try_block[info]=self.info( info ).getContents()[ info ]
-            except Exception:
-                pass
-        for info in info_try_block_odict.keys():
-            try:
-                info_try_block_odict[info]=self.info( info ).getContents()
-            except Exception:
-                pass
-        nevents = 0
-        for oFile in self.outputFiles():
-            evts = oFile.metaData( 'events' )
-            if evts is not None and evts > nevents:
-                nevents = evts
-        trfExitCode = self.exitCode()
-        trfCode = self.errorCode()
-        errorCount = self.errorCount()
-        fatalCount = self.fatalCount()
-        
-        trfAcronym = AtlasErrorCodes.getAcronym( trfCode )
-        if trfExitCode == 0:
-            if errorCount > 0:
-                trfAcronym = AtlasErrorCodes.NEEDCHECK.acronym
-            if fatalCount > 0:
-                # There is a problem when trfExitCode=0 and there are fatal errors so reset to a TRF_EXE error
-                print ("Warning: Trf exit code was 0, but fatal errors were detected. Resetting trf exit code to TRF_EXE.")
-                trfAcronym = 'TRF_EXE'
-                trfCode = AtlasErrorCodes.getCode( trfAcronym )
-
-        # define the info structure as discussed with prodsys
-
-        self.gpickle_dict={'prodsys':{'athCode':info_try_block['athCode'],
-                                      'athAcronym':info_try_block['athAcronym'],
-                                      'trfExitCode':trfExitCode,
-                                      'trfCode':trfCode,
-                                      'trfAcronym':trfAcronym,
-                                      'nevents':nevents,
-                                      'more':{'Machine':info_try_block_odict['Machine']},
-                                      'task':self._task.getDict()
-                                     }, 
-                           'more':{
-                                      'Producer':self._trf,
-                                      'ProducerVersion':self._trfVersion,
-                                      'ProducerComment':self._trfComment, 
-                                      'Command':self._task.command,  
-                                      'Fatals':fatalCount, 
-                                      'Errors':errorCount,
-                                      'ErrorLines':errorLineInfo,
-                                      'Warnings':self.warningCount(),
-                                      'AtlasRelease':info_try_block['AtlasRelease'],
-                                      'DBRelease':info_try_block['DBRelease'],
-                                      'Workdir':info_try_block['Workdir'],
-                                      'Environment':info_try_block_odict['Environment'],
-                                      'RunDirUsedDisk':info_try_block['RunDirUsedDisk'],
-                                      'RunDirAvailableDisk':info_try_block['RunDirAvailableDisk'] 
-                                  }
-                          }
-        return self.gpickle_dict
-
-    ## Generate the dictionary in the old format containing information required by the Production System.
-    #  The sequence and hierarchy of sub-transform tasks are @em not preserved.
-    #  @remarks The structure of the dictionary to be returned has been pre-defined by
-    #  the Production System.
-    #  @return Dictionary
-    def GPickleContents_old(self):
-        # collect limited info about the inputfiles
-        ifiles=[]
-        for i in self.inputFiles():
-            try: 
-                ifiles.append( { 'lfn' : os.path.basename( i.filename() ), 
-                                 'GUID' : i.guid(), 
-                                 'dataset' : i.metaData( 'dataset' ) } )
-            except Exception:
-                print ('JobReport collecting info on input files: problems with ', i)
-        # collect info about output files
-        # (metadata is kept per file because of the combined trfs)
-        ofiles=[]                   
-        for of in self.outputFiles():
-            ofile={'lfn' : os.path.basename( of.filename() ),
-                   'GUID' : of.guid(),
-                   'dataset' : of.metaData( 'dataset' ),
-                   'size' : of.metaData( 'size' ),
-                   'events' : of.metaData( 'events' ),
-                   'checkSum': None,
-                   'more': {'metadata':of.metaData( moreOnly = True ) }
-                   } 
-            ofiles.append(ofile)
-        # collect info about the error lines
-        errorLineInfo={}
-        for eltrf in itertools.chain( self.validationErrors(), self.errors() ):
-            try:
-                errorLineInfo[ eltrf.producer ].append( eltrf.xmlNode().getContents() )
-            except KeyError:
-                errorLineInfo[ eltrf.producer ] = [ eltrf.xmlNode().getContents() ]
-        # the info is not always available
-        info_try_block={'AtlasRelease':'unknown',
-                        'DBRelease':'unknown',
-                        'Workdir':'unknown',
-                        'RunDirUsedDisk':None,'RunDirAvailableDisk':None,
-                        'athCode':None,'athAcronym':'unknown'}
-        info_try_block_odict={'Machine':'unknown','Environment':'unknown'}
-        for info in info_try_block.keys():
-            try:
-                info_try_block[info]=self.info( info ).getContents()[ info ]
-            except Exception:
-                pass
-        for info in info_try_block_odict.keys():
-            try:
-                info_try_block_odict[info]=self.info( info ).getContents()
-            except Exception:
-                pass
-        nevents = 0
-        for oFile in self.outputFiles():
-            evts = oFile.metaData( 'events' )
-            if evts is not None and evts > nevents:
-                nevents = evts
-        trfExitCode = self.exitCode()
-        trfCode = self.errorCode()
-        errorCount = self.errorCount()
-        fatalCount = self.fatalCount()
-        # N.B. This is a bit dangerous - it's setting a different report in the gpickle
-        # c.f. any other report file. It would be better to improve the exitCode and errorCode
-        # directly when there are non-zero error and fatal counts. But it seems to be needed by
-        # Tier-0 for now.
-        trfAcronym = AtlasErrorCodes.getAcronym( trfCode )
-        if trfExitCode == 0:
-            if errorCount > 0:
-                trfAcronym = AtlasErrorCodes.NEEDCHECK.acronym
-            if fatalCount > 0:
-                # There is a problem when trfExitCode=0 and there are fatal errors so reset to a TRF_EXE error
-                print ("Warning: Trf exit code was 0, but fatal errors were detected. Resetting trf exit code to TRF_EXC.")
-                trfAcronym = 'TRF_EXC'
-                trfCode = AtlasErrorCodes.getCode( trfAcronym )
-
-
-        # define the info structure as discussed with prodsys
-        self.gpickle_dict={'prodsys':{'athCode':info_try_block['athCode'],
-                                      'athAcronym':info_try_block['athAcronym'],
-                                      'trfExitCode':trfExitCode,
-                                      'trfCode':trfCode,
-                                      'trfAcronym':trfAcronym,
-                                      'nevents':nevents,
-                                      'jobInputs':ifiles,
-                                      'jobOutputs':ofiles,
-                                      'more':{'Machine':info_try_block_odict['Machine']}
-                                     }, 
-                           'more':{
-                                      'Producer':self._trf,
-                                      'ProducerVersion':self._trfVersion,
-                                      'ProducerComment':self._trfComment, 
-                                      'Command':self._task.command,  
-                                      'Fatals':fatalCount, 
-                                      'Errors':errorCount,
-                                      'ErrorLines':errorLineInfo,
-                                      'Warnings':self.warningCount(),
-                                      'AtlasRelease':info_try_block['AtlasRelease'],
-                                      'DBRelease':info_try_block['DBRelease'],
-                                      'Workdir':info_try_block['Workdir'],
-                                      'Environment':info_try_block_odict['Environment'],
-                                      'RunDirUsedDisk':info_try_block['RunDirUsedDisk'],
-                                      'RunDirAvailableDisk':info_try_block['RunDirAvailableDisk'] 
-                                  }
-                          } 
-        return self.gpickle_dict
-
-    ## Write out jobReport.gpickle file.
-    #  @param filenameBase The name of the file to be created is based on @em filenameBase.
-    #  Defaults to JobReport.defaultFilenameBase
-    #  @param writeFinalCopy Boolean value to determine if the final version of the
-    #  jobReport.gpickle file should be written. Defaults to @c True.
-    #  @remarks The return boolean value indicates if the creation of the @em gpickle file was successful.
-    #  @return Boolean
-    def writeGPickle(self,filenameBase=defaultFilenameBase,writeFinalCopy=True):
-        if not filenameBase.endswith('.gpickle'):
-            filenameBase += '.gpickle'
-        filename = self.producerFilename( filenameBase )
-        try:
-            with open( filename, 'wb' ) as errorFile: # binary mode: required by pickle, even for protocol 0
-                pickle.dump( self.GPickleContents(), errorFile, 0 ) # protocol 0 = ASCII text format
-        except Exception as msg:
-            print ("WARNING: Could not write gpickle job report to file %s: %s" % ( filename, msg ))
-            return False
-        self.__modified = False
-        if writeFinalCopy and self._trf:
-            shutil.copyfile(filename,filenameBase)
-        return True
-
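As a usage sketch (not part of the package), a downstream consumer such as a
production-system script might read the report back as follows, assuming a
jobReport.gpickle written by writeGPickle() above:

    import pickle

    # Load the protocol-0 (ASCII) report written by writeGPickle().
    with open('jobReport.gpickle', 'rb') as reportFile:
        report = pickle.load(reportFile)

    prodsys = report['prodsys']  # summary block built in GPickleContents()
    print(prodsys['trfExitCode'], prodsys['trfAcronym'], prodsys['nevents'])
    print('%s error(s), %s fatal(s)' % (report['more']['Errors'], report['more']['Fatals']))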
-    ## Setter function for the private JobReport.__ignoreUnknown property.
-    #  @param ignoreUnknown Boolean value to set the private variable to.
-    #  @return None
-    def setIgnoreUnknown( self, ignoreUnknown ):
-        """Mask all TRF_UNKNOWN errors. See getExitCode()."""
-        self.__ignoreUnknown = ignoreUnknown
-
-    ## Setter function for the private JobReport.__ignoreAll property.
-    #  @param ignoreAll Boolean value to set the private variable to.
-    #  @return None
-    def setIgnoreAll( self, ignoreAll ):
-        """Force transform to return successful exit code [0]
-        regardless of the actual exit codes. See exitCode()"""
-        self.__ignoreAll = ignoreAll
-
-    ## Setter function for the private JobReport.__ignoreErrors property.
-    #  @param ignoreErrors Boolean value to set the private variable to.
-    #  @return None
-    def setIgnoreErrors( self, ignoreErrors ):
-        """Force transform to return successful exit code [0] if athena returns successful [0]
-        regardless of the actual exit codes. See exitCode()"""
-        self.__ignoreErrors = ignoreErrors
-
diff --git a/Tools/PyJobTransformsCore/python/VTimer.py b/Tools/PyJobTransformsCore/python/VTimer.py
deleted file mode 100644
index 4ddcb524ab904e006d7be45da0aca2209dcf67cb..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/VTimer.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
-
-import pickle
-import os, time
-from resource import getrusage, RUSAGE_SELF, RUSAGE_CHILDREN
-    
-def pymon():
-    """Borrowed from PyMonUtils.py. Using RUSAGE_SELF and RUSAGE_CHILDREN."""
-    cpu = -1
-    vmem = -1
-    rss = -1
-    try:
-        cpu_self = getrusage( RUSAGE_SELF )
-        cpu_children = getrusage( RUSAGE_CHILDREN )
-        cpu = cpu_self.ru_utime + cpu_self.ru_stime + cpu_children.ru_utime + cpu_children.ru_stime # in seconds
-        with open( '/proc/%d/statm' % os.getpid(), 'r' ) as statm:
-            mem = statm.readline().split()
-        pageSize = os.sysconf( 'SC_PAGE_SIZE' ) / ( 1024. * 1024. ) # pages -> MB
-        vmem = int( mem[0] ) * pageSize
-        rss  = int( mem[1] ) * pageSize
-    finally:
-        # NB: returning from finally deliberately swallows any exception, keeping the -1 defaults
-        return cpu, vmem, rss
-    
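A minimal usage sketch: pymon() returns the summed CPU time of the process and
its children in seconds plus the current vmem/rss in MB, falling back to the
-1 defaults when /proc is unreadable:

    cpu, vmem, rss = pymon()
    print('cpu=%.1f s, vmem=%.1f MB, rss=%.1f MB' % (cpu, vmem, rss))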
-
-
-class VTimerContent( object ):
-    def __init__( self ):
-        self.reset()
-
-    def toDict( self ):
-        return { 'wallTime_Start' : self.wallTime_Start,
-                 'wallTime_Stop' : self.wallTime_Stop,
-                 'wallTime' : self.wallTime,
-                 'cpuTime_Start' : self.cpuTime_Start,
-                 'cpuTime_Stop' : self.cpuTime_Stop,
-                 'cpuTime' : self.cpuTime,
-                 'rss_Start' : self.rss_Start,
-                 'rss_Stop' : self.rss_Stop,
-                 'rss' : self.rss,
-                 'vmem_Start' : self.vmem_Start,
-                 'vmem_Stop' : self.vmem_Stop,
-                 'vmem' : self.vmem }
-
-    def reset( self ):
-        self.started = None
-        self.wallTime_Start = 0
-        self.cpuTime_Start = 0
-        self.wallTime_Stop = 0
-        self.cpuTime_Stop = 0
-        self.rss_Start = 0
-        self.vmem_Start = 0
-        self.rss_Stop = 0
-        self.vmem_Stop = 0
-        self.cpuTime = 0
-        self.wallTime = 0
-        self.rss = 0
-        self.vmem = 0
-
-class VTimer( object ):
-    def __init__( self, name = 'VTimer', logger = None ):
-        self.name = name
-        self._resultsDict = None
-        self._resultsStack = []
-        self._resultsFile = None
-        self.logger = logger
-
-    def reset( self ):
-        self._resultsDict = None
-        self._resultsStack = []
-        self._resultsFile = None
-        self._print( 'All timers have been deleted.' )
-        return
-
-    def _makeNewResultsDict( self, name ):
-        return { 'name' : name, 'vTimerContent' : VTimerContent(), 'trfList' : [] }
-
-    def _print( self, x, severity = 'info', altTimerName = None ):
-        if altTimerName is None:
-            tName = self.name
-        else:
-            tName = altTimerName
-        x = '%s %s' % ( tName, x )
-        try:
-            getattr( self.logger, severity )( x )
-        except Exception:
-            print (x)
-
-    def toHMS( self, seconds = 0 ):
-        if seconds < 0:
-            return '%s secs' % seconds
-        seconds = int( seconds )
-        hours = seconds // 3600  # integer division; '/' would produce floats in Python 3
-        seconds -= 3600 * hours
-        minutes = seconds // 60
-        seconds -= 60 * minutes
-        return '%02d hrs %02d mins %02d secs' % ( hours, minutes, seconds )
-
-    def start( self, name = 'DEFAULT' ):
-        try:
-            currentResultsDict = self._resultsStack[ -1 ]
-        except IndexError: # Initial use when _resultsDict is still None.
-            self._resultsDict = self._makeNewResultsDict( name )
-            self._resultsStack.append( self._resultsDict )
-            currentResultsDict = self._resultsDict
-        else: # normal case. New sub-trf started.
-            newResultsDict = self._makeNewResultsDict( name )
-            currentResultsDict[ 'trfList' ].append( newResultsDict )
-            self._resultsStack.append( newResultsDict )
-            currentResultsDict = newResultsDict
-        tContent = currentResultsDict[ 'vTimerContent' ]
-        tContent.started = True
-        tContent.wallTime_Start = time.time()
-        tContent.cpuTime_Start, tContent.vmem_Start, tContent.rss_Start = pymon()
-        self._print( 'start [%s] wall-time: %s, vmem: %s Mb, rss: %s Mb' % ( name, time.strftime("%a %b %d %H:%M:%S %Y %Z", time.localtime( tContent.wallTime_Start ) ), tContent.vmem_Start, tContent.rss_Start ) )
-
-    def _getDiffFromComponentTrfs( self, resultsDict ):
-        trfList = resultsDict[ 'trfList' ]
-        _rss, _vmem, _wallTime, _cpuTime = 0, 0, 0, 0
-        for rDict in trfList:
-            tContent = rDict[ 'vTimerContent' ]
-            _rss += tContent.rss
-            _vmem += tContent.vmem
-            _wallTime += tContent.wallTime
-            _cpuTime += tContent.cpuTime
-        tContent = resultsDict[ 'vTimerContent' ]
-        return tContent.wallTime - _wallTime, tContent.cpuTime - _cpuTime, tContent.vmem - _vmem, tContent.rss - _rss
-
-    def stop( self, name = 'DEFAULT'):
-        try:
-            currentResultsDict = self._resultsStack.pop( -1 )
-        except IndexError:
-            self._print( 'not initialised.' )
-            return
-        tContent = currentResultsDict[ 'vTimerContent' ]
-        # check if correct timer is to be stopped.
-        if currentResultsDict[ 'name' ] != name:
-            self._print( 'trying to stop wrong timer. Current timer is %s.' % currentResultsDict[ 'name' ] )
-            self._resultsStack.append( currentResultsDict ) # re-insert back onto stack.
-            return
-        if tContent.started is None:
-            self._print( 'not started [%s]' % name )
-            # not re-inserted onto _resultsStack, as it should not have been on the stack in the first place.
-            return
-        elif tContent.started is False:
-            self._print( 'already stopped [%s]' % name )
-            # not re-inserted onto _resultsStack, as it should not have been on the stack in the first place.
-            return
-        tContent.started = False # stop timer
-        tContent.cpuTime_Stop, tContent.vmem_Stop, tContent.rss_Stop = pymon()
-        tContent.wallTime_Stop = time.time()
-        tContent.cpuTime = tContent.cpuTime_Stop - tContent.cpuTime_Start
-        tContent.wallTime = tContent.wallTime_Stop - tContent.wallTime_Start
-        tContent.rss = tContent.rss_Stop - tContent.rss_Start
-        tContent.vmem = tContent.vmem_Stop - tContent.vmem_Start
-        # Insert an 'Other' entry in currentResultsDict['trfList']
-        # This special entry will be the diff between the currentResultsDict and its component trfs.
-        # Any other time consuming executions not already listed as a component trf will be 'caught'
-        # in this artificially created component trf entry.
-        if currentResultsDict[ 'trfList' ]:
-            otherResultsDict = self._makeNewResultsDict( 'Other' )
-            other_tContent = otherResultsDict[ 'vTimerContent' ]
-            other_tContent.started = False 
-            other_tContent.wallTime, other_tContent.cpuTime, other_tContent.vmem, other_tContent.rss = self._getDiffFromComponentTrfs( currentResultsDict )
-            currentResultsDict[ 'trfList' ].append( otherResultsDict )
-        self._print( 'stop [%s] wall-time: %s, vmem: %s Mb, rss: %s Mb' % ( name, time.strftime("%a %b %d %H:%M:%S %Y %Z", time.localtime( tContent.wallTime_Stop ) ), tContent.vmem_Stop, tContent.rss_Stop ) )
-        self.report( currentResultsDict )
-
-    def report( self, resultsDict ):
-        name = resultsDict[ 'name' ]
-        tContent = resultsDict[ 'vTimerContent' ]
-        if tContent.started is None: # timer not yet started
-            self._print( 'not started [%s]' % name )
-            return
-        elif tContent.started: # VTimer still running.
-            _cpuTime_Stop, _vmem_Stop, _rss_Stop = pymon()
-            _wallTime_Stop = time.time()
-            _rss = _rss_Stop - tContent.rss_Start
-            _vmem = _vmem_Stop - tContent.vmem_Start
-            _wallTime = _wallTime_Stop - tContent.wallTime_Start
-            _cpuTime = _cpuTime_Stop - tContent.cpuTime_Start        
-        else: # False. VTimer already stopped.
-            _rss = tContent.rss
-            _vmem = tContent.vmem
-            _cpuTime = tContent.cpuTime
-            _wallTime = tContent.wallTime
-        self._print( 'report [%s] wall-time: %s, cpu-time: %s, vmem: %s Mb, rss: %s Mb' % ( name, self.toHMS( _wallTime ), self.toHMS( _cpuTime ), _vmem, _rss ) )
-
-    def setResultsFile( self, fileName ):
-        """Write results file for composite/top level transform only."""
-        if fileName is None:
-            self._resultsFile = None # reset results file
-            return
-        if self._resultsFile is None and not os.path.exists( fileName ):
-            self._resultsFile = fileName
-
-    def writeResultsToFile( self, fileName, force = None ):
-        if self._resultsFile != fileName and not force: 
-            return
-        try:
-            fileObj = open( fileName, 'wb' ) # binary mode: pickle.HIGHEST_PROTOCOL is a binary protocol
-        except Exception:
-            self._print( 'Could not open %s for writing.' % fileName )
-            return
-        pickle.dump( self._resultsDict, fileObj, pickle.HIGHEST_PROTOCOL )
-        fileObj.close()
-
-vTimer = VTimer( 'Transform Timer' )
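A usage sketch for the module-level vTimer instance (timer names are
illustrative); nested start/stop pairs model sub-transforms, and an 'Other'
entry absorbs time not covered by any component:

    import time

    vTimer.start('MyTransform')   # top-level timer
    vTimer.start('SubStep')       # nested sub-trf timer
    time.sleep(1)
    vTimer.stop('SubStep')        # stops and reports the nested timer
    vTimer.stop('MyTransform')    # adds the 'Other' entry and reports the total
    print(vTimer.toHMS(3723))     # '01 hrs 02 mins 03 secs'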
diff --git a/Tools/PyJobTransformsCore/python/ValidateRootFile.py b/Tools/PyJobTransformsCore/python/ValidateRootFile.py
deleted file mode 100755
index ee8590381d840718b00a78deb14227d664fcddde..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/ValidateRootFile.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/env python
-
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
-
-import sys,os
-from PyUtils import RootUtils
-
-#return values
-#Positive number: Number of events
-#-1.. access problem
-#-2.. corruption found
-
-def checkPFCorruption(filename,verbose=False):
-    if not os.access(filename,os.R_OK):
-        print ("ERROR can't access file",filename)
-        return -1
-
-    ROOT = RootUtils.import_root()  # noqa: F841
-    from ROOT import TFile,TTree
-    
-    try:
-        f=TFile.Open(filename)
-    except Exception:
-        print ("Can't open file",filename)
-        return -1
-
-    n=None
-
-    keys=f.GetListOfKeys()
-    for k in keys:
-        try:
-            tn=k.GetName()
-            t=f.Get(tn)
-            if not isinstance(t,TTree): continue  # skip keys that are not TTrees
-        except Exception:
-            print ("Can't get tree %s from file %s",tn,fn)
-            f.Close()
-            return -1
-
-        if (verbose): print ("Working on tree",tn)
-        n=t.GetEntriesFast()
-        for i in range(n):
-            s=t.GetEntry(i)
-            if s<=0:
-                print ("Tree %s: Found corruption in event %i" % (i,n))
-                f.Close()
-                return -2
-            else:
-                if verbose and i>0 and i%100==0:
-                    print ("Checking event",i)
-        print ("Tree %s: %i event(s) ok" % (tn,n))
-
-        pass #end of loop over trees
-
-    f.Close()
-    print ("ROOT file",filename,"looks ok")
-    if n is None:
-        print ("Failed to determine number of events in file %s. No tree named 'CollectionTree'" % filename)
-        return 0
-    return n
-
-
-if __name__ == "__main__":
-    if len(sys.argv)!=2:
-        print ("Usage: ",sys.argv[0]," <file>")
-    else:
-        fn=sys.argv[1]
-        checkPFCorruption(fn)#,True)
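A usage sketch of the return-code convention (this needs an Athena/ROOT
environment for PyUtils.RootUtils; the file name is hypothetical):

    n = checkPFCorruption('myESD.pool.root', verbose=True)
    if n == -1:
        print('file not accessible')
    elif n == -2:
        print('corruption detected')
    else:
        print('%d event(s) validated' % n)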
diff --git a/Tools/PyJobTransformsCore/python/athena_wrapper.py b/Tools/PyJobTransformsCore/python/athena_wrapper.py
deleted file mode 100755
index d3e2296b3b8e91a045b63db3ec13c7fbaa251532..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/athena_wrapper.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/bin/sh
-
-# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
-
-## @package athena_wrapper
-#
-#  @brief Wrapper script for @c athena.py.
-#  @details This script attempts to:
-#  @li Mimic @c athena.py's memory management settings.
-#  @li Translate relevant options given at the transform command line to @c athena.py options.
-#  @li Handle any exceptions that arise from the execution of @c athena.py and create the
-#  appropriate AtlasErrorCodes.ErrorInfo object and a corresponding job report file.
-#
-
-"""date"      # executable for the shell, but a string block to python 
-# First part (shell part) copied from athena.py
-
-# "detect" valgrind
-usesvalgrind=`echo "$LD_PRELOAD" | grep valgrind`
-
-python_path=`which python`
-
-### Ugly hack to preload libtcmalloc.so
-
-#use tcmalloc by default if TCMALLOCDIR is defined
-export USETCMALLOC=0
-
-# possibly, allow user to run with full tcmalloc by setting this variable
-if [ -z $USETCMALLOCMINIMAL ]; then
-    export USETCMALLOCMINIMAL=1
-fi
-
-# if any of these variables is defined, we use tcmalloc to do some
-# profiling, then we need the full library !
-if [ -n "$CPUPROFILE" ] || [ -n "$HEAPPROFILE" ] || [ -n "$HEAPCHECK" ]; then
-    USETCMALLOCMINIMAL=0
-fi
-
-if [ -z $TCMALLOCDIR ]; then
-   echo "WARNING: TCMALLOCDIR not defined, will use libc malloc"
-   USETCMALLOC=0
-else
-   USETCMALLOC=1
-fi
-
-export ATHENA_ADD_PRELOAD=''
-
-genericOptions=""
-
-for a in ${@}
-do
-    case $a in
-        --leak-check*)      
-            USETCMALLOC=0
-            genericOptions="$genericOptions $a";;
-        --delete-check*)    
-            USETCMALLOC=0
-            genericOptions="$genericOptions $a";;
-        --stdcmalloc)        
-            USETCMALLOC=0
-            genericOptions="$genericOptions $a";;
-        --tcmalloc)         
-            USETCMALLOC=1
-            genericOptions="$genericOptions $a";;
-        --rss*)
-            rssval=`expr "$a" : '--rss=\([0-9]*\)'`
-            if [ "$rssval" != "" ]; then
-                ulimit -m $((rssval*1024))
-                echo "Maximum resident memory: " `ulimit -m` "Kb"
-            fi;;
-        --vmem*)
-            vmemval=`expr "$a" : '--vmem=\([0-9]*\)'`
-            if [ "$vmemval" != "" ]; then
-                ulimit -v $((vmemval*1024))
-                echo "Maximum virtual memory: " `ulimit -v` "Kb"
-            fi;;
-        --preloadlib*)
-            ATHENA_ADD_PRELOAD=${a#*=};;
-        *)
-            genericOptions="$genericOptions $a";;
-    esac
-done
-
-if [ $USETCMALLOC == 1 ]; then
-   if [ -z $TCMALLOCDIR ]; then
-      echo "ERROR: TCMALLOCDIR not defined"
-      exit 1
-   fi
-   # test, if minimal tcmalloc is available. fallback to full library, if not
-   if [ $USETCMALLOCMINIMAL == 1 ]; then
-      if [ ! -e "$TCMALLOCDIR/libtcmalloc_minimal.so" ]; then
-         echo "WARNING: $TCMALLOCDIR/libtcmalloc_minimal.so does not exist. Falling back to libtcmalloc"
-         USETCMALLOCMINIMAL=0
-      else
-         echo "Preloading tcmalloc_minimal.so"
-      fi
-   fi
-   # finally, preload and run with correct tcmalloc, if requested
-   if [ $USETCMALLOCMINIMAL == 0 ]; then
-      if [ ! -e "$TCMALLOCDIR/libtcmalloc.so" ]; then
-         echo "ERROR: $TCMALLOCDIR/libtcmalloc.so does not exist"
-         exit 1
-      fi
-      echo "Preloading tcmalloc.so"
-      if [ -z $LD_PRELOAD ]; then
-         export LD_PRELOAD="$TCMALLOCDIR/libtcmalloc.so"
-      else
-         export LD_PRELOAD="$TCMALLOCDIR/libtcmalloc.so:$LD_PRELOAD"
-      fi
-   else
-      if [ -z $LD_PRELOAD ]; then
-         export LD_PRELOAD="$TCMALLOCDIR/libtcmalloc_minimal.so"
-      else
-         export LD_PRELOAD="$TCMALLOCDIR/libtcmalloc_minimal.so:$LD_PRELOAD"
-      fi
-   fi
-fi
-
-# optionally add user-specific preload library
-if [ -n "$ATHENA_ADD_PRELOAD" ]; then
-    echo "Preloading $ATHENA_ADD_PRELOAD"
-    if [ -z $LD_PRELOAD ]; then
-        export LD_PRELOAD="$ATHENA_ADD_PRELOAD"
-    else
-        export LD_PRELOAD="$ATHENA_ADD_PRELOAD:$LD_PRELOAD"
-    fi
-else
-    unset ATHENA_ADD_PRELOAD
-fi
-
-
-if [ "$usesvalgrind" != "" ]; then
-   ${python_path} -tt "$0" ${genericOptions}        # no exec, a requirement of valgrind?
-   exit $?
-else
-   exec ${python_path} -tt "$0" ${genericOptions}
-fi
-"""           # python execution starts here, the shell never reaches this
-#"""  # extra line to fool emacs
-
-import sys
-from past.builtins import execfile
-from AthenaCommon.Include import IncludeError
-from PyJobTransformsCore import trferr, trfconsts
-from PyJobTransformsCore.JobReport import JobReport
-from PyJobTransformsCore import AtlasErrorCodes
-
-# flake8: noqa
-
-## The err variable will be used to contain an ErrorInfo instance
-#  after the trferr.errorHandler handles an exception.
-err = None
-
-# Hack to support spaces in the value of the '-c' parameter.
-# It is assumed that the '-c' option is the last option to be given (if at all)
-try:
-    ## Position of the '-c' option given at the command line.
-    cPos = sys.argv.index( '-c' )
-except Exception:
-    pass
-else:
-    sys.argv[ cPos + 1 ] = ' '.join( sys.argv[ cPos + 1 : ] )
-    while len( sys.argv ) > cPos + 2:
-        sys.argv.pop()
-
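The effect of this rejoining hack, as a standalone sketch with an illustrative
command line:

    argv = ['athena.py', 'myJobO.py', '-c', 'EvtMax=5;', 'OutputLevel=DEBUG']
    cPos = argv.index('-c')
    argv[cPos + 1] = ' '.join(argv[cPos + 1:])   # glue everything after '-c' back together
    while len(argv) > cPos + 2:
        argv.pop()
    print(argv)  # ['athena.py', 'myJobO.py', '-c', 'EvtMax=5; OutputLevel=DEBUG']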
-try:
-    sys.argv[ 0 ] = trfconsts.athena_py
-    ## The athena executable is expected to be the first argument
-    athena_exe = sys.argv[ 1 ]
-    sys.argv.pop( 0 ) # remove first argument (i.e. athena full path)    
-    print (' '.join( sys.argv ) )
-    execfile( athena_exe )
-
-# Known exceptions not deriving from exceptions.Exception
-# (the only way to catch the object)
-except IncludeError as e:
-    err = trferr.errorHandler.handleException( e )    
-
-except KeyboardInterrupt as e: 
-    err = trferr.errorHandler.handleException( e )
-
-except SystemExit as e:
-    err = trferr.errorHandler.handleException( e )
-    try:
-        ## Retrieve the error argument. Older versions of @c athena.py did not provide any arguments when successful.
-        #  This will result in an exception when @c arg argument is accessed.
-        rc = e.args[ 0 ]
-        # newer versions of @c athena.py provide an error argument.
-        # Raise Exception to make use of the same try/exception structure.
-        if rc == 0:
-            raise Exception
-    except Exception: # successful athena job
-        print ('%s - exit code 0.' % ' '.join( sys.argv ))
-        sys.exit( 0 )
-    else: # unsuccessful athena job
-        ## Create a blank JobReport instance and populate it with the error detected.
-        jobReport = JobReport()
-        jobReport.setProducer( 'athena' )
-        jobReport.addError( err )
-        jobReport.write()
-        print ('%s - exit code %s' % ( ' '.join( sys.argv ), rc ))
-        sys.exit( rc )
-
-# Exceptions derived from exceptions.Exception
-except Exception as e:
-    err = trferr.errorHandler.handleException( e )
-    if err is None:
-        err = AtlasErrorCodes.ErrorInfo( acronym = 'ATH_EXC_PYT',
-                                         severity = AtlasErrorCodes.FATAL,
-                                         message = '%s: %s' % ( e.__class__.__name__, e.args ) )
-
-# Some throw a string
-except str as e:
-    err = AtlasErrorCodes.ErrorInfo( acronym = 'ATH_EXC_PYT',
-                                     severity = AtlasErrorCodes.FATAL,
-                                     message = e )
-# Catch all other exceptions
-except Exception:
-    err = AtlasErrorCodes.ErrorInfo( acronym = 'ATH_EXC_PYT',
-                                      severity = AtlasErrorCodes.FATAL )
-
-if err is not None:
-    jobReport = JobReport()
-    jobReport.setProducer( 'athena' )
-    jobReport.addError( err )
-    jobReport.write()
-    sys.exit( jobReport.exitCode() )
diff --git a/Tools/PyJobTransformsCore/python/basic_trfarg.py b/Tools/PyJobTransformsCore/python/basic_trfarg.py
deleted file mode 100755
index f8cbb6be2b70c0bd16dcc7eb5aada65407389aa4..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/basic_trfarg.py
+++ /dev/null
@@ -1,1348 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-## @package basic_trfarg
-#
-#  @brief Package contains the basic argument types for JobTransforms.
-#  @details Classes defined in this package are not intended to be used directly.
-#  @see Argument classes designed to be used can be found in the full_trfarg package.
-
-from __future__ import print_function
-
-import os, sys, time, fnmatch, subprocess, copy, re
-from PyJobTransformsCore import fileutil, envutil
-from PyJobTransformsCore.trfutil import AODFile, BSFile, CommentLine, DPDFile, ESDFile, EvgenFile, FileType, HitsFile, PoolDataFile, expandStringToList, strip_suffix
-from PyJobTransformsCore.trferr import TransformDefinitionError, TransformArgumentError, InputFileError, OutputFileError
-from PyJobTransformsCore.JobReport import FileInfo
-from PyJobTransformsCore.TransformLogger import TransformLogger
-
-
-## @brief Base class of all transform argument classes.
-#  @details The Argument base class specifies basic properties of a command line argument.
-#  Only fully specified argument classes can be used in a concrete trf.JobTransform implementation.
-#  A fully specified argument is derived from one of the basic argument classes defined
-#  in this package and should override the member function isFullArgument() allowing
-#  it to return @c True.
-#  @remarks The @em base class Argument is itself derived from the TransformLogger to benefit from logging facilities.
-#  It is probably better to gain such facilities through delegation rather than inheritance.
-class Argument( TransformLogger ):
-    ## Argument constructor
-    #  @param help A short help (String) to assist the user with the use of the argument.
-    #  @param name Name of the argument (String). Defaults to string obtained from Argument.defaultName().
-    #  @return Argument instance
-    def __init__( self, help = 'default', name = 'default' ):
-        if help == 'default': help = self.defaultHelp()
-        if name == 'default': name = self.defaultName()
-        TransformLogger.__init__( self, name )
-        # The name (String) of the argument.
-        self._name = name
-        # The help (String) to assist with the population of the argument.
-        self._help = help
-        ## The current value attributed to this argument.
-        self._value = None
-        ## The original value of the argument prior to any changes.
-        # Allows for any changed value to be reverted to its original value.
-        self._originalValue = None
-        ## Numerical position of the argument in a list of arguments.
-        self._position = 0
-
-    ## Allow the argument to be tested for validity, i.e. whether _value has been set.
-    #  @return Boolean
-    def __bool__( self ):
-        return self._value is not None
-
-    ## Getter function for the short help (String).
-    #  @return String
-    def help( self ):
-        return self._help
-
-    ## @brief Retrieve the full help (String) supplemented by information in the Argument object.
-    #  @details Various pieces of additional information are added depending on whether the argument is
-    #  @em optional, its @em position in the command line, and the presence of any @em default values.
-    #  @return String
-    def fullHelp( self ):
-        help = ''
-        if self.isOptional():
-            help += '['
-        else:
-            help += ' '
-        if self.hasPosition():
-            help += '%2d ' % self.position()
-        else:
-            help += ' ? '
-        help += '%s' % self._name
-        if self.isOptional(): help += ']'
-        help += ' (%s)' % self.basicType()
-        if self.hasDefault(): help += ' default=%s' % repr( self.default() )
-        return '%s # %s' % ( help, self.help() )
-
-    ## Retrieve default help string.
-    #  @remarks Must be re-implemented in derived classes.
-    #  @return String
-    def defaultHelp( self ):
-        return self.__class__.__doc__
-
-    ## Check if argument instance has the _default property.
-    #  @return Boolean
-    def hasDefault( self ):
-        return hasattr( self, '_default' )
-
-    ## Getter function for the _default property. 
-    #  @return value of _default property (or @c None if the property is missing).
-    def default( self ):
-        return getattr( self, '_default', None )
-
-    ## Setter function for the _default property of this argument.
-    #  This implies that this argument is @em optional.
-    #  @param default The value to assigned to the _default property. 
-    #  @remarks If @em default is @c None, the _default property (if it exists) is removed.
-    #  @exception TransformDefinitionError, TransformArgumentError 
-    #  is raised when the default value cannot be set successfully.
-    #  @return None
-    def setDefault( self, default ):
-        if default is None:
-            if self.hasDefault(): del self._default
-        else:
-            # test that the default value can be converted to the correct type
-            try: 
-                self.toPython( default )
-            except TransformDefinitionError as msg:
-                # re-raise with different error message
-                raise TransformDefinitionError( 'Unable to set default value: %s' % msg )
-            except TransformArgumentError as msg:
-                raise TransformArgumentError( 'Unable to set default value: %s' % msg )
-            # store original value (so self.setValue() will do the conversion)
-            self._default = default
-
-    ## Setter function for the _name property.
-    #  @return None
-    def setName( self, name ):
-        self._name = name
-
-    ## Getter function for the _name property.
-    #  @return String
-    def name( self ):
-        return self._name
-
-    ## Retrieve the default name of the argument which is the String derived from the
-    #  argumentType() function with its first character converted to lowercase.
-    #  @return String
-    def defaultName( self ):
-        meta = self.argumentType()
-        if meta == '':
-            return self.__class__.__name__
-        return meta[ 0 ].lower() + meta[ 1: ]
-
-    ## Set the value of the argument.
-    #  @param value @em value is converted to the correct type based on the different
-    #  toPython() methods defined by different/derived arguments. 
-    #  If @c None, the argument's value is cleared.
-    #  @return None
-    def setValue( self, value ):
-        if value is None:
-            self._value = None
-            self._originalValue = None
-            self.logger().debug( "Cleared value" )
-        else:
-            oldValue = self._value
-            self._value = self.toPython( value )
-            if self._value != oldValue:
-                self._originalValue = value
-    
-    ## Getter function for the _value property.
-    #  @return The _value property
-    def value( self ):
-        return self._value
-
-    ## Check if the argument's value has been set.
-    #  @return Boolean
-    def hasValue( self ):
-        return self._value is not None
-
-    ## The original input value as provided at the command line.
-    #  @return The _originalValue property
-    def originalValue( self ):
-        return self._originalValue
-
-    ## Setter for the _position property.
-    #  @param pos The position of the argument value at the command line.
-    #  @return None
-    def setPosition( self, pos ):
-        self._position = max( 0, pos )
-
-    ## Check if the position property has been set.
-    #  @return Boolean
-    def hasPosition( self ):
-        return self._position > 0
-
-    ## Getter function for the _position property.
-    #  @return Integer
-    def position( self ):
-        return self._position
-
-    ## Retrieve the type of the argument by inspecting its class name.
-    #  The argument type is essentially the class name without the 'Arg' suffix.
-    #  @return String
-    def argumentType( self ):
-        meta = self.__class__.__name__
-        # remove the "Arg" postfix from the classname
-        meta = strip_suffix( meta, 'Arg' )
-        return meta
-
-    ## Determine whether the argument, when used in a transform, must be given a value
-    #  or can fall back on a default value.
-    #  @return Boolean
-    def isOptional( self ):
-        return self.hasDefault()
-
-    ## Create a formatting string that will be used in templates that accept dictionaries
-    #  when populating the variable components of the template
-    #  (rather than tuples of positional sensitive values).
-    #  @return String
-    def namedTypeFormat( self ):
-        return '%%(%s)%s' % ( self._name, self.typeFormat() )
-
-    ## Generate an object member assignment string in Python syntax.
-    #  @param objName The name of the object which the argument (self) is a property of. Defaults to @c None.
-    #  @return String
-    def pythonVariableTemplate( self, objName = None ):
-        plate = ''
-        if objName: plate += objName + '.'
-        plate += '%s = %s' % (self._name,self.namedTypeFormat())
-        return plate
-
-    ## Identical to pythonVariableTemplate().
-    #  @return String
-    def runArgsTemplate( self, objName ):
-        return self.pythonVariableTemplate( objName )
-
-    ## Generate a formatted help String.
-    #  @return String
-    def runArgsComment( self ):
-        return os.linesep + CommentLine( self._help ).smallComment()
-        
-    ## Deprecated function.
-    #  @warning Deprecated. Do not use.
-    def jobOrTask( self ):
-        return 'task'
-
-    ## Return the argument and its value in a dictionary.
-    #  @return Dictionary
-    def metaData( self ):
-        if not self:
-            return {}
-        else:
-            return { self.name() : self.value() }
-
-    ## Return a single character representing the type of the value of the argument according to
-    #  Python's String formatting syntax. Eg. An Integer is @c d, a String is @c s, etc.
-    #  @return a single character.
-    def typeFormat( self ):
-        return 'r'
-
-    ## Private helper function. Raise exception with error message in case something is not implemented in a class.
-    #  @param what The name of the method not implemented.
-    #  @exception TransformDefinitionError is raised invariantly when function is called.
-    def _notImplemented( self, what ):
-        raise TransformDefinitionError( '%s not implemented in class %s' % ( what, self.__class__.__name__ ) )
-
-    ## Convert a given @em val to the Python type the argument expects.
-    #  @param val Convert the String obtained at the command line to the required Python type.
-    #  @remarks This is a virtual method to be re-implemented by a derived class.
-    #  A call to _notImplemented() invariantly raises a TransformDefinitionError exception.
-    def toPython( self, val ):
-        self._notImplemented( 'toPython()' )
-
-    ## Retrieve the basic Python type for this argument.
-    #  @warning Deprecated. Do not use.
-    #  @remarks This is a virtual method to be re-implemented by a derived class.
-    #  A call to _notImplemented() invariantly raises a TransformDefinitionError exception.
-    def basicType( self ):
-        self._notImplemented( 'basicType()' )
-
-    ## Retrieve the type associated with this argument as defined by the Production System.
-    #  @remarks This is a virtual method to be re-implemented by a derived class.
-    #  A call to _notImplemented() invariantly raises a TransformDefinitionError exception.
-    def type( self ):
-        self._notImplemented( 'type()' )
-
-    ## Retrieve the metaType associated with this argument as defined by the Production System.
-    #  @remarks This is a virtual method to be re-implemented by a derived class.
-    #  A call to _notImplemented() invariantly raises a TransformDefinitionError exception.
-    def metaType( self ):
-        self._notImplemented( 'metaType()' )
-
-    ## This function determines if the argument has been designed for internal use only or
-    #  permitted to be used in a transformation.
-    #  @return Boolean
-    def isFullArgument( self ):
-        return False
-
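As a sketch of the pattern this base class documents (illustrative only; the
real, fully specified arguments live in the full_trfarg package), a concrete
argument subclasses one of the basic types defined below and overrides
isFullArgument():

    class MaxEventsArg(IntegerArg):
        """Hypothetical fully specified argument: maximum number of events."""
        def __init__(self, help='Maximum number of events to process', name='maxEvents'):
            IntegerArg.__init__(self, help, name)

        def isFullArgument(self):
            return True  # marks the argument as usable in a concrete JobTransform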
-
-## Mix-in argument helper base class allowing the setting of value ranges.
-class ArgRange:
-    ## ArgRange constructor.
-    #  @param minimum Set the minimum value permissible. The minimum property is not created if not declared in the constructor.
-    #  @param maximum Set the maximum value permissible. The maximum property is not created if not declared in the constructor.
-    def __init__( self, minimum = None, maximum = None ):
-        if minimum is not None: self.minimum = minimum
-        if maximum is not None: self.maximum = maximum
-
-    ## Check the existence of the minimum property.
-    #  @return Boolean
-    def hasMinimum( self ):
-        return hasattr( self, 'minimum' )
-
-    ## Check the existence of the maximum property.
-    #  @return Boolean
-    def hasMaximum( self ):
-        return hasattr( self, 'maximum' )
-
-    ## Getter function for the minimum property.
-    #  @return Integer (@c None if the minimum property does not exist)
-    def getMinimum( self ):
-        return getattr( self, 'minimum', None )
-
-    ## Getter function for the maximum property.
-    #  @return Integer (@c None if maximum property does not exist )
-    def getMaximum( self ):
-        return getattr( self, 'maximum', None )
-
-    ## Check if @em val is within the set range.
-    #  @param val The value to check against the range.
-    #  @return Integer (@c 0 if within the range, @c -1 if less than the minimum and @c 1 if greater than the maximum).
-    def checkRange( self, val ):
-        if self.hasMinimum() and val < self.minimum: return -1
-        if self.hasMaximum() and val > self.maximum: return +1
-        return 0
-
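For example, checkRange() maps a value to -1/0/+1 relative to the configured
bounds:

    r = ArgRange(minimum=0, maximum=100)
    print(r.checkRange(-5))   # -1: below the minimum
    print(r.checkRange(50))   #  0: within range
    print(r.checkRange(150))  # +1: above the maximum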
-
-## Mix-in argument helper base class allowing a choice of permitted values.
-class ArgChoices:
-    ## ArgChoices constructor
-    #  @param choices List of choices or a comma-delimited String of choices.
-    #  @remarks If choices list is empty, then any value is accepted.
-    #  @return ArgChoices instance 
-    def __init__( self, choices ):
-        self.setChoices( choices )
-
-    ## Generate the default help String.
-    #  @return String
-    def defaultHelp( self ):
-        help = Argument.defaultHelp( self )
-        choicesHelp = self.choicesHelp()
-        if choicesHelp:
-            help += '. ' + choicesHelp
-        return help
-
-    ## Getter function for _choices property.
-    #  @return List
-    def choices( self ):
-        return self._choices
-
-    ## Setter function for the _choices property.
-    #  @param choices List of choices or a comma-delimited String of choices.
-    #  @return None
-    def setChoices( self, choices ):
-        try:
-            self._choices = choices.split( ',' )
-        except AttributeError:
-            if isinstance( choices, list ):
-                self._choices = choices
-            else:
-                self._choices = []
-
-    ## Help String associated with the choices allowed.
-    def choicesHelp( self ):
-        if self._choices:
-            return 'Possible values: %s' % ( ','.join( [ str( c ) for c in self._choices ] ) )
-        else:
-            return ''
-
-    ## Check if @em val is one of choices.
-    #  @param val The value to check.
-    #  @remarks checkChoices always returns @c True if the _choices property is @c [].
-    #  @return Boolean
-    def checkChoices( self, val ):
-        return not self._choices or val in self._choices
-
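For example, choices can be given as a comma-delimited string or a list, and an
empty choice list accepts any value:

    c = ArgChoices('AOD,ESD,HITS')
    print(c.checkChoices('ESD'))                     # True
    print(c.checkChoices('RAW'))                     # False
    print(ArgChoices([]).checkChoices('anything'))   # True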
-
-## An argument designed to contain an Integer.
-class IntegerArg( Argument ):
-    ## IntegerArg constructor.
-    #  @param help The help String for this argument.
-    #  @param name The name of this argument. Defaults to @em default. 
-    #  @return IntegerArg instance
-    def __init__( self, help, name = 'default' ):
-        Argument.__init__( self, help, name )
-
-    ## The Python type of this argument's value.
-    #  @return String
-    def basicType( self ):
-        return 'int'
-
-    ## Retrieve the type associated with this argument as defined by the Production System.
-    #  @return String
-    def type( self ):
-        return 'natural'
-
-    ## Retrieve the metaType associated with this argument as defined by the Production System.
-    #  @return String
-    def metaType( self ):
-        return 'plain'
-
-    ## Return a single character representing the type of the value of the argument according to
-    #  Python's String formatting syntax. Eg. An Integer is @c d, a String is @c s, etc.
-    #  @return a single character.
-    def typeFormat( self ):
-        return 'd'
-
-    ## Convert a given @em val to the Python type the argument expects.
-    #  @param val Convert the String obtained at the command line to an Python Integer.
-    #  @exception TransformArgumentError is raised if the conversion failed.
-    def toPython( self, val ):
-        try:
-            return int( val )
-        except ValueError :
-            raise TransformArgumentError( '%s=%s is not of type %s' % ( self.name(), repr( val ), self.basicType() ) )
-
-
-## An argument designed to contain a Float.
-class FloatArg( Argument ):
-    ## FloatArg constructor.
-    #  @param help The help String for this argument.
-    #  @param name The name of this argument. Defaults to @em default. 
-    #  @return FloatArg instance
-    def __init__( self, help, name = 'default' ):
-        Argument.__init__( self, help, name )
-
-    ## The Python type of this argument's value.
-    #  @return String
-    def basicType(self):
-        return 'float'
-
-    ## Retrieve the type associated with this argument as defined by the Production System.
-    #  @return String
-    def type(self):
-        return 'float'
-
-
-    def metaType(self):
-        return 'plain'
-
-    
-    def typeFormat(self):
-        return 'g'
-
-
-    def toPython(self,val):
-        """Turn a command line argument string into an float python object"""
-        try: return float(val)
-        except ValueError :
-            raise TransformArgumentError( '%s=%s is not of type %s' %
-                                          (self.name(), repr(val), self.basicType()) )
-
-
-
-class StringArg( Argument ):
-    """Basic argument type. Any string."""
-    def __init__(self,help,name='default'):
-        Argument.__init__(self,help,name)
-
-
-    def basicType(self):
-        return 'str'
-
-
-    def type(self):
-        return 'string'
-
-
-    def metaType(self):
-        return 'plain'
-
-
-    def toPython(self,val):
-        try: return str(val+'')
-        except TypeError :
-            raise TransformArgumentError( '%s=%s is not of type %s' %
-                                          (self.name(), repr(val), self.basicType()) )
-
-
-
-
-class StringChoicesArg( ArgChoices, StringArg ):
-    """A string from a possible list of strings. Tested before running"""
-    def __init__(self,choices,help,name='default',caseSensitive=False):
-        ArgChoices.__init__(self, choices)
-        StringArg.__init__(self,help,name)
-        self._caseSensitive = caseSensitive
-
-
-    def setDefault(self,default):
-        """Add check that the default value is one of the allowed values"""
-        if default is not None:
-            try: default = self.toPython(default)
-            except TransformDefinitionError :
-                raise TransformDefinitionError( 'Default value %s=%s is not of type %s' %
-                                                (self._name, repr(default), self.basicType()) )
-            if not self.checkChoices(default):
-                raise TransformDefinitionError( 'Default value %s=%s is not one of %s' %
-                                                (self._name, repr(default), self.choices()) )
-
-        Argument.setDefault(self,default)
-
-
-    def setValue(self,val):
-        """If case insensitive, convert value to the correct case (if found in choices)"""
-        if not self._caseSensitive:
-            #convert value to the correct case
-            valUpper = val.upper()
-            for c in self.choices():
-                if valUpper == c.upper() and val != c:
-                    self.logger().warning( 'Changing case of %s to %s', val, c )
-                    val = c
-                    break
-        Argument.setValue(self,val)
-
-
-    def preRunAction(self):
-        """Return boolean indicating if <argVal> is one of choices"""
-        val = self.value()
-        name = self.name()
-        choices = self.choices()
-        if not ArgChoices.checkChoices(self,val):
-            raise TransformArgumentError( '%s=%r is not one of %s' %
-                                          (name, val, ','.join(choices)) )
-        self.logger().debug( '%s is in list %s -> OK', repr(val), repr(choices) )
-
-            
-#
-# end of class StringChoicesArg
-#
-
-class BoolArg( ArgChoices, Argument ):
-    """Basic argument type. A boolean. Recognised input (string) values:
-    'False', 'True' (case insensitive) """
-    
-    def __init__(self,help,name='default'):
-        ArgChoices.__init__(self,['True', 'False'])
-        Argument.__init__(self,help,name)
-
-
-    def basicType(self):
-        return 'bool'
-
-
-    def type(self):
-        return 'bool'
-
-
-    def metaType(self):
-        return 'plain'
-
-
-    def toPython(self,val):
-        if isinstance(val, bool): return val is not False
-        if isinstance(val, str):
-            if val.lower() == 'true':  return True
-            if val.lower() == 'false': return False
-            raise TransformArgumentError( '%s=%r is not one of %s' %
-                                          (self.name(), val, ','.join(self.choices())) )
-        else:
-            raise TransformArgumentError( '%s=%r is not of type %s' %
-                                          (self.name(), val, self.basicType()) )
-
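A small sketch of the case-insensitive parsing (assuming the PyJobTransformsCore
package is importable, since Argument pulls in TransformLogger):

    b = BoolArg('Enable verbose output', name='verbose')
    print(b.toPython('TRUE'))   # True: strings are matched case-insensitively
    print(b.toPython(False))    # False: booleans pass through unchanged
    b.setValue('false')
    print(b.value())            # False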
-
-
-
-class FileArg( StringArg ):
-    def __init__(self,help,type,name='default', **kwargs ):
-        StringArg.__init__(self,help,name)
-        self._fileType = type
-        self._fileInfo = None
-        try:
-            self._temporary = kwargs[ 'temporary' ]
-        except KeyError:
-            self._temporary = False
-
-    def fileInfo( self ):
-        return self._fileInfo
-    
-    def __bool__(self):
-        """Return whether an input filename is given"""
-        return Argument.__bool__(self) and self.value() != 'NONE'
-
-
-    def type(self):
-        return 'LFN'
-
-
-    def setValue(self,value):
-        """Convert NONE value to all upper case"""
-        if isinstance( value, str ):
-            # treat empty string as NONE
-            if value == '':
-                value = 'NONE'
-            else:
-                #convert value to the correct case
-                valUpper = value.upper()
-                if valUpper == 'NONE' and value != 'NONE':
-                    self.logger().info( 'Changing case of %s to %s', value, valUpper )
-                    value = valUpper
-        if value != self.originalValue():
-            self.__eventCount = None
-            self.__fields = {}
-        Argument.setValue(self,value)
-
-
-    def metaData(self):
-        """By default no metadata"""
-        return {}
-
-    
-    def getGUID(self):
-        if not self: return None
-        return self._fileType.getGUID(self.value())
-
-
-    def checkFile(self):
-        """check that file exists (possibly with attempt number) and is non-empty"""
-        val = self.value()
-        if val.startswith( 'LFN:' ):
-            self.logger().info( '%s is an LFN. Omitting any local file checks.', val )
-            return
-        if fileutil.getsize(val) == 0:
-            raise InputFileError( val, 'empty file. Argument %s' % (self.name()), 'TRF_INFILE_EMPTY' )
-                   
-
-    def fileSize(self):
-        """File size in bytes"""
-        if not self:
-            return -1
-        return fileutil.getsize(self.value())
-
-    
-    def fileType(self):
-        if self:
-            return self._fileType.type(self.value())
-        else:
-            return self._fileType.type()
-
-
-    def fileContents(self):
-        if self:
-            return self._fileType.contents(self.value())
-        else:
-            return self._fileType.contents()
-
-
-    def checkFileType(self):
-        """Check if filename ends with .<type>"""
-        return self._fileType.checkType(self.value())
-
-
-    def checkFileContents(self):
-        """Check if filename ends with <suffix>"""
-        return self._fileType.checkContents(self.value())
-
-
-    def baseFilename(self):
-        """Filename without the path and type"""
-        return self._fileType.baseFilename(self.value())
-
-
-    def bareFilename(self):
-        """Filename without the path, the contents and the type."""
-        return self._fileType.bareFilename(self.value())
-
-
-
-class DataFileArg( FileArg ):
-    def __init__(self,help,type,name='default', **kwargs ):
-        FileArg.__init__(self,help,type,name, **kwargs) #temporary
-        self.__eventCount = None
-        # interpreted filename fields as used in the MC production system
-        self.__fields = {}
-        try:
-            self._intermediate = kwargs[ 'intermediate' ]
-        except KeyError:
-            self._intermediate = False
-
-    @staticmethod
-    def extractDataset( filename, omitFromName = None ):
-        """Extracts dataset from filename and return a tuple of the dataset name 
-        and the filename. If omitFilename is True, only the dataset is returned."""
-        if omitFromName is None:
-            omitFromName = True
-        originalFN = filename
-        dirName = os.path.dirname( filename )
-        filename = os.path.basename( filename )
-        try:
-            hashPos = filename.index( '#' )
-        except ValueError:
-            return "", originalFN
-        singleHash = True
-        # Check if the format used includes the '##'.
-        try:
-            if filename[ hashPos + 1 ] == '#': # double hash encountered
-                singleHash = False
-        except IndexError: # problem with format of filename. filename ends with a single '#'!
-            hashPos = -1
-            print( "Error trying to extract dataset from %s." % filename )
-        dataset = filename[ 0 : hashPos ]
-        if omitFromName:
-            dsPrefix = ''
-        else:
-            dsPrefix = dataset + '#'
-        if singleHash:
-            # 'myCoolDataset#myCoolDataset._0001.crp'
-            if dirName:
-                fname = os.sep.join( [ dirName, dsPrefix + filename[ hashPos + 1 : ] ] )
-            else:
-                fname = dsPrefix + filename[ hashPos + 1 : ]
-            return dataset, fname
-        else: # double hash encountered
-            # 'myCoolDataset##._0001.crp'
-            try:
-                fname = dsPrefix + dataset + filename[ hashPos + 2 : ]
-            except IndexError: # problem with format of filename. filename ends with a double '#'!
-                print( "Error trying to extract filename from %s." % filename )
-            else:
-                if dirName:
-                    fname = os.sep.join( [ dirName, fname ] )
-                return dataset, fname
-        return dataset, originalFN
-
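The dataset syntax handled above, as a sketch (file names are illustrative):

    # Single-hash form: 'dataset#filename'
    print(DataFileArg.extractDataset('myDS#myDS._0001.pool.root'))
    # -> ('myDS', 'myDS._0001.pool.root')

    # Double-hash shorthand: 'dataset##suffix' expands to dataset + suffix
    print(DataFileArg.extractDataset('myDS##._0001.pool.root'))
    # -> ('myDS', 'myDS._0001.pool.root')

    # Keep the dataset prefix in the returned filename:
    print(DataFileArg.extractDataset('myDS#f.root', omitFromName=False))
    # -> ('myDS', 'myDS#f.root')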
-    #
-    # Member functions from Argument (overriding them)
-    #
-    def preRunAction(self):
-        """Set pool message level"""
-        PoolDataFile.setMessageLevel(PoolDataFile.defaultMessageLevel,self.logger())
-
-    #
-    # Extra member functions
-    #
-    def eventCount(self):
-        if self.__eventCount is None and self:
-            self.logger().info("Counting events of %s...", self.originalValue())
-            start = time.time()
-            self.__eventCount = self._fileType.eventCount( self )
-            if self.__eventCount is not None:
-                duration = time.time() - start
-                megaBytes = self.fileSize() / float( 1024**2 )
-                if duration > 0.0:
-                    eventsPerSec = self.__eventCount / duration
-                    megasPerSec  = megaBytes / duration
-                else:
-                    eventsPerSec = 0.0
-                    megasPerSec  = 0.0
-                self.logger().info("Counted %d events (%.1f MB) in %.1f seconds --> %.1f events/s and %.1f MB/s",
-                                   self.__eventCount, megaBytes, duration, eventsPerSec, megasPerSec)
-            else:
-                self.logger().info("No event count available for %s", self.originalValue() )
-        return self.__eventCount
-
-
-class InputFileArg( FileArg ):
-    """Input file. Existence of file is checked before run"""
-    def __init__( self, help, type, name = 'default', **kwargs ):
-        FileArg.__init__( self, help, type, name, **kwargs )
-        
-
-    def metaType(self):
-        return 'inputLFN'
-
-
-    def preRunAction(self):
-        """Check that the file exists, and is readable"""
-        if not self: return
-        self.checkFile()
-        self.logger().debug( 'Inputfile %s is usable -> OK', self.originalValue() )
-        
-
-    def eventCount(self):
-        return None
-
-    
-    def metaData(self):
-        """No metadata"""
-        return {}
-
-
-
-class InputDataFileArg( DataFileArg ):
-    """List of input data files. Existence and readability of all files is checked before run"""
-    def __init__( self, help, type, name = 'default', **kwargs ):
-        DataFileArg.__init__( self, help, type, name, **kwargs )
-        self._fileInfo = {}
-        
-    def __bool__(self):
-        """Return whether an input filename list is given"""
-        return Argument.__bool__( self ) and len( self._value ) > 0
-        
-    def basicType(self):
-        return 'list'
-
-    def type(self):
-        return 'LFNlist'
-
-    def metaType(self):
-        return 'inputLFNlist'
-
-    def preRunAction(self):
-        """Execute checkFile routines"""
-        if not self: return
-        DataFileArg.preRunAction(self)
-        self.checkFile()
-        if not self._temporary:
-            self.prepareFileInfo()
-
-    def checkFile(self):
-        # The existence test was previously done here; it is now done when the filename is first stored in the object. See trfutil.py::getCorrectedFilename().
-        return
-
-    def value( self, pos = None, getDataset = None, omitFromName = None ):
-        """Attempt to extract a dataset name from the value provided. This is a Tier0 request.
-        pos can take integer values 0-n to refer to specific values in its list.
-        If pos is set to None, the return value will be entire list is returned. 
-        If getDataset is True, (dataset,filename) tuple is returned."""
-        if omitFromName is None:
-            omitFromName = True
-        if getDataset is None:
-            getDataset = False
-        if pos is None:
-            if getDataset:
-                return [ DataFileArg.extractDataset( x, omitFromName ) for x in self._value ] # return a list of (dataset, filename) tuples.
-            return [ DataFileArg.extractDataset( x, omitFromName )[1] for x in self._value ] # return a list of filenames.
-        elif pos >= 0 and pos < len( self._value ):
-            if getDataset:
-                return DataFileArg.extractDataset( self._value[ pos ], omitFromName ) # return (dataset, filename) tuple
-            return DataFileArg.extractDataset( self._value[ pos ], omitFromName )[ 1 ] # return just the filename
-        else:
-            raise IndexError( "Could not get dataset/filename tuple. The index provided %s is out of range." % pos )
-
-    def setValue( self, value ):
-        if value != self.originalValue():
-            # these caches live in DataFileArg, so the name-mangled attribute
-            # names must be used to actually reset them from this subclass
-            self._DataFileArg__eventCount = None
-            self._DataFileArg__fields = {}
-        FileArg.setValue( self, value )
-
-    def toPython(self,valIn):
-        """If valIn is a list, check if the names are in the dataset format.
-        If so, convert to string for further processing later else just return
-        valIn as it is. Strings of space or comma separated entries will be 
-        converted to lists accordingly. There is also support for parallel-
-        vector notation."""
-        if isinstance( valIn, list ):
-            valInStr = valIn.__str__()
-            # check if dataset prefix or parallel vector format is found in the list items.
-            if re.search("[#[]", valInStr ):
-                # convert list to a string and continue processing to extract dataset.
-                valIn = valInStr[1:-1].replace( "'", "" ).replace(" ","")
-            else:
-                return valIn
-        # make a list of python types out of the strings
-        if isinstance( valIn, str ):
-            if valIn == 'NONE': return []
-            # parallel vector notation support
-            try:
-                return expandStringToList( valIn )
-            except Exception as msg:
-                raise TransformDefinitionError( '%s=%s: %s' % ( self.name(), valIn, msg ) )
-        else:
-            # if we get here, there is problem
-            raise TransformDefinitionError( '%s=%s: value is not a list or a string' % (self.name(),valIn) )
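
Since the bracket expansion itself is delegated to trfutil.expandStringToList, here is a rough standalone sketch of the parallel-vector semantics (assumptions: equal-length bracket groups expanded index-by-index; mixing top-level commas with brackets is not handled here):

```python
import re

def expand_parallel(value):
    """Sketch of the parallel-vector notation accepted above:

        'a.root,b.root'             -> ['a.root', 'b.root']
        'abc.AOD.[A,B]._[1,2].root' -> ['abc.AOD.A._1.root',
                                        'abc.AOD.B._2.root']
        'NONE'                      -> []
    """
    if value == 'NONE':
        return []
    groups = [g.split(',') for g in re.findall(r'\[([^]]*)\]', value)]
    if not groups:                       # plain comma-separated list
        return value.split(',')
    n = len(groups[0])
    if any(len(g) != n for g in groups):
        raise ValueError('parallel vectors differ in length: %s' % value)
    template = re.sub(r'\[[^]]*\]', '{}', value)
    return [template.format(*(g[i] for g in groups)) for i in range(n)]
```
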
-
-    def prepareFileInfo(self):
-        fInfos = self._fileInfo
-        fInfoTemp = {}
-        # ensure GUID consistency (determine once per filename)
-        for ds, filename in self.value( getDataset = True ):
-            mData = self._fileType.getMetaData( filename )
-            mData.update( { 'dataset' : ds } )
-            try:
-                guid = copy.copy( fInfos[ filename ].guid() )
-                if guid is None:
-                    raise Exception
-            except ( KeyError, Exception ):
-                # create new GUID
-                fInfoTemp[ filename ] = FileInfo( filename, self._fileType.getGUID( filename ), mData )
-            else:
-                # add the metadata
-                fInfoTemp[ filename ] = FileInfo( filename, guid, mData )
-        self._fileInfo = fInfoTemp
-#        events = self.eventCount()
-#        if events is not None:
-#            fInfo.addMetaData( { 'events' : events } )
-
-    def fileSize( self, filename = None ):
-        """File size in bytes of all files in the list combined"""
-        if not self: return -1
-        if filename is None:            
-            totalSize = 0
-            for f in self.value():
-                totalSize += fileutil.getsize( f )
-            return totalSize
-        if filename in self.value():
-            return fileutil.getsize( filename )
-        return -1
-        
-
-    def getGUID(self):
-        if not self: return None
-        if self._fileInfo:
-            return [ x.guid() for x in self._fileInfo.values() ]  # _fileInfo maps filename -> FileInfo
-        return [ self._fileType.getGUID(f) for f in self.value() ]
-
-
-    def fileType(self):
-        """File type of the first file in the list"""
-        if self:
-            try:
-                val = self.value()[0]
-            except Exception:
-                pass
-            else:
-                return self._fileType.type( val )
-        return self._fileType.type()
-
-
-    def fileContents(self):
-        """File contents of the first file in the list"""
-        if self:
-            try:
-                val = self.value()[0]
-            except Exception:
-                pass
-            else:
-                return self._fileType.contents( val )
-        return self._fileType.contents()
-
-
-
-    def checkFileType(self):
-        """Check if all filenames have expected type"""
-        for f in self.value():
-            if not self._fileType.checkType(f):
-                return False
-        return True
-
-
-    def checkFileContents(self):
-        """Check if all filenames have expected contents"""
-        for f in self.value():
-            if not self._fileType.checkContents(f):
-                return False
-        return True
-
-    def baseFilename(self):
-        """First filename without the path and type"""
-        try:
-            return self._fileType.baseFilename( self.value()[0] )
-        except Exception:
-            return ""
-
-    def bareFilename(self):
-        """First filename without the path, the contents and the type."""
-        try:
-            return self._fileType.bareFilename( self.value()[0] )
-        except Exception:
-            return ""
-
-
-class InputTarFileArg( InputFileArg ):
-    def __init__( self, help, destdir = os.curdir, name = 'default', **kwargs ): #temporary = True ):
-        InputFileArg.__init__( self, help, FileType( type = 'tar|tar.gz|tgz' ), name, **kwargs )
-        self._filelist = []
-        self._destdir = destdir
-
-
-    def setValue(self,value):
-        self._filelist = []
-        super(InputTarFileArg,self).setValue(value)
-        
-
-    def filelist(self,pattern=None):
-        """Return a list of filenames in archive. <pattern> is an optional regular expression to match the filenames.
-        This list contains the filename paths as in the archive, not including the destination directory set in the constructor."""
-        if not self._filelist:
-            if not self:
-                raise TransformDefinitionError( "%s.filelist() called before value is set" % self.name() )
-            filename = self.value()
-            cmd = 'tar -t'
-            if self.isZipped(): cmd += ' -z'
-            cmd += ' -f %s' % filename
-            self.logger().info(cmd)
-            p = subprocess.Popen(cmd,bufsize=1,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
-            while p.poll() is None:
-                line = p.stdout.readline().strip()
-                if not line: continue
-                self._filelist.append(line)
-            # hack to work around a bug in poll (Python 2.4?)
-            p.wait()
-            while True:
-                line = p.stdout.readline().strip()
-                if not line: break
-                self._filelist.append(line)
-
-            if p.returncode:
-                self._filelist.clear()
-                raise TransformArgumentError( "Argument %s: problem reading filelist from archive %s" % (self.name(), filename) )
-            
-##             tar = tarfile.open(self.value())
-##             self._filelist = tar.getnames()
-##             tar.close()
-            
-        if pattern is None:
-            return self._filelist
-        else:
-            return [ s for s in self._filelist if re.match( pattern, s ) ]    
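
The commented-out tarfile variant above is the more robust route; a minimal sketch:

```python
import tarfile

def list_archive(path):
    """Sketch of the commented-out tarfile variant above: list the member
    names without shelling out to tar. The default read mode transparently
    handles .tar, .tar.gz and .tgz, so no isZipped() check is needed."""
    with tarfile.open(path) as tar:
        return tar.getnames()
```
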
-
-
-    def isZipped(self):
-        if not self: return False
-        val = self.value()
-        return val.endswith('.gz') or val.endswith('.tgz')
-
-
-    def extract(self):
-        """Extract all files from the archive and check their presence"""
-        # check that we have a value
-        if not self:
-            raise TransformDefinitionError( "Argument %s extract() called before value is set" % self.name() )
-        # create and goto subdir if needed
-        curdir = None
-        destdir = self._destdir
-        filename = self.value()
-        if destdir != os.curdir:
-            curdir = os.getcwd()
-            if not os.path.exists(destdir):
-                os.mkdir(destdir)
-            elif not os.path.isdir(destdir):
-                raise  TransformDefinitionError( "Argument %s: requested destination directory '%s' is not a directory" % (self.name(),destdir) )
-            os.chdir(destdir)
-        try:
-            # do the extraction while ignoring the original modification time
-            cmd = 'tar -mxv'
-            if self.isZipped(): cmd += ' -z'
-            cmd += ' -f %s' % filename
-            self.logger().info(cmd)
-            p = subprocess.Popen(cmd,shell=True)
-            status = p.wait()
-            if status:
-                raise TransformArgumentError( "Argument %s: problem extracting archive %s" % (self.name(),filename) )
-            # check that all files are extracted
-            for f in self.filelist():
-                if not os.path.exists( f ):
-                    raise TransformArgumentError( "Argument %s: failed to extract file %s from archive %s" % (self.name(),f,filename) )
-
-        finally:
-            # go back to original dir, whatever happens
-            if curdir: os.chdir(curdir)
-
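
Equivalently, the extract-and-verify step could use the tarfile module directly (a sketch; extractall() trusts member paths, so only use it on trusted archives):

```python
import os
import tarfile

def extract_and_check(path, destdir=os.curdir):
    """Sketch of extract() above using tarfile instead of a shell pipeline."""
    with tarfile.open(path) as tar:
        names = tar.getnames()
        tar.extractall(destdir)       # caution: trusts archive member paths
    missing = [n for n in names if not os.path.exists(os.path.join(destdir, n))]
    if missing:
        raise RuntimeError('failed to extract: %s' % ', '.join(missing))
    return names
```
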
-        
-    def destinationDir(self):
-        return self._destdir
-
-
-    def preRunAction(self):
-        """Check presence of file and untar/zip it"""
-        if not self: return
-        InputFileArg.preRunAction(self)
-        self.extract()
-
-
-
-class InputTarFileAndSetupArg( InputTarFileArg ):
-    def __init__( self, help, name, setupfile, envVars = None, destdir = os.curdir, **kwargs ): #temporary = True ):
-        """Input tarfile is untarred and a setup script is executed during preRunActions.
-        <setupfile>: name of the setup file. If the extension is .py, it will be executed
-                     as pyton (with execfile). Otherwise it will be sourced as a shell script.
-                     Any environment setup in the shell script is imported into the python
-                     environment (os.environ), as far as it matches <envVars>.
-        This full path to the file is searched for
-                   in the tarball: the file found in the highest directory will be taken.
-                   It can contain the usual (unix) shell wildcards.
-        <envVars>: a regular expression selecting the environment variables to export from
-                   the shell setup script into the python running environment. None (default) means all.
-                   In the case of a python script, all environment variables (when added to os.environ)
-                   are imported, and the value of <envVars> is ignored.
-        <destdir>: the destination directory for the untarred files (default: current directory)
-        """
-        InputTarFileArg.__init__( self, help, destdir, name, **kwargs )
-        self.__setupfile = setupfile
-
-
-    def untarAndSetup(self):
-        """Untar the tarball and run the setup file"""
-        # check that file is there and non-empty
-        self.checkFile()
-        # untar archive
-        self.extract()
-        # run the setup script
-        setup = self.findSetupFile()
-        if setup is None: # Failure to find it
-            raise TransformArgumentError( "Could not find setup script %s in %s=%s" % (self.__setupfile, self.name(), self.originalValue()) )
-        # if no setup file is given, don't run it
-        if setup == '': return
-        fullsetup = os.path.join(self.destinationDir(),setup)
-        if not os.path.exists(fullsetup):
-            raise TransformArgumentError( "Setup script %s not found after untarring %s=%s" % (self.__setupfile, self.name(), self.originalValue()) )
-        self.logger().info("Executing setup file %s", fullsetup)
-        setupdir,setupbase = os.path.split(fullsetup)
-        # go to the directory of the setup script
-        pwd = os.getcwd()
-        if setupdir and setupdir != os.curdir: os.chdir(setupdir)
-        if fullsetup.endswith('.py'):
-            from past.builtins import execfile
-            # avoid pollution of global namespace
-            env = {}
-            execfile( setupbase,env )
-        else:
-            envutil.source_setup( setupbase )
-        # go back to original directory
-        os.chdir(pwd)
-        
-
-    def preRunAction(self):
-        """Untar and run setup file. Raises an exception if setup file is not found."""
-        if not self: return
-        self.untarAndSetup()
-
-
-    def findSetupFile(self):
-        """Return the full sub-path to the setup file inside the tarball.
-        Does not include the destination directory.
-        Return '' (empty string) if a setup file is not specified.
-        Return None if the specified setup file is not found in the tarball."""
-        setup = self.__setupfile
-        if not setup: return ''
-        found = None
-        foundLevel = 99999
-        setupdir,setupbase = os.path.split(setup)
-        if setupdir == os.curdir: setupdir = ''
-        # get the number of sub-directories to match
-        if setupdir:
-            dircount = setupdir.count(os.sep) + 1
-        else:
-            dircount = 0
-
-        for f in self.filelist():
-            dirsplit = f.split(os.sep)
-            base = dirsplit[-1]
-            matchdir = os.sep.join(dirsplit[-dircount-1:-1])
-            fm=os.path.join(matchdir,base)
-            if fnmatch.fnmatch(fm,setup):
-                level = len(dirsplit) - 1
-                if level < foundLevel:
-                    foundLevel = level
-                    found = f
-
-        return found
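
A standalone sketch of the shallowest-match selection implemented above (find_shallowest is a hypothetical name):

```python
import fnmatch
import os

def find_shallowest(filelist, pattern):
    """Sketch of findSetupFile() above: among archive members whose trailing
    path components match <pattern> (shell wildcards allowed), return the one
    highest up in the directory tree."""
    best, best_level = None, None
    dircount = pattern.count(os.sep)          # directory components to match
    for f in filelist:
        parts = f.split(os.sep)
        tail = os.sep.join(parts[-(dircount + 1):])
        if fnmatch.fnmatch(tail, pattern):
            level = len(parts) - 1
            if best_level is None or level < best_level:
                best, best_level = f, level
    return best

# find_shallowest(['a/b/setup.sh', 'a/setup.sh'], 'setup.sh') -> 'a/setup.sh'
```
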
-
-
-
-class OutputFileArg( DataFileArg ):
-    """Baseclass for all output file classes. File is deleted before run (if needed). Existence of file is tested after run"""
-    def __init__( self, help, type, name = 'default', temporary = False, intermediate = False, **validationDict):
-        DataFileArg.__init__( self, help, type, name, temporary = temporary, intermediate = intermediate )
-        self.__validationDict = validationDict
-    
-    def metaType(self):
-        return 'outputLFN'
-    
-    def preRunAction(self):
-        """Remove output file before running"""
-        DataFileArg.preRunAction(self)
-        val = self.value()
-        self._fileInfo = None
-        if not self:
-            self.logger().debug( 'No output file expected. Nothing to be done.' )
-        elif fileutil.exists(val):
-            raise OutputFileError( val, 'already exists. Please use a different name. Argument %s' % self.name() )
-#            try:
-#                os.remove(val)
-#            except:
-#                raise OutputFileError( val, 'could not be removed before running. Argument %s' % self.name() )
-#            else:
-#                mess = 'Removed output file %s -> OK' % (val)
-    
-    def validateFile( self ):
-        if self._temporary or self._intermediate:
-            self.logger().info( '%s is a temporary/intermediate file. It will not be validated.', self.name() )
-            return
-        try:
-            self._fileType.validateFile( self, **self.__validationDict )
-#        except TransformValidationError:
-#            self.logger().info( 'Retrying validation of %s in %d seconds.' % ( self.name(), TRF_SETTING[ 'validationSleepInterval' ] ) )
-#            time.sleep( TRF_SETTING[ 'validationSleepInterval' ] )
-#            self._fileType.validateFile( self, **self.__validationDict )
-        except AttributeError:# file type does not implement validateFile()
-            pass
-    
-    def postRunAction(self):
-        """Check that output file exists."""
-        if not self:
-            self.logger().debug('No output file expected. Nothing to be done.')
-        else:
-            self.validateFile()
-            if self._temporary: return
-            self.prepareFileInfo()
-    
-    def getDataForAmi(self, alltheData):
-        if not self: 
-            return
-        if self._temporary or self._intermediate: 
-            # print ('basic_trfarg.py file is temporary or intermediate')
-            return
-        if isinstance(self._fileType, AODFile) or isinstance(self._fileType,ESDFile) or isinstance(self._fileType,DPDFile) or isinstance(self._fileType,EvgenFile) or isinstance(self._fileType,HitsFile):
-            try:
-                FT=self._fileInfo.metaData('fileType')
-                if FT is None: FT='unset'
-                necoll=self._fileType.writeSize(self)
-                if necoll is not None:
-                    # returns filetype, number_of_events and tuple_of_sizes
-                    alltheData.append(FT.upper())
-                    alltheData.append(necoll[0])
-                    alltheData.append(necoll[1])
-                    return
-            except Exception:
-                print ("basic_trfarg.py  exception caught:", sys.exc_type, ":", sys.exc_value)
-        else:
-            print ('basic_trfarg.py  Not checking object sizes for this file type')
-    
-    def prepareFileInfo(self):
-        """Populate _fileInfo with metadata and guid."""
-        ds, filename = self.value( getDataset = True )
-        mData = self._fileType.getMetaData( filename )
-        mData.update( { 'dataset' : ds,
-                        'size' : self.fileSize(),
-                        'events' : self.eventCount() } )
-        try:
-            guid = copy.copy( self._fileInfo.guid() )
-            if guid is None:
-                raise Exception
-        except ( KeyError, Exception ):
-            # create new GUID
-            self._fileInfo = FileInfo( filename, self._fileType.getGUID( filename ), mData )
-        else:    
-            self._fileInfo = FileInfo( filename, guid, mData )
-    
-    def value( self, getDataset = None, omitFromName = None ):
-        """Attempt to extract a dataset name from the value provided. This is a Tier0 request."""
-        if omitFromName is None:
-            omitFromName = True
-        if getDataset:
-            return DataFileArg.extractDataset( self._value, omitFromName ) # return (dataset, filename) tuple
-        return DataFileArg.extractDataset( self._value, omitFromName )[ 1 ] # return filename
-    
-    def metaData(self):
-        """output filenames are not added to the metadata"""
-        return {}
-    
-
-
-class OutputDataFileArg( OutputFileArg ):
-    """Class for input POOL data files"""
-    def __init__( self, help, type, name = 'default', temporary = False, intermediate = False, **validationDict ):
-        OutputFileArg.__init__( self, help, type, name, temporary, intermediate, **validationDict )
-
-
-if __name__ == '__main__':
-    a=InputDataFileArg("default",BSFile(),"default")
-
-    x = "abc.AOD.*.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc.AOD.0[1-7].root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc.AOD._1.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-    
-    x = "abc.AOD._1.root,abc.AOD._2.root,abc.AOD._3.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = ["abc.AOD._1.root", "abc.AOD._2.root", "abc.AOD._3.root"]
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc##.AOD._1.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc#abc.AOD._1.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-    
-    x = "abc.AOD._1.root,abc#abc.AOD._2.root,abc##.AOD._3.root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc.AOD.[A,B,C]._[1,2,3].[X,Y,Z].root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = ["abc.AOD._[1,2,3].root"]
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = "abc.AOD._[10,11,12].root,abc#abc.AOD._[21,22,23].root,abc##.AOD._[31,32,33].root"
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = ["abc.AOD._[10,11,12].root","abc#abc.AOD._[21,22,23].root","abc##.AOD._[31,32,33].root"]
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = '/afs/cern.ch/user/j/jboyd/gencomm/GetInterestingEvents/BSfiles/87764/evt.194586._0001.data,/afs/cern.ch/user/j/jboyd/gencomm/GetInterestingEvents/BSfiles/87764/DSTEST##.evt.194728._[0001,0002,0003].data,/afs/cern.ch/user/j/jboyd/gencomm/GetInterestingEvents/BSfiles/87764/DSTEST#DSTEST.evt.216134._0001.data'
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-    x = ["abc##.AOD._[SFO_1,SFO_4]._0001.[2,3]", "abc#abc.AOD._[SFO_1,SFO_4].[3,4]"]
-    a.setValue( x )
-    print ("\nInput: %s" % x)
-    for ds,fn in a.value( getDataset = True ):
-        print ("dataset: %s, filename: %s" % (ds,fn))
-
-
-
diff --git a/Tools/PyJobTransformsCore/python/envutil.py b/Tools/PyJobTransformsCore/python/envutil.py
index 387f799c88855f819ca60431498413a9a86954e2..c007120ca9839c964b203a334a0f3f3e0e3018e7 100755
--- a/Tools/PyJobTransformsCore/python/envutil.py
+++ b/Tools/PyJobTransformsCore/python/envutil.py
@@ -1,21 +1,12 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
+# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
 import os, sys, re, glob
 from PyJobTransformsCore import fileutil
-#from exceptions import EnvironmentError
-
-from future import standard_library
-standard_library.install_aliases()
-import subprocess
 
 __doc__ = """Environment variables utilities"""
 
 #some names of environment variables to avoid typing mistakes
-PYTHONPATH = 'PYTHONPATH'
 LD_LIBRARY_PATH = 'LD_LIBRARY_PATH'
-PATH = 'PATH'
 
 # possible wildcards used in unix shell filename completion (and in module glob)
 filenameWildCards = r'\?|\*|\[.*\]'
@@ -34,207 +25,6 @@ def has_wildcards(filename):
         return True
     else:
         return False
-    
-
-
-def _get_sys_path_extras():
-    extras = []
-    #start with clear python (the same a the current one!)
-    cmd = 'unset PYTHONPATH ; %s -c "import sys ; print(sys.path)"' % (sys.executable)
-    status,output = subprocess.getstatusoutput(cmd)
-    if status: raise EnvironmentError('Can not determine python sys.path extras')
-    extras = eval( output )
-    # remove first entry
-    if len(extras) <= 1:
-        return []
-    else:
-        return extras[1:]
-    
-
-def update_syspath():
-    pythonPath = os.environ.get(PYTHONPATH)
-    if pythonPath: 
-        sys.path = sys.path[0:1] + pythonPath.split(os.pathsep) + _get_sys_path_extras()
-        
-
-def update_env(newenv):
-    """Update os.environ, and sys.path from PYTHONPATH"""
-    # update os.environ
-    os.environ.update(newenv)
-    # PYTHONPATH needs to be propagated to sys.path
-    if newenv.get(PYTHONPATH): update_syspath()
-    
-
-def append_path_env( env_name, what, sep=os.pathsep ):
-    """Append <what> to pathlike environment variable <env_name>.
-    (A list of paths separated by <sep>).
-    If the environment variable does not yet exist, it will
-    be created and its value will be set to <what>"""
-    var = os.environ.get(env_name)
-    if not var:
-        newvar = what
-    else:
-        newvar = var + sep + what
-
-    update_env( { env_name : newvar } )
-
-
-def prepend_path_env( env_name, what, sep=os.pathsep ):
-    var = os.environ.get(env_name)
-    if not var:
-        newvar = what
-    else:
-        newvar = what + sep + var
-
-    update_env( { env_name : newvar } )
-
-
-def append_path_env_if( env_name, what, sep=os.pathsep ):
-    """As append_path_env, except that <what> is only appended if it
-    is not already present in the path."""
-    var = os.environ.get(env_name)
-    newvar = None
-    if not var:
-        newvar = what
-    elif what not in var.split(sep):
-        newvar = var + sep + what
-
-    if newvar: update_env( { env_name : newvar } )
-    
-
-def prepend_path_env_if( env_name, what, sep=os.pathsep ):
-    var = os.environ.get(env_name)
-    newvar = None
-    if not var:
-        newvar = what
-    elif what not in var.split(sep):
-        newvar = what + sep + var
-
-    if newvar: update_env( { env_name : newvar } )
-    
-
-def append_path_env_force( env_name, what, sep=os.pathsep ):
-    """As append_path_env, except that <what> will be removed
-    first if it is already present (not at the end)"""
-    var = os.environ.get(env_name)
-    newvar = None
-    if not var:
-        newvar = what
-    else:
-        varList = var.split(sep)
-        try:
-            idx = varList.index(what)
-        except ValueError:
-            newvar = var + sep + what
-        else:
-            if idx != len(varList) - 1:
-                varList.remove(what)
-                varList.append(what)
-                newvar = sep.join(varList)
-
-    if newvar: update_env( { env_name : newvar } )
-
-
-def prepend_path_env_force( env_name, what, sep=os.pathsep ):
-    """As prepend_path_env, except that <what> will be removed
-    first if it is already present (not at the beginning)"""
-    var = os.environ.get(env_name)
-    newvar = None
-    if not var:
-        newvar = what
-    else:
-        varList = var.split(sep)
-        try:
-            idx = varList.index(what)
-        except ValueError:
-            newvar = what + sep + var
-        else:
-            if idx != 0:
-                varList.remove(what)
-                varList.insert(0,what)
-                newvar = sep.join(varList)
-
-    if newvar: update_env( { env_name : newvar } )
-
-
-
-def remove_path_env( env_name, whatRE, sep=os.pathsep ):
-    """Remove all entries that match the regular expression <whatRE>
-    from the path contained in environment variable named <env_name>."""
-    var = os.environ.get(env_name)
-    if not var: return
-    varList = var.split(sep)
-    pat = re.compile(whatRE)
-    newList = []
-#    print ("Starting with %s" % var)
-    for item in varList:
-        if not pat.search(item):
-            newList.append(item)
-        else:
-            pass
-#            print ("Removing %s" % item)
-           
-    newvar = sep.join(newList)
-#    print ("Left with: %s" % newvar)
-    update_env( { env_name : newvar } )
-
-
-
-def insert_path_env( env_name, index, what, sep=os.pathsep ):
-    var = os.environ.get(env_name)
-    if not var:
-        newvar = what
-    else:
-        valList = var.split(sep)
-        n = len(valList)
-        if index >= n:
-            newvar = var + sep + what 
-        elif index <= -n:
-            newvar = what + sep + var
-        else:
-            valList.insert(index,what)
-            newvar = sep.join(valList)
-
-    update_env( { env_name : newvar } )
-
-            
-
-def replace_path_env( env_name, findRE, replaceRE, sep=os.pathsep ):
-    """Replace all occurences matching the regular expression <findRE>
-    with <replaceRE> in the path entries given in the environment variable <env_name>.
-    <replaceRE> may contain regular expression references like \1 to refer to groups
-    defined in <findRE>. (see module re and function sub() therein).
-    It returns the number of replacements done."""
-    var = os.environ.get(env_name)
-    if not var: return
-    oldList = var.split(sep)
-    pat = re.compile(findRE)
-    newList = []
-    nReplaced = 0
-    for item in oldList:
-        newItem = pat.sub(replaceRE,item)
-        if newItem != item: nReplaced += 1
-        newList.append(newItem)
-
-    newvar = sep.join(newList)
-    update_env( { env_name : newvar } )
-    return nReplaced
-
-
-def find_path_env( env_name, whatRE, sep=os.pathsep ):
-    """Find all entries in the path in environment variable <env_name> that match
-    the regular expression given in <whatRE>"""
-    found = [ ]
-    var = os.environ.get(env_name)
-    if var:
-        varList = var.split(sep)
-        pat = re.compile(whatRE)
-        for item in varList:
-            if pat.search(item):
-                found.append(item)
-
-    return found
-    
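
A short usage sketch of the path-manipulation family above, assuming the functions are in scope and a POSIX ':' path separator (the variable name and values are hypothetical):

```python
import os

os.environ['MYPATH'] = '/a:/b:/c'
append_path_env_if('MYPATH', '/b')       # no-op: '/b' already present
prepend_path_env_force('MYPATH', '/c')   # MYPATH == '/c:/a:/b'
remove_path_env('MYPATH', r'^/a$')       # MYPATH == '/c:/b'
print(find_path_env('MYPATH', r'/b'))    # ['/b']
```
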
 
 
 def find_file_split( filename, dirlist = [ os.getcwd() ], access = os.R_OK, depth = 0 ):
@@ -421,34 +211,6 @@ def find_files_env( filename, env_var_name, access = os.R_OK, sep = defaultPathS
     return find_files( filename, envList, access, depth )
 
 
-def find_library( lib ):
-    """Search for library in LD_LIBRARY_PATH. Return full path of library if found, and None otherwise."""
-    global _libraryNameRE
-    libfull = None
-    if _libraryNameRE.search(lib): # fully specified ending
-        libfull = find_file_env( lib, LD_LIBRARY_PATH )
-    else:
-        libname = lib
-        #could contain wildcards
-        libsfull = find_files_env( libname, LD_LIBRARY_PATH )
-        # filter results for valid shared library ending (basically to get rid of *.cmtref)
-        libsfull = [ l for l in libsfull if _libraryNameRE.search(l) ]
-        if not libsfull:
-            # add generic ending
-            libname = lib + '.so*'
-            libsfull = find_files_env( libname, LD_LIBRARY_PATH )
-            # filter results for valid shared library ending (basically to get rid of *.cmtref)
-            libsfull = [ l for l in libsfull if _libraryNameRE.search(l) ]
-        if libsfull:
-            libfull = libsfull[0] # take the first
-                
-    if not libfull and not lib.startswith('lib'):
-        # try with prefix 'lib'
-        libfull = find_library( 'lib' + lib )
-
-    return libfull
-
-
 def find_libraries( lib ):
     """Search for libraries in LD_LIBRARY_PATH. Return list of full paths of libraries if found.
     <lib> can contain wildcards, in which case all files matching the wildcard will be returned.
@@ -540,99 +302,3 @@ def find_python_modules( mod ):
             f = find_python_module_file( m )
             if f: found.append( f )
         return found
-
-
-def find_python_module( mod ):
-    """Search for python module in sys.path (PYTONPATH + some system paths).
-    Return full path of python module file if found, and None otherwise."""
-    b,e = os.path.splitext(mod)
-    if e in _pyext: # Filename with extension, with or without wildcards
-        # turn python module syntax into filename syntax
-        filename = b.replace('.',os.sep) + e
-        return find_file( filename, sys.path )
-    elif not has_wildcards( mod ): # module name (no extension), without wildcards
-        return find_python_module_file( mod.replace(os.sep, '.') )
-    else: # module name (no extension) with wildcards
-        # first expand the wildcards
-        filename = mod.replace('.',os.sep) + '.*'
-        for df in find_files_split( filename, sys.path ):
-            f = df[1]
-            b,e = os.path.splitext(f)
-            if e in _pyext:
-                modname = b.replace(os.sep, '.')
-                mf = find_python_module_file( modname )
-                if mf: return mf
-
-    return None
-        
-
-def find_executable( exe ):
-    """Find executable file in PATH"""
-    return find_file_env( exe, PATH, os.X_OK )
-
-
-def examine_library(lib):
-    """Examine why shared library <lib> can not be loaded.
-    It returns the list of libraries that can not be found."""
-    full_lib = find_library(lib)
-    if not full_lib:
-        return [lib]
-    # library is found. Check on dependents
-    missLibs = []
-    lddOut = subprocess.getoutput( 'ldd %s' % (full_lib) )
-    notFoundRE = re.compile(r"^\s*(?P<lib>[\w.-]+)\s+.*not found")
-    for line in lddOut.split(os.linesep):
-        match = notFoundRE.search( line )
-        if match:
-            misLib = match.group('lib')
-            missLibs.append( misLib )
-
-    return missLibs
-
-    
-def getenv_from_output(cmd,envRE=None):
-    """Return the enviroment in a dictionary as extracted from the output generated by
-    shell command <cmd>. Only environment variable names that match the regular expression envRE
-    will be included. Recognised syntax: name=value at the beginning of a line (spaces allowed).
-    Does *not* change os.environ"""
-
-    status,output = subprocess.getstatusoutput( cmd )
-    if status != 0:
-        raise EnvironmentError( 'Error executing command: %s. exitstatus=%d  output=\"%s\"' % (cmd,status,output) )
-    if not envRE: envRE=r'\S+?'
-    newenv = { }
-    envPat = re.compile(r'(?:^|;)\s*(%s)\s*=\s*(\S+?)\s*(?:;|$)' % (envRE))
-    for line in output.split(os.linesep):
-        match = envPat.search(line)
-        if match:
-            name = match.group(1)
-            value = match.group(2)
-            if value:
-                # remove quotes around strings
-                quotes="\"\'"
-                if value[0] in quotes and value[-1] in quotes:
-                    value = value[1:-1]
-                newenv[name] = value
-
-    return newenv
-
-
-def source_setup(setupshell,options='',envRE=None):
-    """Extract (part of) the environment of setup shell script <setupshell> using regular expressions <envRE>
-    to select which environment variables to set up. envRE=None (default) means all environment vars.
-    The <options> are given as arguments to the setup script. The environment is added to os.environ."""
-    if not fileutil.exists(setupshell):
-        raise EnvironmentError( 'setup script %s not found' % (setupshell) )
-
-    # make sure that path starts with . (dot) if no absolute path is given
-    # This is needed and desired because when sourcing a file, it is looked for
-    # in PATH, whereas this is never what people intend (always want local)
-    if os.path.isabs(setupshell) or setupshell.startswith(os.curdir) or setupshell.startswith(os.pardir):
-        fullsetup = setupshell
-    else:
-        fullsetup = os.path.join(os.curdir,setupshell)
-    source_cmd = 'source %s %s' % ( fullsetup, options )
-    print (source_cmd)
-    newenv = getenv_from_output( source_cmd + '; printenv', envRE )
-    update_env(newenv)
-    return newenv
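
A minimal modern sketch of the same source-and-capture trick using subprocess.run; it assumes a POSIX shell and 'env -0' (a GNU coreutils extension), and note that passing arguments to a sourced script is shell-dependent:

```python
import subprocess

def capture_env(setup_script, options=''):
    """Source <setup_script> in a subshell and read back its environment.
    NUL-separated 'env -0' output lets values containing newlines survive.
    No quoting of the script path is done; this is a sketch only."""
    cmd = '. %s %s >/dev/null 2>&1 && env -0' % (setup_script, options)
    out = subprocess.run(['sh', '-c', cmd], check=True,
                         stdout=subprocess.PIPE).stdout
    env = {}
    for entry in out.split(b'\0'):
        if b'=' in entry:
            name, _, value = entry.partition(b'=')
            env[name.decode()] = value.decode()
    return env

# os.environ.update(capture_env('./setup.sh'))   # mirrors update_env() above
```
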
diff --git a/Tools/PyJobTransformsCore/python/extraMetadata.py b/Tools/PyJobTransformsCore/python/extraMetadata.py
deleted file mode 100644
index ec41a5980677aee2fb1be9ea218131ad7ddb4735..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/extraMetadata.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-#
-#  Created by Alvin on 01/09/2009.
-#  
-#
-import re
-
-# Format of dictionary
-#
-# Dictionary must be called extraMetadataDict.
-#
-# key -  Arbitrary filter name
-# value - When a compiled pattern is provided, it is used to match against lines in the log file.
-#         Any other value will be treated as a forced insertion of a metadata.
-
-extraMetadataDict = \
-{ 
-'geometryVersion' : re.compile( r'TrfJobReport metaData_geometryVersion = (ATLAS-(?:GEO|CSC)\w*-[0-9\-]+)\s*\Z' ),
-'conditionsTag'   : re.compile( r'TrfJobReport metaData_conditionsTag = ((?:OFL|COM)COND-\w+-[0-9\-]+)\s*\Z' ),
-'beamType'        : re.compile( r'TrfJobReport metaData_BeamType = (\w+)\s*\Z' ),
-}
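
For illustration, a sketch of how such a dictionary would presumably be consumed, assuming extraMetadataDict from above is in scope (the log line is hypothetical; the real consumer lives elsewhere in the package):

```python
# Hypothetical log line; patterns are applied line-by-line with search().
line = 'TrfJobReport metaData_geometryVersion = ATLAS-GEO-10-00-01'
for name, value in extraMetadataDict.items():
    if hasattr(value, 'search'):            # compiled pattern: match the line
        m = value.search(line)
        if m:
            print(name, '->', m.group(1))   # geometryVersion -> ATLAS-GEO-10-00-01
    else:                                   # any other value: forced metadata
        print(name, '->', value)
```
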
diff --git a/Tools/PyJobTransformsCore/python/fileutil.py b/Tools/PyJobTransformsCore/python/fileutil.py
index f0eb54d4c405770ad213fe54b5235ba5b6da4e74..286791f07cb2af3e287b70a7e6a9e8173d29e854 100755
--- a/Tools/PyJobTransformsCore/python/fileutil.py
+++ b/Tools/PyJobTransformsCore/python/fileutil.py
@@ -1,7 +1,5 @@
 # Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
-from __future__ import print_function
-
 import os, sys, re, time
 from PyJobTransformsCore import dummyaccess, rfio
 import stat as statconsts
diff --git a/Tools/PyJobTransformsCore/python/full_trfarg.py b/Tools/PyJobTransformsCore/python/full_trfarg.py
deleted file mode 100755
index f42949ea9ab18d9efbf58b957810d71b8c04045c..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/full_trfarg.py
+++ /dev/null
@@ -1,715 +0,0 @@
-# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
-
-__doc__ = """End-user Fully specialised arguments that can be used in JobTransform implemenations."""
-
-import os
-import copy
-from PyJobTransformsCore.basic_trfarg import Argument, BoolArg, InputDataFileArg, InputTarFileAndSetupArg, InputTarFileArg, IntegerArg, OutputDataFileArg, OutputFileArg, StringArg
-from PyJobTransformsCore.envutil import find_joboptions
-from PyJobTransformsCore.trfutil import AODFile, BSFile, DPDFile, ESDFile, EvgenFile, FTKIPFile, HistogramFile, HitsFile, JiveXMLTGZFile, MonitorHistFile, NtupleFile, RDOFile, SANFile, expandStringToList, strip_suffix
-from PyJobTransformsCore.trferr import JobOptionsNotFoundError, TransformArgumentError, TransformDefinitionError
-from PyJobTransformsCore import fileutil
-
-
-class InputGeneratorFileArg(InputTarFileArg):
-    """Input file used by the particle generator to generate events"""
-    def __init__(self,help='default',destdir='.',name='default', **kwargs ):
-        InputTarFileArg.__init__(self,help,destdir,name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-    
-
-class InputEvgenFileArg(InputDataFileArg):
-    """Input file that contains generated events"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,EvgenFile(),name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputEvgenFileArg(OutputDataFileArg):
-    """Output file that contains generated events"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        OutputDataFileArg.__init__(self,help,EvgenFile(),name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-
-
-class InputHitsFileArg(InputDataFileArg):
-    """Input file that contains hits"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,HitsFile(),name,**kwargs)
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputHitsFileArg(OutputDataFileArg):
-    """Output file that contains hits"""
-    def __init__(self,help='default',name='default', temporary = False , intermediate = False, **validationDict ):
-        OutputDataFileArg.__init__(self,help,HitsFile(),name, temporary, intermediate, **validationDict)
-
-    def isFullArgument(self):
-        return True
-
-
-class InputRDOFileArg(InputDataFileArg):
-    """Input file that contains RDO's"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,RDOFile(),name,**kwargs)
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputRDOFileArg(OutputDataFileArg):
-    """Output file that contains RDO's"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict):
-        OutputDataFileArg.__init__(self,help,RDOFile(),name, temporary, intermediate, **validationDict)
-
-    def isFullArgument(self):
-        return True
-
-
-class InputBSFileArg(InputDataFileArg):
-    """Input file that contains BSs"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,BSFile(),name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputBSFileArg(OutputDataFileArg):
-    """Output file that contains BSs"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputDataFileArg.__init__(self,help,BSFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputSkimmedBSFileArg(OutputDataFileArg):
-    """Output file that contains skimmed BSs"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        vDict = { 'testIfExists' : False }
-        vDict.update( validationDict )
-        OutputDataFileArg.__init__(self,help,BSFile(),name, temporary, intermediate, **vDict )
-
-    def isFullArgument(self):
-        return True
-    
-    def prepareFileInfo( self ):
-        # Somewhat complicated case: the file could have '._0001.data' appended to its name, or may not exist at all
-        filename=self.value()
-        idx=filename.rfind("._")
-        if idx==-1 or not filename[idx+2:idx+3].isdigit():
-            filenameTDAQ=filename+"._0001.data"
-        else:
-            filenameTDAQ=filename[:idx]+"._0001.data"
-
-        #print "Filename",filename
-        #print "FilenameTDAQ",filenameTDAQ
-            
-        if  fileutil.exists(filename):
-            self.logger().info("Found skimmed bytestream file called %s", filename)
-            OutputDataFileArg.prepareFileInfo( self )
-            return
-        elif  fileutil.exists(filenameTDAQ):
-            self.logger().info("Found skimmed bytestream file called %s, renaming back to %s", filenameTDAQ, filename)
-            os.rename(filenameTDAQ,filename) #try - except?
-            # That's of course a hack that only works on a local file system.
-            OutputDataFileArg.prepareFileInfo( self )
-            return
-        else:
-            self.logger().info("No skimmed bytestream file corresponding to %s found.", filename)
-            return
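
A standalone sketch of the renaming rule applied above (tdaq_name is a hypothetical helper):

```python
def tdaq_name(filename):
    """Sketch of the rule above: the TDAQ writer may append a '._NNNN.data'
    counter to the requested name, or an existing '._NNNN' counter in the
    name is replaced by '._0001.data'."""
    idx = filename.rfind('._')
    if idx == -1 or not filename[idx + 2:idx + 3].isdigit():
        return filename + '._0001.data'
    return filename[:idx] + '._0001.data'

# tdaq_name('out.RAW')        -> 'out.RAW._0001.data'
# tdaq_name('out.RAW._0042')  -> 'out.RAW._0001.data'
```
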
-        
-class InputESDFileArg(InputDataFileArg):
-    """Input file that contains ESDs"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,ESDFile(),name,**kwargs)
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputESDFileArg(OutputDataFileArg):
-    """Output file that contains ESDs"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputDataFileArg.__init__(self,help,ESDFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class InputAODFileArg(InputDataFileArg):
-    """Input file that contains AODs"""
-    def __init__(self,help='default',name='default', **kwargs ):
-        InputDataFileArg.__init__(self,help,AODFile(),name,**kwargs)
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputAODFileArg(OutputDataFileArg):
-    """Output file that contains AODs"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputDataFileArg.__init__(self,help,AODFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class OutputSANFileArg(OutputFileArg):
-    """Output file that contains SANs"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputFileArg.__init__(self,help,SANFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class HistogramFileArg(OutputFileArg):
-    """Output file that contains histograms."""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputFileArg.__init__(self,help,HistogramFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class InputNtupleFileArg(InputDataFileArg):
-    """List of NTUP input files"""
-    def __init__(self,help='default', name='default', tree_names=None, **kwargs):
-        if tree_names is None:
-            tree_names = []
-        InputDataFileArg.__init__( self, help, NtupleFile( tree_names = tree_names ),name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-
-class OutputFTKIPFileArg(OutputDataFileArg):
-    '''Output file for FTK inputs'''
-    def __init__(self, help='default', name='default', temporary=False, intermediate=False, **validationDict):
-        OutputDataFileArg.__init__(self,help,FTKIPFile(),name, temporary, intermediate, **validationDict)
-
-    def isFullArgument(self):
-        return True
-    
-class OutputJiveXMLTGZFileArg(OutputDataFileArg):
-    '''Output for tar+gzipped JiveXML files'''
-    def __init__(self, help='default', name='default', temporary=False, intermediate=False, **validationDict):
-        OutputDataFileArg.__init__(self,help,JiveXMLTGZFile(),name, temporary, intermediate, **validationDict)
-
-    def isFullArgument(self):
-        return True
-        
-
-class NtupleFileArg(OutputFileArg):
-    """Output file that contains ntuples."""
-    def __init__( self, help = 'default', name = 'default', temporary = False, intermediate = False, tree_names = None, **validationDict ):
-        if tree_names is None:
-            tree_names = []
-        OutputFileArg.__init__( self, help, NtupleFile( tree_names = tree_names ),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class InputMonitorHistArg(InputDataFileArg):
-    """List of HIST input files"""
-    def __init__(self,help='default', name='default', **kwargs):
-        InputDataFileArg.__init__( self, help, MonitorHistFile(), name, **kwargs )
-
-    def isFullArgument(self):
-        return True
-
-
-class MonitorHistArg(OutputFileArg):
-    """Derived class for monitoring HIST."""
-    def __init__( self, help = 'default', name = 'default', temporary = False, intermediate = False, **validationDict ):
-        OutputFileArg.__init__( self, help, MonitorHistFile(), name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
-
-class InputDPDFileArg(InputDataFileArg): 
-    """Input file that contains DPD's""" 
-    def __init__(self,help='default',name='default', **kwargs ): 
-        InputDataFileArg.__init__(self,help,DPDFile(),name, **kwargs) 
-        
-    def isFullArgument(self): 
-        return True
-
-
-class OutputDPDFileArg(OutputDataFileArg):
-    """Output file that contains DPD's"""
-    def __init__(self,help='default',name='default', temporary = False, intermediate = False, **validationDict ):
-        OutputDataFileArg.__init__(self,help,DPDFile(),name, temporary, intermediate, **validationDict )
-
-    def isFullArgument(self):
-        return True
-
- 
-class NbOutputDPDArg(IntegerArg): 
-    """Number of simultaneous output DPD streams.""" 
-    def __init__(self,help='default',name='default'): 
-        IntegerArg.__init__(self,help,name) 
-        
-    def isFullArgument(self): 
-        return True
-
-class BasicBoolArg(BoolArg):
-    """Basic Boolean"""
-    def __init__(self,help='default',name='default'):
-        BoolArg.__init__(self,help,name)
-
-    def isFullArgument(self):
-        return True
-
-class BasicStringArg(StringArg):
-    """Basic string"""
-    def __init__(self,help='default',name='default'):
-        StringArg.__init__(self,help,name)
-        
-    def isFullArgument(self):
-        return True
-    
-class BasicIntArg(IntegerArg):
-    '''Anonymous integer'''
-    def __init__(self,help='default',name='default'): 
-        IntegerArg.__init__(self,help,name) 
-        
-    def isFullArgument(self): 
-        return True
-
-
-class BasicExec(Argument): 
-    """Transfrom a string into generic multi-line python fragment where each line is an element in a list.""" 
-    def __init__(self,help='default',name='default'): 
-        Argument.__init__(self,help,name) 
-    def isFullArgument(self): 
-        return True
-    def basicType(self):
-        return 'list'
-    def metaType(self):
-        return 'plain'
-
-    def toPython(self,valIn):
-        from PyJobTransformsCore.trfutil import StringToExec
-        try:
-            valOut=StringToExec(valIn)
-            return valOut
-        except Exception:
-            raise TransformArgumentError( '%s=%s: syntax error in BasicExec' % (self.name(),valIn) )
-    
-class AsetupArg(StringArg):
-    '''String holding arguments to be passed to asetup before running a substep'''
-    def __init__(self,help='default',name='default'): 
-        super(AsetupArg, self).__init__(help,name) 
-        
-    def isFullArgument(self): 
-        return True  
-
-
-class ListOfStringsArg(StringArg): 
-    """List of strings separated by commas""" 
-    def __init__(self,help='default',name='default'): 
-        StringArg.__init__(self,help,name)
- 
-    def isFullArgument(self): 
-        return True
-
-    def basicType(self):
-        return 'list'
-
-    def toPython(self,valIn):
-        # if already a list, nothing to be done
-        if isinstance(valIn,list):
-            return valIn
-        # make a list of python types out of the string separated either by , or ,, 
-        else:
-            try:
-                valTmp=valIn.replace(',,',',')
-                valList=valTmp.split(',')
-            except Exception:
-                raise TransformArgumentError( '%s=%s: syntax error in list of strings' % (self.name(),valIn) )
-            return valList
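
For illustration (hypothetical inputs), ',' and ',,' both act as separators, since ',,' is collapsed to ',' before splitting:

```python
# 'a,b,,c' -> ['a', 'b', 'c']   (',,' collapsed to ',' first)
# 'a'      -> ['a']
```
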
-
-
-class DpdConfigArg(ListOfStringsArg):
-    """Configuration fragment for DPD.""" 
-    def __init__(self,help='default',name='default'): 
-        ListOfStringsArg.__init__(self,help,name)
-
-
-class JobOptionsArg(StringArg):
-    """(List of) JobOptions file(s) to use."""
-    def __init__(self,help='default',package='',name='default'):
-        """<package> = string with comma separated list of default packages to be searched in.
-        The current working directory will be automatically added as the first place to look."""
-        pacType=type(package)
-        if pacType == str:
-            self._packages = package.split(',')
-        elif pacType == list:
-            self._packages = package
-        else:
-            raise TypeError("%s constructor argument \'package\' is not of type str or list (got type %s)",
-                            self.__class__.__name__, pacType.__name__)
-        # prepend current directory if not already present
-        if os.curdir not in self._packages: self._packages.insert(0,os.curdir)
-        self._fullFiles = []
-        self._exeEnv = {} # copy of python environment after last execution of joboptions
-        StringArg.__init__(self,help,name)
-
-    def __bool__(self):
-        return Argument.__bool__(self) and  len(self.value()) > 0
-
-    def isFullArgument(self):
-        return True
-
-    def basicType(self):
-        return 'list'
-
-    def toPython(self,valIn):
-        # if already a list, nothing to be done
-        valType = type(valIn)
-        if valType == list:
-            return valIn
-        # make a list of python types out of the strings
-        elif valType == str:
-            if valIn == 'NONE': return []
-            # split the comma separated list (outside brackets)
-            try:
-                return expandStringToList( valIn, False )
-            except Exception as msg:
-                raise TransformDefinitionError( '%s=%s: %s' % ( self.name(), valIn, msg ) )
-        else:
-            # if we get here, there is problem
-            raise TransformDefinitionError( '%s=%s: value is not a list or a string' % (self.name(),valIn) )
-
-    def defaultHelp(self):
-        help=Argument.defaultHelp(self)
-        if self._packages: help += '. Default packages: %s' % ','.join(self._packages)
-        return help
-
-    def hasPackage(self):
-        return len(self._packages) > 0
-
-    def package(self):
-        return self._packages
-
-    def value(self):
-        """Prepend default package to all joboptions without explicit package (i.e. subdirectory)"""
-        val = Argument.value(self)
-        newVal = []
-        for v in val:
-            if self._packages and os.sep not in v:
-                for p in self._packages:
-                    full = os.path.join(p,v)
-                    if find_joboptions( full ):
-                        self.logger().info( "Found %s in %s", v, p )
-                        newVal.append( full )
-                        break
-                else:
-                    newVal.append( v ) # either in local dir or not found (will trigger error later)
-            else:
-                newVal.append( v )
-        return newVal
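
A standalone sketch of the package-search logic in value() above (qualify and its exists parameter are hypothetical; exists stands in for envutil.find_joboptions):

```python
import os

def qualify(joboption, packages, exists):
    """Sketch of the search above: prepend the first package directory in
    which <joboption> is found; names that already contain a path separator
    are left alone."""
    if os.sep in joboption:
        return joboption
    for pkg in packages:
        full = os.path.join(pkg, joboption)
        if exists(full):
            return full
    return joboption   # not found: later error handling will complain
```
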
-
-    def setValue(self,value):
-        """Convert NONE value to all upper case"""
-        self._fullFiles = []  # is redetermined on-demand
-        if type(value) is str:
-            #convert value to the correct case
-            valUpper = value.upper()
-            if valUpper == 'NONE' and value != 'NONE':
-                self.logger().info( 'Changing case of %s to %s', value,valUpper )
-                value = valUpper
-            # treat empty string as NONE
-            if value == '': value = 'NONE'
-        Argument.setValue(self,value)
-
-    def fullFilenames(self):
-        if not self._fullFiles:
-            for val in self.value():
-                fullFile = find_joboptions( val )
-                if not fullFile:
-                    raise JobOptionsNotFoundError( val, "Argument %s" % self.name() )
-                self._fullFiles.append(fullFile)
-        return self._fullFiles
-
-    def preRunAction(self):
-        """Check than jobOptions file can be found"""
-        if not self: return
-        shortList = self.value()
-        fullList = self.fullFilenames()
-        for i in range(len(shortList)):
-            short = shortList[i]
-            full  = fullList[i]
-            self.logger().info( 'Found %s in %s', short, strip_suffix(full,short) )
-
-    def exeJobOptions( self, env = None ):
-        """Execute the lines in the jobOptions file in environment <env>. The runtime
-        environment will be added to <env> (theApp etc)"""
-        if env is None: env = {}  # avoid a mutable default argument shared between calls
-        if not self:
-            return
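-        # note: the same <env> dict object is reused for every jobOptions file in
-        # the loop below, so state presumably accumulates from one file to the next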
-        from PyJobTransformsCore import FakeAthena
-        FakeAthena.theApp.setLoggerParentName( self.loggerName() )
-        shortList = self.value()
-        fullList = self.fullFilenames()
-        for i in range( len( shortList ) ):
-            short = shortList[ i ]
-            full  = fullList[ i ]
-            self.logger().info( "Executing jobOptions file %s.", full )
-            FakeAthena.theApp.exeJobOptions( short, env )
-        self._exeEnv.update( copy.copy( env ) )
-
-    def exeEnvironment(self):
-        """copy of python environment after last execution of joboptions"""
-        return self._exeEnv
-
-    def metaData(self):
-        if not self:
-            return {}
-        else:
-            # return list as string with comma separated entries
-            return { self.name() : ','.join(self.value()) }
-#
-# end of class JobOptionsArg
-#
-
-
-class JobConfigArg(JobOptionsArg):
-    """Joboptions file with user settings, in particular the configuration settings"""
-    def __init__(self,help='default',package='',name='default'):
-        # split comma separated string into list
-        if type(package) is str: package = package.split(',')
-        # always add 'PyJobTransforms' package (which contains common jobConfig files)
-        commonPack = 'PyJobTransforms'
-        if commonPack not in package: package.append(commonPack)
-        JobOptionsArg.__init__(self,help=help,package=package,name=name)
-        self.__config = None
-
-    def isFullArgument(self):
-        return True
-
-    def setConfig(self,config):
-        """Set the configuration object"""
-        self.__config = config
-
-    def preRunAction(self):
-        if not self: return
-        JobOptionsArg.preRunAction(self)
-        # execute it, so that any changes to the TransformConfig are processed
-        env = {}
-        if self.__config:
-            # add config object with generic name
-            env['runConfig'] = self.__config
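-        # the jobOptions executed below can therefore refer to the configuration
-        # object under the generic name 'runConfig'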
-        JobOptionsArg.exeJobOptions(self,env)
-
-    def runArgsTemplate(self,objName):
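-        # for a config object myConfig defined in module MyPackage.MyConfig
-        # (hypothetical names), this appends to the parent template:
-        #   from MyPackage.MyConfig import myConfig
-        #   runConfig = myConfig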
-        plate = [ JobOptionsArg.runArgsTemplate(self,objName) ]
-        conf = self.__config
-        if conf:
-            plate += [ '# Make run configuration object available with generic name' ,
-                       'from %s import %s' % (conf.__module__,conf.name()),
-                       'runConfig = %s' % (conf.name(),) ]
-        return os.linesep.join(plate)
-
-
-class RandomSeedArg(IntegerArg):
-    """Randomseed for AtGenRndmSvc"""
-    def __init__(self,help,name='default'):
-        IntegerArg.__init__(self,help,name)
-
-    def isFullArgument(self):
-        return True
-
-    def jobOrTask(self):
-        return 'job'
-
-
-class RunNumberArg(IntegerArg):
-    """Run number of data file"""
-    def __init__(self,help='default',name='default'):
-        IntegerArg.__init__(self,help,name)
-        
-    def isFullArgument(self):
-        return True
-
-
-class FirstEventArg(IntegerArg):
-    """First event number to use"""
-    def __init__(self,help='default',name='default'):
-        IntegerArg.__init__(self,help,name)
-
-    def isFullArgument(self):
-        return True
-
-    def jobOrTask(self):
-        return 'job'
-
-
-class SkipEventsArg(IntegerArg):
-    """Number of events to skip"""
-    def __init__(self,help='default',name='default'):
-        IntegerArg.__init__(self,help,name)
-
-    def isFullArgument(self):
-        return True
-
-    def jobOrTask(self):
-        return 'job'
-
-    def metaData(self):
-        """No metadata for skipevents"""
-        return {}
-
-        
-class MaxEventsArg(IntegerArg):
-    """Maximum number of events to process"""
-    def __init__(self,help='default',name='default'):
-        IntegerArg.__init__(self,help,name)
-        self.setDefault( -1 )
-        self.setValue( -1 )
-
-    def isFullArgument(self):
-        return True
-
- 
-class GeometryVersionArg(StringArg):
-    """Geometry Version"""
-    def __init__(self,help='default',name='default'):
-        StringArg.__init__(self,help,name)
-        
-    def isFullArgument(self):
-        return True
-
-
-class ConditionsTagArg(StringArg):
-    """IOVDbSvc global tag in case it is needed to change the DEFAULT value"""
-    def __init__(self,help='default',name='default'):
-        StringArg.__init__(self,help,name)
-        
-    def isFullArgument(self):
-        return True
-
-
-class TriggerConfigArg(StringArg):
-    """Configuration string to use for TrigT1 and HLT. Set to 'NONE' to switch off trigger,
-    and set to 'DEFAULT' to use the default of the used release."""
-    def __init__(self,help='default',name='default'):
-        StringArg.__init__(self,help,name)
-
-    def isFullArgument(self):
-        return True
-    
-    def setValue(self,value):
-        valueUpper = value.upper()
-        if valueUpper == 'OFF': # for backwards compatibility
-            value = 'NONE'
-        elif valueUpper == 'NONE':  # get consistent NONE value
-            value = 'NONE'
-        elif valueUpper == 'DEFAULT': # or consistent DEFAULT value
-            value = 'DEFAULT'
-        Argument.setValue(self,value)
-
-
-class OptionArg(Argument):
-    """Placeholder Argument that is treated as an option. Option arguments are always
-    optional. If a default is not given when added to a JobTransform, the
-    default is taken from the associated option.
-    <name>: name of the option it represents (the long name without the leading --).
-    Only names of existing options are accepted."""
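-    # e.g. OptionArg('athenaopts', runOpts) mirrors the --athenaopts command line
-    # option; setting the argument forwards its value to runOpts.athenaopts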
-    def __init__(self,name,runOptions=None):
-        self.__runOptions = runOptions
-        Argument.__init__(self,'',name)
-        if runOptions:
-            value = getattr(runOptions,self.name())
-            default = runOptions.getProperty(self.name()).default()
-            if not default: default = value
-            if not self.hasDefault(): Argument.setDefault(self,default)
-            if not self.hasValue()  : Argument.setValue(self,value)
-    # Implement required functions of Argument
-    #
-    def isFullArgument(self):
-        return True
-
-    def basicType(self):
-        return "string"
-
-    def type(self):
-        return "string"
-
-    def metaType(self):
-        return "option"
-
-    def toPython(self,val):
-        # any type is accepted
-        return val
-
-    # Override functions of Argument
-    #
-    def setValue(self,val):
-        """Pass value on to the option"""
-        Argument.setValue( self, val )
-        setattr( self.__runOptions, self.name(), Argument.value( self ) )
-
-    def value(self):
-        """Return the option value"""
-        return getattr(self.__runOptions,self.name())
-
-    def help(self):
-        return self.__runOptions.getProperty(self.name()).help()
-    
-
-class DBReleaseArg( InputTarFileAndSetupArg ):
-    """Tarball containing the DBRelease to use"""
-    def __init__( self, name = 'DBRelease', destdir = os.curdir ):
-        InputTarFileAndSetupArg.__init__( self, help = 'default', name = name,
-                                          setupfile = 'setup.py', envVars = None, destdir = destdir, temporary = True )
-
-    def isFullArgument(self):
-        return True
-
-    def untarAndSetup(self):
-        """Install the DBRelease"""
-        try:
-            InputTarFileAndSetupArg.untarAndSetup(self)
-        except TransformArgumentError as e:
-            e.setError('TRF_DBREL_TARFILE')
-            raise
-        # unset DBRELEASE_REQUESTED to avoid needless crash in check in JobTransform
-        try:
-            del os.environ['DBRELEASE_REQUESTED']
-        except KeyError:
-            pass
-
-    def install(self):
-        """Install the DBRelease if a filename is given."""
-        if not self: return
-        self.untarAndSetup()
-
-    def metaData(self):
-        """No metadata"""
-        return {}
-
-    def metaType(self):
-        return 'dbrelease'
-
-
-class EventSelectorQueryArg(StringArg):
-    """String with an event selector query in it"""
-    def __init__(self,help='default',name='EventSelectorQuery'):
-        StringArg.__init__(self,help,name)
-        
-    def isFullArgument(self):
-        return True
-    
diff --git a/Tools/PyJobTransformsCore/python/pool.py b/Tools/PyJobTransformsCore/python/pool.py
deleted file mode 100755
index 4330a6ef78177a566335cae07479e664b8be5e00..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/pool.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-import os
-from PyJobTransformsCore.envutil import find_path_env, append_path_env_if
-
-POOL_HOME = 'POOL_HOME'
-
-def setup_environment():
-    # get POOL_HOME
-    pool_home = os.environ.get(POOL_HOME)
-    if not pool_home:
-        # try to extract from PATH
-        poolRE = r'.*/POOL/.*/bin'
-        pool_path = find_path_env('PATH', poolRE)
-        if not pool_path:
-            raise EnvironmentError('Could not determine POOL_HOME')
-
-        pool_home = os.path.dirname(pool_path[0])
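-        # e.g. a PATH entry '/sw/POOL/3_1_0/bin' (hypothetical) matches poolRE and
-        # yields pool_home '/sw/POOL/3_1_0'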
-        os.environ[POOL_HOME] = pool_home
-
-    pool_bin = os.path.join( pool_home, 'bin' )
-
-    append_path_env_if('PYTHONPATH',pool_bin)
-
-
-setup_environment()
-
-from PyFCAction import *  # noqa: F401 F403
diff --git a/Tools/PyJobTransformsCore/python/runargs.py b/Tools/PyJobTransformsCore/python/runargs.py
index cf97bbb42f9e1099e9b450ccd345e798a5f46144..d05d636005ca98d375e4f4e61ac6dfce4f774046 100755
--- a/Tools/PyJobTransformsCore/python/runargs.py
+++ b/Tools/PyJobTransformsCore/python/runargs.py
@@ -1,14 +1,8 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
+# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
 __author__ = "clat@hep.ph.bham.ac.uk"
 
 import os
-from PyJobTransformsCore.TransformConfig import Descriptor, JobConfig
-from PyJobTransformsCore.trfutil import VALIDATION_DICT
-from PyJobTransformsCore.trferr import TransformArgumentError
-
 
 class RunArguments:
     """Dynamic class that holds the run arguments as named members with values."""
@@ -18,423 +12,3 @@ class RunArguments:
             if not arg.startswith('__'):
                 myself += '%s   %s = %s' % (os.linesep, arg, repr(getattr(self,arg)))
         return myself
-
-
-class CommandLineOption( Descriptor ):
-    def __init__(self, shortOption, longOption, help, takesArgument=False, default=None, choices=None):
-        """For options with or without argument:
-        <shortOption>: short option name, to be used with - (added). Empty string if not defined.
-        <longOptions> long option name, to be used with -- (added). Empty string if not defined.
-        <help> Help string explaining the option
-        For options with argument:
-        <takesArgument>: should be set to True
-        <default> : default value for options that take an argument.
-        <choices> : list of possible values the option can have. For options without argument
-                    this is (python) True or False.
-        """
-        self.__takesArgument = takesArgument
-        if not takesArgument:
-            # option without an argument is considered a boolean
-            default = False
-            choices = [True,False]
-        self.__shortName = shortOption.lstrip('-')
-        self.__longName = longOption.lstrip('-')
-        self.__actionFunc = None
-        Descriptor.__init__(self,help,default,choices)
-    
-    
-    def setActionFunction(self,func):
-        """Set function to be called when option value is set. If the option takes
-        an argument, the function must take an argument of type string.
-        If the option does not take an argument, the function must not take
-        any arguments, and will only be called if the option is set to True.
-        """
-        self.__actionFunc = func
-    
-    
-    def shortName(self):
-        return self.__shortName
-    
-    
-    def shortOption(self):
-        return '-' + self.__shortName
-    
-    
-    def longName(self):
-        return self.__longName
-    
-    
-    def longOption(self):
-        return '--' + self.__longName
-    
-    
-    def takesArgument(self):
-        return self.__takesArgument
-    
-    
-    def processOption(self,value):
-        if value is None: return
-        func = self.__actionFunc
-        if not func: return
-        if self.__takesArgument:
-            func(value)
-        elif value is True:
-            func()
-    
-    
-    def _checkType(self,variableName,value):
-        """Allowed types:
-        For option not taking argument: bool (True,False) or string (\'True\' or \'False\')
-        For option taking argument: string or bool
-        """
-        valType = type(value).__name__
-        if not self.__takesArgument:
-            # convert certain strings to bool
-            if valType == 'str':
-                # empty string is given by getopt (when option is set)
-                # convert to boolean if possible.
-                try:
-                    value = { 'TRUE' : True, 'FALSE' : False, '' : True }[ value.upper() ]
-                except Exception:
-                    raise TransformArgumentError( '%s value of %s not recognised.' % ( variableName, value ) )
-            elif not isinstance( value, bool ):
-                raise TransformArgumentError( '%s should be boolean. Got %s (%s) instead.' % (variableName, value, valType) )
-        elif valType not in ( 'str', 'bool' ):
-            raise TransformArgumentError( '%s should be a string or bool. Got %s (%s) instead.' % (variableName, value, valType) )
-        return value
-    
-    
-    def _checkValue(self,variableName,value):
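-        # e.g. with choices ['NONE', 'ALL'] (illustrative), the value 'none,all' is
-        # normalised to 'NONE,ALL'; a value outside the choices raises
-        # TransformArgumentError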
-        if self.__takesArgument:
-            # handle the argument (always of the string type)
-            choices = self.allowedValues()
-            try:
-                valueList = value.split( ',' )
-            except Exception:
-                valueList = [ value ]
-            # Convert value to the correct case, if there is a list of string choices
-            if choices:
-                newValueList = []
-                for v in valueList:
-                    for c in choices:
-                        try:
-                            x = v.upper() == c.upper()
-                        except AttributeError:
-                            if v == c:
-                                newValueList.append( v )
-                                break
-                        else:
-                            if x:
-                                if v != c:
-                                    v = c
-                                newValueList.append( v )
-                                break
-                    else:
-                        raise TransformArgumentError( '%s value %r is not in %s' % ( variableName, v, choices ) )
-                try: # string list
-                    return ','.join( newValueList )
-                except Exception: # assume boolean
-                    return newValueList[0]
-        # check against list of possible values
-        return Descriptor._checkValue(self,variableName,value)
-
-
-    def _setValue(self,variableName,value):
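-        # echoes each assignment to stdout (apparently intended as a trace of the
-        # effective options in the job log)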
-        print ("Setting %s to %r" % (variableName,value))
-        if value is not None and (self.__takesArgument or value):
-            self.processOption(value)
-    
-    
-    def _checkAllowedValues(self,variableName,allowedValues):
-        # Check all allowed values
-        if not self.__takesArgument:
-            for val in allowedValues:
-                if val is not True and val is not False:
-                    raise TransformArgumentError( '%s should be True or False. Got %s (%s) instead.' % (variableName, val, type(val).__name__) )
-        else:
-            for val in allowedValues:
-                valType = type(val).__name__
-                if valType not in [ 'str', 'bool' ]:
-                    raise TransformArgumentError( '%s should be a string or boolean. Got %s (%s) instead.' % (variableName, val, valType) )
-        return allowedValues
-    
-
-
-class RunOptions(JobConfig):
-    """Class to hold the running options"""
-    __slots__ = ()
-    help       = CommandLineOption('h', 'help', 'Print detailed help')
-    test       = CommandLineOption('t', 'test', 'Run in test mode. Tests/checks omitted using the --omitvalidation=XXX option applied everywhere rather than being limited to validation routines. GUIDs are not extracted either.')
-    command    = CommandLineOption('c', 'command', 'Python code to be pre-executed before any scripts', True )
-    mcinput    = CommandLineOption('', 'mcinput', 'Monte Carlo input file used. Omit if real data is used. Enabling this option also implies setting --usesqlite=True', True, False, choices = [ True, False ] )
-    usesqlite  = CommandLineOption('', 'usesqlite', 'Force the use of local/symlinked sqlite files rather than ORACLE.', True, False, choices = [ True, False ]  )
-    omitvalidation = CommandLineOption('', 'omitvalidation', 'Omit certain validation routines. Use "ALL" to omit all validation routines, use "NONE" to remove all validation omissions.', True, choices = list(VALIDATION_DICT.keys()) + ['NONE'] )
-    athenaopts = CommandLineOption('a', 'athenaopts', 'Options to be passed on to athena', True)
-    argdict    = CommandLineOption('',  'argdict', 'Run arguments given in a python dictionary in a pickle file', True)
-    
-    # options setting log file error masks.
-    ignoreunknown = CommandLineOption('', 'ignoreunknown', '**DEPRECATED** Mask out any unknown errors found (i.e. ignore TRF_UNKNOWN errors)' )
-    ignoreall = CommandLineOption( '', 'ignoreall', 'Force transformations, composite ones in particular, to proceed regardless of any errors encountered.')
-    ignoreerrors = CommandLineOption('', 'ignoreerrors', 'Mask all errors. The transform returns success if athena.py is successful. Contributes to the NEEDCHECK flag. This option no longer accepts an additional file to supplement existing error ignore filters. For that purpose, use the --extraignorefilters=XXX option.', True, False, choices = [ True, False, 'ALL' ] )
-    
-    # Memory optimisation options to be passed to Athena.
-    tcmalloc   = CommandLineOption('', 'tcmalloc', '**DEPRECATED** Use tcmalloc for memory allocation instead of stdcmalloc' )
-    leakcheckexecute = CommandLineOption('', 'leakcheckexecute', 'Perform basic memory leak checking. This disables the use of tcmalloc')
-    
-    # Memory-related flags to be set in the execution shell
-    rss = CommandLineOption( '', 'rss', 'Set maximum resident memory value (in megabytes).', True )
-    vmem = CommandLineOption( '', 'vmem', 'Set maximum virtual memory (in megabytes).', True )
-    
-    # file containing dictionary of metadata filters (forced insertions and additional metadata filters )
-    extrametadatadict = CommandLineOption( '', 'extrametadatadict', 'Full path to file containing extraMetadataDict, a dictionary of compiled patterns and/or strings for explicit inclusion', True )
-    
-    # file containing additional error ignore filters (in the format set out in atlas_error_ignore.db)
-    extraignorefilters = CommandLineOption( '', 'extraignorefilters', 'Full path to file containing additional error ignore filters in the format set out in atlas_error_ignore.db', True )
-    
-    # temporary options for testing
-    usenewmeta = CommandLineOption( '', 'usenewmeta', 'Use new metadata format. Experimental', True, False, choices = [ True, False ] )
-    
-    #upload to AMI
-    uploadtoami = CommandLineOption( 'm', 'uploadtoami', 'Upload performance data to AMI. Should be used only by T0 or T1. Value is the probability that the job actually reports (used to throttle reporting of MC jobs).', True, '0.0' )
-    
-    def __init__(self):
-        JobConfig.__init__(self,'runOpts')
-    
-    
-    def __str__(self,indent=''):
-        """String containing a formatted description of the options"""
-        help = []
-        maxOpt = 0
-        optsList = []
-        optsHelp = []
-        # make options list and determine max string length
-        nOpts = len(self)
-        for opt in self:
-            hlp = opt.doc()
-            if (opt.takesArgument()):
-                optName=''
-                if opt.shortOption() != '':
-                    optName += '%s <value>' % opt.shortOption()
-                if opt.longOption() != '':
-                    if optName: optName += ', '
-                    optName += '%s=<value>' % opt.longOption()
-                allowed = opt.allowedValues()
-                if allowed:
-                    hlp += '. Possible values: %s' % allowed
-            else:
-                optName = '%s, %s' % ( opt.shortOption(), opt.longOption() )
-            maxOpt = max(maxOpt,len(optName))
-            optsList.append( optName )
-            optsHelp.append( hlp )
-        helpColumn = min(40,len(indent) + maxOpt + 2)
-        helpIndent = ' '*helpColumn # indent for help continuation lines
-        for iOpt in range(nOpts):
-            opt = optsList[iOpt]
-            helpSpaceCount = max(2,helpColumn - len(opt) - len(indent))
-            helpSpace = ' '*helpSpaceCount
-            h = optsHelp[iOpt]
-            hLines = h.split(os.linesep)
-            # add first line
-            help += [ '%s%s%s%s' % (indent, opt, helpSpace, hLines[0]) ]
-            # add other lines (if present)
-            if len(hLines) > 1:
-                help += [ '%s%s' % (helpIndent,s) for s in hLines[1:] ]
-        return os.linesep.join(help)
-    
-    
-    def toLongName(self,name):
-        """Convert any allowed option name to the long option name without '--'"""
-        longName = None
-        shortName = None
-        if name.startswith('--'):
-            longName = name[2:]
-        elif name.startswith('-'):
-            shortName = name[1:]
-        elif len(name) == 1:
-            shortName = name
-        else:
-            longName = name
-        if longName and self.isLongOption(longName):
-            return longName
-        if shortName:
-            for opt in self:
-                if opt.shortName() == shortName:
-                    return opt.longName()
-        raise KeyError('not a known option name: %r' % name)
-    
-    
-    def isLongOption(self,name):
-        """Return bool indicating whether <name> is one of the allowed long options.
-        <name> can optionally begin with --
-        """
-        if name.startswith('--'): name = name[2:]
-        for opt in self:
-            if opt.longName() == name:
-                return True
-        return False
-    
-    def isShortOption(self,name):
-        if name.startswith('-'): name = name[1:]
-        for opt in self:
-            if opt.shortName() == name:
-                return True
-        return False
-    
-    def setOption(self,name,value):
-        setattr(self,self.toLongName(name),value)
-    
-    def processOptions(self):
-        """Call associated functions on all options"""
-        for opt in self:
-            opt.processOption(getattr(self,opt.name()))
-    
-    def extractOptions(self, argList):
-        """Scan the command line argument list given in <argList>. It returns a new
-        list where all the options have been removed from <argList>.
-        It considers an argument a valid option if it is:
-        -<shortname>
-        -<shortname> <value>
-        --<longname>
-        --<longname>=<value>
-        <longname>=<value>
-        Any argument starting with '-' or '--' will be considered an option, and
-        will be checked against the list of known options. If no match is found,
-        it is considered an error unless the argument is a negative number
-        (integer or float), in which case it will be included in the returned list.
-        If an argument equals '--', then all following arguments are considered not options,
-        and will be included in the returned list.
-        If there is any problems with the options, an TransformArgumentError exception is raised.
-        """
-        cleanList = []
-        nArgs = len(argList)
-        iArg = 0
-        while iArg < nArgs:
-            arg = argList[iArg]
-            if arg == '--':
-                # no more options. Add all remaining arguments
-                iArg += 1
-                while iArg < nArgs:
-                    cleanList.append( argList[iArg] )
-                    iArg += 1
-                return cleanList
-            # get argument name
-            equal=arg.find('=')
-            value = None
-            if equal == -1:
-                name = arg
-            else:
-                name = arg[:equal]
-                if equal+1 < len(arg):
-                    value=arg[equal+1:]
-                    # convert to boolean if possible.
-                    try:
-                        value = { 'TRUE' : True, 'FALSE' : False }[ value.upper() ]
-                    except Exception:
-                        pass
-                else:
-                    value=''
-            if name.startswith('--'):
-                # cases --<longname> and --<longname>=<value>
-                longName = name[2:]
-                if not self.isLongOption(longName):
-                    raise TransformArgumentError('option %s not supported' % name)
-                option = self.getProperty(longName) # gets the option descriptor
-                if option.takesArgument():
-                    if equal == -1:
-                        value = option.default()
-#                        raise TransformArgumentError('option %s value missing. Syntax: %s=<value>' % (name,name) )
-                    # set the value
-                    setattr(self,longName,value)
-                else:
-                    if equal != -1:
-                        raise TransformArgumentError('option %s does not take a value (Found =%s)' % (name,value) )
-                    # set value to True
-                    setattr(self,longName,True)
-            elif name.startswith('-'):
-                # cases -<shortname>, -<shortname> <value> or a negative number
-                if not self.isShortOption(name):
-                    try:
-                        # test if it is a negative number
-                        float(name)  # covers both float and int
-                    except Exception:
-                        raise TransformArgumentError('option %s not supported' % name)
-                    else:
-                        # it is a negative number: add it to the list, and go to next
-                        cleanList.append( arg )
-                        iArg += 1
-                        continue
-                longName = self.toLongName(name)
-                option = self.getProperty(longName) # gets the option descriptor
-                if option.takesArgument():
-                    if equal != -1:
-                        raise TransformArgumentError('option %s syntax error: no \'=\' allowed. Value should be specified as next argument: %s <value>' % (name,name) )
-                    # value is next argument
-                    iArg += 1
-                    if iArg >= nArgs:
-                        raise TransformArgumentError('option %s missing value. Syntax: %s <value>' % (name,name) )
-                    value = argList[iArg]
-                    # convert to boolean if possible.
-                    try:
-                        value = { 'TRUE' : True, 'FALSE' : False }[ value.upper() ]
-                    except Exception:
-                        pass
-                    setattr(self,longName,value)
-                else:
-                    if equal != -1:
-                        raise TransformArgumentError('option %s syntax error: no \'=\' allowed in name (option takes no value.)' % (name,) )
-                    # set value to True
-                    setattr(self,longName,True)                    
-            elif equal != -1 and self.isLongOption(name):
-                option = self.getProperty(name)
-                if not option.takesArgument():
-                    raise TransformArgumentError('option %s takes no value' % (name,))
-                # case <longname>=<value>
-                setattr(self,name,value)
-            else:
-                # not an option
-                cleanList.append( arg )
-            # move to next argument    
-            iArg += 1
-            # end of while iArg < nArgs
-        return cleanList
-    
-
diff --git a/Tools/PyJobTransformsCore/python/trf.py b/Tools/PyJobTransformsCore/python/trf.py
deleted file mode 100755
index 072a8bfc2a93d40d66cf8af97f74dbcce4248b14..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/trf.py
+++ /dev/null
@@ -1,2262 +0,0 @@
-# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
-
-## @package trf
-#
-#  @brief Main package containing the @em JobTransform class.
-#  @details Main module of @em PyJobTransformsCore package containing the base class @em JobTransform.
-
-from past.builtins import execfile
-import os, sys, time, stat, re, math, subprocess, signal, inspect
-import stat as statconsts
-from copy import copy
-try:
-    import cPickle as pickle
-except Exception:
-    import pickle
-from PyJobTransformsCore import fileutil
-from PyJobTransformsCore import trfenv, trferr, trfconsts, AtlasErrorCodes
-from PyJobTransformsCore.trferr import AthenaLogChecker, InputFileError, JobOptionsNotFoundError, TransformArgumentError, TransformDefinitionError, TransformEnvironmentError, TransformErrorDiagnoser, TransformValidationError
-from PyJobTransformsCore.trfutil import Author, CommentLine, PostJobOptionsFile, PreJobOptionsFile, SQLiteSupport, TRF_SETTING, VersionString, find_joboptions, get_atlas_release, get_files, setDefaultSignalHandlers, setTrfSignalHandler, strip_suffix
-from PyJobTransformsCore.JobReport import JobReport, JobInfo, _extraMetadataDict
-from PyJobTransformsCore.TransformLogger import TransformLogger
-from PyJobTransformsCore.TransformConfig import TransformConfig
-from PyJobTransformsCore.runargs import RunArguments, RunOptions, VALIDATION_DICT
-from PyJobTransformsCore.VTimer import vTimer
-from PyJobTransformsCore.basic_trfarg import Argument
-from PyJobTransformsCore.full_trfarg import OptionArg, JobConfigArg
-from AthenaCommon import ExitCodes  
-
-## @class JobTransform
-# 
-#  @brief Base class for all transformations
-#
-#  @details All concrete job transformations must derive from this class, and add appropriate arguments to it 
-#  (with the @c add() member function, called when instantiating a derived class.)
-#
-#  Derived classes can override member functions, in particular @c runJob() in the case of composite transforms.
-#  @warning Not intended for use directly without subclassing (and adding transform arguments).
-class JobTransform(TransformLogger):
-    
-    ## job transform source file suffix  
-    _fileSuffix = '_trf'
-
-    ## run arguments name 
-    _runArgsName = 'runArgs'
-
-    ## Command line options shared between composite transforms
-    _sharedRunOpts = [ 'athenaopts', 'ignoreunknown', 'ignoreall', 'ignoreerrors', 'tcmalloc', 'leakcheckexecute', 'rss', 'vmem', 'extrametadatadict', 'extraignorefilters', 'usenewmeta', 'omitvalidation', 'command', 'mcinput', 'usesqlite', 'uploadtoami' ]
-
-    ## Options that are also accepted as arguments (via full_trfarg.OptionArg)
-    _optionArgNames = [ 'athenaopts', 'ignoreunknown', 'ignoreall', 'ignoreerrors', 'tcmalloc', 'leakcheckexecute', 'rss', 'vmem', 'extrametadatadict', 'extraignorefilters', 'usenewmeta', 'omitvalidation', 'command', 'mcinput', 'usesqlite', 'uploadtoami' ]
-    
-    ## When @c _ignoreUnknown is @em True, unknown errors are ignored.
-    _ignoreUnknown = False
-    
-    ## When @c _ignoreAll is @em True, the transformation is permitted to execute as long as possible.
-    _ignoreAll = False
-    
-    ## When @c _ignoreErrors is @em True, a successful athena job will force any errors 
-    #  detected by the transformation to be ignored.
-    _ignoreErrors = False
-    
-    ## @brief Constructor for the JobTransform class. Not intended for direct use; a vanilla instance is invalid.
-    #  @details A JobTransform instance is intended to be subclassed and customised for specific job transformation purposes
-    #  with input files (optional) and output files.
-    #  @param authors List of trfutil.Author instances
-    #  @param help Documentation string describing what the transformation does
-    #  @param version Version number based on the svn tag. Default value retrieved from @c cmt/version.cmt
-    #  @param skeleton Name of skeleton job options file to be used by @c athena.py
-    #  @param name Name of transform used in logging. Default value is the file's name with its @em _trf suffix omitted.
-    #  @param config TransformConfig instance contains the job's configuration. Default value is @c TransformConfig('defaultConfig')
-    #  @param lastInChain Boolean value to define a trf as the last in a chain of trfs. Default value is @c True.
-    #  @warning Not intended for use directly without subclassing (and adding transform arguments).
-    def __init__( self,
-                  authors=[], 
-                  help='Base class. Not intended for direct use.', 
-                  version='default', 
-                  skeleton='default', 
-                  name='default',
-                  config=TransformConfig('defaultConfig'),
-                  lastInChain = True ):
-        # set filename of python file that defines the transformation
-        base = self.__module__.split('.')[-1]
-        if base == '__main__':
-            #Get a list of frames
-            framelist = inspect.getouterframes(sys._getframe())
-            nframes = len(framelist)
-            #Last frame in the list should be the toplevel trf
-            #frame is a Python tuple object; the 2nd item [1] is the filename
-            filename = os.path.basename(framelist[nframes-1][1])
-            # make sure the extension is .py (and not .pyc or .pyo)
-            base,ext = os.path.splitext(filename)
-            del filename
-        ## Transform file name. Accessed via filename().
-        self._filename = base + '.py'
-        # set the name of the transformation
-        if name == 'default':
-            # start from filename where constructor is called
-            name = os.path.splitext(self._filename)[0]
-            # remove default trf name suffix
-            name = strip_suffix( name, JobTransform._fileSuffix )
-        ## Name of the transform. Accessed via name().
-        self._name = name
-        # Install new signal handler.
-        setTrfSignalHandler( self._completeReport )
-        ## @var _skeleton 
-        #  Skeleton (top-level) job option for the transforms. Accessed via skeletonFilename().
-        if skeleton == 'default':
-            self._skeleton = self.defaultSkeletonFilename()
-        elif skeleton == '':
-            self._skeleton = None
-        else:
-            self._skeleton = skeleton
-        # set the version. Default is svn tag as found in file cmt/version.cmt
-        if version == 'default':
-            version = 'unknown'
-            framelist = inspect.getouterframes(sys._getframe())
-            nframes = len(framelist)
-            #Last frame in the list should be the toplevel trf
-            #frame is a Python tuple object; the 2nd item [1] is the filename
-            filename = framelist[nframes-1][1]  # keep the full path: the directories are needed below
-            dirname = os.path.dirname( filename )
-            packroot = os.path.dirname( dirname )
-            cmtversion = os.path.join(packroot,'cmt','version.cmt')
-            try:
-                with open( cmtversion ) as cmtversionfile:
-                    for line in cmtversionfile:
-                        _v = line.strip()
-                        if _v:
-                            version = _v
-                            break
-            except Exception:
-                pass
-        ## Specify if this is the last trf in a chained series of trfs.
-        #  This allows certain processes to be omitted/added depending on its position in the trf chain.
-        self._lastInChain = lastInChain
-        ## Transform version determined using @c cmt/version.cmt. 
-        #  @see Accessed via version().
-        self._version = version
-        ## Transform configuration.
-        #  @see Accessed via runConfig().
-        self._config = config
-        ## Handle to application (athena.py) subprocess.
-        self._runJobProcess = None
-        ## Associate an empty jobReport with the transform.
-        #  @see Accessed via jobReport().
-        self._jobReport = JobReport()
-        # Name and set the version (based on the current transform) of the associated job report.
-        self._jobReport.setProducer(self.name(), self.version())
-        ## List of areas to be included in the job report.
-        self._jobReportOptions   = []
-        ## List of areas not to be included in the job report.
-        self._jobReportNotOptions = []
-        ## Information (limited by jobReportOptions) printed at start of the jobReport.
-        self._startReportOptions = ['Environment','Machine','AtlasRelease','DBRelease']
-        ## Information (extending on jobReportNotOptions) omitted at start of the jobReport.
-        self._startReportNotOptions = []
-        ## Information (limited by jobReportOptions) printed at end of the jobReport.
-        self._endReportOptions   = []
-        ## Information (extending on jobReportNotOptions) omitted at end of the jobReport.
-        self._endReportNotOptions = ['Environment','Machine','RunDirListing']
-        # Initialise parent class
-        TransformLogger.__init__(self,name)
-        # Set logger
-        trferr.errorHandler.setLoggerParentName( self.loggerName() )
-        ## Handle to the log file associated with this transform.
-        self._logFile = None
-        ## Name of the log file.
-        #  @see Accessed via logFilename().
-        self._logFilename = name + '.log'
-        ## Name of the file used to store the stderr written out by the transform.
-        self._stderrFilename = name + '.stderr'
-        ## Name of the file used to store the stdout written out by the transform.
-        self._stdoutFilename = name + '.stdout'
-        ## List of loggers associated with the transform.
-        self._loggers = []
-        ## Help text.
-        #  @see Accessed via getHelp().
-        self._help    = help
-        ## List of authors (trfutil.Author instances) associated with the transform.
-        #  @see Append using addAuthors().
-        self._authors = []
-        ## Minimum events permitted. Used in validation checks.
-        #  @see Use setMinMaxEvents() to set new values.
-        self._minEvents = 0
-        ## Maximum events permitted. Used in validation checks.
-        #  @see Use setMinMaxEvents() to set new values.
-        self._maxEvents = 0
-        ## Used to determine the error filters to apply when parsing the log file. Value used in the job report.
-        self._atlasrelease = None
-        ## Used in mechanism to determine DBRELEASE. Value used in job report.
-        self._dbrelease = None
-        ## List of options to be passed to @c athena.py.
-        #  @see Append using addAthenaOption().
-        self._athenaOptions = []
-        ## List of non-athena.py options. Used to pass options to the athena_wrapper.py script.
-        self._genericOptions = []
-        ## Pre-execute one-liner to be passed to athena.py's @c -c parameter.
-        #  @see Set using setPECommand().
-        self._PECommand = ''
-        ## True when Monte Carlo data is used as input. Enables routines specific to MC input data.
-        #  @see Set using enableMCInput().
-        self._mcInput = None
-        ## True when SQlite is to be used rather than ORACLE. 
-        #  @see Set using enableSQLite().
-        self._useSQLite = None
-        ## Set to true if random throw gives a number smaller than the value of input argument --uploadtoami
-        self._exportToAmi = False
-        ## Dictionary of key/values of arguments.  
-        self._namedArgs = {}
-        ## List of arguments in strict order.
-        #  @see Accessed via argumentList().
-        self._positionalArgs = []
-        ## Dictionary of arguments that have default values.
-        self._optionalArgs = {}
-        ## Dictionary of arguments that do not have default values.
-        self._requiredArgs = {}
-        ## An empty runargs.RunArguments instance that will hold run arguments as named members with values.
-        #  @see Accessed via runArgs().
-        self._runArgs = RunArguments()
-        ## List of basic_trfarg.OutputFileArg instances.
-        self._outputFiles = []
-        ## List of basic_trfarg.InputFileArg instances.
-        self._inputFiles = []
-        ## List of pre-run actions (primarily) from arguments added.
-        self._preRunActions = []
-        ## List of post-run actions (primarily) from arguments added.
-        self._postRunActions = []
-        ## List of job options that will be prepended ahead of the skeleton job option.
-        self._preJobOptionsFiles = []
-        ## List of job options that will be appended behind the skeleton job options.
-        self._postJobOptionsFiles = []
-        # store the various jobOptions templates
-        ## Template for author details
-        self._authorsTemplate = ''
-        ## Template for run arguments header
-        self._runArgsHeaderTemplate = ''
-        ## Template for run arguments code
-        self._runArgsCodeTemplate = ''
-        ## Template for the run arguments themselves
-        self._runArgsArgumentsTemplate = ''
-        ## Template for skeleton job option header. 
-        #  @warning Used by automatic skeleton job option generator which has not been implemented.
-        self._skeletonHeaderTemplate = ''
-        ## Template for skeleton job option code
-        #  @warning Used by automatic skeleton job option generator which has not been implemented.
-        self._skeletonCodeTemplate = ''
-        ## Template for the skeleton job option
-        #  @warning Used by automatic skeleton job option generator which has not been implemented.
-        self._skeletonTemplate = ''
-        # set and initialise command line options
-        ## Store all available command line options.
-        #  @see Accessed via runOptions().
-        self._runOpts = RunOptions()
-        # Assign actions to command line options.
-        runOpts = self._runOpts
-        runOpts.getProperty('help').setActionFunction( self.printHelpAndExit )
-        runOpts.getProperty('athenaopts').setActionFunction( self.addAthenaOption )
-        runOpts.getProperty('argdict').setActionFunction( self.readArgsFromPickle )
-        runOpts.getProperty('test').setActionFunction( self.enableTest )
-        runOpts.getProperty('command').setActionFunction( self.setPECommand )
-        runOpts.getProperty('mcinput').setActionFunction( self.enableMCInput )
-        runOpts.getProperty('usesqlite').setActionFunction( self.enableSQLite )
-        runOpts.getProperty('ignoreerrors').setActionFunction( self.enableMaskAllErrors )
-        runOpts.getProperty('ignoreunknown').setActionFunction( self.enableIgnoreUnknownErrors )
-        runOpts.getProperty('ignoreall').setActionFunction( self.enableIgnoreAllErrors )
-        runOpts.getProperty('tcmalloc').setActionFunction( self.enableTCMalloc )
-        runOpts.getProperty('leakcheckexecute').setActionFunction( self.enableLeakCheckExecute )
-        runOpts.getProperty('rss').setActionFunction( self.setRSS )
-        runOpts.getProperty('vmem').setActionFunction( self.setVMEM )
-        runOpts.getProperty('extrametadatadict').setActionFunction( self.updateExtrasMetadataDict )
-        runOpts.getProperty('extraignorefilters').setActionFunction( self.updateIgnoreFilters )
-        runOpts.getProperty('omitvalidation').setActionFunction( self.omitValidation )
-        runOpts.getProperty('usenewmeta').setActionFunction( self.useNewMeta ) # Experimental
-        runOpts.getProperty('uploadtoami').setActionFunction( self.uploadToAMI ) 
-#        runOpts.processOptions()
-        ## Dictionary of options (full_trfarg.OptionArg instances) as denoted by the @em _optionArgNames class variable.
-        self._optionArgs = {}
-        for opt in JobTransform._optionArgNames:
-            arg = OptionArg(opt,runOpts)
-            self._optionArgs[opt.lower()] = arg
-            self._addLogger(arg)
-        self.addAuthors( authors )
-        # define runArgsHeaderTemplate contents
-        self.addTemplates( 'runArgsHeader',
-                           "# Run arguments file auto-generated on %s by:" % (time.ctime()) ,
-                           "# JobTransform: %s" % (name) ,
-                           "# Version: %s" % (version) )
-        # define skeletonHeaderTemplate contents
-        self.addTemplates( 'skeletonHeader',
-                           "# JobOptions skeleton file auto-generated on %s by:" % (time.ctime()) ,
-                           "# JobTransform: %s" % (name) ,
-                           "# Version: %s" % (version) )
-        # define skeletonCodeTemplate contents
-        self.addTemplates( 'skeletonCode', "import os,sys" )
-        # define runArgsCodeTemplate contents
-        self.addTemplates( 'runArgsCode',
-                           "from PyJobTransformsCore.runargs import RunArguments",
-                           "",
-                           "%s = RunArguments()" % self.runArgsName() )
-
-    ## @brief Add an new error to the list of errors in the associated job report.
-    #  @details An instance of AtlasErrorCodes.ErrorInfo is created and added to the list of errors maintained by the transform's job report.
-    #  @param acronym Acronym to assign to the error. Defaults to @b ATH_FAILURE. 
-    #  Refer to @c share/atlas_error_categories.db for valid acronyms.
-    #  @param severity Assign a severity level for the error. Defaults to @b FATAL.
-    #  Refer to severity levels defined in @c AtlasErrorCodes.py.
-    #  @param **kwargs Optional dictionary of any other parameters that may be recognised by @c AtlasErrorCodes.ErrorInfo.
-    #  @return The AtlasErrorCodes.ErrorInfo instance created.
-    def addError( self, acronym = "ATH_FAILURE", severity = AtlasErrorCodes.FATAL, **kwargs ):
-        e = AtlasErrorCodes.ErrorInfo( acronym = acronym, severity = severity, **kwargs )
-        self._jobReport.addError( e )
-        return e
-
-    ## @brief Add a new error to the list of validation errors in the associated job report.
-    #  @details An instance of AtlasErrorCodes.ErrorInfo is created and added to the list of validation
-    #  errors maintained by the transform's job report.
-    #  @param e An instance of trferr.TransformValidationError exception.
-    #  @return The AtlasErrorCodes.ErrorInfo instance created.
-    def addValidationError( self, e ):
-        ve = AtlasErrorCodes.ErrorInfo( acronym = e.error, severity = AtlasErrorCodes.FATAL, message = e.message, **e.extras )
-        self._jobReport.addValidationError( ve )
-        return ve
-
-    ## @brief Common signal handler.
-    #  @details This function is installed in place of the default signal handler and attempts to terminate the 
-    #  transform gracefully. When a signal is caught by the transform, the stdout from the running application process 
-    #  (i.e. @c athena.py) is allowed to continue uninterrupted and write it's stdout to the log file (to retrieve 
-    #  the traceback) before the associated job report records the fact that a signal has been caught and complete 
-    #  the report accordingly. 
-    #  @param signum Signal number. Not used since this is a common handle assigned to predefined signals using the 
-    #  @c _installSignalHandlers(). This param is still required to satisfy the requirements of @c signal.signal().
-    #  @param frame Not used. Provided here to satisfy the requirements of @c signal.signal().
-    #  @return Does not return. Raises SystemExit exception.
-    #  @exception SystemExit()
-    def _completeReport( self, signum, frame ):
-        if self._runJobProcess is not None:
-            print ("Signal handler: Killing %s with %s." % ( self._runJobProcess.pid, signal.SIGTERM ))
-            os.kill( self._runJobProcess.pid, signal.SIGTERM )
-        # Restoring signal handlers.
-        setDefaultSignalHandlers()
-        self.addError( acronym = 'ATH_FAILURE', severity = AtlasErrorCodes.FATAL, message = 'Signal %s received.' % signum )
-        while self._runJobProcess and self._runJobProcess.poll() is None:
-            try:
-                line = self._runJobProcess.stdout.readline() or ''
-            except IOError:
-                break
-            if line and not self._logFile.closed:
-                self._logFile.write(line)
-        try:
-            self._logFile.close()
-        except Exception:
-            pass
-        try:
-            rc = self._runJobProcess.returncode
-        except Exception:
-            rc = None
-        if rc is None or rc < 0:  # test None first: None < 0 raises TypeError in python 3
-            os.system( 'dmesg > dmesg_trf.txt' )
-            rc = 8 #ExitCodes---> 8: an unknown exception occurred
-        print ("Signal handler: athCode=%s" % rc)
-        print ("Signal handler: athAcronym=%s" % str(ExitCodes.what(rc) ))
-        # overwrite producer for new errors that are added
-        self._jobReport.setProducer(self.name(),self.version())
-        # adding the exit status from athena        
-        self._jobReport.addInfo( JobInfo('athCode', str(rc) ) )
-        self._jobReport.addInfo( JobInfo('athAcronym', str(ExitCodes.what(rc) ) ) )
-        # Alter behaviour of athenaReport depending on the following flags.
-        self._jobReport.setIgnoreUnknown( self._ignoreUnknown )
-        self._jobReport.setIgnoreAll( self._ignoreAll )
-        self._jobReport.setIgnoreErrors( self._ignoreErrors )
-        self.dumpReport(self._endReportOptions,self._endReportNotOptions)
-        self.writeReports()
-        ec = self._jobReport.exitCode()
-        print ("Signal handler: Raising SystemExit with exit code %s." % ec)
-        raise SystemExit( ec )
-
-    ## @brief Default skeleton job options file name.
-    #  @details The filename is based on the name of the transform i.e. <tt> skeleton.[TRANSFORM_NAME].py </tt>
-    #  @return String
-    def defaultSkeletonFilename(self):
-        return 'skeleton.' + self._name + '.py'
-
-    ## Getter function for skeleton job options file name.
-    #  @see _skeleton attribute.
-    #  @return String
-    def skeletonFilename(self):
-        return self._skeleton
-
-    ## Check if the skeleton job option has been set.
-    #  @return Boolean 
-    def hasSkeleton(self):
-        return self._skeleton is not None
-
-    ## @brief Getter function for current log file name.
-    #  @see _logFilename attribute.
-    #  @return String
-    def logFilename(self):
-        return self._logFilename
-
-    ## @brief File name of file containing run arguments used.
-    #  @details The filename is based on the name of the transform i.e. <tt> runargs.[TRANSFORM_NAME].py </tt>
-    #  @return String
-    def runArgsFilename(self):
-        return 'runargs.' + self._name + '.py'
-
-    ## Getter function for name of runargs.RunArguments instance.
-    #  @see _runArgsName class attribute.
-    #  @return String
-    def runArgsName(self):
-        return JobTransform._runArgsName
-
-    ## Getter function for object containing the various run arguments associated with the transform.
-    #  @see _runArgs attribute.
-    #  @return runargs.RunArguments instance
-    def runArgs(self):
-        return self._runArgs
-
-    ## Getter function for object containing run options associated with the transform.
-    #  @see _runOpts attribute.
-    #  @return runargs.RunOptions instance
-    def runOptions(self):
-        return self._runOpts
-
-    ## Getter function for version associated with the transform.
-    #  @see _version attribute.
-    #  @return String
-    def version(self):
-        return self._version
-
-    ## Getter function for object containing the properties of the transform.
-    #  @see _config attribute.
-    #  @return TransformConfig.TransformConfig instance
-    def runConfig(self):
-        return self._config
-
-    ## Getter function for the output files associated with the transform.
-    #  @see _outputFiles attribute.
-    #  @return List of basic_trfarg.OutputFileArg instances.
-    def outputFiles(self):
-        return self._outputFiles
-    
-    ## Getter function for the input files associated with the transform.
-    #  @see _inputFiles attribute.
-    #  @return List of basic_trfarg.InputFileArg instances.
-    def inputFiles(self):
-        return self._inputFiles
-    
-    ## Getter function for the job report associated with the transform.
-    #  @see _jobReport attribute.
-    #  @return JobReport.JobReport instance.
-    def jobReport(self):
-        return self._jobReport
-
-    ## Getter function for the maximum number of output events.
-    #  @see _maxEvents attribute.
-    #  @return Integer.
-    def maxEvents(self):
-        return self._maxEvents
-
-    ## Getter function for minimum number of input events.
-    #  @see _minEvents attribute.
-    #  @return Integer.
-    def minEvents(self):
-        return self._minEvents
-    
-    ## @brief Customise printout for job report.
-    #  @param options List of items (or a comma-delimited string of items) to print. Defaults to @b None.
-    #  @param notOptions List of items (or a comma-delimited string of items) to omit. Defaults to @b None.
-    #  @warning @a notOptions items take precedence over @a options items.
-    #  @see _jobReportOptions and _jobReportNotOptions attributes.
-    #  @return None
-    #  @remarks allowed values for @a options and @a notOptions are: 
-    #  @li @c '' (empty string) - print everything
-    #  @li @c 'Summary' - print the Summary section
-    #  @li @c 'Errors'  - print the Error section ( @c FATALs, @c ERRORs and @c WARNINGs)
-    #  @li @c 'Environment' - print all shell environment variables
-    #  @li @c 'Machine' - print the machine section (CPU, binary version etc.)
-    #  @li @c 'AtlasRelease' - print the Atlas release version number
-    #  @li @c 'DBRelease' - print database release version number
-    #  @li @b [ITEM] - print information on @b [ITEM] 
-    def setJobReportOptions(self,options=None,notOptions=None):
-        if options is not None:
-            # convert string to list
-            if isinstance(options, str):
-                self._jobReportOptions = options.split(',')
-            else:
-                self._jobReportOptions = copy(options)
-        if notOptions is not None:
-            # convert string to list
-            if isinstance(notOptions, str):
-                self._jobReportNotOptions = notOptions.split(',')
-            else:
-                self._jobReportNotOptions = copy(notOptions)
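-
-    # Illustrative calls (a hypothetical sketch, assuming `trf` is a configured
-    # JobTransform instance; not taken from production code):
-    #  @code
-    #  trf.setJobReportOptions( options='Summary,Errors', notOptions='Environment' )
-    #  trf.setJobReportOptions( options=['Summary','Errors'] )  # list form is equivalent
-    #  @endcode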
-
-    ## @brief Print a customised version of the job report based on the @a options and @a notOptions to @a output.
-    #  @param options List of items (or a comma-delimited string of items) to print. Defaults to @b None.
-    #  @param notOptions List of items (or a comma-delimited string of items) to omit. Defaults to @b None.
-    #  @param output File-like object. Defaults to @em sys.stdout.
-    #  @see setJobReportOptions() remarks for permitted values for @a options and @a notOptions.
-    #  @return None
-    def dumpReport(self,options=None,notOptions=None,output=sys.stdout):
-        # options is the AND of stored and requested options
-        if not options: # catches None, '' and []
-            # use default
-            dumpOpts = self._jobReportOptions
-        else:
-            # convert string to list
-            if isinstance(options, str): options = options.split(',')
-            if not self._jobReportOptions:
-                # allow all
-                dumpOpts = options
-            elif 'None' in self._jobReportOptions:
-                # print nothing
-                return  # no point in doing anything else
-            else:
-                dumpOpts = [ opt for opt in options if opt in self._jobReportOptions ]
-                # if no overlap, print nothing
-                if not dumpOpts:
-                    return  # no point in doing anything else
-        # notOptions is the OR of stored and requested options
-        if notOptions is None:
-            # use default
-            dumpNotOpts = self._jobReportNotOptions
-        else:
-            if isinstance(notOptions, str): notOptions = notOptions.split(',')
-            dumpNotOpts = notOptions
-            for opt in self._jobReportNotOptions:
-                if opt not in dumpNotOpts: dumpNotOpts.append( opt )
-        self._jobReport.dump( dumpOpts, dumpNotOpts, output )
-
-    ## @brief Top-level job report writer.
-    #  @details Write various job reports to file, depending on the error code.
-    #  @warning All reports apart from the metadata in XML format (@em metadata.xml) are written out 
-    #  if the transform exitcode is not 0 (SUCCESS).
-    #  @see JobReport.JobReport.write(), JobReport.JobReport.writeAll(), JobReport.JobReport.writeTXT(), 
-    #  JobReport.JobReport.writeJobInfoXML() and JobReport.JobReport.writeGPickle() functions.
-    #  @return None
-    def writeReports(self):
-        if self._jobReport.exitCode() == 0:
-            self._jobReport.writeAll( writeFinalCopy = self._lastInChain )
-        else:
-            self._jobReport.write( writeFinalCopy = self._lastInChain )
-            self._jobReport.writeTXT( writeFinalCopy = self._lastInChain )
-            self._jobReport.writeJobInfoXML( writeFinalCopy = self._lastInChain )
-            self._jobReport.writeGPickle( writeFinalCopy = self._lastInChain ) 
-            # do not write metadata.xml
-
-    ## @brief Setter function for the minimum and maximum number of output events.
-    #  @param minEvents Minimum number of output events. If @b None is given, there is no change.
-    #  If set to @b 0, minimum event checking is disabled. 
-    #  @param maxEvents Maximum number of output events. Defaults to @b None (i.e. no change).
-    #  If set to @b 0, maximum event checking is disabled.
-    #  @see _maxEvents and _minEvents attributes.
-    #  @return None
-    def setMinMaxEvents(self,minEvents,maxEvents=None):
-        if minEvents is None:
-            return
-        if minEvents < 0 or ( maxEvents is not None and ( maxEvents < 0 or maxEvents < minEvents ) ):
-            self.logger().info( "maxEvents (%s) should be non-negative and greater than or equal to minEvents (%s)", maxEvents, minEvents )
-            return
-        self._minEvents = minEvents
-        self.logger().info("Setting minimum number of output events to %d", minEvents)
-        if maxEvents is not None:
-            self._maxEvents = maxEvents
-            self.logger().info("Setting maximum number of output events to %d", maxEvents)
-
-    ## @brief Ensure that the exit code is not affected by the presence of unknown errors.
-    #  @details Aligns the transform exit code with that of @c athena.py (@em only if the latter was successful) 
-    #  even if errors were detected by the transform.
-    #  @warning Deprecated. Use enableMaskAllErrors() instead.
-    #  @see _ignoreUnknown and _ignoreErrors attributes.
-    #  @return None
-    def enableIgnoreUnknownErrors( self ):
-        self._ignoreUnknown = True
-        self._ignoreErrors = True
-        self.logger().warning( "--ignoreunknown is deprecated. Using --ignoreerrors=True instead." )
-
-    ## @brief Ignore @em all errors during execution of the transform.
-    #  @details Allow transform to proceed as far as possible. All validation routines are omitted as well.
-    #  @see _ignoreAll attribute.
-    #  @return None
-    def enableIgnoreAllErrors( self ):
-        VALIDATION_DICT[ 'ALL'] = False # remove all validation
-        self._ignoreAll = True
-
-    ## @brief Enable the tcmalloc memory allocation mechanism.
-    #  @warning Deprecated. A default memory allocation mechanism is no longer set.
-    #  The default should be (and is now) dictated solely by the application (i.e. @c athena.py).
-    #  @return None
-    def enableTCMalloc( self ):
-        self.logger().warning( "tcmalloc is the current default. Ignoring --tcmalloc option." )
-
-    ## @brief Add the @c --leakcheckexecute option to @c athena.py 
-    #  @details Ignored if @c --tcmalloc option found in the @em _athenaOptions list.
-    #  @see addAthenaOption() function used to set the @c --leak-check-execute option.
-    #  @return None  
-    def enableLeakCheckExecute( self ):
-        if "--tcmalloc" in self._athenaOptions:
-            self.logger().warning( "--leakcheckexecute cannot be used with --tcmalloc. Ignoring --leakcheckexecute option." )
-        else:
-            self.addAthenaOption( "--leak-check-execute" )
-
-    ## @brief Set the RSS memory limit.
-    #  @param val The value to set the RSS memory limit to.
-    #  @see _genericOptions attribute which will contain the RSS option.
-    #  @return None
-    def setRSS( self, val ):
-        if val is None:
-            return
-        self._genericOptions.append( "--rss=%s" % val )
-
-    ## @brief Set the VMEM limit.
-    #  @param val The value to set the VMEM limit to.
-    #  @see _genericOptions attribute which will contain the VMEM option.
-    #  @return None
-    def setVMEM( self, val ):
-        if val is None:
-            return
-        self._genericOptions.append( "--vmem=%s" % val )
-
-    ## @brief Include more metadata to retrieve from the log file created.
-    #  @details Callback function associated with the command line option @c --extrametadatadict.
-    #  Add user-defined dictionary entries to existing @em _extraMetadataDict dictionary.
-    #  @see JobReport._extraMetadataDict attribute.
-    #  @remarks The file provided must define the extraMetadataDict dictionary. Refer to extraMetadata.py.
-    #  @return None
-    def updateExtrasMetadataDict( self, fname ):
-        namespace = {}
-        try:
-            with open( fname ) as mdFile:
-                exec( compile( mdFile.read(), fname, 'exec' ), namespace )  # Python 3 replacement for execfile
-        except IOError:
-            self.logger().warning( "Error reading file %s containing extra metadata.", fname )
-        else:
-            _extraMetadataDict.update( namespace[ 'extraMetadataDict' ] )
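-
-    # A minimal sketch of such a file (hypothetical content; the only requirement,
-    # per the remarks above, is that it define the extraMetadataDict dictionary):
-    #  @code
-    #  import re
-    #  extraMetadataDict = { 'geometryVersion' : re.compile( r'GeoModelSvc.*tag\s+(\S+)' ),
-    #                        'customFlag'      : 'myValue' }  # non-regex values are copied verbatim
-    #  @endcode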
-
-    ## @brief Set transform to run in test mode.
-    #  @details Use with @c --omitvalidation option to omit specified checks from all areas (i.e. not just within validation routines). 
-    #  @see omitValidation() function and trfutil.TRF_SETTING dictionary.
-    #  @return None
-    def enableTest( self ):
-        self.logger().info( 'Running in test mode.' )
-        TRF_SETTING[ 'testrun' ] = True
-        TRF_SETTING[ 'validationRetry' ] = 0
-        TRF_SETTING[ 'TRFRetry' ] = 0
-
-    ## @brief Set the pre-execute command string.
-    #  @details Setter function for the @em _PECommand attribute. The string of code is executed before athena.py is run.
-    #  @param peCommandStr This string is passed to the @c athena.py @c -c option.
-    #  @see _PECommand attribute.
-    #  @return None
-    def setPECommand( self, peCommandStr ):
-        self._PECommand = peCommandStr
-
-    ## Setter function that allows the transform to execute routines based on the assumption that Monte Carlo inputs are used.
-    #  @param val A boolean value is expected.
-    #  @see _mcInput attribute.
-    #  @return None
-    def enableMCInput( self, val ):
-        self._mcInput = val
-
-    ## Setter function to force the transform to access the SQLite database (rather than the ORACLE database).
-    #  @param val A boolean value is expected.
-    #  @see _useSQLite attribute.
-    #  @return None
-    def enableSQLite( self, val ):
-        self._useSQLite = val
-
-    ## @brief Allow various validation routines to be omitted.
-    #  @param omitStr Valid strings are based on the keys of the trfutil.VALIDATION_DICT dictionary. Multiple values can be comma-delimited (without spaces). Use 'ALL' to omit all validation routines, 'NONE' to remove all validation omissions.
-    #  @see trfutil.VALIDATION_DICT dictionary.
-    #  @return None
-    def omitValidation( self, omitStr ):
-        omitList = omitStr.split( ',' )
-        if 'NONE' in omitList: # remove all validation omissions
-            for oEntry in VALIDATION_DICT:
-                VALIDATION_DICT[ oEntry ] = None
-        else:
-            for oEntry in omitList:
-                VALIDATION_DICT[ oEntry ] = False
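-
-    # Illustrative use (hypothetical key names; valid keys come from trfutil.VALIDATION_DICT):
-    #  @code
-    #  trf.omitValidation( 'checkLogFile,testEventMinMax' )  # skip two specific routines
-    #  trf.omitValidation( 'ALL' )                           # skip every validation routine
-    #  trf.omitValidation( 'NONE' )                          # re-enable everything
-    #  @endcode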
-
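-    ## @brief Randomly decide whether this job exports its data to AMI.
-    #  @param percentage Sampling fraction in the range 0.0-1.0; the export is enabled
-    #  when a uniform random draw falls below this value.
-    #  @see _exportToAmi attribute.
-    #  @return None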
-    def uploadToAMI(self, percentage):
-        import random
-        random.seed()
-        if random.random() < float(percentage):
-            self._exportToAmi = True
-            print ('trf.py will send to AMI.')
-        else:
-            self._exportToAmi = False
-            print ('trf.py will NOT send to AMI.')
-    
-    
-    ## @brief Ensure that the exit code is not affected by the presence of unknown errors.
-    #  @details Aligns the transform exit code with that of @c athena.py (@em only if the latter was successful) 
-    #  even if errors were detected by the transform.
-    #  @param val Boolean value expected. For backward-compatibility, also accepts 'ALL' as a synonym for 'True'.
-    #  @see _ignoreUnknown and _ignoreErrors attributes and the JobReport.JobReport.exitCode() function.
-    #  @remarks All masked errors are still recorded, just not taken into account when generating the exit code.
-    #  @return None
-    def enableMaskAllErrors( self, val ):
-        try:
-            val = val.upper()
-        except Exception:
-            pass
-        else:
-            if val == 'ALL':
-                val = True
-            else:
-                val = False
-                self.logger().info( "Unknown value. --ignoreerrors=[True|False]. Setting to False." )
-        self._ignoreErrors = val or self._ignoreUnknown
-        if self._ignoreErrors:
-            self._ignoreUnknown = True
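-
-    # Command-line flavour of this setter (hypothetical transform name):
-    #  @code
-    #  MyExample_trf.py --ignoreerrors=True inputFile=in.pool.root
-    #  MyExample_trf.py --ignoreerrors=ALL  inputFile=in.pool.root  # legacy spelling, mapped to True
-    #  @endcode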
-
-    ## Add user-defined ignore error filters.
-    #  @param fname Name of file containing the filter. The file format must be the same as @c share/atlas_error_ignore.db.
-    #  @see share/atlas_error_ignore.db.
-    #  @return None
-    def updateIgnoreFilters( self, fname ):
-        AtlasErrorCodes.readIgnorePatterns() # read default file in release
-        AtlasErrorCodes.readIgnorePatterns( fname ) # add user file
-
-    ## Enable new format for jobReport.gpickle and metadata.xml.
-    #  @param val Boolean value expected.
-    #  @see JobReport.JobReport.useNewMetadataFormat attribute.
-    #  @return None
-    def useNewMeta( self, val ):
-        self._jobReport.useNewMetadataFormat = val
-
-    ## @brief Set the Athena message level and Python logging level.
-    #  @details Value for level has to be one of "ALL", "VERBOSE", "INFO", "WARNING", "ERROR", "DEBUG", "FATAL".
-    #  @param level Logging level as a String
-    #  @warning Deprecated.
-    def _setMessageLevel(self,level):
-        if level is None:
-            return
-        if ( level in [ "ALL", "VERBOSE", "INFO", "WARNING", "ERROR", "DEBUG", "FATAL" ] ):
-            TransformLogger.setLoggerLevel(self,level)
-            self.addAthenaOption( "-l %s" % level )
-
-    ## Setter function for job transform parent.
-    #  @param parentTrf Job transform parent instance to be associated with.
-    #  @remarks The transform inherits particular runtime options (as specified by @em _sharedRunOpts) from its parent.
-    #  @see _sharedRunOpts attribute.
-    #  @warning Not sure of the purpose of this function since sub-transforms in composite transforms 
-    #  are given a full set of relevant options. This function may be removed in the future.
-    #  @return None
-    def setParent(self,parentTrf):
-        # transfer shared command line options from parent to child
-        parentOpts = parentTrf.runOptions()
-        for n in self._sharedRunOpts:
-            try:
-                setattr(self._runOpts,n,getattr(parentOpts,n))
-            except Exception:
-                pass
-
-    ## Add or replace options at the command line for @c athena.py
-    #  @param option A single string of all command line options to be passed to @c athena.py. Multiple options should be space-delimited.
-    #  @remarks If @a option is already present in the athena options list, its value will be overwritten.
-    #  @see _athenaOptions attribute.
-    #  @return None
-    def addAthenaOption(self,option):
-        option = option.strip()
-        if not option:
-            return
-        if option in ( '--stdcmalloc', '--leak-check-execute' ):
-            try:
-                self._athenaOptions.remove( '--tcmalloc' )
-            except ValueError:
-                pass
-            else:
-                self.logger().warning( '%s cannot be used with the default --tcmalloc option. --tcmalloc option removed.', option )
-        # remove old option if needed
-        firstWord = option.split()[0]
-        for opt in self._athenaOptions:
-            if opt.split()[0] == firstWord:
-                self._athenaOptions.remove(opt)
-                break
-        # add it to the list
-        self._athenaOptions.append( option )
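-
-    # Illustrative behaviour (hypothetical option values; replacement is keyed on
-    # the first word of the option string):
-    #  @code
-    #  trf.addAthenaOption( '-l INFO' )       # appends '-l INFO'
-    #  trf.addAthenaOption( '-l DEBUG' )      # same first word '-l': replaces '-l INFO'
-    #  trf.addAthenaOption( '--stdcmalloc' )  # also removes a default '--tcmalloc', with a warning
-    #  @endcode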
-        
-    ## Reset skeleton template 
-    #  @remarks This function serves to invalidate the existing skeleton template forcing it to be 
-    #  recreated on demand to ensure the latest version is used.
-    #  @see _skeletonTemplate attribute.
-    #  @return None
-    def updateFullTemplates(self):
-        # Invalidate full templates, so they get recreated when needed
-        self._skeletonTemplate = ''
-
-    ## Adds template entries to an existing template.
-    #  @param name Name of template to add new template entry strings to.
-    #  @param *vargs List of template entry strings to be added.
-    #  @remarks Each time a template is added/updated, updateFullTemplates() is called to ensure that 
-    #  the top level template (i.e. @em _skeletonTemplate) is refreshed.
-    #  @see updateFullTemplates() function.
-    #  @return None  
-    def addTemplates(self, name, *vargs):
-        # build the attribute name of the template to extend
-        name = '_%sTemplate' % (name)
-        val = getattr(self, name)
-        for arg in vargs: val += str(arg) + os.linesep
-        setattr(self, name, val)
-        self.updateFullTemplates()
-
-    ## Associates authors to the transform
-    #  @param authorList Python list of trfutil.Author instances. 
-    #  Other values accepted include a single trfutil.Author instance and a comma-delimited string of author names.
-    #  @exception trferr.TransformDefinitionError is raised when @em authorList is not recognised.
-    #  @return None
-    def addAuthors(self, authorList):
-        if not authorList: return
-        # make sure I got a list
-        authorsAdd = authorList
-        if isinstance(authorList,Author):
-            authorsAdd = [ authorList ]
-        else:
-            if isinstance(authorList, str):
-                authorsAdd = [ Author(a) for a in authorList.split(',') ]
-            elif isinstance(authorList, list):
-                pass
-            else:
-                raise TransformDefinitionError('Author type %s not supported' % type(authorList))
-        self._authors += authorsAdd
-        # update authors template
-        authStrings = [ str(a) for a in authorsAdd ]
-        firstLine  = "# Authors: "
-        otherLines = "#" + ' '*(len(firstLine)-1)
-        sep = os.linesep + otherLines
-        i = 0
-        if not self._authorsTemplate:
-            self.addTemplates( 'authors', "%s%s" % (firstLine,authStrings[i]) )
-            i += 1
-        if i < len(authorsAdd):
-            self.addTemplates( 'authors', "%s%s" % (otherLines,sep.join(authStrings[i:])) )
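-
-    # Equivalent call styles (hypothetical author names):
-    #  @code
-    #  trf.addAuthors( 'A. Author,B. Builder' )                        # comma-delimited string
-    #  trf.addAuthors( [ Author('A. Author'), Author('B. Builder') ] ) # list of Author instances
-    #  trf.addAuthors( Author('C. Coder') )                            # single Author instance
-    #  @endcode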
-
-    ## Add a command line argument definition.
-    #  @param arg An Argument class (or any of its sub-classes) instance.
-    #  @param default Assign a default value to the argument to be added. 
-    #  A @em None value makes the argument @em optional. Defaults to @em None.
-    #  @remarks Arguments added are assigned positional numbers (for command line invocation)
-    #  in the order they were added.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @warning Add all @em non-optional arguments before @em optional ones.
-    #  @exception trferr.TransformDefinitionError is raised when the argument is not @em fully-specified
-    #  (i.e. an intermediate class of Argument, or the Argument class itself, neither of which is designed to
-    #  be instantiated).
-    #  @exception trferr.TransformDefinitionError is raised if an @em optional argument (i.e. an argument
-    #  that does not require a value as it has been assigned a default one) is followed by a @em non-optional argument.
-    #  @exception trferr.TransformArgumentError is raised when a duplicate argument is given.
-    #  @return None
-    def _addArgument(self, arg, default=None):
-        # ensure usage of fully specified arguments
-        if not arg.isFullArgument():
-            raise TransformDefinitionError( 'Class %s is not a fully specified Argument class.' % arg.__class__.__name__ )
-        name = arg.name()
-        key = name.lower()
-        # check that no two arguments share the same name
-        if key in self._namedArgs:
-            raise TransformArgumentError( 'Duplicate argument name: %s' % name )
-        # add to named and positional lists
-        self._namedArgs[key] = arg
-        self._positionalArgs.append( arg )
-        arg.setPosition( len(self._positionalArgs) )
-        # handle default
-        if default is not None:
-            arg.setDefault(default)
-            self.setArgument(key,default)
-            # argument is optional
-            self._optionalArgs[key] = arg
-        if not arg.hasDefault():
-            # argument is not optional
-            # check that all optional arguments are at the end
-            if len(self._optionalArgs) > 0:
-                raise TransformDefinitionError( 'Non-optional argument (%s) after optional argument(s)' % name )
-            self._requiredArgs[key] = arg
-
-    ## Add arguments with the preRunAction attribute.
-    #  @param arg basic_trfarg.Argument instance.
-    #  @param prepend If true, add action to the start of the list, instead of appending it (default false)
-    #  @warning This function is not designed to be used directly.
-    #  @return None
-    def _addPreRunAction( self, arg, prepend = False ):
-        if prepend:
-            self._preRunActions.insert(0, arg)
-        else:
-            self._preRunActions.append( arg )
-
-    ## Add arguments with the postRunAction attribute.
-    #  @param arg basic_trfarg.Argument instance.
-    #  @param prepend If true, add action to the start of the list, instead of appending it (default false)
-    #  @warning This function is not designed to be used directly.
-    #  @return None
-    def _addPostRunAction( self, arg, prepend = False ):
-        if prepend:
-            self._postRunActions.insert(0, arg)
-        else:
-            self._postRunActions.append( arg )
-
-    ## Add a PreJobOptionsFile argument.
-    #  @param jo trfutil.PreJobOptionsFile instance.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @return None
-    def _addPreJobOptionsFile( self, jo ):
-        self._preJobOptionsFiles.append( jo )
-
-    ## Add a PostJobOptionsFile argument.
-    #  @param jo trfutil.PostJobOptionsFile instance.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @return None
-    def _addPostJobOptionsFile( self, jo ):
-        self._postJobOptionsFiles.append( jo )
-
-    ## Add a TransformLogger argument.
-    #  @param log TransformLogger instance.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @return None
-    def _addLogger( self, log ):
-        log.setLoggerParentName( self.loggerName() )
-        self._loggers.append( log )
-
-    ## Add an output file argument.
-    #  @param outfile A basic_trfarg.OutputFileArg instance.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @return None
-    def _addOutputFile(self,outfile):
-        if outfile not in self._outputFiles: 
-            self._outputFiles.append( outfile )
-
-    ## Add an input file argument.
-    #  @param infile A basic_trfarg.InputFileArg instance.
-    #  @warning This function is not designed to be used directly. Use add() instead.
-    #  @return None
-    def _addInputFile(self,infile):
-        if infile not in self._inputFiles: 
-            self._inputFiles.append( infile )
-
-    ## Add an argument as part of the transform customisation procedure.
-    #  @param arg1 basic_trfarg.Argument instance to be added.
-    #  @param *vargs Optional list of parameter values specific to the argument to be added.
-    #  @param **kwargs Optional dictionary of parameter key/value pairs specific to the argument to be added.
-    #  @exception trferr.TransformDefinitionError is raised if @em arg1 is a full_trfarg.OptionArg not recognised by the transform.
-    #  @warning Optional parameters @em vargs and/or @em kwargs will be passed directly to corresponding argument 
-    #  creating functions WITHOUT FURTHER CHECKS so an exception will be raised if an unrecognised parameter is used.
-    #  @return None
-    def add(self, arg1, *vargs, **kwargs):
-        # substitute OptionArg (placeholder argument) by the real option
-        if isinstance(arg1,OptionArg):  
-            try:
-                arg1 = self._optionArgs[arg1.name()]
-            except KeyError:
-                raise TransformDefinitionError( "Option \"%s\" not supported (requested with OptionArg)" % (arg1.name()) )
-        # add the config object to the config argument
-        if isinstance(arg1,JobConfigArg): arg1.setConfig(self._config)
-        # add object to various places
-        if isinstance(arg1,Argument):
-            self._addArgument(arg1,*vargs,**kwargs)
-            if arg1.metaType().startswith('output'): self._addOutputFile( arg1 )
-            if arg1.metaType().startswith('input') and arg1.name().startswith('input'): self._addInputFile( arg1 )
-        if isinstance(arg1,Author): self.addAuthors(arg1,*vargs,**kwargs)
-        if hasattr(arg1,'preRunAction'): self._addPreRunAction(arg1)
-        if hasattr(arg1,'postRunAction'): self._addPostRunAction(arg1)
-        if isinstance(arg1,PreJobOptionsFile): self._addPreJobOptionsFile(arg1)
-        if isinstance(arg1,PostJobOptionsFile): self._addPostJobOptionsFile(arg1)
-        if isinstance(arg1,TransformLogger): self._addLogger(arg1)
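-
-    # Typical customisation sketch from a transform constructor (the argument class
-    # names below are hypothetical examples of full_trfarg classes):
-    #  @code
-    #  self.add( InputRDOFileArg() )
-    #  self.add( OutputESDFileArg() )
-    #  self.add( MaxEventsArg(), default=-1 )  # optional (defaulted) arguments must come last
-    #  @endcode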
-
-    ## @brief Generate the filename for the job option file responsible for Athena messaging service.
-    #  @details The filename is based on the name of the transform i.e. <tt> [TRANSFORM_NAME]_messageSvc_jobOptions.py </tt>
-    #  @return String
-    def messageSvcFilename(self):
-        return self.name() + '_messageSvc_jobOptions.py'
-
-    ## @brief Generate the contents of the Athena messaging service job options file.
-    #  @details @em LoggedMessageSvc is the default service. 
-    #  The fallback (if the package is not available) is the standard @em MessageSvc.
-    #  This function creates the required job options file.
-    #  @return None
-    def _makeMessageSvcJobOptions(self):
-        with open( self.messageSvcFilename(), 'w'  ) as joFile:
-            jo = [  CommentLine("Replacing MessageSvc with LoggedMessageSvc").bigComment() ,
-                    "print ('Using AthenaServices.SummarySvc...')",
-                    "from AthenaServices import SummarySvc",
-                    "from AthenaServices.AthenaServicesConf import AthenaSummarySvc",
-                    "AthenaSummarySvc.SummaryFile = 'AthenaSummary_%s.txt'" % self.name(),
-                    "SummarySvc.useAthenaSummarySvc()" ]
-            joFile.write( os.linesep.join(jo) + os.linesep )
-
-    ## Setter function for option arguments.
-    #  @param optName Name of the option argument concerned. 
-    #  @param optValue The value to set the option argument to.
-    #  @exception trferr.TransformArgumentError is raised if @em optName is not recognised.
-    #  @see _optionArgs attribute.
-    #  @return None
-    def setOptionArg(self,optName,optValue):
-        key=optName.lower()
-        try:
-            self._optionArgs[key].setValue(optValue)
-        except KeyError:
-            raise TransformArgumentError( "Option %s not supported" % (optName,) )
-
-    ## Check if @em optName is a recognised option argument.
-    #  @param optName Name of the option argument concerned.
-    #  @see _optionArgs attribute.
-    #  @return Boolean
-    def isOptionArg(self,optName):
-        return optName.lower() in self._optionArgs
-
-    ## Setter function for pre-defined arguments.
-    #  @param argName Name of the argument concerned. 
-    #  @param argValue The value to set the argument to.
-    #  @exception trferr.TransformArgumentError is raised if @em argName is not recognised.
-    #  @remarks Both the associated argument and the corresponding run argument entry are updated.
-    #  @see _namedArgs and _runArgs attributes.
-    #  @return None
-    def setArgument(self,argName,argValue):
-        key=argName.lower()
-        try:
-            arg = self._namedArgs[key]
-            arg.setValue(argValue) # get back the python type of the value
-            setattr(self._runArgs, arg.name(), arg.value())
-        except KeyError:
-            raise TransformArgumentError( '%s does not have an argument named %s' % (self.name(), argName) )
-
-    ## @brief List the arguments of this transform.
-    #  @details Print list of arguments (in positional order) of this transform instance.
-    #  @return None
-    def ls(self):
-        for arg in self._positionalArgs:
-            self.logger().info( '%s=%s # %s', arg.name(), arg.value(), arg.getHelp() )
-
-    ## Getter function for the job transform name.
-    #  @see _name attribute.
-    #  @return String
-    def name(self):
-        return self._name
-
-    ## Getter function for the job transform file name.
-    #  @see _filename attribute.
-    #  @return String
-    def filename(self):
-        return self._filename
-
-    ## @brief Getter function for run argument template.
-    #  @details Run argument template is generated on demand from individual templates 
-    #  associated with all arguments of the current transform. 
-    #  The final run argument template will also include other miscellaneous templates.
-    #  @remarks A <tt> maxEvents = -1 </tt> entry is added to the run argument template 
-    #  if the @em maxevents argument was omitted at the command line.
-    #  This allows new-style transforms based on Reco_trf.py to optionally specify
-    #  the @em maxevents argument.
-    #  @see _runArgsArgumentsTemplate attribute.
-    #  @return String
-    def getRunArgsTemplate(self):
-        self._runArgsArgumentsTemplate = ''
-        for arg in self._positionalArgs:
-            self.addTemplates( 'runArgsArguments',  arg.runArgsComment(),  arg.runArgsTemplate(self.runArgsName()) )
-        # This allows Reco_trf.py and related transforms to omit specifying maxEvents argument.
-        if 'maxevents' not in self._namedArgs:
-            self.addTemplates( 'runArgsArguments',  '\n# Explicitly added. MaxEvents argument not provided at commandline.',  '%s.maxEvents = -1' % self.runArgsName() )
-        lines = [ self._runArgsHeaderTemplate,
-                  self._authorsTemplate,
-                  CommentLine.hashLine(),
-                  self._runArgsCodeTemplate,
-                  self._runArgsArgumentsTemplate ]
-        return os.linesep.join( lines ) 
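-
-    # An abridged, hypothetical rendering of the resulting runargs.[TRANSFORM_NAME].py:
-    #  @code
-    #  # Run arguments file auto-generated by JobTransform
-    #  runArgs = RunArguments()
-    #  runArgs.inputFile = 'myInput.pool.root'
-    #  runArgs.maxEvents = -1  # added explicitly when not given on the command line
-    #  @endcode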
-
-    ## Getter function for the skeleton template.
-    #  @remarks The template is only generated if it does not already exist.
-    #  @see _skeletonTemplate attribute
-    #  @return String
-    def getSkeletonTemplate(self):
-        if not self._skeletonTemplate:
-            lines = [ self._skeletonHeaderTemplate,
-                      self._authorsTemplate,
-                      CommentLine.hashLine(),
-                      self._skeletonCodeTemplate ]
-            self._skeletonTemplate = os.linesep.join( lines ) 
-        return self._skeletonTemplate
-
-    ## Getter function for an argument based on its name.
-    #  @param name Name of the argument concerned.
-    #  @remarks The @em name used is case-insensitive.
-    #  @exception trferr.TransformArgumentError is raised if the @em name given is not recognised.
-    #  @return basic_trfarg.Argument instance
-    def getArgument(self,name):
-        key = name.lower()
-        try:
-            return self._namedArgs[key]
-        except KeyError:
-            raise TransformArgumentError("JobTransform %s does not have argument %s" % (self._name,name) )
-
-    ## @brief Getter function for argument of a given type.
-    #  @details The first argument whose argumentType() matches @em typename
-    #  (i.e. the class name without the trailing 'Arg') is retrieved.
-    #  @param typename basic_trfarg.Argument class name without the suffix @em Arg.
-    #  @see basic_trfarg.Argument.argumentType() function.
-    #  @return basic_trfarg.Argument instance (or None if unsuccessful)
-    def getArgumentOfType(self,typename):
-        for arg in self._positionalArgs:
-            if arg.argumentType() == typename: return arg
-        self.logger().warning( "Argument of %s type not found. Returning None.", typename )
-        return None
-
-    ## Getter function for the list of arguments in positional order.
-    #  @see _positionalArgs attribute.
-    #  @return List of basic_trfarg.Argument instances.
-    def argumentList(self):
-        return self._positionalArgs
-
-    ## Retrieve current arguments as a dictionary with the argument name and its original value as the key and value, respectively.
-    #  @return Dictionary <tt> { arg1.name() : arg1.originalValue(), ... } </tt>
-    def argumentOriginalDict(self):
-        d = { }
-        for arg in self._positionalArgs:
-            d[arg.name()] = arg.originalValue()
-        return d
-
-    ## Retrieve current arguments as a dictionary with the argument name and its current value as the key and value, respectively.
-    #  @return Dictionary instance <tt> { arg1.name() : arg1.value(), ... } </tt>
-    def argumentValueDict(self): 
-        d = { }
-        for arg in self._positionalArgs:
-            d[arg.name()] = arg.value()
-        return d
-
-    ## Write all current run arguments to file
-    #  @details The run argument template is populated with @em current argument values and written out to file.
-    #  @remarks A copy of the @em original argument values is also written to a corresponding pickle file.
-    #  @return None
-    def writeRunArgs(self):
-        filename = self.runArgsFilename()
-        self.logger().info( 'Writing runArgs to file \"%s\"', filename )
-        with open( filename, 'w' ) as f:
-            f.write( self.getRunArgsTemplate() % self.argumentValueDict() )
-        filename = os.path.splitext(filename)[0] + '.gpickle'
-        self.writeArgsToPickle(filename)
-        
-    ## Write the skeleton job options file
-    #  @warning Not in use. Automatically raises trferr.TransformDefinitionError.
-    def writeSkeleton(self):
-        raise TransformDefinitionError( "Auto-generation of skeleton jobOptions file not yet supported." +
-                                        " It must be specified in the constructor of %s" % self.__class__.__name__  )
-
-    ## Getter function for help text associated with the transform.
-    #  @see _help attribute.
-    #  @return String
-    def getHelp(self):
-        return self._help
-
-    ## Generate short usage message associated with the transform.
-    #  @return String
-    def getUsage(self):
-        cmd = self.filename()
-        use = 'usage: %s [options]' % cmd
-        for arg in self._positionalArgs:
-            if arg.isOptional():
-                use += ' [%s]' % arg.name().lower()
-            else:
-                use += ' <%s>' % arg.name().lower()
-        use += os.linesep
-        use += '       Arguments should be given as name=value.'
-        use += os.linesep
-        use += '       Option -h,--help: get detailed help'
-        return use
-
-    ## Retrieve pre-formatted help messages for all run options available to the transform.
-    #  @return String
-    def getFullOptionsHelp(self):
-        return 'Options:' + os.linesep + self.runOptions().__str__('   ')
-
-    ## Retrieve pre-formatted help messages for all arguments in the transform.
-    #  @return String
-    def getFullArgsHelp(self):
-        help = [ 'Arguments:' ]
-        help += [ '  ' + arg.fullHelp() for arg in self._positionalArgs ]
-        return os.linesep.join( help )
-
-    ## Retrieve complete transform help message.
-    #  @return String
-    def getFullHelp(self):
-        use = [ 'JobTransform %s version %s'  % (self.name(),self.version()) ,
-                self.getHelp(),
-                self.getUsage(),
-                self.getFullOptionsHelp(),
-                self.getFullArgsHelp() ]
-        return os.linesep.join(use)
-
-    ## Print short usage message to @c sys.stdout
-    #  @return None
-    def usage(self):
-        print (self.getUsage())
-
-    ## Print full help message to @c sys.stdout
-    #  @return None
-    def help(self):
-        print (self.getFullHelp())
-
-    ## Add all arguments to _positionalArgs, ensuring proper help is provided.
-    #   @details Placeholder, to be implemented in task-specific transforms.
-    #   Included here for backwards compatibility with older transforms.
-    def addAllArgs(self):
-        return
-
-    ## Print full help message to @c sys.stdout and exit program.
-    #  @return None
-    def printHelpAndExit(self):
-        self.addAllArgs()
-        self.help()
-        sys.exit()
-
-    ## Convert a Python exception caught by the transform to an appropriate AtlasErrorCodes.ErrorInfo instance.
-    #  @param e A Python Exception instance.
-    #  @remarks An unknown exception is treated as a AtlasErrorCodes.FATAL error with an @b TRF_EXC error acronym.
-    #  @return AtlasErrorCodes.ErrorInfo instance
-    def _handleException(self,e):
-        err = trferr.errorHandler.handleException(e)
-        if err is not None:
-            err.who = 'JobTransform.%s' % self.name()
-        else:
-            err = AtlasErrorCodes.ErrorInfo( acronym='TRF_EXC', severity = AtlasErrorCodes.FATAL, message='%s: %s' % (e.__class__.__name__ ,e.args) )
-        return err
-
-    ## @brief Ensure the existence of the skeleton job options file.
-    #  @details The skeleton jobOptions file is intended to be auto-generated if it has not been specified.
-    #  @warning An exception will be raised if the skeleton job options file has not been specified as the 
-    #  automatic skeleton job options generator (i.e. the writeSkeleton() function) has not been implemented yet.
-    #  @exception trferr.TransformEnvironmentError is raised if the skeleton job options file cannot be located.
-    #  @see writeSkeleton() function to further understand warning.
-    #  @return None
-    def ensureSkeleton(self):
-        fn = self._skeleton
-        if fn is None:
-            # auto-generate skeleton jobOptions file
-            self._skeleton = self.defaultSkeletonFilename()
-            self.writeSkeleton()
-            fn = self._skeleton
-        # get joboptions search path
-        # try to find the file in the search path
-        if not find_joboptions( fn ):
-            raise TransformEnvironmentError( 'Skeleton file %s not found in %s' % (fn,trfconsts.JOBOPTIONSPATH) )
-
-    ## Read transform arguments from pickle file and populate the current transform's arguments with its corresponding values.
-    #  @remarks Only supported arguments will be used whilst all others will be ignored.
-    def readArgsFromPickle(self,filename):
-        with open( filename, 'rb' ) as argFile:  # pickle requires binary mode
-            argDict = pickle.load(argFile)
-        for n,v in argDict.items():
-            try:
-                self.setArgument(n,v)
-            except TransformArgumentError:
-                pass
-
-    ## Write the dictionary of all the original argument values to file.
-    #  @see argumentOriginalDict() function.
-    def writeArgsToPickle(self,filename):
-        fileutil.remove(filename)
-        with open( filename, 'wb' ) as argFile:  # pickle requires binary mode
-            pickle.dump(self.argumentOriginalDict(), argFile)
-
-    ## @brief Process list of arguments including the options.
-    #  @details Transform arguments and options are populated with information from the @em argList provided.
-    #  @param argList List of arguments (Strings) at the command line.
-    #  @exception trferr.TransformArgumentError is raised when positional arguments are specified
-    #  after named arguments. 
-    #  The same exception is also raised when too many positional arguments are given.
-    #  @see setOptionArg() and setArgument() functions.
-    #  @return None
-    def processArgs(self, argList):
-        # Print usage when no arguments are given
-        if len(argList) == 0:
-            self.usage()
-            sys.exit()
-        args = self._runOpts.extractOptions(argList)
-        # process arguments
-        posArgs = self._positionalArgs
-        nArgs = len(args)
-        # fill the dictionary with all given arguments
-        lastNamed=''
-        pos = 0
-        for i in range(nArgs):
-            val = args[i]
-            equal = val.find('=')
-            if equal == -1:
-                #positional argument. Not allowed after named argument
-                if lastNamed:
-                    raise TransformArgumentError(
-                        'No positional arguments allowed after named arguments.' +
-                        ' Positional argument %d (%s) after named argument %s=%r' %
-                        (i+1,val,lastNamed,self.getArgument(lastNamed).value()) )
-                try:
-                    name = posArgs[pos].name()
-                except IndexError:
-                    raise TransformArgumentError ('too many arguments: %d (max=%d)' %
-                                                  (nArgs, len(posArgs)))
-                pos += 1
-            else:
-                #named argument or option
-                name = val[:equal]
-                val = val[equal+1:]
-                if self.isOptionArg(name):
-                    self.setOptionArg(name,val)
-                    continue
-                else:
-                    lastNamed = name
-                    pos += 1
-            self.setArgument( name, val )
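-
-    # Accepted command-line shapes (hypothetical transform and argument names):
-    #  @code
-    #  MyExample_trf.py in.pool.root out.pool.root 100          # purely positional
-    #  MyExample_trf.py inputFile=in.pool.root maxEvents=100    # named (name=value)
-    #  MyExample_trf.py in.pool.root maxEvents=100              # positional before named: OK
-    #  MyExample_trf.py maxEvents=100 in.pool.root              # rejected: positional after named
-    #  @endcode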
-
-    ## @brief Process the information in the configuration object.
-    #  @details Transform arguments and options are populated with information from the @em argList provided.
-    #  Configuration auxiliary files are retrieved, min and max events are set, 
-    #  max event handling strategy is set and filter efficiency (<=1) taken into account.
-    #  @warning This function must be called @em after the pre-run actions of the individual arguments.
-    #  @see doPreRunActions() function.
-    #  @return None
-    def processConfig(self):
-        config = self._config
-        if not config: return
-        # print configuration
-        for line in str(config).split(os.linesep):
-            self.logger().info( line )
-        # get all the auxiliary files
-        if config.auxfiles:
-            get_files( config.auxfiles, keepDir=False, errorIfNotFound=True )
-        # How to handle the case when number of input events is smaller than maxEvents
-        self._maxEventsStrategy = config.maxeventsstrategy
-        # set the limits on the number of events
-        minEvts = config.minevents
-        if minEvts > 0:
-            self.setMinMaxEvents(minEvts,config.maxeventsfactor*minEvts)
-        # compensate number of events to loop for generator efficiency
-        eff = config.efficiency
-        if eff < 1.0:
-            maxEvts = self.getArgumentOfType('MaxEvents')
-            if maxEvts is not None and maxEvts.value() != -1:
-                oldEvents = maxEvts.value()
-                if oldEvents > 0:
-                    newEvents = int( math.ceil(oldEvents/eff) )
-                    maxEvts.setValue(newEvents)
-                    self.logger().info("Processing %d events instead of %d to account for filtering efficiency %f", newEvents, oldEvents, eff )
-
-    ## Dump to file the complete shell command to facilitate the re-execution of the transform.
-    #  @remarks The arguments of the transform are written to a separate file as well.
-    #  @see writeArgsToPickle() function.
-    #  @return None
-    def writeLastCommand(self):
-        lastname = 'last.' + self._name
-        with open( lastname, 'w' ) as lastfile:
-            cmd = self.filename()
-            for arg in self._positionalArgs:
-                cmd += ' %s=%s' % ( arg.name(), arg.originalValue() )
-            # allow for overrides when re-executing last command
-            cmd += ' $@'
-            lastfile.write( 'exec ' + cmd + os.linesep )
-        os.chmod(lastname, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
-        # write last arguments in pickle file
-        argsFilename = 'last.runargs.gpickle'
-        self.writeArgsToPickle( argsFilename )
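-
-    # The generated last.[TRANSFORM_NAME] wrapper then looks like (hypothetical values):
-    #  @code
-    #  exec MyExample_trf.py inputFile=in.pool.root maxEvents=100 $@
-    #  @endcode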
-
-    ## Determine the atlas release used.
-    #  @remarks If the release can be determined, the @c ATLAS_RELEASE environment variable is set to that value.
-    #  @return None
-    def _determineAtlasRelease(self):
-        self._atlasrelease = get_atlas_release()
-        if self._atlasrelease:
-            self.logger().info( "Using Atlas Release %s", self._atlasrelease )
-            os.environ['ATLAS_RELEASE'] = self._atlasrelease
-        else:
-            self.logger().warning( "Could not determine Atlas Release" )
-
-    ## @brief Determine the database release used with the @c DBRELEASE environment variable
-    #  @details This value is checked against the requested version (i.e. @c DBRELEASE_REQUESTED) 
-    #  and the minimum required version (i.e. @c DBRELEASE_REQUIRED )
-    #  @return None
-    def _determineDBRelease(self):
-        # try environment variable DBRELEASE
-        self._dbrelease = os.environ.get('DBRELEASE')
-        if self._dbrelease:
-            self.logger().info( "Got Database Release number %s from environment variable DBRELEASE", self._dbrelease )
-        else:
-            self.logger().info( "Database Release no longer needed for r19 and beyond" )
-            return
-        if self._dbrelease == 'current': # no further check required.
-            return
-        # check consistency with requested version
-        dbfound = self._dbrelease
-        dbwanted = os.environ.get('DBRELEASE_REQUESTED')
-        if dbwanted and dbwanted != dbfound:
-            raise TransformEnvironmentError( "Wrong DBRelease version: %s. Requested version: %s" % (dbfound,dbwanted), error='TRF_DBREL_VERSION' )
-        # check consistency with required version
-        dbneeded = os.environ.get('DBRELEASE_REQUIRED')
-        if dbneeded and VersionString(dbfound) < VersionString(dbneeded):
-            raise TransformEnvironmentError( "Wrong DBRelease version: %s. Need at least version %s" % (dbfound,dbneeded), error='TRF_DBREL_VERSION' )
-
-    ## @brief Gather information on the environment
-    #  @details The information gathered includes information on the Atlas release, database release, 
-    #  platform info, cpu info, run directory and execution environment. 
-    #  @return None
-    def gatherEnvironmentInfo(self):
-        # ATLAS offline release number
-        self._determineAtlasRelease()
-        self._jobReport.addInfo( JobInfo('AtlasRelease', self._atlasrelease) )
-        # Database Release number
-        self._determineDBRelease()
-        self._jobReport.addInfo( JobInfo('DBRelease', self._dbrelease) )
-        # What machine are we running on
-        machineInfo = JobInfo('Machine')
-        # Platform information
-        import platform
-        joinList = lambda x : '-'.join(x)  # noqa: E731
-        asIs = lambda x: x                 # noqa: E731
-        platformEnv = { 'architecture'   : joinList ,
-                        'dist'           : joinList ,
-                        'machine'        : asIs ,
-                        'node'           : asIs ,
-                        'platform'       : asIs ,
-                        'processor'      : asIs ,
-                        'python_version' : asIs ,
-                        'system'         : asIs  }
-        for var,func in platformEnv.items():
-            varFunc = getattr(platform,var)
-            machineInfo.addContents( JobInfo(var, func( varFunc() ) ) )
-        # CPU information from /proc/cpuinfo
-        log = self.logger()
-        cpumodel=''
-        cpucache=''
-        modelstring='UNKNOWN'
-        fname='/proc/cpuinfo'
-        modelRE=re.compile(r'^model name\s+:\s+(\w.+)')
-        cacheRE=re.compile(r'^cache size\s+:\s+(\d+ KB)')
-        try:
-            with open( fname ) as f:
-                for line in f:
-                    model=modelRE.search(line)
-                    if model:
-                        cpumodel=model.group(1)
-                        log.info( 'Found CPU model: ' + cpumodel )
-                    cache=cacheRE.search(line)
-                    if cache:
-                        cpucache=cache.group(1)
-                        log.info( 'Found CPU cache size: ' + cpucache )
-                        machineInfo.addContents( JobInfo('cachesize',cpucache) )
-                    # stop after 1st pair found - can be multiple cpus
-                    if cpumodel and cpucache:
-                        modelstring=cpumodel + " " + cpucache
-                        break
-        except OSError:
-            log.warning('Could not determine CPU model (file %s not found)', fname)
-        if modelstring == 'UNKNOWN':
-            log.warning('CPU model and/or cache size not found')
-        machineInfo.addContents( JobInfo('model',modelstring) )
-        self._jobReport.addInfo(machineInfo)
-        # Working directory
-        self._jobReport.addInfo( JobInfo('Workdir',os.getcwd()) )
-        # Environment variables
-        envInfo = JobInfo('Environment')
-        excludeEnv = ( 'YP', 'SSH_.*', '(COLOR|X)?TERM', 'KONSOLE_.*', '.*ROOT', 
-                       'VISUAL', 'EDITOR', 'PAGER', 'SHLVL', 'DISPLAY', 'PS[12]', 'MANPATH',
-                       'GROUP_DIR', 'MAIL', '.*_HOME', 'WWW_.*', 'GTK_.*', 'ENV',
-                       'SAVEPATH', 'NNTPSERVER', '_?HPX_.*', 'LC_.*', 'LANG', 'LS_COLORS',
-                       'HEP_ENV', 'KDE_.*', '.*SESSION', 'SESSION.*', 'XMODIFIERS',
-                       'X?PRINT.*', 'INPUTRC', 'LESS.*', 'AFSHOME', 'USERPATH', 'IFS', 'LAMHELPFILE',
-                       'CLUSTER_DIR', 'ENVIRONMENT', 'GS_LIB', 'ROOTPATH', 'XAUTHORITY',
-                       '.*_DCOP', 'DCOP_.*', 'DOTFONTPATH', 'INITIALISED',
-                       'SAVEHIST', 'HISTSIZE',
-                       'cmt', 'jcmt', 'CVS.*', 'CMTCVSOFFSET', os.sep )
-        excludeEnvRE = re.compile( '^%s$' % '|'.join(excludeEnv) )
-        for n,v in os.environ.items():
-            if not excludeEnvRE.match(n):
-                envInfo.addContents( JobInfo(n,v) )
-        # add everything to jobReport
-        self._jobReport.addInfo(envInfo)
-        # print out part of the information
-        self.dumpReport(self._startReportOptions,self._startReportNotOptions)
-
-    ## @brief Gather information on the run directory.
-    #  @details The information gathered on the run directory includes a full file listing (one line per entry), 
-    #  the disk space (in bytes) used and what is still available.
-    #  @return Dictionary
-    def _getRunDirInfo(self):
-        listing = []
-        totalSize = 0
-        format = "%-10s %10s %24s %s"
-        listing.append( format % ("access", "size", "modification_time", "filename") )
-        for dirpath,dirnames,filenames in os.walk(os.curdir):
-            for f in filenames + dirnames:
-                try:
-                    fullname = os.path.join(dirpath,f)
-                    filestat = fileutil.lstat(fullname)
-                    filesize = filestat.st_size
-                    filetime = filestat.st_mtime
-                    filemode = fileutil.mode_to_string(filestat.st_mode)
-                    # prettify the filename printout
-                    if os.path.isdir(fullname):
-                        fullname += os.sep
-                    elif os.path.islink(fullname):
-                        target = os.readlink(fullname)
-                        fullname += ' -> ' + target
-                        filesize = len(target)
-                    if fullname.startswith( os.curdir + os.sep ): fullname = fullname[2:]
-                    listing.append( fmt % (filemode, filesize, time.ctime(filetime), fullname) )
-                    totalSize += filesize
-                except OSError:
-                    self.logger().info("lstat failed on %s (probably the file was removed)",f)
-        statvfs = os.statvfs(os.curdir)
-        availSize = statvfs.f_bavail * statvfs.f_bsize
-        return { 'RunDirListing'       : os.linesep.join(listing) ,
-                 'RunDirUsedDisk'      : totalSize ,
-                 'RunDirAvailableDisk' : availSize }
-
-    ## Switch between old and new metadata formats.
-    #  @remarks New metadata format is used when the @c --usenewmeta=True is specified at the command line.
-    #  @return None
-    def addMetaData( self ):
-        if self._jobReport.useNewMetadataFormat:
-            self.addMetaData_new()
-        else:
-            self.addMetaData_old()
-
-    ## @brief Gather metadata information from various sources for the job report.
-    #  @details Metadata is extracted from the log file, 
-    #  argument instances, configuration, input and output files.
-    #  @remarks This new format attributes metadata direct to the entity it refers to (as it should).
-    #  Provenance information is preserved i.e. sequence of sub-transforms, input and output files are 
-    #  attributed to the (sub-)transforms responsible.
-    #  @return None
-    def addMetaData_new(self):
-        # gather metadata from various sources
-        addMeta = {}
-        # A copy of JobReport._extraMetadataDict is made to allow for modification 
-        # to facilitate loop traversal with deletion.
-        tempMDict = _extraMetadataDict.copy()
-        # gather metadata from logfile
-        logfile = self._logFilename
-        if os.path.exists(logfile):
-            self.logger().info( "Scanning logfile %s for metadata...", logfile )
-            # pattern in logfile:
-            #    MetaData: <name> [unit]=<value>
-            metaPat = re.compile( r"^MetaData:\s+(.*?)\s*=\s*(.*)$" )
-            with open( logfile ) as theLog:
-                for line in theLog:
-                    line = line.rstrip()
-                    match=metaPat.search(line)
-                    if match:
-                        name=match.group(1).split()[0]  # take first word (second word is optional unit)
-                        value=match.group(2)
-                        self.logger().info( "Found MetaData: %s=%s", name,value )
-                        addMeta[name] = value
-                        continue
-                    # gather extra metadata from extraMetadata
-                    for mData in list( tempMDict.keys() ):  # copy the keys: entries are popped while iterating
-                        mDataRE = tempMDict[ mData ]
-                        try:
-                            addMeta[ mData ] = mDataRE.match( line ).group( 1 )
-                        except AttributeError:
-                            if not isinstance( mDataRE, type( re.compile( '' ) ) ): # user-defined (non-regex) metadata
-                                addMeta[ mData ] = tempMDict.pop( mData ) # remove once added.
-                        else:
-                            tempMDict.pop( mData ) # match occurred so removing
-        # gather metadata from arguments
-        for arg in self._positionalArgs:
-            addMeta.update( arg.metaData() )
-        # gather metadata from jobconfiguration
-        if self._config is not None:
-            addMeta.update( self._config.metaData() )
-        self._jobReport._task.addMetaData( addMeta )
-        # Update inputfiles in jobReport
-        for f in self._inputFiles:
-            fileInfoDict = f.fileInfo()
-            if not isinstance( fileInfoDict, dict ):
-                if fileInfoDict is None:
-                    fileInfoList = []
-                else:
-                    fileInfoList = [ fileInfoDict ] 
-            else:
-                fileInfoList = fileInfoDict.values()
-            for fInfo in fileInfoList:
-                self._jobReport.addInputFile( fInfo )
-        # Update outputfiles in jobReport
-        for f in self._outputFiles:
-            fileInfoDict = f.fileInfo()
-            if not isinstance( fileInfoDict, dict ):
-                if fileInfoDict is None:
-                    fileInfoList = []
-                else:
-                    fileInfoList = [ fileInfoDict ] 
-            else:
-                fileInfoList = fileInfoDict.values()
-            for fInfo in fileInfoList:
-                # fill in missing info from jobReport
-                reportInfo = self._jobReport.outputFile(fInfo.filename())
-                if reportInfo:
-                    for name,value in fInfo.metaData().items():
-                        if value is None:
-                            reportValue = reportInfo.metaData(name)
-                            if reportValue is not None:
-                                fInfo.addMetaData( { name:reportValue } )
-                # Update outputfiles in jobReport
-                self._jobReport.addOutputFile(fInfo)
-
-    ## @brief Gather metadata information from various sources for the job report.
-    #  @details Metadata is extracted from the log file, 
-    #  argument instances, configuration, input and output files. 
-    #  @remarks This old format forces metadata to be associated with output files regardless of the context.
-    #  Provenance (as described in the addMetaData_new() function) is not preserved.
-    #  @return None
-    def addMetaData_old(self):
-        # gather metadata from various sources
-        addMeta = {}
-        # A copy of JobReport._extraMetadataDict is made so that entries can be
-        # deleted while the loop traverses it.
-        tempMDict = _extraMetadataDict.copy()
-        # gather metadata from logfile
-        logfile = self._logFilename
-        if os.path.exists(logfile):
-            self.logger().info( "Scanning logfile %s for metadata...", logfile )
-            # pattern in logfile:
-            #    MetaData: <name> [unit]=<value>
-            metaPat = re.compile( r"^MetaData:\s+(.*?)\s*=\s*(.*)$" )
-            with open(logfile) as theLog:            
-                for line in theLog:
-                    line = line.rstrip()
-                    match=metaPat.search(line)
-                    if match:
-                        name=match.group(1).split()[0]  # take first word (second word is optional unit)
-                        value=match.group(2)
-                        self.logger().info( "Found MetaData: %s=%s", name, value )
-                        addMeta[name] = value
-                        continue
-                    # gather extra metadata from extraMetadata
-                    # iterate over a snapshot of the keys, as entries are popped inside the loop
-                    for mData in list( tempMDict ):
-                        mDataRE = tempMDict[ mData ]
-                        try:
-                            addMeta[ mData ] = mDataRE.match( line ).group( 1 )
-                        except AttributeError:
-                            if not isinstance( mDataRE, re.Pattern ): # user-defined metadata
-                                addMeta[ mData ] = tempMDict.pop( mData ) # remove once added.
-                        else:
-                            tempMDict.pop( mData ) # match occurred, so remove
-        # gather metadata from arguments
-        for arg in self._positionalArgs:
-            addMeta.update( arg.metaData() )
-        # gather metadata from jobconfiguration
-        if self._config is not None:
-            addMeta.update( self._config.metaData() )
-        for f in self._outputFiles:
-            info = f.fileInfo()
-            if info:
-                # This is the original way metadata is stored... forced association with output files
-                info.addMetaData( addMeta )
-                # fill in missing info from jobReport
-                reportInfo = self._jobReport.outputFile(info.filename())
-                if reportInfo:
-                    for name,value in info.metaData().items():
-                        if value is None:
-                            reportValue = reportInfo.metaData(name)
-                            if reportValue is not None:
-                                info.addMetaData( { name:reportValue } )
-                # Update outputfiles in jobReport
-                self._jobReport.addOutputFile(info)
-
-    ## Depending on the type of input file specified, a local copy is made for optimisation and stability purposes.
-    #  @return None
-    def preStageInputFiles(self):
-        from PyJobTransformsCore.FilePreStager import theFileStagerRobot
-        self._addLogger( theFileStagerRobot )
-        fileList = []
-        for f in self._inputFiles:
-            if f:
-                value = f.value()
-                if isinstance( value, list ):
-                    fileList += value
-                else:
-                    fileList.append(value)
-        theFileStagerRobot.addFilesToStagerIfNeeded( fileList )
-        filesNotStaged = theFileStagerRobot.waitUntilAllFilesStaged()
-        if filesNotStaged:
-            problemFiles = ''
-            for filename,status in filesNotStaged.items():
-                problemFiles += os.linesep + "%s:%s" % (filename,status)
-            raise InputFileError("Could not stage following files from tape:%s" % problemFiles )
-
-    ## Check that all arguments have a value, filling in missing ones from their defaults.
-    #  @exception trferr.TransformArgumentError is raised when an argument is not presented with a value and 
-    #  does not possess a default value either.
-    #  @return None
-    def checkArguments(self):
-        # fill in missing arguments with default values
-        for arg in self._positionalArgs:
-            if not arg.hasValue() and arg.hasDefault():
-                self.setArgument( arg.name(), arg.getDefault() )
-        # check that all arguments have a value
-        noValue = [ arg.name() for arg in self._positionalArgs if not arg.hasValue() ]
-        if len(noValue):
-            raise TransformArgumentError( 'Argument(s) not set: %s' % (','.join(noValue),) )
-
-    ## @brief Check the number of input events are as expected.
-    #  @details The maximum number of events desired/required is determined by querying the @em maxEvents argument. 
-    #  If the value is < 1 (i.e. 0 or -1), the check is abandoned. This user provided value is compared 
-    #  against the actual number of events found in the @em first of the list of input files and an appropriate 
-    #  response is initiated. Command-line options to omit certain validation routines will affect the 
-    #  response of this function as well.
-    #  @exception trferr.InputFileError is raised when the comparison between the desired number of events value 
-    #  and the actual number of events is not as expected.
-    #  @return None
-    def checkInputEvents(self):
-        maxEventsArg = self.getArgumentOfType("MaxEvents")
-        try:
-            maxEvents = maxEventsArg.value()
-            # check first file in input file list only.
-            if len(self._inputFiles) > 1:
-                self.logger().info('Checking maxEvents against {0} only ({1} are assumed to have the same event count)'.format(self._inputFiles[0].name(), [f.name() for f in self._inputFiles[1:]]))
-            inputFile = self._inputFiles[0]
-            if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False:
-                raise Exception # validation disabled: jump to the except clause below to skip the check
-        except Exception:
-            self.logger().info( "Skipping input file max event check." )
-            return
-        # Do nothing if all events are to be used (-1) rather than set it to the actual number.
-        # Also, if no events (0) are to be used, simply return
-        if maxEvents < 1:
-            return
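-        # When fewer events are found than requested, the response below is governed
-        # by self._maxEventsStrategy: 'ABORT' raises InputFileError, 'INPUTEVENTS'
-        # lowers maxEvents to the actual event count, and 'IGNORE' only warns.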
-        # maxEvents given a value so test that it is appropriate.
-        events = inputFile.eventCount()
-        if events is None:
-            if not TRF_SETTING[ 'testrun' ]:
-                self.logger().info( "Skipping input file event check. File does not lend itself to event counting." )
-        elif events == 0:
-            # cross-check with filesize
-            totalSize = inputFile.fileSize()
-            if totalSize > 0:
-                self.logger().warning("Found 0 events in %s, but total filesize %s. Ignoring event count.",
-                                      inputFile.name(), totalSize )
-            else:
-                raise InputFileError(inputFile.originalValue(),' empty file(s). Argument %s' %
-                                     (inputFile.name(),) ,
-                                     error='TRF_INFILE_EMPTY')
-        elif events < maxEvents:
-            if self._maxEventsStrategy =='ABORT':
-                raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' %
-                                     (events,maxEvents),
-                                     error='TRF_INFILE_TOOFEW')
-            elif self._maxEventsStrategy =='INPUTEVENTS':
-                maxEventsArg.setValue(events)
-                self.logger().warning("Replacing %s=%d by number of events (%d) in input file(s) %s",
-                                      maxEventsArg.name(),maxEvents,events,inputFile.originalValue())
-            elif self._maxEventsStrategy =='IGNORE':
-                self.logger().warning("%s=%d larger than number of events (%d) in input file(s) %s",
-                                      maxEventsArg.name(),maxEvents,events,inputFile.originalValue())
-            else:
-                self.logger().warning("Unknown maxEventsStratety (%s). Ignoring that %s=%d is larger than number of events (%d) in input file(s) %s",
-                                      self._maxEventsStrategy,maxEventsArg.name(),maxEvents,events,inputFile.originalValue())
-            # Do check on minimum number of input events 
-            if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False:
-                self.logger().info( "Input file event min/max test omitted." ) 
-            elif self._minEvents and events < self._minEvents:
-                raise InputFileError(inputFile.originalValue(),': too few events (%d < %d) in input file' %
-                                     (events,self._minEvents),
-                                     error='TRF_INFILE_TOOFEW')
-        else:
-            self.logger().info( "Input file event check successful." )
-
-    ## @brief Function hook to facilitate specialised event matching routines.
-    #  @details This hook is to allow transforms to match the number of 
-    #  input and output events as appropriate for the file type in question.
-    #  @return None
-    def matchEvents( self ):
-        pass
-
-    ## @brief Top-level function to execute all available pre-run actions (in specified arguments).
-    #  @details Special attention is given to the SQLiteSupport argument: the pre-run action associated with the 
-    #  @em SQLiteSupport argument is executed only if the @c --useSQLite=True command-line 
-    #  option is used (if the option is omitted, the default is @c False).
-    #  @remarks The processConfig() function is also executed to process the configuration object.
-    #  @return None
-    def doPreRunActions(self):
-        self.logger().debug("Going to execute Pre-Run commands...")
-        # run arguments specific preRunActions
-        for cmd in self._preRunActions:
-            if isinstance( cmd, SQLiteSupport ):
-                # Unless _useSQLite is explicitly set, it takes the value of _mcInput; 
-                # an explicit setting takes precedence when determining whether 
-                # SQLiteSupport is enabled.
-                if self._useSQLite is None:
-                    self._useSQLite = self._mcInput
-                    self.logger().info( "%s use of SQLite.", { True : 'Enabling', False : 'Disabling', None : 'Disabling' }[ self._useSQLite ] )
-                if not self._useSQLite:
-                    continue
-            cmd.preRunAction()
-        # Process the configuration object
-        self.processConfig()
-        self.logger().debug("Done executing Pre-Run commands")
-
-
-    def doUpload(self):
-        
-        isProd = False
-        
-        try:
-            out = subprocess.Popen(["voms-proxy-info","-fqan"],  stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
-            lines=out.split("\n")
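-            # A production role shows up as an FQAN line such as
-            # '/atlas/Role=production/Capability=NULL' (illustrative).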
-            for l in lines:
-                # print (l.strip())
-                if l.startswith('/atlas/Role=production'): isProd=True
-        except OSError:
-            print ("trf.py - Not a prodSys environment.")
-                     
-        
-        if isProd is False and os.getenv('TZAMIPW') is None: print ('Performance data will not get stored in the AMI db.')
-        elif self._exportToAmi is True and self.name()!='Digi_trf':  # digi is off as it has no AMItag and is fast
-        
-            isMC = False
-            isStream=''
-            isAMItag=''
-            isRun=-1
-
-            from PyUtils.MetaReader import read_metadata
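-            # read_metadata(file, None, 'peeker') returns a dict keyed by file name;
-            # the per-file entry carries items such as 'eventTypes', 'triggerStreamOfFile',
-            # 'AMITag' and 'runNumbers', which are picked up below.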
-            # This loop just tries to find the run number, stream and AMI tag. It should not look at NTUP files as these have no metadata embedded.
-            for arg in self._positionalArgs: 
-                if  arg.name()=='outputAODFile' or arg.name()=='outputESDFile' :
-                    print ('=========== inputFilePeeker ============')
-                    print (arg.name(), arg.value())
-                    inFile=arg.value()
-                                        
-                    inputFileMetadata={}
-                    try:
-                        inputFileMetadata = read_metadata(inFile,None,'peeker')[inFile]
-                    except Exception as err:
-                        print ("Unable to open file:",inFile)
-                        print ('caught:\t',err)
-                    
-                    try:
-                        if 'IS_SIMULATION' in inputFileMetadata['eventTypes']:
-                            isMC = True
-                        print ('isMC     ', isMC)
-                        
-                        if 'triggerStreamOfFile' in inputFileMetadata:
-                            isStream = inputFileMetadata['triggerStreamOfFile']
-                        print ('isStream ',isStream)
-                        
-                        if 'AMITag' in inputFileMetadata:
-                            isAMItag = inputFileMetadata['AMITag']
-                        print ('isAMItag ',isAMItag)
-                        
-                        if 'runNumbers' in  inputFileMetadata:
-                            isRun    = inputFileMetadata['runNumbers'][0]
-                        print ('isRun    ', isRun)
-                        
-                        if isMC is True:
-                            print ('this is MC. Changing stream->procstep and runnumber -> pandaid')
-                            isStream=self.name()
-                            if inFile[1].isdigit():
-                                isRun = inFile[1]
-                            else:
-                                isRun = 0
-                    except Exception:
-                        print ("Problem in decoding variables.")
-                        print (sys.exc_info()[0])
-                        print (sys.exc_info()[1])
-                    print ('=====================')
-                   
-            if isAMItag!='':
-                print ('trf.py STARTING UPLOAD; final values -> stream:',isStream,'\trunnumber:',isRun,'\tamitag:',isAMItag)
-                
-                import PyJobTransforms.performanceDataUploader as pu
-                
-                uploader = pu.PerformanceUploader(isProd)
-                uploader.establishConnection()
-                # this loop finds sizes and formats and uploads to AMI for all the files.
-                for cmd in self._postRunActions:
-                    try:
-                        # print ('trf.py _postRunAction ',cmd)
-                        fs=[]
-                        cmd.getDataForAmi(fs)
-                        print ('trf.py returned from getting data on object sizes')
-                        if len(fs)==3:
-                            if fs[1]>0:
-                                try:
-                                    uploader.uploadDataSize(fs[0], int(isRun), isStream ,isAMItag, fs[1], fs[2])
-                                    print ('trf.py object size data upload DONE')
-                                except Exception as exc:
-                                    print (exc)
-                    except Exception as e:
-                        print ('trf.py WARNING: Could not send size data to AMI ' , e)
-                        print (sys.exc_info()[0])
-                        print (sys.exc_info()[1])
-                
-                if self._name=='AtlasG4_trf' or self._name=='Evgen_trf' or self._name=='Digi_trf': 
-                    perffile='ntuple.pmon.gz' 
-                else:
-                    perffile='ntuple_'+self._name+'.pmon.gz'
-                    
-                if os.access(perffile,os.F_OK):
-                    try:
-                        print ('trf.py: uploading job performance data to AMI')
-                            
-                        try:
-                            uploader.uploadPerfMonSD(isAMItag, self._name, isStream, int(isRun), perffile)
-                        except Exception as exc: 
-                            print (exc)
-                        
-                        print ('trf.py upload of job performance data done!')
-                    except Exception as e:
-                        print ('trf.py WARNING: Could not send job info to AMI ' , e)
-                        print (sys.exc_info()[0])
-                        print (sys.exc_info()[1])
-                else:
-                    print ('there is no perfmon file: ', perffile)
-        
-        
-        
-    ## Top-level function to execute all available post-run actions (in specified arguments).
-    #  @remarks Event matching is performed unless validation routines are explicitly omitted.
-    #  The addMetaData() function is also executed to gather metadata from output files.
-    #  @return None
-    def doPostRunActions(self):
-        self.logger().debug("Going to execute Post-Run commands...")
-        
-        for cmd in self._postRunActions:
-            try:
-                cmd.postRunAction()             
-            except TransformValidationError as e:
-                self.addValidationError( e )
-                
-        from PyJobTransforms.performanceDataUploader import timelimited
-        try:
-            timelimited(120, self.doUpload)
-        except Exception as exc: 
-            print (exc)
-            
-        
-        if not ( VALIDATION_DICT['ALL'] is False or VALIDATION_DICT[ 'testMatchEvents' ] is False ):
-            self.matchEvents()
-        else:
-            self.logger().info( "Skipping event number matching." )
-        # add metadata to output files and update jobReport
-        self.addMetaData()
-        self.logger().debug("Done executing Post-Run commands")
-        
-    
-    ## @brief The top-level execution routine for a transform.
-    #  @details This function constitutes the very core of a transform as it executes all other functions required 
-    #  before and after the main Athena job is executed.
-    #  @remarks Derived classes may override this function as it is typically done for composite transforms.
-    #  @exception trferr.JobOptionsNotFoundError is raised when the top-level job options file cannot be found.
-    #  @exception trferr.TransformArgumentError is raised when the required number of events is not within limits.
-    #  @return JobReport.JobReport instance containing results from the current job.
-    def runJob(self):
-        # TEST Error insertion
-        # self.addError( acronym = 'ATH_FAILURE', severity = AtlasErrorCodes.FATAL, message = 'TEST ONLY' )
-        #
-        # start with the pre-run actions
-        self.doPreRunActions()
-        # gather environment only after preRunActions, as they may change the environment
-        self.gatherEnvironmentInfo()
-        # Do check on minimum and maximum number of requested output events (only if not running in test mode)
-        maxEvents = None
-        if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False:
-            self.logger().info( "Event min/max test omitted." )
-        else:
-            maxEventsArg = self.getArgumentOfType("MaxEvents")
-            try:
-                maxEvents = maxEventsArg.value()
-            except Exception:
-                pass
-        if maxEvents and maxEvents != -1:
-            # undo any efficiency correction
-            if self._config: maxEvents *= self._config.efficiency
-            if self._minEvents and maxEvents < self._minEvents:
-                raise TransformArgumentError("Too few requested events (%s=%d < %d)" % (maxEventsArg.name(),maxEvents,self._minEvents), error='TRF_ARG_MAXEVT_TOOFEW' )
-            if self._maxEvents and maxEvents > self._maxEvents:
-                raise TransformArgumentError("Too many requested events (%s=%d > %d)" % (maxEventsArg.name(),maxEvents,self._maxEvents), error='TRF_ARG_MAXEVT_TOOMANY' )
-        # pre-stage any files that need it
-        self.preStageInputFiles()
-        # Check the number of input events compared to maxEvents
-        self.checkInputEvents()
-        # Prepare for running athena job
-        # open the logfile
-        self._logFile = fileutil.Tee(self._logFilename,'a')
-        # get/check the needed top jobOptions files
-        self.ensureSkeleton()
-        self.writeRunArgs()
-        topOptions = [ self.runArgsFilename() ]
-        for jo in self._preJobOptionsFiles:
-            fn = jo.filename()
-            if fn: topOptions.append( fn ) 
-        topOptions += [ self.skeletonFilename() ]
-        for jo in self._postJobOptionsFiles:
-            fn = jo.filename()
-            if fn: topOptions.append( fn )
-        # Use new message service i.e. LoggedMessageSvc
-        self._makeMessageSvcJobOptions()
-        topOptions.append( self.messageSvcFilename() )
-        # add the contents of the top joboptions to the logfile
-        line = CommentLine('Start of top joboptions').bigComment('=')
-        self._logFile.write(line + os.linesep)
-        for jo in topOptions:
-            fullJo = find_joboptions( jo )
-            if not fullJo:
-                raise JobOptionsNotFoundError(jo,'Top jobOptions file not found')
-            self.logger().info( 'Found top jobOptions %s in %s', jo, strip_suffix(fullJo,jo) )
-            with open( fullJo ) as joFile:
-                for line in joFile:
-                    self._logFile.write(line)
-        line = CommentLine('End of top joboptions').bigComment('=')
-        self._logFile.write(line + os.linesep)
-        # Run the athena job
-        trfenv.setup_athena_runtime(trfconsts.athena_py)
-        wrapper = os.path.join( trfenv.trfPath, 'athena_wrapper.py' )
-        if self._PECommand:
-            peCommand = [ '-c', self._PECommand ]
-        else:
-            peCommand = []
-        # peCommand must be the last entry as athena_wrapper.py expects this.
-        athenaScriptArgs = [ wrapper, trfenv.athena_exe ] + topOptions + self._athenaOptions + self._genericOptions + peCommand
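-        # The assembled command has the form (illustrative):
-        #   athena_wrapper.py athena.py <runargs> <pre-JOs> <skeleton> <post-JOs> <messageSvc JO> [options] [-c <PECommand>]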
-        # Create a script to allow for the trf to be re-run easily. 
-        athenaScript = './%s_runathena' % self.name()
-        # Set this to true if we actually want to execute the 'wrapper' script (for asetup, e.g.)
-        runViaScript = False
-        try:
-            with open( athenaScript, 'w' ) as athenaFile:
-                # If we have an asetup to add, add it here....
-                if 'asetup' in self._namedArgs:
-                    self.logger().info('Found asetup arg: %s', self._namedArgs['asetup'].value())
-                    athenaFile.write('#! /bin/sh' + os.linesep)
-                    athenaFile.write('%s/scripts/asetup.sh %s' % (os.environ['AtlasSetup'], self._namedArgs['asetup'].value()) + os.linesep)
-                    runViaScript = True
-                athenaFile.write(  ' '.join( athenaScriptArgs ) + os.linesep )
-            os.chmod( athenaScript,
-                      statconsts.S_IXUSR | statconsts.S_IXGRP | statconsts.S_IXOTH |
-                      statconsts.S_IRUSR | statconsts.S_IRGRP | statconsts.S_IROTH |
-                      statconsts.S_IWUSR )
-        except Exception as e:
-            self.logger().warning( 'Encountered an error while trying to create %s. %s', athenaScript, e )
-        # the actual execution
-        if runViaScript:
-            self.logger().info( 'Executing %s: %s', self.name(), athenaScript )
-            self._runJobProcess = subprocess.Popen( args = athenaScript, bufsize = 1, shell = False,stdout = subprocess.PIPE, stderr = subprocess.STDOUT )
-        else:
-            self.logger().info( 'Executing %s: %s', self.name(), ' '.join( athenaScriptArgs ) )
-            self._runJobProcess = subprocess.Popen( args = athenaScriptArgs, bufsize = 1, shell = False,stdout = subprocess.PIPE, stderr = subprocess.STDOUT )
-        # Poll stdout of the process and write to log file
-        while self._runJobProcess.poll() is None:
-            line = self._runJobProcess.stdout.readline()
-            if line:
-                self._logFile.write(line)
-        # adding the exit status from athena
-        rc = self._runJobProcess.returncode
-        self.logger().info( '%s has completed running of Athena with exit code %s.', self.name(), rc )
-        if rc < 0:
-            # dump dmesg to file when the athena job receives a signal.
-            os.system( 'dmesg > dmesg_athena.txt' )
-            if rc == -signal.SIGKILL:
-                self.logger().error( 'Athena received signal %s = SIGKILL. Athena was killed, job will terminate. ', rc )
-            elif rc == -signal.SIGTERM:
-                self.logger().error( 'Athena received signal %s = SIGTERM. Athena was terminated, job will terminate. ', rc )
-            else:
-                # After discussion with the athena core people, we decided it's best to encode the 
-                # signal exit code in a shell-like way, adding 128 to it
-                self.logger().error( 'Athena received signal %s. Exit code reset to Athena exit code %d.', -rc, 128 + abs(rc) )
-                rc = 128 + abs(rc)
-        # Add the athena exit codes and acronym directly into the main job report before 
-        # it is lost due to some exception occurring between now and the end of this function. 
-        # This ensures that --ignoreerrors=True works properly.
-        self._jobReport.addInfo( JobInfo( 'athCode', str( rc ) ) )
-        self._jobReport.addInfo( JobInfo( 'athAcronym', str( ExitCodes.what( rc ) ) ) )
-        self._logFile.close()
-#        trfenv.cleanup_athena_runtime() #------->AT:  does nothing!
-        # Create temporary job report to fill in information gathered for this job run. 
-        # This temporary job report will be merged with the main job report in the 
-        # calling function execute().
-        athenaReport = JobReport()
-        # overwrite producer for new errors that are added
-        athenaReport.setProducer( self.name(), self.version() )
-        # Check the logfile for errors
-        checker = AthenaLogChecker(self._atlasrelease)
-        checker.checkLogFile(self._logFilename,athenaReport,self.logger() )
-        # Alter behaviour of athenaReport depending on the following flags.
-        athenaReport.setIgnoreUnknown( self._ignoreUnknown )
-        athenaReport.setIgnoreAll( self._ignoreAll )
-        athenaReport.setIgnoreErrors( self._ignoreErrors )
-        athenaReport.addInfo( JobInfo( 'athCode', str( rc ) ) )
-        athenaReport.addInfo( JobInfo( 'athAcronym', str( ExitCodes.what( rc ) ) ) )
-        # do post-processing if successful running
-        status = athenaReport.exitCode()
-        if status == 0:
-            self.doPostRunActions()
-            # do check on number of events in output files
-            if VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testEventMinMax' ] is False:
-                self.logger().info( "Output file event min/max test omitted." )
-            else:
-                for f in self._outputFiles:
-                    info = f.fileInfo()
-                    events = info and info.metaData("events")
-                    if events:
-                        if self._minEvents and events < self._minEvents:
-                            raise TransformArgumentError("Too few events in file %s (%d < %d)" % (info.filename(),events,self._minEvents),error='TRF_OUTFILE_TOOFEW' )
-                        if self._maxEvents and events > self._maxEvents:
-                            raise TransformArgumentError("Too many events in file %s (%d > %d)" % (info.filename(),events,self._maxEvents),error='TRF_OUTFILE_TOOMANY' )
-        return athenaReport
-
-    ## This @em execute() function allows derived transforms to specify specialised @em runJob() functions whilst 
-    #  ensuring that the resulting job report objects are correctly handled as the routines executed are independent 
-    #  of any transform specialisation.
-    #  @warning Derived transforms must not override this function.
-    #  @return JobReport.JobReport instance
-    def execute(self):
-        self.logger().info( 'Using %s', trfenv.trfPath )
-        try:
-            #clean up old stuff
-            fileutil.remove(self._logFilename)
-            fileutil.remove(self._stderrFilename)
-            fileutil.remove(self._stdoutFilename)
-            for f in JobReport.defaultFiles: fileutil.remove(f)
-            # check the arguments
-            self.checkArguments()
-            # write the last command to file
-            self.writeLastCommand()
-            # set up runtime environment
-            trfenv.setup_runtime()
-            # run the sub-job
-            self._jobReport.addReport( self.runJob(), 'MERGE' )
-        # Catch all exceptions
-        except Exception as e:
-            self.logger().error( "During execution of %s, exception caught: %s", self.name(), e )
-            self._jobReport.addError( self._handleException(e) )
-        # run the error diagnoser on all errors
-        errorDocter = TransformErrorDiagnoser()
-        for error in self._jobReport.errors():
-            errorDocter.diagnoseError(error)
-        # Alter behaviour of _jobReport depending on the following flags.
-        self._jobReport.setIgnoreUnknown( self._ignoreUnknown )
-        self._jobReport.setIgnoreAll( self._ignoreAll )
-        self._jobReport.setIgnoreErrors( self._ignoreErrors )
-        errorcode = self._jobReport.errorCode()
-        exitcode  = self._jobReport.exitCode()
-        self.logger().info( "JobTransform completed for %s with error code %s (exit code %d)", self.name(),errorcode,exitcode )
-        dirInfo = self._getRunDirInfo()
-        # in case of ERROR, add workdir contents
-        printListing = errorcode and 'KEY_INTERRUPT' not in self._jobReport.errorAcronym()
-        for n,v in dirInfo.items():
-            if printListing or n != 'RunDirListing': self._jobReport.addInfo( JobInfo(n,v) )
-        return self._jobReport
-
-    ## Execute transformation using the command line arguments and writes final reports to file.
-    #  @return JobReport.JobReport instance
-    def exeSysArgs(self):
-        # process argument list
-        try:
-            self.processArgs(sys.argv[1:])
-        except SystemExit as e:
-            if not hasattr(e,'args') or not e.args or e.args[0] == 0:
-                # normal system exit call without any error (in printHelpAndExit())
-                self.logger().error("Got zero SystemExit exception")
-                return self._jobReport
-            else:
-                self.logger().error("Got SystemExit exception with code %s", e.args[0])
-                self._jobReport.addError( self._handleException(e) )
-        except Exception as e:
-            self._jobReport.addError( self._handleException(e) )
-        else:
-            # Run timing routines
-            vTimer.logger = self.logger()
-            _n = self.name()
-            vTimer.setResultsFile( 'TransformTimer_%s.pickle' % _n )
-            vTimer.start( _n )
-            # execute with filled arguments
-            self.execute()
-            vTimer.stop( _n )
-            vTimer.writeResultsToFile( 'TransformTimer_%s.pickle' % _n )
-        # print out part of the information
-        self.dumpReport(self._endReportOptions,self._endReportNotOptions)
-        # write reports depending on status
-        self.writeReports()
-        return self._jobReport
-
-    ## Run transformation using arguments from a RunArguments object and writes final reports to file.
-    #  @return JobReport.JobReport instance
-    def exeRunArgs(self, runArgs):
-        # fill arguments
-        try:
-            for arg in self._positionalArgs:
-                name = arg.name()
-                if hasattr(runArgs,name):
-                    self.setArgument( name, getattr(runArgs,name) )
-        except Exception as e:
-            self._jobReport.addError( self._handleException(e) )
-        else:
-            # Run timing routines
-            vTimer.logger = self.logger()
-            _n = self.name()
-            vTimer.setResultsFile( 'TransformTimer_%s.pickle' % _n )
-            vTimer.start( _n )
-            # execute with filled arguments
-            self.execute()
-            vTimer.stop( _n )
-            vTimer.writeResultsToFile( 'TransformTimer_%s.pickle' % _n )
-        # print out part of the information
-        self.dumpReport(self._endReportOptions,self._endReportNotOptions)
-        # write all reports
-        self.writeReports()
-        return self._jobReport
-
-    ## @brief Transforms a dictionary into a list of @c ['KEY=VALUE'].
-    #  @details @em RunOptions are represented in the dictionary as @c dic['--ignoreunknown']=''. Only the key is added to the list.
-    #  @remarks Generic convenience function used internally only.
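-    #  For example (illustrative): @c {'maxEvents': 10, '--ignoreunknown': ''} becomes @c ['maxEvents=10','--ignoreunknown'] (order may vary).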
-    #  @return List of Strings
-    def dicToList( self, dic ):
-        _l = []
-        for key, value in dic.items():
-            if value == '':
-                _l.append( key )
-            else:
-                _l.append( key + '=' + str( value ) )
-        return _l
-
-    ## Run transformation using arguments from a dictionary and writes final reports to file.
-    #  @return JobReport.JobReport instance
-    def exeArgDict(self, argDict):
-        # First look for run options
-        argList=self.dicToList(argDict)
-        self._runOpts.extractOptions(argList)
-        # Now fill regular arguments
-        try:
-            for arg in self._positionalArgs:
-                name = arg.name()
-                if name in argDict:
-                    self.setArgument( name, argDict[name] )
-        except Exception as e:
-            self._jobReport.addError( self._handleException(e) )
-        else:
-            # Run timing routines
-            vTimer.logger = self.logger()
-            _n = self.name()
-            vTimer.setResultsFile( 'TransformTimer_%s.pickle' % _n )
-            vTimer.start( _n )
-            # execute with filled arguments
-            self.execute()
-            vTimer.stop( _n )
-            vTimer.writeResultsToFile( 'TransformTimer_%s.pickle' % _n )
-        # print out part of the information
-        self.dumpReport(self._endReportOptions,self._endReportNotOptions)
-        # write all reports
-        self.writeReports()
-        return self._jobReport
-
diff --git a/Tools/PyJobTransformsCore/python/trfValidateRootFile.py b/Tools/PyJobTransformsCore/python/trfValidateRootFile.py
deleted file mode 100755
index 8340ae856c3bcfe6000849233f155311984e44be..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/trfValidateRootFile.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-# @Package PyJobTransforms.trfValidateRootFile
-# @brief Functionality to test a Root file for corruption
-# @author bjorn.sarrazin@cern.ch
-
-from __future__ import print_function
-
-
-
-import sys
-
-
-from PyUtils import RootUtils
-ROOT = RootUtils.import_root()
-from ROOT import TFile, TTree, TDirectory, TStopwatch, TDirectoryFile
-
-
-
-def checkBranch(branch, msg):
-
-    msg.debug('Checking branch %s...', branch.GetName())
-
-    nBaskets=branch.GetWriteBasket()
-
-    msg.debug('Checking %s baskets...', nBaskets)
-
-    for iBasket in range(nBaskets):
-        basket=branch.GetBasket(iBasket)
-        if not basket:
-            msg.warning('Basket %s of branch %s is corrupted.', iBasket, branch.GetName())
-            return 1
-
-    listOfSubBranches=branch.GetListOfBranches()
-    msg.debug('Checking %s subbranches...', listOfSubBranches.GetEntries())
-    for subBranch in listOfSubBranches:
-        if checkBranch(subBranch,msg)==1:
-            return 1
-
-    msg.debug('Branch %s looks ok.', branch.GetName())
-    return 0    
-
-
-def checkTreeBasketWise(tree, msg):
-
-    listOfBranches=tree.GetListOfBranches()
-
-    msg.debug('Checking %s branches ...', listOfBranches.GetEntries())
-
-    for branch in listOfBranches:
-        if checkBranch(branch,msg)==1:
-            msg.warning('Tree %s is corrupted (branch %s ).', tree.GetName(), branch.GetName())
-            return 1
-
-    return 0
-
-
-def checkTreeEventWise(tree, msg):
-
-    nEntries=tree.GetEntries()
-
-    msg.debug('Checking %s entries...', nEntries)
-
-    for i in range(nEntries):
-        if tree.GetEntry(i)<0:
-            msg.warning('Event %s of tree %s is corrupted.', i, tree.GetName())
-            return 1
-
-    return 0
-
-
-def checkDirectory(directory, type, requireTree, msg):
-
-    msg.debug('Checking directory %s...', directory.GetName())
-
-    listOfKeys=directory.GetListOfKeys()
-
-    msg.debug('Checking %s keys... ', listOfKeys.GetEntries())
-
-    for key in listOfKeys:
-
-        msg.debug('Looking at key %s...', key.GetName())
-        msg.debug('Key is of class %s.', key.GetClassName())
-
-        object=directory.Get(key.GetName())
-        if not object:
-            msg.warning("Can't get object of key %s.", key.GetName())
-            return 1
-
-        if ( object.GetName().find('Meta') > -1 ) and isinstance(object,TDirectoryFile):
-            msg.warning("Will ignore Meta TDirectoryFile %s!", object.GetName() )
-            continue
-
-        if requireTree and not isinstance(object, TTree):
-            msg.warning("Object %s is not of class TTree!", object.GetName())
-            return 1
-
-        if isinstance(object,TTree):
-
-            msg.debug('Checking tree %s ...', object.GetName())
-            
-            if type=='event':
-                if checkTreeEventWise(object, msg)==1:
-                    return 1
-            elif type=='basket':    
-                if checkTreeBasketWise(object, msg)==1:
-                    return 1
-
-            msg.debug('Tree %s looks ok.', object.GetName())
-            
-        if isinstance(object, TDirectory):
-            if checkDirectory(object, type, requireTree, msg)==1:
-                return 1
-
-    msg.debug('Directory %s looks ok.', directory.GetName())
-    return 0
-
-
-def checkFile(fileName, type, requireTree, msg):
-
-    msg.info('Checking file %s.', fileName)
-
-    file=TFile.Open(fileName)
-
-    if not file:
-        msg.warning("Can't access file %s.", fileName)
-        return 1
-
-    if not file.IsOpen():
-        msg.warning("Can't open file %s.", fileName)
-        return 1
-
-    if file.IsZombie():
-        msg.warning("File %s is a zombie.", fileName)
-        file.Close()
-        return 1
-
-    if file.TestBit(TFile.kRecovered):
-        msg.warning("File %s needed to be recovered.", fileName)
-        file.Close()
-        return 1
-
-    if checkDirectory(file, type, requireTree, msg)==1:
-        msg.warning("File %s is corrupted.", fileName)
-        file.Close()
-        return 1
-
-    file.Close()
-    msg.info("File %s looks ok.", fileName)
-    return 0
-
-
-def usage():
-    print ("Usage: validate filename type requireTree verbosity")
-    print ("'type'  must be either 'event' or 'basket'")
-    print ("'requireTree' must be either 'true' or 'false'")
-    print ("'verbosity' must be either 'on' or 'off'")
-
-    return 2
-
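-# Illustrative invocation (file name hypothetical):
-#    trfValidateRootFile.py myfile.root basket true off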
-
-def main(argv):
-
-    import logging
-    msg = logging.getLogger(__name__)
-    ch=logging.StreamHandler(sys.stdout)
-    #    ch.setLevel(logging.DEBUG)
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-    ch.setFormatter(formatter)
-    msg.addHandler(ch)
-
-
-    clock=TStopwatch()
-    
-    argc=len(argv)
-
-    if (argc!=5):
-        return usage()
-
-    fileName=argv[1]
-    type=argv[2]
-    tree=argv[3]
-    verbosity=argv[4]
-
-
-    if type!="event" and type!="basket":
-        return usage()
-
-    if tree=="true":
-        requireTree=True
-    elif tree=="false":
-        requireTree=False
-    else:    
-        return usage()
-
-    if verbosity=="on":
-        msg.setLevel(logging.DEBUG)
-    elif verbosity=="off":
-        msg.setLevel(logging.INFO)
-    else:
-        return usage()
-  
-    rc=checkFile(fileName,type, requireTree, msg)
-    msg.debug('Returning %s', rc)
-    
-    clock.Stop()
-    clock.Print()
-
-    return rc
-
-    
-if __name__ == '__main__':                
-
-
-    rc=main(sys.argv)
-    sys.exit(rc)
-    
diff --git a/Tools/PyJobTransformsCore/python/trfconsts.py b/Tools/PyJobTransformsCore/python/trfconsts.py
deleted file mode 100755
index d82b85c63a63dd7773777a7f1108168501a43a8a..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/trfconsts.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-athena_py = 'athena.py'
-# names of some environment variables
-JOBOPTIONSPATH='JOBOPTSEARCHPATH'
-LD_PRELOAD = 'LD_PRELOAD'
-IFS = 'IFS'
-PYTHONPATH='PYTHONPATH'
-LD_LIBRARY_PATH='LD_LIBRARY_PATH'
-DATAPATH='DATAPATH'
-PATH='PATH'
diff --git a/Tools/PyJobTransformsCore/python/trfenv.py b/Tools/PyJobTransformsCore/python/trfenv.py
deleted file mode 100755
index aaee0c4b66324fc1c42045449dc82dc8db65095a..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/trfenv.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-# prepare the runtime environment for the transformations
-import os,re,sys
-#check python version
-minPyVersion = 0x20400f0
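-# 0x20400f0 encodes 2.4.0-final in the sys.hexversion layout:
-# (major<<24 | minor<<16 | micro<<8 | releaselevel<<4 | serial)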
-if sys.hexversion < minPyVersion:
-    gotVersion = "%d.%d.%d" % ( (sys.hexversion >> 24) & 0xFF, (sys.hexversion >> 16) & 0xFF, (sys.hexversion >> 8 ) & 0xFF )
-    minVersion = "%d.%d.%d" % ( (minPyVersion >> 24) & 0xFF, (minPyVersion >> 16) & 0xFF, (minPyVersion >> 8 ) & 0xFF )
-    raise EnvironmentError( "Python version in use (%s) is too old. At least version %s is required" % (gotVersion,minVersion) )
-
-from PyJobTransformsCore.trferr import TransformEnvironmentError
-from PyJobTransformsCore import trfconsts,envutil
-
-# no imports out of scope!
-__all__ = [ ] 
-
-trfPath = os.path.dirname(__file__)
-trfPackagePath = os.path.realpath(os.path.dirname(trfPath))
-athena_exe = None
-
-# setup the run-time environment
-def setup_runtime():
-    # check that LD_LIBRARY_PATH is set
-
-    try:
-        os.environ[trfconsts.LD_LIBRARY_PATH]
-    except KeyError:
-        raise TransformEnvironmentError( 'LD_LIBRARY_PATH not set' )
-    
-    # add current dir to jo path
-    try:
-        joPathEnv = os.environ[ trfconsts.JOBOPTIONSPATH ]
-    except KeyError:
-        jobOptionsPath = [ '' ]
-    else:
-        jobOptionsPath = re.split( ',|' + os.pathsep, joPathEnv )
-        if '' not in jobOptionsPath:
-            jobOptionsPath.insert(0, '')
-    os.environ[ trfconsts.JOBOPTIONSPATH ] = ','.join(jobOptionsPath)
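-    # e.g. (illustrative) JOBOPTSEARCHPATH='/a,/b' becomes ',/a,/b', so the
-    # current directory ('') is searched first.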
-    #
-    # setting up some basic running environment (translated from athena.py)
-    #
-##     ifs = os.environ.get(trfconsts.IFS,'')
-##     ifs += ':'
-##     os.environ[trfconsts.IFS] = ifs
-
-
-def setup_athena_runtime(athena_py=trfconsts.athena_py):
-    global athena_exe
-    # try to find the athena executable
-    athena_exe = envutil.find_executable(athena_py)
-    if not athena_exe:
-        raise TransformEnvironmentError( '%s not found in %s=%s' % (trfconsts.athena_py, trfconsts.PATH, os.environ[trfconsts.PATH]) )
-
-def cleanup_athena_runtime():
-    pass
-
-
-
diff --git a/Tools/PyJobTransformsCore/python/trferr.py b/Tools/PyJobTransformsCore/python/trferr.py
index 173323949b64d69eebef4f566600f35bd9fb94a5..4366303cdac1a5f65e4bfdb3ca70a87e7a026afe 100755
--- a/Tools/PyJobTransformsCore/python/trferr.py
+++ b/Tools/PyJobTransformsCore/python/trferr.py
@@ -1,373 +1,6 @@
 # Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
-from __future__ import print_function
-
-################################################################################
-# Exception classes
-################################################################################
-__all__ = [ 'TransformError', 'TransformDefinitionError', 'TransformArgumentError',
-            'TransformEnvironmentError', 'TransformConfigError', 'TransformErrorDiagnoser',
-            'TransformValidationError', 'FileError', 'InputFileError', 'OutputFileError', 
-            'JobOptionsNotFoundError', 'TransformErrorHandler', 'AthenaLogChecker',
-            'TransformThreadTimeout', 'TransformThreadError' ]
-
-import re
 import os
-from PyJobTransformsCore import fileutil, trfconsts, AtlasErrorCodes, VTimer
-from PyJobTransformsCore.TransformLogger import TransformLogger
-from AthenaCommon.Include import IncludeError
-
-from future import standard_library
-standard_library.install_aliases()
-
-# some constants for shared library loading problems
-systemLibs = [ 'libc.so', 'libg2c.so', 'libstdc++.so', 'libshift.so',
-               'libpthread.so', 'libm.so', 'libgcc_s.so',
-               'libXpm.so', 'libX11.so' ]
-systemLibsRE = re.compile('|'.join([re.escape(l) for l in systemLibs]))
-
-# severity levels
-WARNING=AtlasErrorCodes.WARNING
-ERROR=AtlasErrorCodes.ERROR
-FATAL=AtlasErrorCodes.FATAL
-NO_ERROR=AtlasErrorCodes.NO_ERROR
-
-_libraryRE = r'(?P<library>\S+?)(?:\s|:|$)'
-_shLibREs = [ re.compile(r'[cC]ould not load module ' + _libraryRE),
-              re.compile(r'[eE]rror (?:in|while) loading shared librar(?:y|ies): ' + _libraryRE ) ]
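-# Illustrative loader messages matched above (library name hypothetical):
-#   "could not load module libFoo"
-#   "error while loading shared libraries: libFoo.so: cannot open shared object file"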
-
-
-def examineLoadLibrary(lib):
-    """Return tuple (acronym,diagnosis) for library <lib>"""
-    from PyJobTransformsCore.envutil import examine_library
-
-    # turn module name into library name
-    if not lib.startswith('lib') and not lib.endswith('.so'):
-        lib = 'lib' + lib + '.so'
-    print ("Examining library " + lib)
-    diagLines = []
-    errorAcronym = None
-    missingSystemLibs = []
-    missingOtherLibs = []
-    misLibs = examine_library(lib)
-    for l in misLibs:
-        if systemLibsRE.search(l):
-            missingSystemLibs.append(l)
-        else:
-            missingOtherLibs.append(l)
-    if missingSystemLibs:
-        if len(missingSystemLibs) == 1: libWord = 'library'
-        else:                           libWord = 'libraries'
-        diagLines.append( 'Site problem: Missing system %s: %s' % (libWord, ','.join(missingSystemLibs)) )
-        if not errorAcronym: errorAcronym = "ATH_SITE_SYSLIBS"
-
-    if missingOtherLibs:
-        if len(missingOtherLibs) == 1: libWord = 'library'
-        else:                          libWord = 'libraries'
-        diagLines.append( 'Cannot find %s: %s. Please check the software installation.' % (libWord,','.join(missingOtherLibs)) )
-        if not errorAcronym: errorAcronym = "ATH_SITE_LIBS"
-    return (errorAcronym,os.linesep.join(diagLines))    
-
-def diagnoseAthenaCrash(error):
-    """Try to get some more info on an athena crash"""
-    haveDiagnosis = False
-    diagLines = []
-    errorAcronym = None
-    stderr = error.stderr
-    stdout = error.stdout
-    stdall = ''
-    if stderr:
-        stdall += os.linesep + stderr
-    if stdout:
-        stdall += os.linesep + stdout
-    stdall = stdall.strip()
-    for line in stdall.split(os.linesep):
-        for libRE in _shLibREs:
-            match = libRE.search(line)
-            if match:
-                lib = match.group('library')
-                errorAcronym,diag = examineLoadLibrary(lib)
-                if diag: diagLines.append(diag)
-                if errorAcronym:
-                    error.setCategory(errorAcronym)
-                    error.severity = FATAL
-                    haveDiagnosis = True
-    if diagLines:
-        error.diagnosis = os.linesep.join(diagLines)
-        error.severity = FATAL
-        haveDiagnosis = True
-    return haveDiagnosis
-
-def diagnoseGeometryError(error):
-    if error.acronym == 'TRF_GEO_MISMATCH':
-        error.diagnosis = 'ABORT task and resubmit with the geometryVersion that was used for producing the input file'
-        return True
-    return False
-
-def diagnoseSharedLibError(error):
-    message = error.message
-    if not message: return False
-    for libRE in _shLibREs:
-        match = libRE.search(message)
-        if match:
-            lib = match.group('library')
-            errorAcronym,diag = examineLoadLibrary(lib)
-            if diag:
-                error.diagnosis = diag
-            if errorAcronym:
-                error.setCategory(errorAcronym)
-                error.severity = FATAL
-            return True
-    return False
-
-
-class TransformErrorDiagnoser:
-    """Class to diagnose the reasons for errors"""
-    def __init__(self):
-        self._diagnosers = {}
-        # add a bunch of diagnosers
-        self.addDiagnoser( '^TRF_ATHENACRASH',      diagnoseAthenaCrash )
-        self.addDiagnoser( '^TRF_GEO',              diagnoseGeometryError )
-        self.addDiagnoser( '^(ATH|TRF)_?(DLL|MOD)', diagnoseSharedLibError )
-
-    def clear(self):
-        """Clear all diagnosers"""
-        self._diagnosers.clear()
-
-    def addDiagnoser(self,acronymRE,diag):
-        """Add a diagnoser function <diag> to be called if the error acronym matches
-        the regular expression <acronymRE>.
-        <diag> is a function taking an ErrorInfo as an argument and returning a boolean
-        indicating if a diagnosis was found and written into the error object."""
-        if type(acronymRE) is str: acronymRE = re.compile(acronymRE)
-        self._diagnosers[acronymRE] = diag
-
-    def diagnoseError(self,error):
-        """If possible, add a diagnosis to the ErrorInfo <error>.
-        Return a boolean indicating whether a diagnosis has been added.
-        The first diagnoser that adds a diagnosis (i.e. returns True),
-        will be taken and the following ones will not be called."""
-        acronym = error.acronym
-        #
-        # remove POOL and ROOT warning messages from stderr
-        #
-        if error.stderr:
-            stderrNew = []
-            stderrChanged = False
-            ignorePatterns = [ r"Warning in <TClass::TClass>: no dictionary for class .* is available",
-                               r"WARNING: \$POOL_CATALOG is not defined",
-                               r"using default `xmlcatalog_file:PoolFileCatalog.xml" ]
-            ignoreREs = []
-            # some warning messages come asynchronously, therefore remove only the matching part
-            for pat in ignorePatterns:
-                ignoreREs.append( re.compile(r'(?P<before>^.*?)' + pat + r'(?P<after>.*$)') )
-            for line in error.stderr.split(os.linesep):
-                newLine = line
-                for ignore in ignoreREs:
-                    match = ignore.search(newLine)
-                    if match:
-                        newLine = (match.group('before') or '') + (match.group('after') or '')
-                stderrNew.append( newLine )
-                if newLine != line: stderrChanged = True
-            if stderrChanged:
-                error.stderr = os.linesep.join(stderrNew)
-        #
-        # Try to find a diagnoser for this error
-        #
-        for acro,diag in self._diagnosers.items():
-            if acro.search(acronym):
-                print ("Diagnosing error %s with %s" % (acronym,diag.__name__))
-                if diag(error): break
-        #
-        # If no message, then set message to diagnosis (if present) or stderr (if present)
-        #
-        if not error.message:
-            if error.diagnosis: error.message = error.diagnosis
-            elif  error.stderr: error.message = error.stderr
-        return False
-
-
-##
-# @class AthenaLogChecker
-# @brief performs checking for fatals, errors, warnings and error codes in the log file produced by the Athena application
-#
-class AthenaLogChecker:
-    _eventLoopPat = r'\S*EventLoopMgr\s*  INFO'
-    _startOfExecuteRE = re.compile( r'%s.*start of run' % (_eventLoopPat,) )
-    _eventNumberRE = re.compile( r'%s.*start of event (?P<event>[0-9]+)' % (_eventLoopPat) )
-    _noMoreEventNumberRE = re.compile( r'%s.*Message limit reached' % (_eventLoopPat) )
-    _startOfFinaliseRE = re.compile( r'\S+\s*(?:   INFO|  DEBUG|VERBOSE|    ALL).*[Ff]inali[zs]e' )
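-    # Illustrative log lines (hypothetical) matched by the patterns above:
-    #   "AthenaEventLoopMgr  INFO ===>>> start of run <<<==="      -> _startOfExecuteRE
-    #   "AthenaEventLoopMgr  INFO ===>>> start of event 5 <<<==="  -> _eventNumberRE, event='5'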
-
-    ## c'tor
-    # @param self "Me, myself and Irene"
-    # @param release Atlas software release name 
-    def __init__( self, release ):
-        self.release = release
-        self.reset()
-        # read error categories, error patterns and ignore patterns if needed
-        if not AtlasErrorCodes.categoriesCount()    : AtlasErrorCodes.readCategories()
-        if not AtlasErrorCodes.errorPatternsCount() : AtlasErrorCodes.readErrorPatterns()
-        if not AtlasErrorCodes.ignorePatternsCount(): AtlasErrorCodes.readIgnorePatterns()
-        self.vTimer = VTimer.VTimer( 'AthenaLogChecker' )
-
-    ## clears all counters
-    # @param self "Me, myself and Irene"
-    def reset(self):
-        """Reset everything for a clean (re)start"""
-        self.ignoreCount = 0
-        self.errorCount = 0
-        self.warningCount = 0
-        self.fatalCount = 0
-        self.stage = 'initialise'
-        self.event = None
-
-    ## increases counter for given severity 
-    # @param self "Me, myself and Irene"
-    # @param severity a string, one of ["ERROR", "WARNING", "FATAL"]
-    # @param add value for increasing counter, default = 1
-    def countError(self,severity,add=1):
-        if severity == 'ERROR':
-            self.errorCount += add
-        elif severity == 'WARNING':
-            self.warningCount += add
-        elif severity == 'FATAL':
-            self.fatalCount += add
-                
-    ##
-    # @param self "Me, myself and Irene"
-    # @param filename path to log file to check
-    # @param report JobReport instance
-    # @param logger a logger instance
-    def checkLogFile(self,filename,report,logger=None):
-        """Check the logfile <filename> for known errors and add those to JobReport <report>.
-        Returns the number of lines checked."""
-        self.vTimer.start( '%s log' % report._trf )
-        self.reset()
-        if logger:
-            releaseName = 'ALL'
-            if self.release:
-                releaseName += ',' + self.release 
-            logger.info("Scanning athena logfile %s assuming ATLAS release %s ...",
-                        filename, releaseName)
-            logger.info("Athena initialise()...")
-        logFile = open(filename)
-        nLines = 0
-        for line in logFile:
-            nLines += 1
-            line = line.rstrip()
-            report.addError( self.processLine( line, logger ) )
-        logFile.close()
-        if logger:
-            logger.info("Done scanning %d lines of file %s. Summary:", nLines, filename)
-            logger.info("   Ignored : %d", self.ignoreCount )
-            logger.info("   Warnings: %d", self.warningCount )
-            logger.info("   Errors  : %d", self.errorCount )
-            logger.info("   Fatals  : %d", self.fatalCount )
-        self.vTimer.stop( '%s log' % report._trf )
-        return nLines
-
-    ##
-    # @param self "Me, myself and Irene"
-    # @param line line from log to check 
-    # @param logger a logger instance or None 
-    def processLine(self,line,logger=None):
-        """Check line for errors, warnings, fatals and recognised error patterns.
-        Return ErrorInfo object if any match is found, and None otherwise"""
-        line=line.rstrip()
-        # see where we are in the execution
-        if self.stage == 'initialise':
-            if  AthenaLogChecker._startOfExecuteRE.match(line):
-                if logger: logger.info("Athena execute()...")
-                self.stage = 'execute'
-                return None
-        elif self.stage == 'execute':
-            if AthenaLogChecker._startOfFinaliseRE.match(line):
-                if logger: logger.info("Athena finalise()...")
-                self.stage = 'finalise'
-                self.event = None
-                return None
-            match = AthenaLogChecker._eventNumberRE.match(line)
-            if match:
-                self.event = match.group('event')
-                if logger: logger.debug( "Athena event %s" , self.event )
-                return None
-            if AthenaLogChecker._noMoreEventNumberRE.match(line):
-                oldEvent = self.event
-                self.event = None
-                if logger and oldEvent is not None:
-                    logger.debug( "No more event numbers available" )
-                return None
-        # match ignore patterns
-        ignore = AtlasErrorCodes.matchIgnorePattern(line,self.release)
-        if ignore:
-            if ignore.re.pattern == r'.*?\s+?INFO .+':
-                return None
-            self.ignoreCount += 1
-            if logger:
-                logger.debug("ignoring error in line: \"%s\"", line)
-                logger.debug("    because it matched: \"%s\"", ignore.re.pattern)
-            return None
-        # then match known error patterns
-        match, err = AtlasErrorCodes.matchErrorPattern(line,self.release)
-        if err:
-            self.processError(err)
-            if logger:
-                logger.debug("matched error category %s in line: %s", err.category.acronym, line)
-                logger.debug("    because it matched: \"%s\"", match.re.pattern)
-            return err
-        # finally, perform generic error match
-        err = self.extractError(line)
-        if err:
-            self.processError(err)
-            if logger:
-                logger.verbose("non-matched error in line: %s", line)
-            return err
-        return None
-
-    def processError(self,err):
-        err.addEvents( self.event )
-        err.stage = self.stage
-        self.countError(err.severity)
-
-    ## extracts errors, warnings and fatals from a line
-    # @param self "Me, myself and Irene"
-    # @param line string to check
-    # @return ErrorInfo instance or None
-    def extractError(self,line):
-        """For ERROR,WARNING and FATAL messages, return ErrorInfo object
-        with who, severity and message field filled. For all other messages
-        return None"""
-        line=line.rstrip()
-        lineREC = re.compile(r"(^\S*\s*(?=WARNING|ERROR|FATAL))(WARNING|ERROR|FATAL)\:?\s+(.+$)")
-        match = lineREC.search(line)
-        if match:
-            who = match.group(1).strip()
-            # no more than 2 words in algorithm name
-            if ( len( who.split() ) > 2 ): return None
-            severity = match.group(2).strip()
-            if severity == 'FATAL':
-                severity = FATAL
-            elif severity == 'ERROR':
-                severity = ERROR
-            elif severity == 'WARNING':
-                severity = WARNING
-            message = match.group(3)
-            if not who: who = "(Unknown)"
-            if message.startswith(':'):
-                if len(message) > 1:
-                    message = message[1:]
-                else:
-                    message = ''
-            if not message: message = None
-            if severity == ERROR or severity == FATAL :
-                acronym = 'TRF_UNKNOWN'
-            else:
-                acronym = None
-            # count
-            return AtlasErrorCodes.ErrorInfo(acronym=acronym,severity=severity,who=who,message=message)
-        else:
-            return None
-
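# A minimal, self-contained sketch (with an invented log line) of the generic
# severity match that extractError() performs above:
import re
lineREC = re.compile(r"(^\S*\s*(?=WARNING|ERROR|FATAL))(WARNING|ERROR|FATAL)\:?\s+(.+$)")
m = lineREC.search("ToolSvc.MyTool     ERROR could not retrieve handle")
assert m.group(1).strip() == "ToolSvc.MyTool" and m.group(2) == "ERROR"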
 
 class TransformError( Exception ):
     """Base class for PyJobTransform Exception classes"""
@@ -394,215 +27,15 @@ class TransformError( Exception ):
         self.args = (self.error,self.message)
 
 
-class TransformThreadError( TransformError ):
-    def __init__(self,message=None,error='TRF_TIMEOUT',**kwargs):
-        TransformError.__init__(self,message,error,**kwargs)
-
-
-class TransformThreadTimeout( TransformThreadError ):
-    pass
-
-
-class TransformDefinitionError( TransformError ):
-    """Exception raised in case of an error in the definition of the transformation"""
-    def __init__(self,message=None,error='TRF_DEF',**kwargs):
-        TransformError.__init__(self,message,error,**kwargs)
-
-
-class TransformArgumentError( TransformError ):
-    """Exception raised in case of an error in the argument values"""
-    def __init__(self,message=None,error='TRF_ARG',**kwargs):
-        TransformError.__init__(self,message,error,**kwargs)
-
-
-class TransformEnvironmentError( TransformError ):
-    """Exception raised in case of an error in the run-time environment"""
-    def __init__(self,message=None,error='TRF_ENV',**kwargs):
-        TransformError.__init__(self,message,error,**kwargs)
-        
-
 class TransformConfigError( TransformError ):
     """Exception raised in case of an error in the transform configuration"""
     def __init__(self,message=None,error='TRF_CONFIG',**kwargs):
         TransformError.__init__(self,message,error,**kwargs)
 
 
-class FileError( TransformArgumentError ):
-    def __init__(self,filename,message=None,error='TRF_FILE'):
-        TransformArgumentError.__init__(self,message,error) 
-        self._filename = filename
-
-    def filename(self):
-        return self._filename
-
-
-class OutputFileError( FileError ):
-    def __init__(self,filename,message=None,error='TRF_OUTFILE'):
-        mess = "Output file %s" % filename
-        if message:
-            mess += ' ' + message
-        FileError.__init__( self, filename, mess, error )
-
-
-class InputFileError( FileError ):
-    def __init__(self,filename,message=None,error='TRF_INFILE'):
-        mess = "Input file %s" % filename
-        if message:
-            mess += ' ' + message
-        FileError.__init__( self, filename, mess, error )
-
-
-class TransformValidationError( OutputFileError ):
-    """Exception raised when transform validation fails"""
-    def __init__( self, filename, message=None, error='TRF_OUTFILE_CORRUP' ):
-        OutputFileError.__init__( self, filename, message, error )
-
-
 class JobOptionsNotFoundError( TransformError ):
     """Exception raised in case a joboptions file can not be found"""
     def __init__(self,filename,message=None,error='ATH_JOP_NOTFOUND'):
         mess = "JobOptions file %s not found" % filename
         if message: mess += '. ' + message
         TransformError.__init__(self,mess,error)
-
-
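# Hedged usage sketch of the exception hierarchy above; the argument name
# 'maxEvents' is invented for illustration:
try:
    raise TransformArgumentError("maxEvents must be a positive integer")
except TransformError as e:
    print(e.error, e.message)   # -> TRF_ARG maxEvents must be a positive integer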
-class TransformErrorHandler(TransformLogger):
-    def __init__(self):
-        self._name = self.__class__.__name__
-        TransformLogger.__init__(self,self._name)
-
-    def name(self):
-        return self._name
-
-    def handleException(self,e):
-        """Return a filled ErrorInfo object if the error is handled, or None
-        is the error is not handled"""
-        # print ("Handling Exception %s %s" % (e.__class__.__name__, e.args))
-        #
-        # general pre-processing
-        #
-        # add filename to EnvironmentError for printout
-        if isinstance(e,EnvironmentError):
-            fn = e.filename
-            if fn is not None and fn not in e.args: e.args += (fn,)
-        #
-        # specific processing
-        #
-        if isinstance(e,TransformError):      return self.handleTransformError(e)
-        elif isinstance(e,IncludeError):      return self.handleIncludeError(e)
-        elif isinstance(e,SystemExit):        return self.handleSystemExit(e)
-        elif isinstance(e,KeyboardInterrupt): return self.handleKeyboardInterrupt(e)
-        elif isinstance(e,RuntimeError):      return self.handleRuntimeError(e)
-        elif type(e) in (AttributeError,NameError,TypeError,SyntaxError):
-            return self.handlePythonSyntaxError(e)
-        elif isinstance(e,Exception):
-            if hasattr(e,'args') and isinstance(e.args, (list, tuple)) and e.args:
-                args0 = e.args[0]
-                # test for some known strings
-                if isinstance(args0, str):
-                    if args0.find('Failed to load DLL') != -1:
-                        return self.handleDllLoadError(e)
-        # error was not handled
-        return None
-                        
-    def handleTransformError(self,e):
-        err = AtlasErrorCodes.ErrorInfo(acronym=e.error, severity = FATAL, message=e.message)
-        # transfer all extras
-        for n,v in e.extras.items():
-            if hasattr(err,n):
-                setattr(err,n,v)
-            else:
-                err.message += os.linesep + '%s=%s' % (n,v)
-        return err
-
-    def handleIncludeError(self,e):
-        try:
-            joPath = os.environ[trfconsts.JOBOPTIONSPATH]
-        except KeyError:
-            diag = 'Environment variable %s not defined' % (trfconsts.JOBOPTIONSPATH)
-        else:
-            diag = 'Not found in %s=%s' % (trfconsts.JOBOPTIONSPATH,joPath)
-        return AtlasErrorCodes.ErrorInfo( acronym = 'ATH_JOP_NOTFOUND', severity = FATAL, message = e.args, diagnosis = diag )
-
-    def handleSystemExit(self,e):
-        try:
-            status = e.args[ 0 ]
-        except Exception:
-            status = 0
-        if status == 0:
-            return AtlasErrorCodes.ErrorInfo( acronym = 'OK' )
-        return AtlasErrorCodes.ErrorInfo( acronym = 'ATH_FAILURE', severity = FATAL, message = 'SystemExit %s' % status )
-        
-    def handleKeyboardInterrupt(self,e):
-        return AtlasErrorCodes.ErrorInfo( acronym='TRF_KEY_INTERRUPT',
-                          severity = FATAL,
-                          diagnosis="You pressed the interrupt key on the keyboard (usually CTRL-C)")
-
-    def handlePythonSyntaxError(self,e):
-        return AtlasErrorCodes.ErrorInfo( acronym='TRF_PYT_SYNTAX', severity = FATAL, message='%s: %s' % (e.__class__.__name__, e.args) )
-
-    def handleRuntimeError(self,e):
-        mess = ''
-        if hasattr(e,'args'):
-            if type(e.args) is str:
-                mess = e.args
-            elif type(e.args) in (list,tuple) and type(e.args[0]) is str:
-                mess = e.args[0]
-        if 'C++ exception' in mess:
-            return AtlasErrorCodes.ErrorInfo( acronym='ATH_EXC_CXX', severity = FATAL, message='%s: %s' % (e.__class__.__name__, e.args) )
-        return None
-    
-    def handleDllLoadError(self,e):
-        # try to find the guilty one
-        import subprocess
-        from PyJobTransformsCore.trfutil import TRACEBACK_TEXT, find_in_stack
-        from PyJobTransformsCore.envutil import find_library
-
-        mess = None
-        diag = None
-        dllRE = r"^theApp.Dlls\s*[+]?="
-        stack = find_in_stack( dllRE )
-        if stack:
-            text = stack[TRACEBACK_TEXT]
-            dllNameRE = r"([\w\.\-]+)"
-            subRE = "%s%s%s%s" % (dllRE,r"\s*\[\s*\"", dllNameRE, r"\"\s*\]")
-            dll = re.sub( subRE, r"\1", text )
-            lib = 'lib%s.so' % (dll)
-            full_lib = find_library(lib)
-            mess = 'module %s can not be loaded' % (dll)
-            diag = ''
-            if not full_lib:
-                diag += '%s not found.' % (lib)
-            else:
-                self.logger().debug( "Found %s. Checking dependencies...", full_lib )
-                lddOut = subprocess.getoutput( 'ldd %s' % (full_lib) )
-                missLibs = [ ]
-                subRE = "%s%s%s" % (r"^\s*",dllNameRE,r"\s+.*not found\s*.*$")
-                # getoutput() returns a single string, so iterate over its lines
-                for line in lddOut.splitlines():
-                    if re.search( "not found", line ):
-                        misLib = re.sub( subRE, r"\1", line )
-                        missLibs.append( misLib )
-                        fullMissLib = find_library(misLib)
-                        if fullMissLib:
-                            dir = os.path.dirname(fullMissLib)
-                            if not fileutil.access( fullMissLib, os.R_OK ):
-                                diag += "%s is found in but can not be read from %s." % (fullMissLib,dir)
-                                diag += os.linesep
-                            else:
-                                diag += "%s is found but ldd can not load it from %s." % (misLib,dir)
-                                diag += os.linesep
-                                if dir.startswith('/afs/'):
-                                    diag += "Check your afs cache size, it may be too small."
-                if len(missLibs) >= 1:
-                    diag += '%s can not be found' % (', '.join(missLibs))
-            if diag:
-                diag += ' %s=%s' % (trfconsts.LD_LIBRARY_PATH,os.environ[trfconsts.LD_LIBRARY_PATH])
-            else:
-                diag=None
-        return AtlasErrorCodes.ErrorInfo( acronym='ATH_DLL_LOAD', severity = FATAL, message=mess, diagnosis=diag )
-
-#
-# end of class TransformErrorHandler:
-#
-defaultErrorHandler = TransformErrorHandler()
-errorHandler = defaultErrorHandler
diff --git a/Tools/PyJobTransformsCore/python/trfutil.py b/Tools/PyJobTransformsCore/python/trfutil.py
index 3f6e6e48f62931ee4466263ef40b3554de683bae..702ee9ddf82aa3e08e3435ebbf77abf91e79e825 100755
--- a/Tools/PyJobTransformsCore/python/trfutil.py
+++ b/Tools/PyJobTransformsCore/python/trfutil.py
@@ -1,584 +1,9 @@
 # Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
 
-from __future__ import print_function
+import os, shutil
 
-import os, sys, re, shutil, glob, time, signal, pickle
-import bz2
-import functools
-import tarfile
-import uuid
-from builtins import range
+from .envutil import find_files_env
 
-from subprocess import Popen, PIPE, STDOUT
-from threading import Thread
-try:
-    TRF_Thread_stop = Thread._Thread__stop
-except AttributeError:  # __stop does not exist in Python 3.0
-    TRF_Thread_stop = Thread._stop
-
-from .envutil import find_executable, find_file_env, find_files_env
-from PyJobTransformsCore import trfconsts, trfenv, fileutil
-from PyJobTransformsCore.trferr import TransformValidationError, TransformThreadError, TransformThreadTimeout, JobOptionsNotFoundError, FileError
-from PyJobTransformsCore.TransformLogger import TransformLogger
-from PyJobTransformsCore.VTimer import vTimer
-from PyUtils import RootUtils
-
-try:
-    import PyDumper.SgDumpLib as sdl
-except ImportError:
-    print ("Unable to import PyDumper.SgDumpLib.")
-    sdl = None
-
-VALIDATION_DICT = { 'ALL' : None, 'testIfEmpty' : None, 'testIfNoEvents' : None, 'testIfExists' : None, 'testIfCorrupt' : None, 'testCountEvents' : None, 'extraValidation' : None, 'testMatchEvents' : None, 'testEventMinMax' : None , 'stopOnEventCountNone' : None, 'continueOnZeroEventCount' :None}
-
-TRF_SETTING = { 'testrun' : False, 'validationTimeout' : 600, 'validationRetry' : 2, 'validationSleepInterval' : 10, 'TRFTimeout' : 600, 'TRFRetry' : 2, 'TRFSleepInterval' : 10 }
-
-LFN_VER_DICT = {}
-
-_PFNPat = re.compile( r'^(?P<url>\S*?)(?P<lfn>[A-Za-z0-9\.\-\_]+?)(?P<ver>\.\d+)?$' )
-
-_defaultSignalHandlerDict = {}
-
-class TrfAlarm(Exception):
-    '''Exception to break out of blocking calls when an alarm is raised'''
-    pass 
-
-def alarmHandler(signum, frame):
-    '''Signal handler for SIGALRM - raise alarm exception'''
-    raise TrfAlarm
-
-## ItemInList, AddValidItemToList & OverlapLists: three basic helper functions for lists of strings
-def ItemInList(item,aList):
-    if not isinstance(aList, list):
-        raise TypeError("ItemInList() does not support aList of type %s"%type(aList))
-    return item in aList
-
-def OverlapLists(List1, List2):
-    for item in List1:
-        if item in List2:
-            return True
-    return False
-
-def AddValidItemToList(item,aList):
-    #Recursive loop if item is a list
-    if isinstance(item,list):
-        for i in item:
-            AddValidItemToList(i,aList)
-    #Add to list if item is a string and not already in the list
-    elif isinstance(item, str):
-        if not ItemInList(item,aList):
-            aList += [item]
-    else:
-        raise TypeError("AddValidItemToList() does not support item of type %s"%type(item))
-    return 
-
-## @brief Install common handler for various signals.
-#  @details All existing signal handlers are saved in the @em _defaultSignalHandlerDict dictionary to allow for them to be restored.
-#  @param handler Common signal handler for all signals concerned.
-#  @warning Signal handling in Python is not fully implemented. Certain signals (even some of those listed below) cannot be caught.
-#  @return Dictionary of the signal handlers that were replaced.
-def setTrfSignalHandler( handler ):
-    xDict = {}
-    if not _defaultSignalHandlerDict: # default handlers have not been saved
-        xDict = _defaultSignalHandlerDict
-    for s in [ 'SIGABRT', 'SIGFPE', 'SIGBUS', 'SIGHUP', 'SIGILL', 'SIGINT', 'SIGIO', 'SIGPIPE', 'SIGQUIT', 'SIGSEGV', 'SIGSYS', 'SIGTERM', 'SIGXCPU', 'SIGXFSZ' ]:
-        try:
-            xDict[ s ] =  signal.signal( getattr( signal, s ), handler )
-        except Exception as e:
-            print ("Unable to attach custom signal handler to %s: %s" % ( s, e ))
-            continue
-    return xDict
-
-## @brief Restore signal handlers to the default ones
-#  @details Handlers are restored from @em _defaultSignalHandlerDict dictionary.
-#  @warning Signal handling in Python is not fully implemented. Certain signals cannot be caught.
-#  @return The SIGTERM handler that was in effect before restoration.
-def setDefaultSignalHandlers():
-    if _defaultSignalHandlerDict:
-        currentTRFSignalHandler = signal.getsignal( signal.SIGTERM )
-    else:
-        currentTRFSignalHandler = signal.SIG_DFL
-    for s in [ 'SIGABRT', 'SIGFPE', 'SIGBUS', 'SIGHUP', 'SIGILL', 'SIGINT', 'SIGIO', 'SIGPIPE', 'SIGQUIT', 'SIGSEGV', 'SIGSYS', 'SIGTERM', 'SIGXCPU', 'SIGXFSZ' ]:
-        try:
-            signal.signal( getattr( signal, s ), _defaultSignalHandlerDict.get( s, signal.SIG_DFL ) )
-        except Exception as e:
-            print ("Unable to attach custom signal handler to %s: %s" % ( s, e ))
-            continue
-    return currentTRFSignalHandler
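# Pairing sketch (the handler body is invented): install a common handler for
# the duration of the payload, then restore the saved defaults.
def _reportSignal(signum, frame):
    print('transform caught signal %d' % signum)
setTrfSignalHandler(_reportSignal)
# ... run the transform payload ...
setDefaultSignalHandlers()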
-
-## @brief List all processes and parents and form a dictionary where the 
-#  parent key lists all child PIDs
-def getAncestry():
-    psCmd = ['ps', 'a', '-o', 'pid,ppid', '-m']
-
-    try:
-        print ('Executing %s' % psCmd)
-        p = Popen(psCmd, stdout=PIPE, stderr=PIPE)
-        stdout = p.communicate()[0]
-        psPID = p.pid
-    except OSError as e:
-        print ('Failed to execute "ps" to get process ancestry: %s' % repr(e))
-        raise
-    
-    childDict = {}
-    for line in stdout.split('\n'):
-        try:
-            (pid, ppid) = line.split()
-            pid = int(pid)
-            ppid = int(ppid)
-            # Ignore the ps process
-            if pid == psPID:
-                continue
-            if ppid in childDict:
-                childDict[ppid].append(pid)
-            else:
-                childDict[ppid] = [pid]
-        except ValueError:
-            # Not a nice line
-            pass
-    return childDict
-
-## @brief Find all the children of a particular PID (calls itself recursively to descend into each leaf)
-#  @note  The list of child PIDs is reversed, so the grandchildren are listed before the children,
-#         etc.; signalling from left to right therefore reaches the deepest descendants first.
-def listChildren(psTree = None, parent = os.getpid()):
-    '''Take a psTree dictionary and list all children'''
-    if psTree is None:
-        psTree = getAncestry()
-    
-    children = []
-    if parent in psTree:
-        children.extend(psTree[parent])
-        for child in psTree[parent]:
-            children.extend(listChildren(psTree, child))
-    children.reverse()
-    return children
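# Data-flow sketch: getAncestry() maps each parent PID to its children, and
# listChildren() flattens the subtree below a PID, deepest descendants first.
# The PIDs here are invented:
psTree = {100: [200, 300], 200: [400]}
assert listChildren(psTree, parent=100) == [400, 300, 200]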
-
-
-## @brief Kill all PIDs 
-def infanticide(childPIDs, sleepTime = 3):
-    print ('Will kill these processes: %s' % childPIDs)
-    for pid in childPIDs:
-        try:
-            os.kill(pid, signal.SIGINT)
-        except OSError:
-            pass
-        
-    time.sleep(sleepTime)
-        
-    for pid in childPIDs:
-        try:
-            os.kill(pid, signal.SIGKILL)
-        except OSError:
-            pass
-
-
-## @brief Decorator to dump a stack trace when hit by SIGUSR1
-def sigUsrStackTrace(func):
-    import signal
-    import traceback
-    
-    class SigUsr1(Exception):
-        pass
-    
-    def sigHandler(signum, frame):
-        print ('Handling signal %d in sigHandler' % signum)
-        raise SigUsr1
-    
-    def signal_wrapper(*args, **kwargs):
-        signal.signal(signal.SIGUSR1, sigHandler)
-        
-        try:
-            return func(*args, **kwargs)
-        
-        except SigUsr1:
-            print ('Transform received SIGUSR1. Exiting now with stack trace...')
-            traceback.print_exc()
-            myChildren = listChildren()
-            if myChildren:
-                print ('Will now try to kill child PIDs: %s' % myChildren)
-                infanticide(myChildren)
-            sys.exit(128 + signal.SIGUSR1)  
-            
-    functools.update_wrapper(signal_wrapper, func)
-    return signal_wrapper
-
-
-
-# In case things get stuck here, implement the SIGUSR stack trace wrapper
-@sigUsrStackTrace
-def timelimited_exec1( tl_func, tl_timeout = TRF_SETTING[ 'TRFTimeout' ], tl_retry = TRF_SETTING[ 'TRFRetry' ], tl_interval = TRF_SETTING[ 'TRFSleepInterval' ], **tl_func_kwargs ):
-    if tl_timeout <= 0:
-        raise ValueError( "tl_timeout parameter must be a positive number of seconds." )
-    if tl_retry <= 0:
-        raise ValueError( "tl_retry parameter must be a positive number." )
-    while tl_retry >= 0:
-        signal.signal(signal.SIGALRM, alarmHandler)
-        signal.alarm(int(tl_timeout))
-        shortCmd = tl_func.replace('\n', '\\n')
-        if len(shortCmd) > 80:
-            shortCmd = shortCmd[:77] + '...'
-        print ('Calling "%s" with timeout %ds' % (shortCmd, tl_timeout))
-        p = Popen(tl_func,shell=True,stdout=PIPE,stderr=STDOUT, preexec_fn = lambda : os.setpgid(0,0))
-        stdout = stderr = '' # Make sure these variables are valid.
-        try:
-            (stdout, stderr) = p.communicate()
-            signal.alarm(0)
-            rc = p.returncode
-        except TrfAlarm:
-            # Timeout on command happened
-            print ('Time limited exec command expired')
-            # Kill the subshell and all spawned children 
-            myChildren = listChildren()
-            infanticide(myChildren)
-            p.poll()
-            if p.returncode is None:
-                # Error - set some fallback value for rc
-                rc = -signal.SIGALRM
-            else:
-                rc = p.returncode
-        if rc == 0:
-            break
-        if tl_retry == 0: # no more retries
-            print ("Maximum retry attempts exhausted!")
-            break
-        print ('Retrying %d more time(s).' % tl_retry)
-        tl_retry -= 1
-        print ('Waiting %d secs before retry.' % tl_interval)
-        time.sleep( tl_interval )
-        tl_timeout *= 1.5
-        print ('Increasing timeout duration to %d.' % tl_timeout)
-    return rc, stdout
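# Minimal usage sketch for timelimited_exec1; the shell command is arbitrary:
rc, out = timelimited_exec1('echo hello', tl_timeout=60, tl_retry=1, tl_interval=5)
if rc == 0:
    print(out)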
-
-## Execute a function, allowing it to run for tl_timeout secs before raising an exception.
-#  tl_retry retries are permitted, with a sleep interval of tl_interval secs between attempts.
-def timelimited_exec( tl_func, tl_func_args = (), tl_pre_func = lambda:None, tl_post_func = lambda:None, tl_timeout = TRF_SETTING[ 'TRFTimeout' ], tl_retry = TRF_SETTING[ 'TRFRetry' ], tl_interval = TRF_SETTING[ 'TRFSleepInterval' ], **tl_func_kwargs ):
-    class TOThread( Thread ):
-        def __init__( self ):
-            Thread.__init__( self )
-            self.returnVal = None
-            self.error = None
-
-        def run( self ):
-            try:
-                try:
-                    tl_pre_func()
-                except Exception as err_pre:
-                    print ("%s failed: %s" % ( repr( tl_pre_func ), err_pre.message ) )
-                self.returnVal = tl_func( *tl_func_args, **tl_func_kwargs )
-                try:
-                    tl_post_func()
-                except Exception as err_post:
-                    print ("%s failed: %s" % ( repr( tl_post_func ), err_post.message ) )
-            except Exception as err_func:
-                self.error = err_func
-
-        def stop( self ):
-            self.error = TransformThreadTimeout( '%s took more than the permitted %d secs.' % ( repr( tl_func ), tl_timeout ) )
-            try:
-                tl_post_func()
-            except Exception as err_post:
-                print ("%s failed in stop(): %s" % ( repr( tl_post_func ), err_post.message ) )
-            TRF_Thread_stop( self )
-
-    if tl_timeout < 0:
-        raise ValueError( "tl_timeout parameter must be a non-negative number of seconds." )
-    if tl_retry < 0:
-        raise ValueError( "tl_retry parameter must be a non-negative number." )
-#    print ('Disabling transform signal handler.')
-    currentTRFSignalHandler = setDefaultSignalHandlers()
-    while tl_retry >= 0:
-        tl_thread = TOThread()
-        tl_thread.start()
-        tl_thread.join( tl_timeout )
-        if tl_thread.is_alive():
-            print ('%s took more than the permitted %d secs.' % ( repr( tl_func ), tl_timeout ))
-            try:
-                tl_thread.stop()
-            except Exception as err:
-                if tl_retry == 0:
-#                    print ('Re-enabling transform signal handler.')
-                    setTrfSignalHandler( currentTRFSignalHandler )
-                    raise TransformThreadError( 'Exception caught whilst attempting to stop %s: %s' % ( repr( tl_func ), err ) )
-                else:
-                    print ('Exception caught whilst attempting to stop %s.' % repr( tl_func ))
-                    tl_thread.error = TransformThreadError( 'Exception caught whilst attempting to stop %s: %s' % ( repr( tl_func ), err ) )
-        # Thread has stopped
-        if tl_thread.error is None:
-            x = tl_thread.returnVal
-            del tl_thread
-#            print ('Re-enabling transform signal handler.')
-            setTrfSignalHandler( currentTRFSignalHandler )
-            return x
-        # An error was detected
-        if tl_retry == 0: # No more retry
-            x = tl_thread.error
-            del tl_thread
-#            print ('Re-enabling transform signal handler.')
-            setTrfSignalHandler( currentTRFSignalHandler )
-            raise x
-        else: # Continue retrying
-            print ('Retrying %d more time(s).' % tl_retry)
-            tl_retry -= 1
-            del tl_thread
-        print ('Waiting %d secs before retry.' % tl_interval)
-        time.sleep( tl_interval )
-        tl_timeout *= 1.5
-#    print ('Re-enabling transform signal handler.')
-    setTrfSignalHandler( currentTRFSignalHandler )
-
-def validGUID(testString):
-    if re.match(r'[\dA-Fa-f]{8}-[\dA-Fa-f]{4}-[\dA-Fa-f]{4}-[\dA-Fa-f]{4}-[\dA-Fa-f]{12}$', testString):
-        return True
-    else:
-        return False
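# The accepted GUID format is 8-4-4-4-12 hexadecimal digits; the value below is invented:
assert validGUID('01234567-89AB-cdef-0123-456789ABCDEF')
assert not validGUID('not-a-guid')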
-    
-
-def getGUIDfromPFC(filename):
-    '''Retrieve a file GUID from the PFC. Returns a tuple with error code and guid (if found)'''
-    guid = None
-    catalog = os.environ.get('POOL_CATALOG')
-    if not catalog:
-        catalog = RootTTreeFile.defaultCatalog
-        os.environ['POOL_CATALOG'] = catalog
-    poolcmd = ['FClistGUID']
-    if not find_executable(poolcmd[0]):
-        raise EnvironmentError('Pool utility %s not found in PATH' % poolcmd[0] )
-    if filename.startswith('LFN:'):
-        poolcmd.extend(['-l', filename[4:]])
-    else:
-        poolcmd.extend(['-p', filename])
-    print ('Using %s for GUID retrieval' % poolcmd)
-    p = Popen(poolcmd, shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
-    (stdout, stderr) = p.communicate()
-    for line in stdout.split(os.linesep):
-        line = line.strip()
-        if validGUID(line):
-            guid = line
-            break
-    if p.returncode != 0:
-        print ("GUID retrieval failed: %s" % stderr)
-        return (1, None)
-    if guid is None:
-        print ('Did not find GUID in catalog %s (usually harmless)' % catalog)
-        return (0, None)
-    print ("GUID retrieval: %s (%s) found in %s" % ( guid, filename, catalog ))
-    return (0, guid)
-
-
-def addGUIDtoPFC(filename, guid, type = 'ROOT_All'):
-    '''Insert a GUID into the PFC. Returns standard error code'''
-    catalog = os.environ.get('POOL_CATALOG')
-    if not catalog:
-        catalog = RootTTreeFile.defaultCatalog
-        os.environ['POOL_CATALOG'] = catalog
-    poolcmd = ('FCregisterPFN', '-p', filename, '-t', type, '-g', guid)
-    if not find_executable(poolcmd[0]):
-        raise EnvironmentError('Pool utility %s not found in PATH' % poolcmd[0] )
-    print (poolcmd)
-    p = Popen(poolcmd, shell=False, stdout=PIPE, stderr=PIPE, close_fds=True)
-    (stdout, stderr) = p.communicate()
-    if p.returncode != 0:
-        print ("GUID insertion failed: %s" % stderr)
-        return 1
-    print ("Added GUID %s for file %s to %s" % ( guid, filename, catalog ))
-    return 0
-
-
-def StringToList(cmd):
-    valList=None
-    if isinstance(cmd,list):
-        valList=cmd
-    else:
-        try:
-            valList=cmd.split(',,')
-        except Exception:
-            raise ValueError("StringToList cannot interpret '%s' as a list."%str(cmd))
-    return valList
-
-def ntup_entries(fname, tree_names):
-    """Count events in ROOT-based files."""
-    if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ):
-        print ('Test run in progress. Event count (ROOT-based) disabled.')
-        return None
-    #work with string or list of strings
-    if not isinstance(tree_names,list):
-        tree_names=[tree_names]
-    if len( tree_names ) == 0:
-        return None
-    cmdSnippet = os.linesep.join( [
-        "from __future__ import print_function",
-        "import PyUtils.RootUtils as ru",
-        "import pickle",
-        "root = ru.import_root()",
-        "tree_names = %s" % tree_names,
-        "nevts = None",
-        "f = root.TFile.Open( '%s', 'READ' )" % fname,
-        "if isinstance( f, root.TFile ) and f.IsOpen():",
-        "    for tname in tree_names:",
-        "        t = f.Get( tname )",
-        "        if isinstance( t, root.TTree ):",
-        "            nevts = t.GetEntriesFast()",
-        "        del t",
-        "        if nevts:",
-        "            break",
-        "    f.Close()",
-        "del f",
-        "if nevts is None:",
-        "    exit( 1 )",
-        "print ('<nevts>%s</nevts>' % pickle.dumps( nevts ))",
-        "exit( 0 )" ] ) + os.linesep
-    rc, stdout = timelimited_exec1( 'python -c "%s"' % cmdSnippet )
-    if rc == 0:
-        found = re.search( "<nevts>(.*?)</nevts>", stdout, re.DOTALL )
-        if found:
-            result = pickle.loads( found.group( 1 ) )
-            return result
-    print (stdout)
-    return None
-
-def ElementToExec(cmd):
-    #this method is used by preExec, postExec and rec.UserExecs
-    #it handles special characters not allowed by production systems as follows:
-    # blank space   --> @
-    # double-quote  --> `
-    #in addition corrects erroneous over-use of quotes by job transform users
-    if cmd.find(',,') > -1:
-        print ("WARNING symbol ,, is not expected in StringToExec. There is probably a mistake in the configuration.",cmd)
-    if cmd.startswith("'") and cmd.endswith("'"):
-        print ("WARNING - suspicious exec syntax. Removing superfluous quotation signs.")
-        print ("original:", cmd)
-        cmd=cmd.strip("'")
-        print ("new:",cmd)
-    if cmd.startswith('"') and cmd.endswith('"'):
-        print ("WARNING - suspicious exec syntax. Removing superfluous quotation signs.")
-        print ("original:", cmd)
-        cmd=cmd.strip('"')
-        print ("new:",cmd)
-    newCmd=cmd.replace('@',' ')
-    newCmd=newCmd.replace('`','"')
-    if cmd!=newCmd:
-        print ("INFO StringToExec changed original: %s  to new: %s"%(cmd,newCmd))
-    return newCmd
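# Decoding example for the substitutions described above ('@' -> space, '`' -> '"');
# the command string is made up:
assert ElementToExec('print(`hello@world`)') == 'print("hello world")'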
-
-def StringToExec(cmd):
-    #first create a list by splitting the string on ',,'
-    #then interpret each element to overcome the limitations of the production systems
-    valList=StringToList(cmd)
-    valOut=[]  
-    for iElement in valList:
-        newElement=ElementToExec(iElement)
-        valOut.append(newElement)
-    return valOut
-
-def strip_suffix( aString, aSuffix ):
-    """ Remove aSuffix from the end of aString (if present)."""
-    if not aSuffix: return aString
-    if aString.endswith( aSuffix ): aString = aString[:-len(aSuffix)]
-    return aString
-
-def strip_prefix( aString, aPrefix ):
-    """Remove aPrefix from the beginning of aString (if present)."""
-    if not aPrefix: return aString
-    if aString.startswith( aPrefix ): aString = aString[len(aPrefix):]
-    return aString
-
-def remove_filename_extension( aString ):
-    """Remove everything from <aString> starting from the last . (dot) after
-    the last path separator ('/' or '\\') if any. If <aString> does not have a dot
-    or if it ends with a path separator, then nothing is removed."""
-    slash = aString.rfind(os.sep)
-    dot = aString.rfind(os.extsep,slash+1)
-    if slash == len(aString) - 1: return aString
-    if dot == -1: return aString
-    return aString[:dot]
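# Behaviour sketch for the string helpers above (POSIX paths, invented names):
assert strip_suffix('job.log', '.log') == 'job'
assert strip_prefix('LFN:myfile', 'LFN:') == 'myfile'
assert remove_filename_extension('/a/b/file.txt') == '/a/b/file'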
-
-def tail( fileOrName, nLines ):
-    """Return as a string, the list <nLines> lines of file <fileOrName>.
-    If <filename> is a string, it is considered a filename. If it is
-    File object, it will be read from starting at the point where it is,
-    and will not be closed."""
-    logLines = []
-    closeFile = False
-    if isinstance(fileOrName, str):
-        logFile = open(fileOrName,'r')
-        closeFile = True
-    else:
-        logFile = fileOrName
-    for line in logFile:
-        logLines.append(line.rstrip())
-        if len(logLines) > nLines:
-            logLines.pop(0) # remove first line
-    if closeFile:
-        logFile.close()
-    return os.linesep.join(logLines)
-
-def examine_sh_exit_status(status):
-    """Return a string describing the exit status, if any it to be given.
-    Otherwise return None"""
-    sigNum = None
-    if status > 128:
-        sigNum = status - 128
-    elif status < 0:
-        sigNum = -status
-    if sigNum is not None:
-        import signal
-        for sigName in dir(signal):
-            if sigName.startswith('SIG') and not sigName.startswith('SIG_'):
-                sigValue = getattr(signal,sigName)
-                if sigValue == sigNum:
-                    return sigName
-    return None
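# Shell convention handled above: an exit status of 128+N means the process
# died on signal N, so 137 maps to SIGKILL (9):
assert examine_sh_exit_status(137) == 'SIGKILL'
assert examine_sh_exit_status(0) is None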
-
-def find_datafile( filename ):
-    return find_file_env( os.path.expanduser( os.path.expandvars(filename) ),
-                          trfconsts.DATAPATH )
-
-def find_joboptions( jobOptionsFile, depth=0 ):
-    return find_file_env( os.path.expanduser( os.path.expandvars( jobOptionsFile ) ),
-                          trfconsts.JOBOPTIONSPATH, sep=',', depth=depth )
-
-def get_atlas_release(project='AtlasProduction'):
-    """Determine the Atlas Release used"""
-    atlasRelease = None
-    #
-    # guess from directory of this file
-    #
-    # make a list of patterns to match
-    nightlyBugFind      = os.sep + os.path.join('nightlies','bugfix',project,r'rel_(?P<nightly>[0-6])') + os.sep
-    nightlyBugReplace   = r'bug_\g<nightly>'
-    nightlyDevFind      = os.sep + os.path.join('nightlies','dev',project,r'rel_(?P<nightly>[0-6])') + os.sep
-    nightlyDevReplace   = r'dev_\g<nightly>'
-    nightlyBuildFind    = os.sep + os.path.join('nightlies','projects',project,r'rel_(?P<nightly>[0-6])') + os.sep
-    nightlyBuildReplace = r'rel_\g<nightly>'
-    releaseFind         = os.sep + os.path.join(project,r'(?P<release>[0-9][0-9]?(?:\.[0-9][0-9]?){2,3})') + os.sep
-    releaseReplace      = r'\g<release>'
-    patterns = { nightlyBugFind   : nightlyBugReplace ,
-                 nightlyDevFind   : nightlyDevReplace ,
-                 nightlyBuildFind : nightlyBuildReplace ,
-                 releaseFind      : releaseReplace }
-    dir = os.path.dirname( os.path.realpath(__file__) )
-    for find,replace in patterns.items():
-        match = re.search(find,dir)
-        if match:
-            atlasRelease = match.expand(replace)
-            print ("Got Atlas Release number %s from directory name of file %s" % (atlasRelease,__file__))
-            return atlasRelease
-    # 
-    # If not found, try some environment variables
-    #
-    for env in ['ATLAS_RELEASE','AtlasVersion']:
-        atlasRelease = os.environ.get(env)
-        if atlasRelease:
-            print ("Got Atlas Release number %s from environment variable %s" % (atlasRelease,env))
-            return atlasRelease
-    return None
 
 def linkPresent( filename, findBrokenLink = True ):
     """Check if the given filename path contains a symlink. This function also checks all it's parent directories. 
@@ -776,1557 +201,3 @@ def get_files( listOfFiles, fromWhere='data', doCopy='ifNotLocal', errorIfNotFou
             except OSError:
                 # try copying instead.
                 shutil.copyfile( srcFile, targetFile )
-
-
-TRACEBACK_FILENAME = 0
-TRACEBACK_LINENUMBER = 1
-TRACEBACK_FUNCTION = 2
-TRACEBACK_TEXT = 3
-
-def find_in_stack( whatRE, where = TRACEBACK_TEXT, stackList = None ):
-    if stackList is None:
-        import traceback
-        stackList = traceback.extract_tb(sys.exc_info()[2])
-    for s in stackList:
-        if re.search( whatRE, s[where] ):
-            return s
-    return None
-
-def load_module( module_filename ):
-    """Extension of dynamic import. Load as module a python file with any path in the filename.
-    File does not have to be in PYTHONPATH"""
-    # mold the filename
-    module_filename = os.path.normpath(module_filename)
-    # remove .py suffix
-    if module_filename.endswith('.py'): module_filename = module_filename[:-3]
-    # explicit current dir
-    if module_filename.startswith( os.curdir + os.sep ): module_filename = module_filename[2:]
-    # updir somewhere
-    if module_filename.startswith( os.pardir + os.sep ): module_filename = os.path.abspath( module_filename )
-    # first try to load as is
-    try:
-        if os.path.isabs(module_filename): raise ImportError
-        dotted = module_filename.replace(os.sep,'.')
-#        print ("load_module(): trying import %s" % dotted)
-        module = __import__( dotted )
-        modlist = dotted.split('.')
-        if len(modlist) > 1:
-            for submod in modlist[1:]:
-                module = getattr(module,submod) 
-    except ImportError:
-        # add path to system
-        moddir,modname = os.path.split( module_filename )
-        absdir = os.path.abspath( moddir )
-        if moddir != os.curdir:
-            # add module directory to search path
-            inserted = False
-            if absdir not in sys.path:
-                sys.path.insert( 1, absdir )
-                inserted = True
-#                print ("load_module(): new path: %s" % sys.path)
-#            print ("load_module(): trying import %s" % modname)
-            module = __import__( modname )
-            # cleanup search path
-            if inserted: sys.path.remove( absdir )
-        else:
-            module = __import__( modname )
-    return module
-
-def load_transforms( trf_py, nameRE = r".*" ):
-    """Return a list of JobTransform objects found in python file <trf_py>,
-    whose names match the regular expression <nameRE>. Default is all objects in file."""
-    # import transformation as module
-    module = load_module( trf_py )
-    trfs = [ ]
-    for t in dir(module):
-        obj = getattr(module,t)
-        if isinstance(obj,module.JobTransform) and re.search( nameRE, obj.name() ):
-            trfs.append( obj )
-    return trfs
-
-def load_errorhandlers( handler_py, nameRE = r".*" ):
-    """Load a list of TransformErrorHandler objects found in python file handler_py,
-    whose names (=class names) match the regular expression <nameRE>. Default is all objects in file."""
-    # import errorhandler as module
-    module = load_module( handler_py )
-    handlers = [ ]
-    for t in dir(module):
-        obj = getattr(module,t)
-        if isinstance(obj,module.TransformErrorHandler) and re.search( nameRE, obj.name() ):
-            handlers.append( obj )
-    return handlers
-
-def genLFN_PFN():
-    """Generates mapping between LFN and PFN. Assumes LFN and PFN are related by filename."""
-    if not os.environ.get('POOL_CATALOG'):
-        os.environ['POOL_CATALOG'] = PoolDataFile.defaultCatalog
-    # Look inside the PoolFileCatalog
-    listPFNcmd = 'FClistPFN'
-    if not find_executable( listPFNcmd ):
-        raise Exception( 'Required PFN extraction utility not found.' )
-    # Retrieve PFNs to populate LFN_VER_DICT
-    p = Popen(listPFNcmd,shell=True,stdout=PIPE,stderr=PIPE,close_fds=True)
-    while p.poll() is None:
-        p.stderr.readline()
-    if p.returncode != 0:
-        raise Exception( 'PFN retrieval failed. LFN/PFN map cannot be generated.' )
-    for line in p.stdout:
-        line = line.strip()
-        if line:
-            PFNPatResult = _PFNPat.search( line )
-            if PFNPatResult:
-                LFN_VER_DICT[ PFNPatResult.group('lfn') ] = PFNPatResult.group('ver')
-
-def getCorrectedFilename( fn, forceLFN = None ):
-    if forceLFN is None:
-        forceLFN = fn.startswith( 'LFN:' )
-        if forceLFN:
-            fn = fn[4:]
-    # Check local
-    if not forceLFN:
-        if fileutil.exists( fn ):
-            return fn
-        foundOtherVersion = fileutil.exists_suffix_number( fn + '.' )
-        if foundOtherVersion:
-            return foundOtherVersion
-    # Check PFC
-    if not LFN_VER_DICT:
-        genLFN_PFN()
-    PFNPatResult = _PFNPat.search( fn )
-    if not PFNPatResult:
-        raise Exception( 'Invalid filename: %s' % fn )
-    # Retrieve version from LFN_VER_DICT
-    try:
-        ver = LFN_VER_DICT[ PFNPatResult.group('lfn') ]
-    except KeyError:
-        raise Exception( '%s not found.' % fn )
-    # If version already provided in filename, check if it matches that in PFC.
-    if PFNPatResult.group('ver') and ver != PFNPatResult.group('ver'):
-        raise Exception( '%s does not match the version found in the PFC.' % fn )
-    if ver is None: # PFC does not contain version info for fn.
-        ver = ''
-    return 'LFN:' + PFNPatResult.group('url') + PFNPatResult.group('lfn') + ver
-
-def expandParallelVectorNotation( valIn ):
-    """A parallel Vector Notation decoder. Adapted from code by Brinick Simmons."""
-    if not valIn.count( '[' ):
-        return [ valIn ]
-    if valIn.count( '[' ) != valIn.count( ']' ):
-        raise Exception( 'Mismatched brackets.')
-    pieces = re.findall( r'\[[^\]\s]+\]', valIn ) # get the bracket sections (one match per bracket set)
-    if not pieces:
-        return [ valIn ]
-    if any( re.findall( r'\[|\]', p[ 1:-1 ] ) for p in pieces ):
-        raise Exception( 'Nested brackets detected.' )
-    ppieces = [ [ i.strip() for i in p[ 1:-1 ].split( ',' ) ] for p in pieces ]
-    # zip_longest pads shorter sets with None, so a membership test detects
-    # mismatched bracket-set lengths (Python 3 replacement for map(None, *ppieces)).
-    from itertools import zip_longest
-    for i in zip_longest( *ppieces ):
-        if None in i:
-            raise Exception( 'Mismatched number of entries in bracket sets.' )
-    for bracket in pieces:
-        valIn = valIn.replace( bracket, '%s' )
-    return [ valIn % j for j in zip( *ppieces ) ]
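# Expansion sketch (invented filename): one bracket set fans out into one
# output name per entry.
out = expandParallelVectorNotation('DATASET.ABC._000[1,2,3].root')
assert out == ['DATASET.ABC._0001.root', 'DATASET.ABC._0002.root', 'DATASET.ABC._0003.root']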
-
-def expandStringToList( valIn, ensureExistence = None ):
-    """Comma separated list of filenames. Each filename in the list can be coded
-    with the following syntax:
-    
-    [DATASET#]DATASET.ABC.DEF._000[1,2,3,4,5].root
-    DATASET[##].ABC.DEF._000[1,2,3,4,5].root
-    DATASET[##].ABC.DEF._000[1-5].root ---> Note: [N-M] where N<=M, 0<=N<=9, 0<=M<=9. This is handled by glob()
-    DATASET[##].ABC.DEF.*.root         ---> This is handled by glob()
-
-    '#','##' are used to highlight the dataset so as to reflect it in the job report and metadata.xml files."""
-    listSep = ','
-    if ensureExistence is None:
-        ensureExistence = True
-    # split the comma separated list (outside brackets)
-    sepList = []
-    n = len(valIn)
-    i = 0
-    start = 0
-    while i < n:
-        c = valIn[i]
-        if c == '[':
-            i = valIn.find( ']', i + 1 )
-            if i == -1:
-                raise Exception( 'Mismatched brackets.' )
-        elif c == listSep:
-            if i == n - 1 or i - start < 1: # listSep found at last position or problem with position counters
-                raise Exception( 'Incorrect syntax. Check position of list separators used.' )
-            sepList.append( valIn[start:i] )
-            start = i + 1
-        i += 1
-    # add the last entry in the list
-    if start < n:
-        sepList.append( valIn[start:] )
-    all = []
-    from PyJobTransformsCore.basic_trfarg import DataFileArg
-    for val in sepList:
-        forceLFN = val.startswith( 'LFN:' )
-        add = []
-        ds = ''
-        # remove optional LFN: prefix
-        if forceLFN:
-            val = val[4:]
-        # detect dataset
-        if '#' in val:
-            # retrieve dataset and strip dataset from val.
-            ds, val = DataFileArg.extractDataset( val, omitFromName = True )
-        if ds:
-            ds += '#'
-        # Attempt to use glob to allow for wildcard expansion.
-        add.extend( glob.glob( val ) )
-        if not add: # glob did not succeed. Proceed with PVN.
-            add.extend( expandParallelVectorNotation( val ) )
-        # stitch filename components together 
-        dirName = os.path.dirname( val )
-        if ensureExistence:
-            if dirName: # filename includes directory path as well.
-                add = [ os.sep.join( [ dirName, ds + os.path.basename( getCorrectedFilename( fname, forceLFN ) ) ] ) for fname in add ]
-            else:
-                add = [ ds + getCorrectedFilename( fname, forceLFN ) for fname in add ]
-        elif ds:
-            if dirName:
-                add = [ os.sep.join( [ dirName, ds + os.path.basename( fname ) ] ) for fname in add ]
-            else:
-                add = [ ds + fname for fname in add ]
-        all += add
-    return all
-
-def getCachedFileInfo( filename, infoKey ):
-    from PyUtils.MetaReader import read_metadata
-    meta = read_metadata(filename,None,'lite')
-    try:
-        if isinstance(filename,list):
-            metalist = []
-            for fname in filename:
-                metalist.append(meta[fname][infoKey])
-            return metalist
-        else:
-            return meta[filename][infoKey]
-    except KeyError:
-        return None
-
-def corruptionTestROOT( filename, file_type ):
-    if isinstance( filename, list ):
-        raise TypeError( 'filename parameter takes a single file and not a list.' )
-
-    from PyJobTransformsCore.ValidateRootFile import checkPFCorruption
-    return checkPFCorruption(filename)
-
-
-def corruptionTestBS( filename, file_type,logger):
-    #First try AtlListBSEvents -c %filename:
-    cmd = 'AtlListBSEvents -c %s ' % filename
-    p = Popen(cmd,shell=True,stdout=PIPE,stderr=PIPE,close_fds=True)
-    while p.poll() is None:
-        line = p.stdout.readline()
-        if line:
-            logger.info("AtlListBSEvents Report: %s", line.strip())
-    rc = p.returncode
-    if rc == 0:
-        return rc
-    # AtlListBSEvents failed, fall back to PyDumper
-    else:
-        logger.info("AtlListBSEvents failed to validate %s, using the (slower) PyDumper method ", filename)
-        cmdSnippet = os.linesep.join( [
-            "from sys import exit",
-            "import os",
-            "try:",
-            "    import PyDumper.SgDumpLib as sdl",
-            "    sc, out = sdl.run_sg_dump( files = [ '%s' ], output = os.devnull, pyalg_cls = 'PyDumper.PyComps:DataProxyLoader', use_recex_links = False, file_type = '%s' )" % ( filename, file_type ),
-            "except Exception as e:",
-            "    exit( 'Validation routine error: %s' % e )",
-            "exit( sc )" ] ) + os.linesep
-        rc, stdout = timelimited_exec1( 'python -c "%s"' % cmdSnippet )
-        if rc != 0:
-            print (stdout)
-        return rc
-
-
-class FileType:
-    """Utility class for file manipulation that supports various file access systems
-    (local files and rfio files).
-    It holds the filename, and it can hold optionally the file type, contents and suffix."""
-    defaultContents = None
-    defaultType = None
-
-    def __init__(self,type=defaultType,contents=defaultContents):
-        """Total filename expected: <dir>/<bare>.<contents>.<type>
-        type: file type without the dot (root,pool.root,tar,tar.gz,txt,...)
-              If None, then any type is allowed, and becomes equal
-              to the part after the last . (dot) in the filename.
-        contents: file contents (evgen,simu,digi,esd,aod,...)
-        bare: anything (except /)
-
-        Optional file attempt numbers are automatically added at the
-        end of the type and suffix."""
-        #
-        # Set the type
-        #
-        self.__type = type
-        if type is None:
-            typeRE = '[^.]+'
-        else:
-            typeRE = type
-        # Include the possibility of an attempt number at the end
-        self.__typeRE = re.compile( r"\.(?P<type>%s)(\.[0-9]+)?$" % typeRE )
-        #
-        # Set the contents
-        #
-        self.__contents = contents
-        if contents is None:
-            contentsRE = '[^.]+'
-        else:
-            # make contents case insensitive
-            contentsRE = ''
-            for c in contents:
-                contentsRE += r'[%s%s]' % (c.lower(),c.upper())
-        self.__contentsRE = re.compile( r'\.(?P<contents>%s)' % (contentsRE,) +
-                                        self.__typeRE.pattern )
-        # to get the base name: without type
-        self.__baseRE = re.compile( r'(?P<base>.*)' + self.__typeRE.pattern )
-        # to get the bare name: without type and contents
-        self.__bareRE = re.compile( r'(?P<bare>.*)' + self.__contentsRE.pattern )
-        self.__guid = None
-
-    def getGUID(self,filename):
-        if TRF_SETTING[ 'testrun' ]:
-            return None
-        guid = getCachedFileInfo( filename, 'file_guid' )
-        if guid is not None:
-            return guid
-        guid = str(uuid.uuid4()).upper()
-        print ("GUID retrieval: %s (%s) generated with uuid.uuid4()" % ( guid, filename ))
-        return guid
-            
-    def hasType(self):
-        return self.__type is not None
-
-    def type(self,filename=None):
-        if filename is None:
-            return self.__type
-        else:
-            match = self.__typeRE.search( os.path.basename(filename) )
-            if match: return match.group('type')
-        return ""
-
-    def hasContents(self):
-        return self.__contents is not None
-
-    def contents(self,filename=None):
-        if filename is None:
-            return self.__contents
-        else:
-            match = self.__contentsRE.search( os.path.basename(filename) )
-            if match: return match.group('contents')
-        return ""
-    
-    def validationType( self ):
-        """Check if the contents is one recognised by PyDumper.SgDumpLib.run_sg_dump()."""
-        vType = self.contents()
-        if vType not in ( 'rdo', 'bs', 'esd', 'aod', 'dpd' ):
-            return 'any'
-        return vType
-
-    def checkType(self,filename):
-        """Check if filename ends with .<type>"""
-        return not self.hasType() or self.__typeRE.search( filename )
-
-    def checkContents(self,filename):
-        """Check if filename ends with <suffix>"""
-        return not self.hasContents() or self.__contentsRE.search( filename )
-
-    def baseFilename(self,filename):
-        """Return filename without the path and type"""
-        match = self.__baseRE.search( os.path.basename(filename) )
-        if match:
-            return match.group('base')
-        else:
-            return filename
-
-    def bareFilename(self,filename):
-        """Return filename without the path, contents and type"""
-        match = self.__bareRE.search( os.path.basename(filename) )
-        if match:
-            return match.group('bare')
-        else:
-            return self.baseFilename(filename)
-
-    def getMetaData(self,filename):
-        """Return a dictionary with metadata name:value pairs.
-        If a value is None, it could not be determined at this level,
-        and needs to be determined at a higher level."""
-        return { 'fileType' : self.defaultContents }
-
-    def eventCount( self, arg ):
-        """Return number of events in file of argument arg.
-        Return None if event count is not applicable to file type."""
-        return None
-
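# Decomposition sketch for the <bare>.<contents>.<type> convention above; the
# filename (with a trailing attempt number) is invented:
ft = FileType(type='pool.root', contents='AOD')
fn = 'mydata.AOD.pool.root.3'
assert ft.type(fn) == 'pool.root'
assert ft.contents(fn) == 'AOD'
assert ft.bareFilename(fn) == 'mydata'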
-
-class BSFile( FileType ):
-    """ByteStream data file"""
-    defaultType = 'data'
-    defaultContents = 'bs'
-    
-    def __init__( self, contents = defaultContents, type = defaultType ):
-        FileType.__init__( self, type, contents )
-
-    def eventCount( self, arg ):
-        """Count the events of InputFile or OutputFile argument <arg>.
-        If the number of events can not be determined, return None"""
-        try:
-            logger = arg.logger()
-            fileList = arg.value()
-        except Exception as e:
-            print ("Event count failed for %s: %s" % ( arg, e ))
-            return None
-        if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ):
-            logger.info( 'Test run in progress. Event count disabled.' )
-            return None
-        resultList = getCachedFileInfo( fileList, 'nentries' )
-        if resultList is None or not isinstance( resultList, list ):
-            return resultList
-        # resultList is indeed a list
-        try:
-            return sum( resultList )
-        except TypeError:
-            # probably 'N/A' detected in resultList. Should be fixed with AthenaPython-00-04-19
-            return None
-
-    def validateFile( self, arg, **validationDict ):
-        """Validate the file. Use SgDumpLib.run_sg_dump() or event count routine."""
-#        ### TEST ONLY ###
-#        raise TransformValidationError( arg.value(), 'Test validation error (%s)' % arg.name(), 'TRF_OUTFILE_NOTFOUND' )
-        try:
-            logger = arg.logger()
-            fName = arg.value()
-            argName = arg.name()
-        except Exception as e:
-            print ("Could not validate file associated with %s: %s" % ( arg, e ))
-            return
-        if VALIDATION_DICT[ 'ALL' ] is False:
-            logger.info( "Skipping all validation routines." )
-            return
-        # Defined default validation values
-        vDict = { 'testIfEmpty' : True, 'testIfNoEvents' : True, 'testIfExists' : True, 'testIfCorrupt' : True, 'testCountEvents' : True, 'extraValidation' : None, 'testMatchEvents' : True, 'testEventMinMax' : True, 'stopOnEventCountNone' : True, 'continueOnZeroEventCount' : True}
-        # apply modifications to validation values from subclasses
-        vDict.update( validationDict )
-        # Check if any validation tests have been disabled at the command line and apply to vDict
-        for vTestName, vTestEnabled in VALIDATION_DICT.items():
-            if vTestEnabled is not None:
-                vDict[ vTestName ] = vTestEnabled
-        #Make sure filename is not a list
-        if isinstance(fName,list):
-            fName = fName[0]
-        if not fileutil.exists( fName ):
-            if vDict[ 'testIfExists' ]:
-                raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' )
-            else:
-                logger.info( "Ignoring missing %s.", fName )
-                return
-        if fileutil.getsize( fName ) == 0:
-            if vDict[ 'testIfEmpty' ]:
-                raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' )
-            else:
-                logger.info( "Ignoring empty %s.", fName )
-                return
-        # Check if sdl can cope with the file type
-        if self.validationType() == 'any':
-            vDict[ 'testIfCorrupt' ] = False
-        if vDict[ 'testIfCorrupt' ] and sdl is not None:
-            logger.info( "Checking %s for corruption.", fName )
-            vTimer.start( '%s validation' % argName )
-            sc = corruptionTestBS( fName, self.validationType(),logger )
-            vTimer.stop( '%s validation' % argName )
-            if sc < 0:
-                logger.warning( "Execution of corruption test failed [%s].", sc )
-            elif sc > 0:
-                raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-        if vDict[ 'testCountEvents' ]:
-            logger.info( "Attempting to validate %s using event count routine.", fName )
-            vTimer.start( '%s validation' % argName )
-            eCount = arg.eventCount()
-            vTimer.stop( '%s validation' % argName )
-            if eCount == 0:
-                if vDict[ 'testIfNoEvents' ]:
-                    if not vDict[ 'continueOnZeroEventCount' ]:
-                        raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-                    else:
-                        logger.warning( "0 events in %s; proceeding with empty file.", fName )
-                else:
-                    logger.info( "Ignoring 0 events in %s.", fName )
-                    return
-            elif eCount is None:
-                if vDict[ 'stopOnEventCountNone' ]:
-                    raise TransformValidationError( fName, 'failed validation. Events could not be counted. Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' )
-                else:
-                    logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyway.", fName )
-        if callable( vDict[ 'extraValidation' ] ):
-            vTimer.start()
-            extraValidationResult = None
-            try:
-                extraValidationResult = timelimited_exec( tl_func = vDict[ 'extraValidation' ], tl_func_args = ( fName, ), tl_pre_func = lambda:None, tl_post_func = lambda:None, tl_timeout = TRF_SETTING[ 'validationTimeout' ], tl_retry = TRF_SETTING[ 'validationRetry' ], tl_interval = TRF_SETTING[ 'validationSleepInterval' ] )
-            except TransformThreadTimeout:
-                logger.warning( 'Extra validation routine timed out.' )
-            except TransformThreadError as e:
-                logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e )
-            except Exception as e:
-                logger.warning( 'Extra validation routine error.\n%s', e )
-            vTimer.stop()
-            if not extraValidationResult:
-                raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' )
-#        # Fail if sdl check fails, does not fail over to event count routine.
-#        if sdl is None:
-#            eCount = arg.eventCount()
-#            if eCount == 0:
-#                raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-#            elif eCount == None:
-#                raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NOTREAD' )
-#        else:
-#            sc, out = sdl.run_sg_dump( files = [ fName ], output = os.devnull, pyalg_cls = 'PyDumper.PyComps:DataProxyLoader', use_recex_links = False, file_type = self.validationType(), msg = logger )
-#            if sc != 0:
-#                raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-        logger.info( "%s successfully validated.", fName )
-        return
-
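-    # Editor's note: a small sketch (values invented) of the precedence applied
-    # in validateFile() above: defaults are overlaid first by the subclass's
-    # validationDict and then by any non-None command-line toggles.
-    #
-    #   vDict = dict(testIfEmpty=True, testIfCorrupt=True)
-    #   vDict.update({'testIfCorrupt': False})               # subclass override
-    #   cli = {'testIfEmpty': None, 'testIfCorrupt': True}   # None = untouched
-    #   vDict.update({k: v for k, v in cli.items() if v is not None})
-    #   # -> {'testIfEmpty': True, 'testIfCorrupt': True}
-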
-    def getGUID(self,filename):
-        if TRF_SETTING[ 'testrun' ]:
-            return None
-        guid = getCachedFileInfo( filename, 'file_guid' )
-        if guid is None:
-            raise FileError( filename, "GUID not found in BS file %s.", filename )
-        return guid
-
-    def getMetaData(self,filename):
-        return { 'fileType' : self.defaultContents }
-
-
-class RootTTreeFile( FileType ):
-    """A root file containing TTree's"""
-    defaultType = 'root'
-    defaultContents = None
-    # Borrowed from PoolDataFile
-    defaultCatalogFilename = 'PoolFileCatalog.xml'
-    defaultCatalog = 'xmlcatalog_file:' + defaultCatalogFilename
-    
-    def __init__(self,contents=defaultContents,type=defaultType):
-        FileType.__init__(self,type,contents)
-        self.__rootMacroFile = os.path.join(trfenv.trfPackagePath,'share','CountEvents.cc')
-
-    def eventCount( self, arg ):
-        """Event counting is not implemented for plain ROOT TTree files; return None."""
-        return None
-
-    def getGUID( self, filename ):
-        if TRF_SETTING[ 'testrun' ]:
-            return None
-        if not fileutil.exists( filename ):
-            print ("GUID retrieval failed: %s not found." % filename)
-            return None
-        # Use FClistGUID
-        rc, guid = getGUIDfromPFC(filename)
-        if guid is not None:
-            return guid
-        if rc != 0:
-            print ('Warning: Problem with PFC')
-        
-        # ROOT type files have no internal GUID, so if it wasn't in the PFC then
-        # we just make one up...
-        guid = str(uuid.uuid4())
-        print ('Generated GUID %s for %s' % (guid, filename))
-        # Register the generated GUID in the PFC
-        rc = addGUIDtoPFC(filename, guid, type = 'ROOT_All')
-        if rc != 0:
-            print ('Warning: Failed to add new GUID to PFC')
-
-        return guid
-
-
-class PoolDataFile( RootTTreeFile ):
-    defaultType = 'pool.root'
-    defaultCatalogFilename = 'PoolFileCatalog.xml'
-    defaultCatalog = 'xmlcatalog_file:' + defaultCatalogFilename
-    defaultMessageLevel = 5
-    defaultContents = None
-
-    def __init__(self,contents=defaultContents,type=defaultType):
-        RootTTreeFile.__init__(self,contents,type)
-        
-    def getGUID(self, filename):
-        # Use FClistGUID
-        rc, guid = getGUIDfromPFC(filename)
-        if guid is not None:
-            return guid
-        if rc != 0:
-            print ('Warning: Problem with PFC')
-
-        # Use pool_extractFileIdentifier.py
-        poolcmd = ['pool_extractFileIdentifier.py', filename]
-        print ("GUID retrieval: Attempting to use %s." % poolcmd)
-        if not find_executable(poolcmd[0]):
-            raise EnvironmentError('Pool utility %s not found in PATH' % poolcmd[0] )
-        p = Popen(poolcmd,shell=False,stdout=PIPE,stderr=PIPE,close_fds=True)
-        (stdout, stderr) = p.communicate()
-        for line in stdout:
-            words = line.split(os.linesep)
-            try:
-                if filename in words[1]:
-                    guid = words[0]
-                    break
-            except Exception:
-                continue
-        if p.returncode != 0:
-            print ("GUID retrieval failed: %s" % stderr)
-            guid = None
-        if guid:
-            print ("GUID retrieval: Using embedded value %s (%s)" % (guid,filename))
-            return guid
-
-
-    def validateFile( self, arg, **validationDict ):
-        """Validate the file. Use SgDumpLib.run_sg_dump() or event count routine."""
-#        ### TEST ONLY ###
-#        raise TransformValidationError( arg.value(), 'Test validation error (%s)' % arg.name(), 'TRF_OUTFILE_NOTFOUND' )
-        try:
-            logger = arg.logger()
-            fName = arg.value()
-            argName = arg.name()
-        except Exception as e:
-            print ("Could not validate file associated with %s: %s" % ( arg, e ))
-            return
-        if VALIDATION_DICT[ 'ALL' ] is False:
-            logger.info( "Skipping all validation routines." )
-            return
-        # Define default validation values
-        vDict = { 'testIfEmpty' : True, 'testIfNoEvents' : True, 'testIfExists' : True,
-                  'testIfCorrupt' : True, 'testCountEvents' : True, 'extraValidation' : None,
-                  'testMatchEvents' : True, 'testEventMinMax' : True,
-                  'stopOnEventCountNone' : True, 'continueOnZeroEventCount' : True }
-        # apply modifications to validation values from subclasses
-        vDict.update( validationDict )
-        # Check if any validation tests have been disabled at the command line and apply to vDict
-        for vTestName, vTestEnabled in VALIDATION_DICT.items():
-            if vTestEnabled is not None:
-                vDict[ vTestName ] = vTestEnabled
-        if not fileutil.exists( fName ):
-            if vDict[ 'testIfExists' ]:
-                raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' )
-            else:
-                logger.info( "Ignoring missing %s.", fName )
-                return
-        if fileutil.getsize( fName ) == 0:
-            if vDict[ 'testIfEmpty' ]:
-                raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' )
-            else:
-                logger.info( "Ignoring empty %s.", fName )
-                return
-        # Check if sdl can cope with the file type
-        if self.validationType() == 'any':
-            vDict[ 'testIfCorrupt' ] = False
-        if vDict[ 'testIfCorrupt' ]:
-            logger.info( "Checking %s for corruption.", fName )
-            vTimer.start( '%s validation' % argName )
-            sc = corruptionTestROOT( fName, self.validationType() )
-            vTimer.stop( '%s validation' % argName )
-            if sc<0:
-                raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-        if vDict[ 'testCountEvents' ]:
-            logger.info( "Attempting to validate %s using event count routine.", fName )
-            vTimer.start( '%s validation' % argName )
-            eCount = arg.eventCount()
-            vTimer.stop( '%s validation' % argName )
-            if eCount == 0:
-                if vDict[ 'testIfNoEvents' ]:
-                    if not vDict[ 'continueOnZeroEventCount' ]:
-                        raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-                    else:
-                        logger.warning( "0 events in %s; proceeding with empty file.", fName )
-                else:
-                    logger.info( "Ignoring 0 events in %s.", fName )
-                    return
-            elif eCount is None:
-                if vDict[ 'stopOnEventCountNone' ]:
-                    raise TransformValidationError( fName, 'failed validation. Events could not be counted. Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' )
-                else:
-                    logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyway.", fName )
-        if callable( vDict[ 'extraValidation' ] ):
-            vTimer.start()
-            extraValidationResult = None
-            try:
-                extraValidationResult = timelimited_exec( tl_func = vDict[ 'extraValidation' ], tl_func_args = ( fName, ), tl_pre_func = lambda:None, tl_post_func = lambda:None, tl_timeout = TRF_SETTING[ 'validationTimeout' ], tl_retry = TRF_SETTING[ 'validationRetry' ], tl_interval = TRF_SETTING[ 'validationSleepInterval' ] )
-            except TransformThreadTimeout:
-                logger.warning( 'Extra validation routine timed out.' )
-            except TransformThreadError as e:
-                logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e )
-            except Exception as e:
-                logger.warning( 'Extra validation routine error.\n%s', e )
-            vTimer.stop()
-            if not extraValidationResult:
-                raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' )
-#        # Fail if sdl check fails, does not fail over to event count routine.
-#        if sdl is None:
-#            eCount = arg.eventCount()
-#            if eCount == 0:
-#                raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-#            elif eCount == None:
-#                raise TransformValidationError( fName, 'failed validation. Events could not be counted Argument %s' % argName, 'TRF_OUTFILE_NOTREAD' )
-#        else:
-#            sc, out = sdl.run_sg_dump( files = [ fName ], output = os.devnull, pyalg_cls = 'PyDumper.PyComps:DataProxyLoader', use_recex_links = False, msg = logger )
-#            if sc != 0:
-#                raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-
-        logger.info( "%s successfully validated.", fName )
-        return
-    
-    def writeSize(self, arg):
-        fName = arg.value()
-        print ('trfutil.py Checking object sizes in file', fName)
-        if isinstance(self, (AODFile, ESDFile, DPDFile, EvgenFile, HitsFile)):
-            ne=0
-            collectionSize={}
-            try:
-                # print ('trfutil.py checkFile running...')
-                import PyUtils.PoolFile as PF
-                # PF.PoolOpts.FAST_MODE = True
-                poolFile = PF.PoolFile( fName , False)
-                poolFile.checkFile( )
-                ne=poolFile.dataHeader.nEntries
-                if ne>0:
-                    for collection in poolFile.data:
-                        collectionSize[collection.name] = collection.diskSize / ne
-            except Exception as e:
-                print ("## Caught exception [%s] !!" % str(e.__class__))
-                print ("## What:",e)
-                print (sys.exc_info()[0])
-                print (sys.exc_info()[1])
-                return
-                
-            # fName+='.size'
-            # try:
-            #     with open( fName, 'w' ) as sizeFile:
-            #         pickle.dump(ne,sizeFile)
-            #         pickle.dump(collectionSize,sizeFile)
-            # except Exception as e:
-            #     print ('trfutil.py WARNING: Could not write consumption info to file ', fName , e)
-            #     print (sys.exc_info()[0])
-            #     print (sys.exc_info()[1])
-            #     return
-
-            # return the number of events and a dict of per-event collection sizes
-            return [ne, collectionSize]
-        else:
-            print ('writeSize not needed for file of this type')
-                
-        
-    def eventCount( self, arg ):
-        """Count the events of InputFile or OutputFile argument <arg>.
-        If the number of events can not be determined, return None"""
-        try:
-            logger = arg.logger()
-            fileList = arg.value()
-        except Exception as e:
-            print ("Event count failed for %s: %s" % ( arg, e ))
-            return None
-        if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ):
-            logger.info( 'Test run in progress. Event count disabled.' )
-            return None
-        resultList = getCachedFileInfo( fileList, 'nentries' )
-        if resultList is None or not isinstance( resultList, list ):
-            return resultList
-        # resultList is indeed a list
-        try:
-            return sum( resultList )
-        except TypeError:
-            # probably 'N/A' detected in resultList. Should be fixed with AthenaPython-00-04-19
-            return None
-
-
-    @staticmethod
-    def setMessageLevel(level,logger=None):
-        newLevel = str(level)
-        oldLevel = os.environ.get('POOL_OUTMSG_LEVEL')
-        if oldLevel != newLevel:
-            os.environ['POOL_OUTMSG_LEVEL'] = newLevel
-            if logger:
-                logger.info( "Setting POOL message level to %d.", level )
-
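-# Editor's note: a minimal usage sketch for setMessageLevel() above; the level
-# value shown is an illustrative assumption, not a project default.
-#
-#   PoolDataFile.setMessageLevel(PoolDataFile.defaultMessageLevel)
-#   # POOL_OUTMSG_LEVEL is only changed (and logged) when the value differs.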
-
-class EvgenFile( PoolDataFile ):
-    defaultContents = 'evgen'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-
-
-class HitsFile( PoolDataFile ):
-    defaultContents = 'hits'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-
-
-class RDOFile( PoolDataFile ):
-    defaultContents = 'rdo'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-
-
-class ESDFile( PoolDataFile ):
-    defaultContents = 'esd'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-   
-
-class AODFile( PoolDataFile ):
-    defaultContents = 'aod'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-
-
-class SANFile( RootTTreeFile ):
-    defaultContents = 'san'
-    def __init__(self,contents=defaultContents):
-        RootTTreeFile.__init__(self,contents)
-
-
-class HistogramFile( FileType ):
-    defaultContents = 'hist'
-    defaultType = 'root'
-    def __init__(self,contents=defaultContents,type=defaultType):
-        FileType.__init__(self,type,contents)
-
-
-class FTKIPFile(FileType):
-    defaultContents = 'ftkip'
-    def __init__(self, contents=defaultContents):
-        FileType.__init__(self, contents)
-        
-    def eventCount(self, arg):
-        '''Count FTK input events using the ^F event flag'''
-        fname = arg.value()
-        try:
-            eventCount = 0
-            # BZ2File yields bytes, so compare against a bytes prefix
-            with bz2.BZ2File(fname, 'r') as f:
-                for line in f:
-                    if line.startswith(b'F'):
-                        eventCount += 1
-            return eventCount
-        except (OSError, IOError) as e:
-            print ("Event count failed for %s: %s" % (fname, e))
-            return None
-
-
-class JiveXMLTGZFile(FileType):
-    defaultContents = 'XML.tar.gz'
-    def __init__(self, contents=defaultContents):
-        FileType.__init__(self, contents)
-        
-    def eventCount(self, arg):
-        '''Count events by seeing how many JiveXML files are in the tarball'''
-        fname = arg.value()
-        try:
-            with tarfile.open(fname, 'r:*') as f:
-                return len(f.getnames())
-        except tarfile.TarError as e:
-            print ('Error determining the number of events in %s: %s' % (fname, e))
-            return None
-
-## Helper function to make JiveXML tarball from all JiveXML_*.xml in the cwd
-def jiveXMLtgz(fname):
-    # This should really be somewhere else, not in the actual trf code. Demo only!
-    jiveXMLfiles = glob.glob('JiveXML_*.xml')
-    if len(jiveXMLfiles) == 0:
-        raise RuntimeError ('No JiveXML files to merge')
-    print ("Will tar.gz %d JiveXML files: %s" % (len(jiveXMLfiles), jiveXMLfiles))
-    try:
-        jXMLtargz = tarfile.open(fname, 'w:gz')
-        for jXMLfile in jiveXMLfiles:
-            jXMLtargz.add(jXMLfile)
-        jXMLtargz.close()
-    except tarfile.TarError as e:
-        raise RuntimeError ('Error while trying to create JiveXML tar.gz file %s: %s' % (fname, e))
-
-
-class NtupleFile( RootTTreeFile ):
-    defaultContents = 'ntup'
-    defaultType = 'root'
-    def __init__(self,contents=defaultContents,type=defaultType, tree_names = None):
-        RootTTreeFile.__init__(self,type,contents)
-        if tree_names is None:
-            tree_names = []
-        self.tree_names = tree_names
-    
-    def eventCount( self, arg ):
-        try:
-            fName = arg.value()
-        except Exception as e:
-            print ("Event count failed for %s: %s" % ( arg, e ))
-            return None
-        if not isinstance(fName, list):
-            fName=[fName]
-        nentries=0
-        for fileName in fName:
-            n = ntup_entries(fname=fileName, tree_names=self.tree_names)
-            try:
-                nentries += n
-            except TypeError:
-                return None
-        return nentries
-
-    def validateFile( self, arg, **validationDict ):
-        try:
-            logger = arg.logger()
-            fName = arg.value()
-            argName = arg.name()
-        except Exception as e:
-            print ("Could not validate file associated with %s: %s" % ( arg, e ))
-            return
-        if VALIDATION_DICT[ 'ALL' ] is False:
-            logger.info( "Skipping all validation routines." )
-            return
-        # Define default validation values
-        vDict = { 'testIfEmpty' : True, 'testIfNoEvents' : True, 'testIfExists' : True,
-                  'testIfCorrupt' : True, 'testCountEvents' : True, 'extraValidation' : None,
-                  'testMatchEvents' : True, 'testEventMinMax' : True,
-                  'stopOnEventCountNone' : True, 'continueOnZeroEventCount' : True }
-        # apply modifications to validation values from subclasses
-        vDict.update( validationDict )
-        # Check if any validation tests have been disabled at the command line and apply to vDict
-        for vTestName, vTestEnabled in VALIDATION_DICT.items():
-            if vTestEnabled is not None:
-                vDict[ vTestName ] = vTestEnabled
-        if not fileutil.exists( fName ):
-            if vDict[ 'testIfExists' ]:
-                raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' )
-            else:
-                logger.info( "Ignoring missing %s.", fName )
-                return
-        if fileutil.getsize( fName ) == 0:
-            if vDict[ 'testIfEmpty' ]:
-                raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' )
-            else:
-                logger.info( "Ignoring empty %s.", fName )
-                return
-
-        if vDict[ 'testIfCorrupt' ]:
-            logger.info( "Checking %s for corruption.", fName )
-            vTimer.start( '%s validation' % argName )
-            from PyJobTransformsCore.trfValidateRootFile import checkFile as checkNTUPFile
-            sc = checkNTUPFile(fileName=fName, type='basketWise', requireTree=False, msg=logger)
-            vTimer.stop( '%s validation' % argName )
-            if sc!=0:
-                raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-    
-            
-        if vDict[ 'testCountEvents' ] and self.tree_names:
-            logger.info( "Attempting to validate %s using event count routine.", fName )
-            vTimer.start( '%s validation' % argName )
-            eCount = arg.eventCount()
-            vTimer.stop( '%s validation' % argName )
-            if eCount == 0:
-                if vDict[ 'testIfNoEvents' ]:
-                    if not vDict[ 'continueOnZeroEventCount' ]:
-                        raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-                    else:
-                        logger.warning( "0 events in %s; proceeding with empty file.", fName )
-                else:
-                    logger.info( "Ignoring 0 events in %s.", fName )
-                    return
-            elif eCount is None:
-                if vDict[ 'stopOnEventCountNone' ]:
-                    raise TransformValidationError( fName, 'failed validation. Events could not be counted. Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' )
-                else:
-                    logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyway.", fName )
-        else:
-            logger.info( "Event counting not tested for %s.", fName )
-        logger.info( "%s successfully validated.", fName )
-        return
-
-
-
-class MonitorHistFile( RootTTreeFile ):
-    defaultContents = 'monitor_hist'
-    defaultType = 'root'
-    def __init__(self,contents=defaultContents,type=defaultType):
-        RootTTreeFile.__init__(self,type,contents)
-
-    def eventCount( self, arg ):
-        try:
-            logger = arg.logger()
-            fName = arg.value()
-        except Exception as e:
-            print ("Event count failed for %s: %s" % ( arg, e ))
-            return None
-        if TRF_SETTING[ 'testrun' ] and ( VALIDATION_DICT[ 'ALL' ] is False or VALIDATION_DICT[ 'testCountEvents' ] is False ):
-            logger.info( 'Test run in progress. Event count (ROOT-based) disabled.' )
-            return None
-        ROOT = RootUtils.import_root(batch=True)
-        f = None
-        try:
-            f = timelimited_exec( tl_func = ROOT.TFile.Open, tl_func_args = ( fName, ), tl_pre_func = lambda:None, tl_post_func = lambda:None, tl_timeout = TRF_SETTING[ 'TRFTimeout' ], tl_retry = TRF_SETTING[ 'TRFRetry' ], tl_interval = TRF_SETTING[ 'TRFSleepInterval' ] )
-        except TransformThreadTimeout:
-            logger.warning( 'ROOT file opening timed out.' )
-        except TransformThreadError as e:
-            logger.warning( 'Thread for ROOT file opening failed to stop.\n%s', e )
-        except Exception as e:
-            logger.warning( 'ROOT file open error.\n%s', e )
-        if not f:
-            logger.warning("Could not open file [%s].", fName)
-            return None
-        # assume a single key starting with run_ (but not run_multiple) names the run directory
-        rundir = None
-        keys = f.GetListOfKeys()
-        for key in keys:
-            if key.GetName()[:4] == 'run_' and key.GetName() != 'run_multiple':
-                rundir = key.GetName()
-        if rundir is None:
-            logger.warning( 'Unable to find run directory.' )
-            f.Close()
-            return None
-        logger.debug( 'Using run directory %s.', rundir )
-        hpath = '%s/GLOBAL/DQTDataFlow/events_lb' % rundir
-        h = f.Get(hpath)
-        if not isinstance( h, ROOT.TH1 ):
-            logger.warning( 'Unable to retrieve %s.', hpath )
-            f.Close()
-            return None
-        try:
-            nBinsX = h.GetNbinsX()
-        except Exception:
-            f.Close()
-            logger.warning( 'Unable to retrieve number of events.' )
-            return None            
-        nev = 0
-        for i in range(1, nBinsX):
-            if h[i] < 0:
-                # should not happen
-                logger.warning( 'Negative number of events for step %s.', h.GetXaxis().GetBinLabel(i) )
-                f.Close()
-                return None
-            if h[i] == 0:
-                continue
-            # this bin is non-zero
-            if nev == 0:
-                nev = h[i]
-            else:
-                if nev != h[i]:
-                    # mismatch in number of events between different steps!
-                    logger.warning( 'Mismatch in events per step; most recent step seen is %s.', h.GetXaxis().GetBinLabel(i) )
-                    f.Close()
-                    return None
-        f.Close()
-        return nev
-
-    def validateFile( self, arg, **validationDict ):
-        """Validate the file. """
-        try:
-            logger = arg.logger()
-            fName = arg.value()
-            argName = arg.name()
-        except Exception as e:
-            print ("Could not validate file associated with %s: %s" % ( arg, e ))
-            return
-        if VALIDATION_DICT[ 'ALL' ] is False:
-            logger.info( "Skipping all validation routines." )
-            return
-        # Define default validation values
-        vDict = { 'testIfEmpty' : True, 'testIfNoEvents' : True, 'testIfExists' : True,
-                  'testIfCorrupt' : True, 'testCountEvents' : True, 'extraValidation' : None,
-                  'testMatchEvents' : True, 'testEventMinMax' : True,
-                  'stopOnEventCountNone' : True, 'continueOnZeroEventCount' : True }
-        # apply modifications to validation values from subclasses
-        vDict.update( validationDict )
-        # Check if any validation tests have been disabled at the command line and apply to vDict
-        for vTestName, vTestEnabled in VALIDATION_DICT.items():
-            if vTestEnabled is not None:
-                vDict[ vTestName ] = vTestEnabled
-        if not fileutil.exists( fName ):
-            if vDict[ 'testIfExists' ]:
-                raise TransformValidationError( fName, 'failed validation. File not created. Argument %s' % argName, 'TRF_OUTFILE_NOTFOUND' )
-            else:
-                logger.info( "Ignoring missing %s.", fName )
-                return
-        if fileutil.getsize( fName ) == 0:
-            if vDict[ 'testIfEmpty' ]:
-                raise TransformValidationError( fName, 'failed validation. Empty file. Argument %s' % argName, 'TRF_OUTFILE_EMPTY' )
-            else:
-                logger.info( "Ignoring empty %s.", fName )
-                return
-#         if vDict[ 'testIfCorrupt' ]:
-#             logger.info( "Checking %s for corruption.", fName )
-#             vTimer.start( '%s validation' % argName )
-#             sc = corruptionTestROOT( fName, self.validationType() )
-#             vTimer.stop( '%s validation' % argName )
-#             if sc<0:
-#                 raise TransformValidationError( fName, 'failed validation [%s]. File corrupt. Argument %s' % ( sc, argName ) )
-        if vDict[ 'testCountEvents' ] and 'HIST_' not in fName:
-            logger.info( "Attempting to validate %s using event count routine.", fName )
-            vTimer.start( '%s validation' % argName )
-            eCount = arg.eventCount()
-            vTimer.stop( '%s validation' % argName )
-            if eCount == 0:
-                if vDict[ 'testIfNoEvents' ]:
-                    if not vDict[ 'continueOnZeroEventCount' ]:
-                        raise TransformValidationError( fName, 'failed validation. File contains no events. Argument %s' % argName, 'TRF_OUTFILE_TOOFEW' )
-                    else:
-                        logger.warning( "0 events in %s; proceeding with empty file.", fName )
-                else:
-                    logger.info( "Ignoring 0 events in %s.", fName )
-                    return
-            elif eCount is None:
-                if vDict[ 'stopOnEventCountNone' ]:
-                    raise TransformValidationError( fName, 'failed validation. Events could not be counted. Argument %s' % argName, 'TRF_OUTFILE_NEVENTFAIL' )
-                else:
-                    logger.info( "No event count for file %s (corrupt or unreachable). Proceeding anyway.", fName )
-        elif 'HIST_' in fName:
-            logger.info('No event counting validation performed because file %s is of HIST_ subtype', fName)
-            
-        if callable( vDict[ 'extraValidation' ] ):
-            vTimer.start()
-            extraValidationResult = None
-            try:
-                extraValidationResult = timelimited_exec( tl_func = vDict[ 'extraValidation' ], tl_func_args = ( fName, ), tl_pre_func = lambda:None, tl_post_func = lambda:None, tl_timeout = TRF_SETTING[ 'validationTimeout' ], tl_retry = TRF_SETTING[ 'validationRetry' ], tl_interval = TRF_SETTING[ 'validationSleepInterval' ] )
-            except TransformThreadTimeout:
-                logger.warning( 'Extra validation routine timed out.' )
-            except TransformThreadError as e:
-                logger.warning( 'Thread running extra validation routine failed to stop.\n%s', e )
-            except Exception as e:
-                logger.warning( 'Extra validation routine error.\n%s', e )
-            vTimer.stop()
-            if not extraValidationResult:
-                raise TransformValidationError( fName, 'failed additional validation. Argument %s' % argName, 'TRF_OUTFILE' )
-        logger.info( "%s successfully validated.", fName )
-        return
-
-
-
-class DPDFile( PoolDataFile ):
-    defaultContents = 'dpd'
-    def __init__(self,contents=defaultContents):
-        PoolDataFile.__init__(self,contents)
-
-# Use default PoolDataFile validation.
-#    def validateFile( self, arg, **validationDict ):
-#        #'testIfCorrupt' flag present until there is consensus on the treatment of empty DPD files
-#        vDict = { 'testIfNoEvents' : False, 'testIfCorrupt' : False }
-#        vDict.update( validationDict )
-#        PoolDataFile.validateFile( self, arg, **vDict )
-
-
-class CommentLine:
-    """Utility class to generate python comment lines from a help string"""
-    def __init__(self,comment):
-        self.__comment = comment
-
-    @staticmethod
-    def getLine( char, width = 80 ): return '#' + width * char
-
-    @staticmethod
-    def dashLine( width = 80 ): return '#' + width * '-'
-
-    @staticmethod
-    def hashLine( width = 80 ): return '#' + width * '#'
-
-    def __str__(self):
-        return self.smallComment()
-
-    def smallComment(self):
-        return '# ' + self.__comment.replace(os.linesep, os.linesep + '# ')
-
-    def bigComment(self,char='-',width=80):
-        line = CommentLine.getLine(char,width)
-        return line + os.linesep + \
-            self.smallComment() + os.linesep + \
-            line
-
-
-#
-# end of class CommentLine
-#
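-
-# A short illustration of CommentLine (editor's sketch; the text is invented):
-#
-#   c = CommentLine('first line' + os.linesep + 'second line')
-#   c.smallComment()            # -> '# first line' + os.linesep + '# second line'
-#   CommentLine.dashLine(10)    # -> '#----------'
-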
-class Author:
-    def __init__(self,name,email):
-        self.__name = name
-        self.__email = email
-
-    def __str__(self):
-        return '%s <%s>' % (self.__name, self.__email)
-    
-    def name(self):
-        return self.__name
-
-    def email(self):
-        return self.__email
-
-
-class StringNumberList:
-    """List of strings only differing by a number they contain, where the list of numbers is coded in a special syntax.
-    <prefix>[<list>]<postfix> where '[' and ']' are litteral characters and <list> gives a recipe for a list of
-    numbers to be generated (with <n> and <m> integers): 
-    <n>,<m> (enumeration) or <n>-<m> (range including m) or any combination of those.
-    A python list of strings is generated where the [<list>] is replaced by the actual integers that the list represents.
-    The [<list>] part can also be omitted, in which case a list of one string is generated (as given).
-    The width of the integers is determined by the integer with the most number of digits in the [<list>], where leading 0's
-    are included in the count. The integers in the expanded filenames have leading 0's padded where needed."""
-
-    openBracket = '['
-    closeBracket = ']'
-
-    def __init__(self, codedList=None):
-        self.set( codedList )
-
-    def set(self, codedString):
-        self.__codedString = codedString
-        self.__numbers = None
-        self.__prefix = None
-        self.__suffix = None
-        self.__numberList = None
-        self.__digits = 0
-        # return self for easy chaining of commands
-        return self
-
-    def getPrefix(self,openBracket=-1):
-        """Get everything up to (but not including) the [.
-        If [ is not found, return the full string. In case of error, return None.
-        The <openBracket> argument (optional) is there for optimisation and gives
-        the position of the '[' in the coded string (or -1 if unknown)."""
-        if self.__prefix is not None: return self.__prefix
-        valIn = self.__codedString
-        if valIn is None: return None #signalling error
-        if openBracket != -1:
-            assert valIn[openBracket] == StringNumberList.openBracket
-            bopen = openBracket
-        else:
-            bopen = valIn.find(StringNumberList.openBracket)
-            if bopen == -1: self.__prefix = valIn
-        if bopen >= 0:
-            # bopen == 0 means the string starts with '[': the prefix is empty
-            self.__prefix = valIn[:bopen]
-        else:
-            self.__prefix = valIn
-        return self.__prefix
-
-    def getSuffix(self,closeBracket=-1):
-        """Get everything after the ].
-        If ] is not found, return emptry string. In case of error, return None"""
-        if self.__suffix is not None: return self.__suffix
-        valIn = self.__codedString
-        if valIn is None: return None #signalling error
-        if closeBracket != -1:
-            assert valIn[closeBracket] == StringNumberList.closeBracket
-            bclose = closeBracket
-        else:
-            bclose = valIn.find(StringNumberList.closeBracket)
-        if bclose == -1:
-            # no ']' at all: the suffix is empty (the original fell through here
-            # and returned the whole string instead)
-            self.__suffix = ""
-        elif bclose < len(valIn) - 1:
-            self.__suffix = valIn[bclose+1:]
-        else:
-            self.__suffix = ""
-        return self.__suffix
-
-    def getNumbers(self,openBracket=-1,closeBracket=-1):
-        """Get the part in between [ and ], including the [].
-        If [] part is not found, return empty string. In case of error, return None"""
-        if self.__numbers is not None: return self.__numbers
-        valIn = self.__codedString
-        if valIn is None: return None #signalling error
-        if openBracket != -1:
-            assert valIn[openBracket] == StringNumberList.openBracket
-            bopen = openBracket
-        else:
-            bopen = valIn.find(StringNumberList.openBracket)
-        if closeBracket != -1:
-            assert valIn[closeBracket] == StringNumberList.closeBracket
-            bclose = closeBracket
-        else:
-            bclose = valIn.find(StringNumberList.closeBracket,bopen + 1)
-        if bopen == -1 and bclose == -1:
-            self.__numbers = ""
-        elif bopen == -1 or bclose == -1 or bclose < bopen + 2:
-            self.__numbers = None
-        else:
-            self.__numbers = valIn[bopen:bclose+1]
-        return self.__numbers
-
-    def getNumberList(self,openBracket=-1,closeBracket=-1):
-        """Return a tuple of size 2, containing the list of integers in the first field and
-        the number of digits in the second field. The list of integers is the result of
-        the decoding of the numbers coded in the [] part of the input string. The number
-        of digits is the maximum number of digits used in the numbers in the [] part,
-        where leading 0's are included in the count.
-        If no '[]' part is found, return a tuple with an empty list. In case of error,
-        return (None,None)."""
-        if self.__numberList is not None: return (self.__numberList,self.__digits)
-        nums = self.getNumbers(openBracket,closeBracket)
-        if nums is None: return (None,None)
-        if nums=="": return (list(),0)
-        numList = [ ]
-        bclose = len(nums)
-        posB = 1
-        digits = 0
-        while posB <= bclose:
-            # always start with a digit
-            posE = posB
-            while posE < bclose and nums[posE].isdigit(): posE += 1
-            # require at least one digit
-            if posE == posB: return (None,None)
-            # convert to integer
-            digits = max(digits,posE - posB)
-            iNum = int(nums[posB:posE])
-            charE = nums[posE]
-            if charE == StringNumberList.closeBracket: # last number
-                numList.append( iNum )
-                break
-            elif charE == ',':  # single number
-                numList.append( iNum )
-                posB = posE + 1
-                continue
-            elif charE == '-':  # next comes end of range
-                posB = posE + 1
-                posE = posB
-                while posE < bclose and nums[posE].isdigit(): posE += 1
-                # require at least one digit
-                if posE == posB: return (None,None)
-                # convert to integer    
-                digits = max(digits,posE - posB)
-                iStop = int(nums[posB:posE])
-                if iStop < iNum: return (None,None)
-                numList += range(iNum,iStop+1)
-                charE = nums[posE]
-                if charE == ',':
-                    posB = posE + 1
-                    continue
-                elif charE == StringNumberList.closeBracket:
-                    break
-                else:
-                    return (None,None)
-            else: # spurious character
-                return (None,None)
-        self.__numberList = numList
-        self.__digits = digits
-        return (self.__numberList,self.__digits)
-
-    def convertStringList(self,codedString):
-        return self.set(codedString).getStringList()
-
-    def getStringList(self):
-        """Convert coded string <valIn> into an expanded list. If <valIn> contains a syntax error, None is returned."""
-        openBracket = self.__codedString.find(StringNumberList.openBracket)
-        closeBracket = self.__codedString.find(StringNumberList.closeBracket,openBracket + 1)
-        numList,digits = self.getNumberList(openBracket,closeBracket)
-        if numList is None: return None
-        if len(numList) == 0: return [ self.__codedString ]
-        prefix = self.getPrefix(openBracket)
-        if prefix is None: return None
-        suffix = self.getSuffix(closeBracket)
-        if suffix is None: return None
-        return [ ('%s%0*d%s' % (prefix,digits,i,suffix)).strip() for i in numList ]
-
-    
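-# Editor's sketch of the expansion described in the StringNumberList docstring
-# (filename patterns invented). In '[08-11]' the widest field has two digits,
-# so the expanded numbers are zero-padded to width 2:
-#
-#   StringNumberList('file_[08-11].root').getStringList()
-#   # -> ['file_08.root', 'file_09.root', 'file_10.root', 'file_11.root']
-#   StringNumberList('file_[1,3-5].root').getStringList()
-#   # -> ['file_1.root', 'file_3.root', 'file_4.root', 'file_5.root']
-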
-class JobOptionsFile:
-    def __init__(self,filename):
-        self.__filename = filename
-
-    def filename(self):
-        return self.__filename
-
-    def setFilename(self,filename):
-        self.__filename = filename
-
-    def preRunAction(self):
-        """Check that joboptions file can be found.
-        Check is skipped if filename is empty"""
-        filename = self.filename()
-        if filename and not find_joboptions( filename ):
-            raise JobOptionsNotFoundError( filename )
-
-
-class PreJobOptionsFile(JobOptionsFile):
-    def __init__(self,filename):
-        JobOptionsFile.__init__(self,filename)
-
-    def preRunAction(self):
-        JobOptionsFile.preRunAction(self)
-
-
-class PostJobOptionsFile(JobOptionsFile):
-    def __init__(self,filename):
-        JobOptionsFile.__init__(self,filename)
-
-    def preRunAction(self):
-        JobOptionsFile.preRunAction(self)
-
-
-class GetFiles(TransformLogger):
-    def __init__(self, listOfFiles, fromWhere='data', doCopy='Never', errorIfNotFound=True, keepDir=True, depth=0):
-        TransformLogger.__init__(self)
-        self.listOfFiles = listOfFiles
-        self.fromWhere = fromWhere
-        self.doCopy = doCopy
-        self.keepDir = keepDir
-        self.depth = depth
-        self.errorIfNotFound = errorIfNotFound
-
-    def preRunAction(self):
-        get_files(self.listOfFiles, fromWhere=self.fromWhere, doCopy=self.doCopy, errorIfNotFound=self.errorIfNotFound, keepDir=self.keepDir, depth=self.depth)
-
-
-class SQLiteSupport(GetFiles):
-    """Make sure sqlite file access works on all nfs mounted disks by making copies to the run directory"""
-    def __init__(self):
-        self.__listOfFiles = [ "geomDB/*_sqlite", "sqlite200/*.db", "triggerDB/*.db" ]
-        # if CMTSITE is STANDALONE, then the files must be found
-        errorIfNotFound = os.environ.get('CMTSITE') == 'STANDALONE'
-        GetFiles.__init__(self, self.__listOfFiles, doCopy='ifNotLocal', errorIfNotFound=errorIfNotFound, keepDir=True)
-        # remove any empty directories
-        for dirName in [ os.path.dirname(f) for f in self.__listOfFiles ]:
-            if os.path.isdir(dirName) and not os.listdir(dirName):
-                if os.path.islink(dirName):
-                    # if the symlink is left in place, CORAL will create an empty
-                    # database file and thereby make athena fail...
-                    self.logger().info('Removing local symlink of empty sqlite directory: %s', dirName)
-                    os.remove(dirName)
-                else:
-                    self.logger().info('Removing local copy of empty sqlite directory: %s', dirName)
-                    os.rmdir(dirName)
-
-    def preRunAction(self):
-        """Proceed with retrieve files with get_files()."""                
-        GetFiles.preRunAction(self)
-
-
-class ServiceOverride(PostJobOptionsFile,TransformLogger):
-    def __init__(self,serviceName,membersDict,environmentDict=None):
-        PostJobOptionsFile.__init__(self,"")
-        TransformLogger.__init__(self)
-        self.__service = serviceName
-        self.__members = membersDict
-        self.__environDict = environmentDict
-##        self.__createdJoFile = None
-
-    def preRunAction(self):
-        nMem = len(self.__members)
-        if nMem == 0: return
-        environDict = { }
-        if self.__environDict:
-            for env in self.__environDict:
-                if env in os.environ:
-                    val = os.environ[env]
-                    if val:
-                        environDict[env] = val
-                    else:
-                        environDict[env] = self.__environDict[env]
-            if not environDict: return
-        members = { }
-        for mem in self.__members:
-            try:
-                val = self.__members[mem] % environDict
-            except KeyError:
-                pass #don't set it if environment is needed but not present
-            else:
-                members[mem] = val
-        if not members: return
-        jo = [ "%s = Service( \"%s\" )" % (self.__service,self.__service) ]
-        filename = self.__service
-        for mem in members:
-            val = members[mem]
-            jo.append( "%s.%s = %r" % (self.__service, mem, val) )
-            filename += '_%s_%s' % (mem,val)
-        self.logger().info('Creating jobOptions file %s', filename)
-        with open(filename,'w') as joFile:
-            joFile.write( os.linesep.join(jo) + os.linesep )
-        self.setFilename(filename)
-##        self.__createdJoFile = filename
-        # only now call baseclass preRunAction()
-        PostJobOptionsFile.preRunAction(self)
-
-    def postRunAction(self):
-        """Do some cleanup"""
-##        if self.__createdJoFile:
-##            self.logger().info('Removing jobOptions file %s' % self.__createdJoFile)
-##            fileutil.remove(self.__createdJoFile)
-##            self.__createdJoFile = None
-
-    def setMember(self,name,value):
-        self.__members[name] = value
-
-    def getMember(self,name):
-        return self.__members[name]
-
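-# Editor's sketch of ServiceOverride (service and member names invented):
-#
-#   ov = ServiceOverride('MessageSvc', {'OutputLevel': '3'})
-#   ov.preRunAction()
-#
-# writes a jobOptions fragment named 'MessageSvc_OutputLevel_3' containing:
-#
-#   MessageSvc = Service( "MessageSvc" )
-#   MessageSvc.OutputLevel = '3'
-#
-# and may raise JobOptionsNotFoundError if the run directory is not in the
-# jobOptions search path.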
-
-class VersionString:
-    """A class that can be used to extract the version numbers encoded in a string,
-    and to do version number comparisons (== != < > etc).
-    The encoding has to be provided in the constructor in the form of a compiled
-    regular expression. A utility function withSeparator() is available to
-    generate the regular expression for version numbers separated by a given
-    character. The default pattern is numbers separated by a . (dot), with a
-    maximum of 5 numbers (levels)"""
-    @staticmethod
-    def withSeparator(sep,maxfields):
-        """Return compiled regular expression pattern where the
-        version is a sequence of numbers separated with <sep>.
-        It will match a maximum of <maxfields> consecutive version fields."""
-        sep = re.escape(sep)
-        if maxfields > 1:
-            fields = (r'(?:%s(\d+))?' % sep) * (maxfields-1)
-        else:
-            fields = ''
-        return re.compile(r'(?:^|%s)(\d+)%s(?:%s|$)' % (sep,fields,sep) )
-
-    def __init__(self,version,pattern=None):
-        if pattern is None: pattern = VersionString.withSeparator('.',5)
-        self.version = version
-        match = pattern.search(version)
-        if match:
-            self.numberList = [int(v) for v in match.groups() if v is not None]
-        else:
-            self.numberList = []
-
-    def __str__(self):
-        return self.version
-
-    def __lt__(self,other,returnOnEqual=False):
-        nSelf = len(self.numberList)
-        nOther = len(other.numberList)
-        n = min(nSelf,nOther)
-        for i in range(n):
-            numSelf = self.numberList[i]
-            numOther = other.numberList[i]
-            if numSelf < numOther: return True
-            if numSelf > numOther: return False
-        else:
-            if nSelf < nOther: return True
-            if nSelf > nOther: return False
-        # they are equal!
-        return returnOnEqual
-
-    def __eq__(self,other):
-        nSelf = len(self.numberList)
-        nOther = len(other.numberList)
-        if nSelf != nOther: return False
-        for i in range(nSelf):
-            if self.numberList[i] != other.numberList[i]: return False
-        # they are equal!
-        return True
-
-    def __le__(self,other):
-        return self.__lt__(other,True)
-
-    def __gt__(self,other):
-        return other.__lt__(self)
-
-    def __ge__(self,other):
-        return other.__le__(self)
-
-    def __ne__(self,other):
-        return not self.__eq__(other)
-
-    def __bool__(self):
-        return len(self.numberList) > 0
-
-    def dump(self):
-        print ('%s -> %s' % (self.version,self.numberList))
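-
-# Editor's sketch of VersionString ordering (version strings invented):
-#   assert VersionString('17.2.11') < VersionString('17.2.11.3')
-#   assert VersionString('17.2.11.3') == VersionString('17.2.11.3')
-#   assert not VersionString('latest')   # no digits parsed -> False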
-
diff --git a/Tools/PyJobTransformsCore/python/xmlutil.py b/Tools/PyJobTransformsCore/python/xmlutil.py
deleted file mode 100755
index a20761dabb37c877aca1ef0ecc67541682c7801c..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/python/xmlutil.py
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-from __future__ import print_function
-
-import os
-from copy import copy
-from xml.sax import saxutils
-
-indent = "   "
-
-def opentag(tag,attribs=None):
-    otag = '<' + tag
-    if attribs:
-        for key,val in attribs.items(): otag += ' %s=%s' % ( key, saxutils.quoteattr( str( val ) ) )
-    otag += '>'
-    return otag
-
-
-def closetag(tag):
-    return '</%s>' % tag
-
-
-def simplefield(tag,attribs=None):
-    field = '<' + tag
-    if attribs:
-        for key,val in attribs.items(): field += ' %s=%s' % ( key, saxutils.quoteattr( str( val ) ) )
-    field += '/>'
-    return field
-
-
-def onelinefield(tag,value,attribs=None):
-    return opentag(tag,attribs) + str(value) + closetag(tag)
-
-
-def multilinefield(tag,value,attribs=None):
-    sep = os.linesep + indent
-    return opentag(tag,attribs) + sep + str(value).replace(os.linesep, sep) + os.linesep + closetag(tag)
-
-
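-# Editor's illustration of the tag helpers above (tag and attribute values
-# invented); attribute values pass through saxutils.quoteattr:
-#
-#   opentag('file', {'name': 'a.root'})    # -> '<file name="a.root">'
-#   simplefield('META', {'type': 'str'})   # -> '<META type="str"/>'
-#   onelinefield('lumi', 42)               # -> '<lumi>42</lumi>'
-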
-
-class XMLNode:
-    """Describes an XML node with a name, contents and attributes.
-    The contents can be any object convertable to a string,
-    or a (nested) XMLNode object or a list of (nested) XMLNode objects."""
-    
-    def __init__(self,name,contents=None):
-        self.__name = name
-        self.__contents = None
-        if contents is not None: self.setContents(contents)
-        self.__attributes = {}
-
-
-    def __eq__(self,other):
-        if not isinstance(other,XMLNode): return False
-        return self.__name == other.__name and \
-               self.__contents == other.__contents and \
-               self.__attributes == other.__attributes
-        
-
-    def __ne__(self,other):
-        return not self.__eq__(other)
-
-
-
-    def __str__(self):
-        header = self.__name
-        if self.__attributes:
-            header += '('
-            for n,v in self.__attributes.items():
-                header += '%s=%s, ' % (n,v)
-            header = header[:-2] + ')'
-
-        toString = header
-        contents = self.__contents
-        if not contents:
-            toString += '='
-        else:
-            contType = type(contents).__name__
-            indent = '  '
-            if contType == 'list':
-                toString += ':'
-                for c in contents:
-                    contStr = str(c).replace(os.linesep,os.linesep + indent).strip()
-                    toString += os.linesep + indent + contStr
-            elif isinstance(contents,XMLNode):
-                toString += ':' + os.linesep + indent + str(contents)
-            else:
-                toString += '='
-                contents = str(contents)
-                nLines = contents.count(os.linesep) + 1
-                if nLines > 1:
-                    # make nice indentation
-                    indent = ' '*len(toString)
-                    toString += str(contents).replace(os.linesep,os.linesep + indent).strip()
-                else:
-                    toString += contents
-
-        return toString
-
-    def getContents(self):
-        """ Produces a dictionary with the contents
-        """
-        contType = type(self.__contents).__name__
-        returnedcont={}
-        if contType=='list':
-            for i in self.__contents: 
-                if isinstance(i,XMLNode): 
-                    returnedcont.update({i.__name:i.__contents})
-        else:
-            returnedcont={self.__name:self.__contents}
-        return returnedcont  
-                
-
-    def setName(self,name):
-        self.__name = name
-        return self
-    
-
-    def setContents(self,contents):
-        contType = type(contents).__name__
-        if isinstance(contents,XMLNode):
-            self.__contents = contents
-#            print ("%s: Setting contents %s" % (self.name(),contents.__class__.__name__))
-        elif contType == 'list':
-#            print ("%s: Setting contents list" % (self.name()))
-            self.__contents = contents
-        else:
-#            print ("%s: Setting contents %s" % (self.name(),type(contents).__name__))
-            self.__contents = str(contents)
-
-        return self
-    
-
-    def addContents(self,contents):
-        conttype = type(self.__contents).__name__
-        if conttype == 'instance': conttype = self.__contents.__class__.__name__
-        addtype = type(contents).__name__
-        if addtype == 'instance': addtype = contents.__class__.__name__
-#        print ("%s: Adding contents %s to %s" % (self.name(),addtype,conttype))
-        if self.__contents is None:
-            self.setContents(contents)
-        elif conttype == 'list':
-            if addtype == 'list':
-                self.__contents += contents
-            elif isinstance(contents,XMLNode):
-                self.__contents.append(contents)
-            else:
-                raise AttributeError('XMLNode: can not add a %s to a %s' % (addtype,conttype))
-        elif conttype == 'str':
-            if isinstance(contents,XMLNode):
-                raise AttributeError('XMLNode: can not add a %s to a %s' % (addtype,conttype))
-            else:
-                self.__contents += str(contents)
-        elif isinstance(self.__contents,XMLNode):
-            if isinstance(contents,XMLNode):
-                self.__contents = [ copy(self.__contents), contents ]
-            else:
-                raise AttributeError('XMLNode: can not add a %s to a %s' % (addtype,conttype))
-        else:
-            raise AttributeError('XMLNode: can not add a %s to a %s' % (addtype,conttype))
-            
-        return self
-    
-                 
-    def setAttribute(self,name,value):
-        self.__attributes[name] = value
-        return self
-    
-
-    def setAttributes(self,**kwargs):
-        self.__attributes.update( kwargs )
-        return self
-
-
-    def name(self):
-        return self.__name
-
-
-    def contents(self):
-        return self.__contents
-
-
-    def attributes(self):
-        return self.__attributes
- 
-
-    def getAttribute(self,name,default=None):
-        return self.__attributes.get(name,default)
-
-
-    def hasAttribute(self,name):
-        return name in self.__attributes
-
-
-    def find(self,name,depth=-1,**attribs):
-        """Return a list of all nested XMLNode members with name <name>.
-        It searches <depth> levels deep, where
-        depth=0 only checks the name of the current object
-        depth=1 checks only the direct children of this object.
-        depth=-1 checks too full depth.
-        Returns empty list if no matches are found."""
-        if depth == 0:
-            if name == self.__name:
-                for name,value in attribs:
-                    myvalue = self._attribute.get(name,None)
-                    if myvalue is None or myvalue != value:
-                        return []
-                return [ self ]
-            else:
-                return []
-        else:
-            contents = self.__contents
-            if contents is None:
-                return []
-            elif type(contents).__name__ == 'list':
-                matches = []
-                for c in contents:
-                    found = c.find(name,depth-1)
-                    if found: matches += found
-                return matches
-            elif isinstance(contents,XMLNode):
-                if name == contents.name():
-                    return [ contents ]
-                else:
-                    return []
-
-        return []
-    
-
-    def getXML(self):
-        name = self.__name
-        contents = self.__contents
-        attrib = self.__attributes
-        if contents is None:
-            return simplefield(name,attrib)
-        elif type(contents).__name__ == 'list':
-            fields = []
-            for c in contents:
-                if isinstance(c,XMLNode):
-                    fields.append( c.getXML() )
-                else:
-                    fields.append( saxutils.quoteattr( str(c) ) )
-            xmlStr = os.linesep.join(fields)
-            return multilinefield(name,xmlStr,attrib)
-        elif isinstance(contents,XMLNode):
-            return multilinefield(name,contents.getXML(),attrib)
-        else:
-            contents = saxutils.quoteattr( str(contents) )
-            nLines = contents.count(os.linesep) + 1
-            if nLines > 1:
-                return multilinefield(name,contents,attrib)
-            else:
-                return onelinefield(name,contents,attrib)
-
-    
-if __name__ == '__main__':
-    v = ['from xxx import <>a;d("dsf' 'sdf").Time=0']
-    y=XMLNode("FileCatalog")
-    y.addContents( XMLNode("META").setAttributes(name="var1", type='string') )
-    y.addContents( XMLNode("file1").setAttributes(att_name="apples",att_value=v ) )
-    print (y.getXML())
-    
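
The addContents() dispatch deleted above follows a small set of merging rules: string contents only grow as text, list contents extend or append, and two single XMLNode children are promoted to a list; any other combination raises AttributeError. A minimal standalone paraphrase of those rules (illustrative only, not part of the package; XMLNode is assumed to be the class above):

    from copy import copy

    def add_contents(current, new):
        # current is the node's existing contents; new is what is being added
        if current is None:
            return new                            # first contents: adopt as-is
        if isinstance(current, list):
            if isinstance(new, list):
                return current + new              # list + list extends
            if isinstance(new, XMLNode):
                return current + [new]            # list + node appends
        elif isinstance(current, str):
            if not isinstance(new, XMLNode):
                return current + str(new)         # text only grows as text
        elif isinstance(current, XMLNode):
            if isinstance(new, XMLNode):
                return [copy(current), new]       # two nodes become a list
        raise AttributeError('XMLNode: can not add a %s to a %s'
                             % (type(new).__name__, type(current).__name__))
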
diff --git a/Tools/PyJobTransformsCore/share/atlas_error_categories.db b/Tools/PyJobTransformsCore/share/atlas_error_categories.db
deleted file mode 100755
index 8cf2b61d01e6e65369b2aafba4e476d77f00cf2f..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/atlas_error_categories.db
+++ /dev/null
@@ -1,119 +0,0 @@
-100,	TRF_NOTFOUND,  		Transformation not found in run directory
-60000,	TRF_SEGVIO,		segmentation violation
-60010,	TRF_SEGFAULT,		segmentation fault
-60100,	TRF_CBNTATHEXE,		CBNT_Athena::execute() error
-60101,	TRF_TRTDIGITEXE,	TRTDigitization::execute() error
-60200,	TRF_EGAMSHSHAPE,	egammaShowerShape: Cluster is neither in Barrel nor in Endcap  cannot calculate ShowerShape
-60201,	TRF_LAREMEC,		LArEMECEnergyCorrection::CalculateChargeCollection error
-60600,	TRF_KEY_INTERRUPT,	Transform Keyboard interrupt
-60701,	TRF_CBNTAUDIT,		CBNT_Audit could not allocate memory
-61000,	TRF_MOD,		ApplicationMgr Failed to load modules
-61010,	TRF_MOD_LOAD,		DllClassManager Could not load module
-61020,	TRF_MOD_EP,		DllClassManager Entry point failure in module
-61100,	TRF_ALGOINIT,		EventLoopMgr Unable to initialize Algorithm
-61200,	TRF_SVRINIT,		ServiceManager Unable to initialize Service
-62100,	TRF_DETSTORE,		pixelRoI service_i: can not locate service DetectorStore
-62200,	TRF_POOLCONF,		pool::PersistencySvc::UserDatabase::connectForRead: PFN is not existing in the catalog
-62300,	TRF_EVNTSEL,		ServiceManager: unable to initialize Service: EventSelector
-62400,	TRF_DISTKIT,		JobOptionsSvc error
-62500,	TRF_PDTSETUP,		PartPropSvc: could not open PDT file
-62510,	TRF_PDTFILE,		PartPropSvc: unable to access any PDT file
-62600,	TRF_ATHENACRASH,	AthenaCrash
-62700,	TRF_ATHENAPROXY,	DetectorStore: no valid proxy for default object
-62800,	TRF_PROPERTY,		JobOptionsSvc: unable to set property
-62900,	TRF_DLLLOAD,		DllClassManager: system Error
-62910,	TRF_DLLDECL,		ApplicationMgr: failure loading declared DLL's
-63000,	TRF_PYT,		Transform python errors
-63010,	TRF_PYT_SYNTAX,		Transform python syntax error
-63020,	TRF_PYT_IMPORT,		Transform python import error
-63100,	TRF_ARG,		Transform argument errors
-63110,  TRF_ARG_MAXEVT_TOOFEW,  maxEvents argument: Too few events requested
-63111,  TRF_ARG_MAXEVT_TOOMANY, maxEvents argument: Too many events requested
-63200,	TRF_DEF,		Transform definition errors
-63300,	TRF_ENV,		Transform environment errors
-63400,	TRF_EXC,		Transform unknown exceptions
-63500,  TRF_TIMEOUT,    Transform execution timeout
-63600,  TRF_RETRY,      Transform execution retries exhausted
-63900,  TRF_FILE,       Transform file errors
-64000,	TRF_INFILE,		Transform input file errors
-64010,	TRF_INFILE_NOTFOUND,	Transform input file not found
-64020,	TRF_INFILE_NOTREAD,	Transform input file not readable
-64030,	TRF_INFILE_EMPTY,	Transform input file empty
-64031,  TRF_INFILE_TOOFEW,      Transform input file contains too few events
-64032,  TRF_INFILE_TOOMANY,     Transform input file contains too many events
-64033,  TRF_INFILE_NEVENTFAIL,  Transform input file: Event counting failed   
-64040,	TRF_INFILE_CORRUP,	Transform input file corrupted
-64100,	TRF_OUTFILE,		Transform output file errors
-64110,	TRF_OUTFILE_NOTFOUND,	Transform output file not found
-64120,	TRF_OUTFILE_NOTREAD,	Transform output file not readable
-64130,	TRF_OUTFILE_EMPTY,	Transform output file empty
-64131,  TRF_OUTFILE_TOOFEW,     Transform output file contains too few events
-64132,  TRF_OUTFILE_TOOMANY,    Transform output file contains too many events
-64133,  TRF_OUTFILE_NEVENTFAIL, Transform output file: Event counting failed   
-64140,	TRF_OUTFILE_CORRUP,	Transform output file corrupted
-64150,  TRF_OUTFILE_EXISTS,     Transform output file already exists
-64200,  TRF_CONFIG,             Error in transform configuration file
-65000,  TRF_DB,                 Problems with Database
-65100,  TRF_DBREL,              Problems with DBRelease
-65110,  TRF_DBREL_NOTSETUP,     DBRelease not setup
-65120,  TRF_DBREL_VERSION,      Wrong version of DBRelease setup
-65130,  TRF_DBREL_TARFILE,      Problems with the DBRelease tarfile
-65200,  TRF_GEO,                Problems with geometry tag
-65210,  TRF_GEO_MISMATCH,       Mismatch between Geometry Tag in transform argument geometryVersion and in input file
-66000,  TRF_BADFILE,            Bad file descriptor
-69999,	TRF_UNKNOWN,		Unknown Transform error
-10000,	ATH,			Athena/Transformation error
-10010,  ATH_COREDUMP,   Core dump from CoreDumpSvc (see log for more details)
-10100,	ATH_CON,		At/Tr connection error
-10102,	ATH_CON_NOV,		Nova DB problems
-10103,	ATH_CON_CAL,		Calibration DB problems
-10104,	ATH_CON_ORA3113, 	Oracle error ORA-03113
-10110,	ATH_CON_COND,		Conditions database problems
-10120,  ATH_CON_SQLITE_LOCK,    nfs lock problems with sqlite database
-10130,  ATH_CON_MYSQL_LOST,     Lost connection to MySQL server
-10140,  ATH_CON_ORA_SES,        Oracle error ORA-02391: exceeded simultaneous SESSIONS_PER_USER limit
-10200,	ATH_CRA,		Athena crashes
-10210,	ATH_CRA_INI,		Athena init failed
-10212,	ATH_CRA_INI_NOPFN,	Missing PFN in PoolFileCatalog
-10213,	ATH_CRA_INI_SVCFAIL,	AuditorSvc init failed
-10214,	ATH_CRA_INI_NOPYTHIA,	Pythia DLL not loaded
-10220,	ATH_CRA_WIN,		Input file corrupted (Wrong input)
-10300,	ATH_MOD,		ApplicationMgr Failed to load modules
-10310,	ATH_MOD_LOAD,		DllClassManager Could not load module
-10400,	ATH_DLL,		Problems loading dynamic libraries
-10410,	ATH_DLL_LOAD,		Problem loading shared library
-10420,	ATH_DLL_DECL,		ApplicationMgr: failure loading declared DLL's
-10430,  ATH_DLL_PRELOAD,        Problems loading shared libraries in LD_PRELOAD 
-10500,	ATH_JOP,		JobOptions errors
-10510,	ATH_JOP_NOTFOUND,	JobOptions file not found
-10520,  ATH_JOP_ERROR,          Error in jobOptions
-10600,	ATH_KEY_INTERRUPT,	Athena Keyboard interrupt
-10700,  ATH_STO,                Athena StoreGateSvc errors
-10710,  ATH_STO_RETR,           StoreGateSvc retrieve errors
-10711,  ATH_STO_RETR_DEFAULT,   StoreGateSvc retrieve(default): No valid proxy for object
-10712,  ATH_STO_RETR_NONCONST,  StoreGateSvc retrieve(non-const): No valid proxy for object
-10713,  ATH_STO_RETR_CONST,     StoreGateSvc retrieve(const): No valid proxy for object
-10720,  ATH_STO_REC,            StoreGateSvc record: object not added to store
-10800,  ATH_DETSTO,             Athena DetectorStore errors
-10810,  ATH_DETSTO_RETR,           DetectorStore retrieve errors
-10811,  ATH_DETSTO_RETR_DEFAULT,   DetectorStore retrieve(default): No valid proxy for object
-10812,  ATH_DETSTO_RETR_NONCONST,  DetectorStore retrieve(non-const): No valid proxy for object
-10813,  ATH_DETSTO_RETR_CONST,     DetectorStore retrieve(const): No valid proxy for object
-10820,  ATH_DETSTO_REC,            DetectorStore record: object not added to store
-10900,  ATH_SITE,               Problems with software installation
-10910,  ATH_SITE_SYSLIBS,       Missing system libraries
-10920,  ATH_SITE_LIBS,          Missing libraries
-11000,  ATH_FAILURE,            Athena non-zero exit
-11100,  ATH_ALG,                Algorithm problem
-11110,  ATH_ALG_TIMEOUT,        Algorithm timeout
-13400,	ATH_EXC,		Athena unknown exception
-13410,	ATH_EXC_PYT,		Athena python exception
-13420,	ATH_EXC_CXX,		Athena C++ exception
-14100,  ATH_OUTFILE,            Athena output file errors
-14110,  ATH_OUTFILE_TOOLARGE,   Athena pool.root file too large (root opened second file)
-15000,  ATH_ELINK,              Problems with ElementLink
-15010,  ATH_G4_STUCK,           Geant4 got stuck in event
-15011,  ATH_G4_NONCONS,         Geant4 had significant energy non-conservation
-16000,  ATH_GEN,                Generator problem
-16010,  ATH_GEN_FATALMSG,       Generator reported a fatal error
-
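
Each line above carries three comma-separated fields: a numeric error code, an acronym, and a free-text description (which may itself contain commas, hence the two-argument split below). A hypothetical reader for this format, not part of the deleted package:

    def read_categories(path):
        """Parse a categories .db file into {code: (acronym, description)}."""
        categories = {}
        with open(path) as db:
            for line in db:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                code, acronym, description = line.split(',', 2)
                categories[int(code)] = (acronym.strip(), description.strip())
        return categories
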
diff --git a/Tools/PyJobTransformsCore/share/atlas_error_ignore.db b/Tools/PyJobTransformsCore/share/atlas_error_ignore.db
deleted file mode 100755
index a23291bbe9eef47b3438702a4330ba4af6cbdc8d..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/atlas_error_ignore.db
+++ /dev/null
@@ -1,81 +0,0 @@
-#Each line contains 3 fields, separated by commas:
-#atlas_release_regexp,  who_prints_it,  error_message_regexp
-# For the regular expression syntax that can be used in <error_message_regexp>, see:
-# http://docs.python.org/lib/re-syntax.html
-# Note in particular the special regexp characters that need to be backslashed if meant literal: ()[]{}^$.*+?
-#   In constructing the total regular expression used to match the lines:
-#     - whitespace is stripped from both ends of the fields <atlas_release_regexp> and <who_prints_it>,
-#       and from the right end of <error_message_regexp>
-#     - zero or more whitespace characters are allowed between <who_prints_it> and <error_message_regexp>
-#     - if the <who_prints_it> field is empty, the <error_message_regexp> is the total regexp.
-# error detection can be tested by running on a relevant log file:
-#  checklog.py someLogFile
-
-## Errors to ignore for ALL releases
-## =================================
-# Next line is necessary to avoid tripping the CoreDumpSvc failure on trivial INFO messages
-ALL   ,.*?, INFO .+
-ALL   ,ToolSvc.CscSplitClusterFitter,ERROR   Peak-to-Val dist is [-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?  Val-to-Peak dist is [-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?  Shouldnot be negative value :[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?  [-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)? [-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?
-ALL   ,AlgErrorAuditor,ERROR Illegal Return Code: Algorithm CscThresholdClusterBuilder reported an ERROR, but returned a StatusCode "SUCCESS"
-ALL   ,AlgErrorAuditor,ERROR Illegal Return Code: Algorithm InDetSCTRawDataProvider reported an ERROR, but returned a StatusCode "SUCCESS"
-ALL   ,(?:Py:)?Athena      ,  ERROR inconsistent case used in property name ".*?" of ApplicationMgr
-ALL   ,(?:Py:)?Athena      ,  ERROR Algorithm ".*?": not in TopAlg or other known list, no properties set
-ALL   ,(?:Py:)?Athena      ,  ERROR Algorithm ".*?": type missing, no properties set
-ALL   ,(?:Py:)?Athena      ,  ERROR attempt to add .* to non-existent property .*?
-ALL   ,(?:Py:)?Configurable,  ERROR .* undeclared or uses a backdoor
-ALL   ,(?:Py:)?Configurable,  ERROR children\(\) is deprecated
-ALL   ,(?:Py:)?Configurable,  ERROR getChildren\(\) returns a copy
-ALL   ,(?:Py:)?Configurable,  ERROR jobOptName\(\) is deprecated
-# Reco
-ALL   ,(?:Py:)?Configurable,  ERROR attempt to add a duplicate \(CellCalibrator.CellCalibrator.H1WeightCone7H1Tower\)
-ALL   ,(?:Py:)?ResourceLimits,ERROR failed to set max resource limits
-ALL   ,AlgErrorAuditor,       ERROR Illegal Return Code: Algorithm StreamESD reported an ERROR, but returned a StatusCode "SUCCESS"
-ALL   ,ToolSvc.LArCellBuilderFromLArRawChannelTool, ERROR Channel added twice! Data corruption.*?
-# Trigger BStoRDO 
-ALL   ,AthenaRefIOHandler,    ERROR Failed to set ElementLink
-ALL   ,ElementLink,           ERROR toPersistent: the internal state of link
-ALL   ,StoreGateSvc,          ERROR record: object not added to store
-ALL   ,StoreGateSvc,          ERROR  setupProxy:: error setting up proxy 
-ALL   ,AlgErrorAuditor,       ERROR Illegal Return Code: Algorithm MooHLTAlgo 
-ALL   ,AlgErrorAuditor,       ERROR Illegal Return Code: Algorithm TrigSteer_EF
-ALL   ,AlgErrorAuditor,       ERROR Illegal Return Code: Algorithm muFast_(?:Muon|900GeV)
- 
-# Trigger reco_ESD 
-ALL   ,THistSvc,              ERROR already registered an object with identifier "/EXPERT/
-ALL   ,RpcRawDataNtuple  ,  ERROR .*
-ALL   ,CBNT_L1CaloROD\S+ ,  ERROR .*
-ALL   ,CBNTAA_Tile\S+    ,  ERROR .*
-ALL   ,TileDigitsMaker   ,  ERROR .*
-ALL   ,MdtDigitToMdtRDO  ,  ERROR .* 
-ALL   ,HelloWorld        ,  ERROR .*
-ALL   ,HelloWorld        ,  FATAL .*
-ALL   ,PythiaB           ,  ERROR  ERROR in PYTHIA PARAMETERS
-ALL   ,ToolSvc           ,  ERROR Tool .* not found and creation not requested
-ALL   ,ToolSvc           ,  ERROR Unable to finalize the following tools
-ALL   ,ToolSvc           ,  ERROR Factory for Tool .* not found
-ALL   ,CBNT_Audit        ,  ERROR  Memory leak!.*
-ALL   ,ToolSvc.InDetSCTRodDecoder   ,  ERROR Unknown offlineId for OnlineId*
-ALL   ,THistSvc.sysFinali,  FATAL  Standard std::exception is caught
-ALL   ,,.*Message limit reached for .*
-ALL   ,,\s+ERROR IN C-S .*=.*
-ALL   ,,.*ERROR\s+\|.*
-ALL   ,,^\s*FATAL ERROR\s*$
-ALL   ,,ERROR \(poolDb\):
-ALL   ,,ERROR \(pool\):
-ALL   ,,ERROR - G4Navigator::ComputeStep\(\)
-ALL   ,,.*ERROR OCCURED DURING A SECONDARY SCATTER AND WAS
-ALL   ,THistSvc        , ERROR already registered an object with identifier .*
-ALL   ,,ERROR MuonDetectorManager::getCscReadoutElement stNameindex out of range .*
-ALL   ,muFast_\S+      , ERROR CSM for Subsystem \d+, MrodId \d+, LinkId \d+ not found
-ALL   ,TRTDetectorManager , FATAL Unable to apply Inner Detector alignments
-ALL   ,TRTDetectorManager , ERROR AlignableTransformContainer for key \/TRT\/Align is empty
-ALL   ,,ERROR in Single_Process::CalculateTotalXSec
-ALL   ,,.*ERROR WITH DELM.*
-#ALL   ,ToolSvc.TrigTSerializer,ERROR Errors while decoding
-ALL   ,AlgErrorAuditor,ERROR Illegal Return Code: Algorithm 
-
-
-## Errors to ignore for specific releases
-## ======================================
-## This was cleaned up at PyJobTransformsCore-00-09-27, removing the old r15 and r14 errors
-
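
Per the header rules above, the total regexp for a line is built by stripping both fields and allowing optional whitespace between the printer name and the message pattern; an empty printer field means the message pattern stands alone. A sketch of that assembly (illustrative, not the package's actual parser):

    import re

    def ignore_regexp(who_prints_it, error_message_regexp):
        who = who_prints_it.strip()
        message = error_message_regexp.rstrip()
        if not who:
            return re.compile(message)
        return re.compile(who + r'\s*' + message)
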
diff --git a/Tools/PyJobTransformsCore/share/atlas_error_patterns.db b/Tools/PyJobTransformsCore/share/atlas_error_patterns.db
deleted file mode 100755
index 2f95847a4b3266d4feb6c62566961259cf739244..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/atlas_error_patterns.db
+++ /dev/null
@@ -1,79 +0,0 @@
-#Each line contains 4 fields, separated by commas:
-#atlas_release, error_acronym, who_prints_it, error_message_regexp
-# For the regular expression syntax that can be used in <error_message_regexp>, see:
-# http://docs.python.org/lib/re-syntax.html
-# Note in particular the special regexp characters that need to be backslashed if meant literal: ()[]{}^$.*+?
-# In constructing the total regular expression used to match the lines:
-#   - whitespace is stripped from both ends of the fields <atlas_release>, <error_acronym>, and <who_prints_it>,
-#     and from the right end of <error_message_regexp>
-#   - The regexp to match the lines is the concatenation of <who_prints_it> and <error_message_regexp>,
-#     where zero or more whitespace characters are allowed between <who_prints_it> and <error_message_regexp>
-#   - if the <who_prints_it> field is empty, the <error_message_regexp> is the total regexp
-ALL  ,TRF_POOLCONF        ,EventSelectorAt...,  ERROR (PersistencySvc) pool::PersistencySvc::UserDatabase::connectForRead: PFN is not existing in the catalog
-ALL  ,TRF_EVNTSEL         ,ServiceManager    ,  ERROR Unable to initialize Service: EventSelector
-ALL  ,TRF_DISTKIT         ,JobOptionsSvc     ,  ERROR \#016
-ALL  ,TRF_PDTSETUP        ,PartPropSvc       ,  ERROR Could not open PDT file
-ALL  ,TRF_PDTFILE         ,PartPropSvc       ,  ERROR Unable to access any PDT file
-ALL  ,TRF_ATHENACRASH     ,AthenaCrash,
-ALL  ,TRF_ATHENAPROXY     ,DetectorStore     ,  ERROR retrieve(default): No valid proxy for default object
-ALL  ,TRF_PROPERTY        ,JobOptionsSvc     ,  ERROR Unable to set property
-ALL  ,TRF_DLLLOAD         ,DllClassManager   ,  ERROR System Error
-ALL  ,TRF_DLLDECL         ,ApplicationMgr    ,  ERROR Failure loading declared DLL's
-ALL  ,TRF_MOD             ,ApplicationMgr    ,WARNING Failed to load modules
-ALL  ,TRF_MOD_LOAD        ,DllClassManager   ,  ERROR Could not load module
-ALL  ,TRF_MOD_EP          ,DllClassManager   ,  ERROR Entry point .* in module
-ALL  ,TRF_ALGOINIT        ,EventLoopMgr      ,  ERROR Unable to initialize Algorithm
-ALL  ,TRF_SVRINIT         ,ServiceManager    ,  ERROR Unable to initialize [sS]ervice
-ALL  ,TRF_CBNTAUDIT       ,CBNT_Audit        ,  ERROR\s+Could not getMem
-#ALL  ,TRF_GEO_MISMATCH    ,GeoModelSvc,  ERROR.*Geometry configured through jobOptions does not match TagInfo tags
-ALL  ,TRF_SEGVIO          ,,.*[sS]egmentation violation
-ALL  ,TRF_SEGFAULT        ,,.*[sS]egmentation fault
-ALL  ,TRF_CBNTATHEXE      ,,CBNT_Athena::execute\(\)
-ALL  ,TRF_EGAMSHSHAPE     ,,ERROR  egammaShowerShape: Cluster is neither in Barrel nor in Endcap, cannot calculate ShowerShape 
-ALL  ,TRF_LAREMEC         ,,LArEMECEnergyCorrection::CalculateChargeCollection
-ALL  ,TRF_INFILE_CORRUP   ,,Error in <TBuffer::CheckByteCount>: Byte count probably corrupted
-ALL  ,TRF_TRTDIGITEXE     ,,TRTDigitization::execute\(\)
-ALL  ,TRF_INFILE_TOOFEW   ,,.*TERMINATES NORMALLY: NO MORE EVENTS IN FILE
-ALL  ,TRF_INFILE_CORRUP   ,ByteStreamInputSvc\.sysInitialize\(\),FATAL\s+Standard std::exception is caught
-ALL  ,TRF_INFILE_CORRUP   ,,.*EventStorage reading problem: error reading data from disk
-ALL  ,TRF_OUTFILE_EXISTS  ,,Output file*already exists.*
-ALL  ,TRF_EXC             ,,Output file*already exists.*
-
-#
-# athena errors
-#
-ALL  ,ATH_CON_COND        ,IOVDbSvc          ,  FATAL\s+Cannot initialize Conditions Database
-ALL  ,ATH_CON_ORA3113     ,,Error ORA-03113
-ALL  ,ATH_CON_ORA_SES     ,IOVDbSvc, FATAL dbConnection is not correctly initialized. Stop.
-ALL  ,ATH_CON_SQLITE_LOCK ,,CORAL/RelationalPlugins/sqlite    Error SQLiteStatement.*database is locked
-ALL  ,ATH_CON_MYSQL_LOST  ,,MySQL server gone away|[Ll]ost connection to MySQL
-ALL  ,ATH_DLL_LOAD        ,\s*\S*\s+,.*error in loading shared library
-ALL  ,ATH_STO_RETR_DEFAULT     ,StoreGateSvc   ,  ERROR retrieve\(default\): No valid proxy for
-ALL  ,ATH_STO_RETR_NONCONST    ,StoreGateSvc   ,  ERROR retrieve\(non-const\): No valid proxy for
-ALL  ,ATH_STO_RETR_CONST       ,StoreGateSvc   ,  ERROR retrieve\(const\): No valid proxy for
-ALL  ,ATH_STO_REC              ,StoreGateSvc   ,  ERROR record: object not added to store
-ALL  ,ATH_DETSTO_RETR_DEFAULT  ,DetectorStore  ,  ERROR retrieve\(default\): No valid proxy for
-ALL  ,ATH_DETSTO_RETR_NONCONST ,DetectorStore  ,  ERROR retrieve\(non-const\): No valid proxy for
-ALL  ,ATH_DETSTO_RETR_CONST    ,DetectorStore  ,  ERROR retrieve\(const\): No valid proxy for
-ALL  ,ATH_DETSTO_REC           ,DetectorStore  ,  ERROR record: object not added to store
-ALL  ,ATH_JOP_NOTFOUND         ,,IncludeError: include file .* can not be found
-ALL  ,ATH_OUTFILE_TOOLARGE     ,,Fill: Switching to new file: \S+\.root_1
-ALL  ,ATH_JOP_ERROR            ,Athena      ,  ERROR inconsistent case used in property name ".*?" of ApplicationMgr
-ALL  ,ATH_JOP_ERROR            ,Athena      ,  ERROR Algorithm ".*?": not in TopAlg or other known list
-ALL  ,ATH_JOP_ERROR            ,Athena      ,  ERROR Algorithm ".*?": type missing, no properties set
-ALL  ,ATH_JOP_ERROR            ,Athena      ,  ERROR attempt to add .* to non-existent property .*?
-ALL  ,ATH_JOP_ERROR            ,Configurable,  ERROR .* undeclared or uses a backdoor
-ALL  ,ATH_JOP_ERROR            ,Configurable,  ERROR children\(\) is deprecated
-ALL  ,ATH_JOP_ERROR            ,Configurable,  ERROR getChildren\(\) returns a copy
-ALL  ,ATH_JOP_ERROR            ,Configurable,  ERROR jobOptName\(\) is deprecated
-ALL  ,ATH_ELINK                ,ElementLink,  ERROR
-ALL  ,ATH_ELINK                ,.*,  ERROR .*ElementLink
-ALL  ,ATH_G4_STUCK             ,,ERROR - G4Navigator::ComputeStep\(\)
-ALL  ,ATH_G4_NONCONS           ,,\*G4QH::DecIn2:\*Boost\* 4M=
-ALL  ,ATH_ALG_TIMEOUT          ,Reason:, Timeout \(\d+ msec\) reached
-ALL  ,ATH_COREDUMP             ,,Core dump from CoreDumpSvc
-
-#
-# Something new!
-#
-#ALL  ,TRF_BADFILE              ,,*Bad file descriptor*
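
The pattern file works the same way as the ignore file, but a match yields an error acronym instead of a suppression. A hypothetical matcher over the raw 4-field lines above (release selection and caching omitted for brevity):

    import re

    def match_error(db_lines, log_line):
        """Return the acronym of the first pattern matching log_line, else None."""
        for line in db_lines:
            if not line.strip() or line.startswith('#'):
                continue
            release, acronym, who, message = line.split(',', 3)
            regexp = who.strip() + r'\s*' + message.rstrip()
            if re.search(regexp, log_line):
                return acronym.strip()
        return None
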
diff --git a/Tools/PyJobTransformsCore/share/checklog.py b/Tools/PyJobTransformsCore/share/checklog.py
deleted file mode 100755
index 5719aaa9c8d99235ae2e88c934cab7b7213f7802..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/checklog.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-
-import os
-import sys
-from getopt import getopt
-from AthenaCommon.Logging import logging
-from AthenaCommon import ExitCodes
-from PyJobTransformsCore.trferr import TransformErrorDiagnoser, AthenaLogChecker
-from PyJobTransformsCore.JobReport import JobReport, JobInfo
-from PyJobTransformsCore.trfutil import get_atlas_release
-
-def usage():
-    print("Parse an athena logfile for errors.")
-    print("Usage: %s [options] <logfilename>" % os.path.basename(sys.argv[0]))
-    print("Options:")
-    print("  -h : print short help")
-    print("  -d : print details on the error matching")
-    print("  -x : write jobInfo.xml file")
-    print("  -r <release> : assume atlas release <release>")
-
-if len(sys.argv) <= 1:
-    usage()
-    sys.exit(1)
-
-#options
-debug = False
-writeXML = False
-atlas_release = None
-opts,args = getopt(sys.argv[1:],"dxhr:")
-for opt,arg in opts:
-    if opt == '-d': debug = True
-    if opt == '-x': writeXML = True
-    if opt == '-r': atlas_release = arg
-    if opt == '-h':
-        usage()
-        sys.exit()
-
-
-if len(args) < 1:
-    usage()
-    sys.exit(1)
-
-# setup logger
-log = logging.getLogger('AthenaLogChecker')
-if debug:
-    log.setLevel( logging.DEBUG )
-else:
-    log.setLevel( logging.INFO )
-
-
-# get ATLAS release if not given on command line
-if not atlas_release: atlas_release = get_atlas_release()
-
-# jobReport to store results
-report = JobReport()
-producer = os.path.basename(sys.argv[0])
-if len(args) > 1:
-    comment = 'for files %s' % ( ','.join(args) )
-else:
-    comment = 'for file %s' % ( ','.join(args) )
-report.setProducer( producer, comment=comment )
-report.addInfo( JobInfo( 'athCode', '0' ) )
-report.addInfo( JobInfo( 'athAcronym', str( ExitCodes.what( 0 ) ) ) )
-checker = AthenaLogChecker(atlas_release)
-for filename in args:
-    fileReport = JobReport()
-    fileReport.setProducer( os.path.basename(filename), comment='from logfile' )
-    checker.checkLogFile(filename,fileReport,log)
-    try:
-        report.addReport(fileReport)
-    except KeyError:
-        pass
-    del fileReport
-
-# run the error diagnoser on all errors
-errorDoctor = TransformErrorDiagnoser()
-for error in report.errors():
-    errorDoctor.diagnoseError(error)
-
-print(report)
-if writeXML: report.writeJobInfoXML()
-
-# exit with appropriate code
-sys.exit(report.exitCode())
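
The same checking can be driven programmatically; the sketch below reuses the imports and call signatures visible in the script (the release string and log file name are placeholders, and availability depends on an ATLAS release being set up):

    import logging
    from PyJobTransformsCore.trferr import AthenaLogChecker
    from PyJobTransformsCore.JobReport import JobReport

    log = logging.getLogger('AthenaLogChecker')
    report = JobReport()
    checker = AthenaLogChecker('21.0.77')        # placeholder release
    checker.checkLogFile('athena.log', report, log)
    print(report.exitCode())
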
diff --git a/Tools/PyJobTransformsCore/share/ensure_init.py b/Tools/PyJobTransformsCore/share/ensure_init.py
deleted file mode 100755
index 69be4c0963d858b2f6d1311c160710522e37e5af..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/ensure_init.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-# usage: ensure_init.py [options] [files] [options] [files]
-# Makes sure that the file __init__.py exists in the last directory specified
-# with the -s=<dir> option in the arguments. Any other arguments will be ignored.
-# If the file does not exist, a dummy one will be created.
-# If no -s=<dir> option is given, nothing is done, and program exits with error code 1.
-import sys,os,glob
-if len(sys.argv) <= 1: sys.exit(1)
-srcdir = None
-action = False
-for args in sys.argv[1:]:
-    for arg in args.split():
-        if arg.startswith('-s='):
-            srcdir=arg[3:]
-            if srcdir and os.path.exists(srcdir) and glob.glob( os.path.join(srcdir,'*.py') ):
-                # an __init__.py file must be there
-                initfile = os.path.join(srcdir,'__init__.py')
-                if not os.path.exists(initfile):
-                    # make an (almost) empty file
-                    action = True
-                    print("Creating file %s" % initfile)
-                    init = open(initfile,'w')
-                    init.write("# Auto-generated by %s%s" % (' '.join(sys.argv),os.linesep) )
-                    init.close()
-                else:
-                    print("__init__.py found in %s" % srcdir)
-
-if not action:
-    print("No action needed")
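
For reference, the same behaviour in modern Python is a few lines with pathlib; this is an illustrative equivalent, not a replacement shipped by the package:

    from pathlib import Path

    def ensure_init(srcdir):
        """Create srcdir/__init__.py if the directory holds any .py files."""
        src = Path(srcdir)
        if src.is_dir() and any(src.glob('*.py')):
            init = src / '__init__.py'
            if not init.exists():
                init.write_text('# Auto-generated\n')
                return True
        return False
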
diff --git a/Tools/PyJobTransformsCore/share/expand_files.py b/Tools/PyJobTransformsCore/share/expand_files.py
deleted file mode 100755
index 64072defc4e47f90bd4ffd17b986dbe1df2bc951..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/expand_files.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-# usage: expand_files [options] [files] [options] [files]
-# Return a list of filenames with the package name prepended
-# Files will be collected from source dirs, and prepended with the
-# destination dir.
-# Options:
-# -r=<rootdir>: Root directory of the package. Is used to search for the
-#           files in the correct package.
-# -s=<srcdir>: Default source directory. Will be used if no directory is
-#          specified before a filename. This option can be inserted
-#          in between the filenames.
-# -d=<destdir>: destination directory. Will be prepended to each filename.
-#          Can be inserted in between the filenames. The last one before
-#          a filename will be used.
-import sys,os,glob
-srcdir = os.curdir
-destdir = ''
-rootdir=''
-files=[]
-for args in sys.argv[1:]:
-    for arg in args.split():
-        if arg.startswith('-s='):
-            # get a new default directory
-            srcdir=arg[3:]
-        elif arg.startswith('-d='):
-            destdir=arg[3:]
-        elif arg.startswith('-r='):
-            rootdir=arg[3:]
-        else:
-            argdir = os.path.dirname(arg)
-            if not argdir: argdir = srcdir
-            argname = os.path.basename(arg)
-            fullpath = os.path.normpath( os.path.join( rootdir,'cmt',argdir,argname) )
-            filelist = glob.glob( fullpath )
-            files += [ os.path.join(destdir,os.path.basename(f)) \
-                       for f in filelist ]
-
-if files:
-    print(' '.join( files ))
diff --git a/Tools/PyJobTransformsCore/share/slimmetadata b/Tools/PyJobTransformsCore/share/slimmetadata
deleted file mode 100755
index cd5996b9d2a5b8cd5e54e909706defa1d92106c9..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/slimmetadata
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /usr/bin/env bash
-
-SOURCEFILE=$1
-TEMPFILE1=$2
-TEMPFILE2=$3
-cp -f ${SOURCEFILE} ${TEMPFILE1}
-for type in `grep 'META type' ${TEMPFILE1} | cut -d= -f3 | cut -d/ -f1`; do
-    test ${type} = '"events"' && continue
-    val=`grep ${type} ${TEMPFILE1} | tail -1 | cut -d= -f3-99 | sed -e "s%&%__ampersand__%g" -e "s/%/__percentsign__/g"`
-    cat ${TEMPFILE1} | sed -e "s%${type}/%${type} value=${val}%" -e "s%__ampersand__%\&%g" -e "s/__percentsign__/%/g" -e "s/>>/>/" > ${TEMPFILE2}
-    cat ${TEMPFILE2} | grep -v -F "${type} att_value=${val}"  > ${TEMPFILE1}
-   #catch error, abort if there's a problem.
-    if [ $? != 0 ]; then
-      exit 1
-    fi
-done
-cp -f ${TEMPFILE1} ${SOURCEFILE}
diff --git a/Tools/PyJobTransformsCore/share/trf_ls b/Tools/PyJobTransformsCore/share/trf_ls
deleted file mode 100755
index 068bb9959c6d96b4d000c0f9c20466c9d9c4f9e5..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/trf_ls
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
-import sys,os,getopt
-def usage():
-    use = "usage: %s [-h] [-p] [-f] [[trf_name] [trf_name]]" % os.path.basename(sys.argv[0])
-    print(use + """
-    
-Print a list of available jobtransforms, or find a specific one. Wildcards allowed.
-If <trf_name> does not end with _trf.py, this will be added before searching.
-When no arguments are given, all available jobtransforms are printed.
-By default PATH is used to look for jobtransforms, and only the jobtransform names are shown.
-Options:
- -h : Print this help message
- -f : Print full path name.
- -p : Look in PYTHONPATH and include package name in front of trf name (for import in python)
-      If combined with option -f, print full path of python module.""")
-
-
-showPython = False
-showPath = False
-
-# parse options
-if len(sys.argv) > 1:
-    opts,args = getopt.getopt( sys.argv[1:], 'hpf' )
-else:
-    opts = []
-    args = []
-
-for opt,arg in opts:
-    if opt == '-h':
-        usage()
-        sys.exit()
-    elif opt == '-p':
-        showPython = True
-    elif opt == '-f':
-        showPath = True
-
-
-try:
-    from PyJobTransformsCore.envutil import find_files_env
-except ImportError:
-    raise EnvironmentError("ATLAS Runtime environment not set up.")
-
-
-if args:
-    filelist = args
-else:
-    filelist = [ '*_trf.py' ]
-
-postfix = '_trf.py'
-prefix = ''
-pathToFind = 'PATH'
-
-if showPython:
-    pathToFind = 'PYTHONPATH'
-    prefix = '*' + os.sep
-
-for f in filelist:
-    if not os.path.dirname(f): f = prefix + f
-    if not f.endswith(postfix): f += postfix
-    found = find_files_env( f, pathToFind )
-    for full in found:
-        trf = os.path.basename(full)
-        if showPath:
-            print(full)
-        elif showPython:
-            dir = os.path.dirname(full)
-            package = os.path.basename(dir)
-            print('%s ' % os.path.join(package,trf))
-        else:
-            print(trf)
diff --git a/Tools/PyJobTransformsCore/share/trigbsextraction_error_ignore.db b/Tools/PyJobTransformsCore/share/trigbsextraction_error_ignore.db
deleted file mode 100755
index d255626a91dabe33c42be9ad3fc31a2f839c6596..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/trigbsextraction_error_ignore.db
+++ /dev/null
@@ -1,20 +0,0 @@
-#Each line contains 3 fields, separated by commas:
-#atlas_release_regexp,  who_prints_it,  error_message_regexp
-# For the regular expression syntax that can be used in <error_message_regexp>, see:
-# http://docs.python.org/lib/re-syntax.html
-# Note in particular the special regexp characters that need to be backslashed if meant literal: ()[]{}^$.*+?
-#   In constructing the total regular expression used to match the lines:
-#     - whitespace is stripped from both ends of the fields <atlas_release_regexp> and <who_prints_it>,
-#       and from the right end of <error_message_regexp>
-#     - zero or more whitespace characters are allowed between <who_prints_it> and <error_message_regexp>
-#     - if the <who_prints_it> field is empty, the <error_message_regexp> is the total regexp.
-# error detection can be tested by running on a relevant log file:
-#  checklog.py someLogFile
-
-# this file is meant to be used as Reco_trf --extraignorefilter=trigbsextraction_error_ignore.db
-
-ALL   ,ToolSvc.TrigTSerializer,ERROR Errors while decoding
-ALL   ,AlgErrorAuditor,ERROR Illegal Return Code: Algorithm TrigBSExtraction
-ALL   ,ToolSvc.TrigTSerializer,ERROR Errors while decoding egammaContainer_p2 
-ALL   ,ToolSvc.TrigTSerializer,ERROR Errors while decoding CaloClusterContainer_p4
-ALL   ,ToolSvc.LArCellBuilderFromLArRawChannelTool, ERROR Channel added twice! Data corruption\?
diff --git a/Tools/PyJobTransformsCore/share/upgrade_error_ignore.db b/Tools/PyJobTransformsCore/share/upgrade_error_ignore.db
deleted file mode 100644
index 3bbad381373edefdd2b53003ae7a99ec103c9b05..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/share/upgrade_error_ignore.db
+++ /dev/null
@@ -1,16 +0,0 @@
-#Each line contains 3 fields, separated by commas:
-#atlas_release_regexp,  who_prints_it,  error_message_regexp
-# For the regular expression syntax that can be used in <error_message_regexp>, see:
-# http://docs.python.org/lib/re-syntax.html
-# Note in particular the special regexp characters that need to be backslashed if meant literal: ()[]{}^$.*+?
-#   In constructing the total regular expression used to match the lines:
-#     - whitespace is stripped from both ends of the fields <atlas_release_regexp> and <who_prints_it>,
-#       and from the right end of <error_message_regexp>
-#     - zero or more whitespace characters are allowed between <who_prints_it> and <error_message_regexp>
-#     - if the <who_prints_it> field is empty, the <error_message_regexp> is the total regexp.
-# error detection can be tested by running on a relevant log file:
-#  checklog.py someLogFile
-
-# this file is meant to be used as Reco_trf --extraignorefilter=upgrade_error_ignore.db
-
-ALL   ,TrigConfigSvc,.*TrigConfigSvc can not return a HLTChainList object from any of the configured sources XMLL1
diff --git a/Tools/PyJobTransformsCore/test/DBReleaseArg_test.py b/Tools/PyJobTransformsCore/test/DBReleaseArg_test.py
deleted file mode 100755
index 43e9d313e37c401d375b63c9a1a22b3f7d9e56d2..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/DBReleaseArg_test.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-from PyJobTransformsCore.full_trfarg import *
-from PyJobTransformsCore.trf import *
-from PyJobTransformsCore.AtlasErrorCodes import ErrorInfo
-
-trf = JobTransform( Author("Martin Woudstra", "Martin.Woudstra@cern.ch"),
-                    help = "Testing the DBReleaseArg",
-                    skeleton = None,
-                    name = "TestDBRelease" )
-trf.add(DBReleaseArg())
-trf.setLoggerLevel('ALL')
-
-trf.setArgument("DBRelease", "bad_DBRelease.tar.gz" )
-
-try:
-    trf.doPreRunActions()
-except TransformArgumentError as e:
-    expectedException = 'TransformArgumentError'
-    errorExpected = 'TRF_DBREL_TARFILE'
-    excClass = e.__class__.__name__
-    if excClass != expectedException:
-        print("Test FAILED: Expected exception of type %s. Got instead: %s: %s" %
-              (expectedException,excClass,e))
-        print(ErrorInfo())
-    elif e.error != errorExpected:
-        print("Test FAILED: got correct exception type %s but with error=%s (expected %s)" % (e.__class__.__name__,e.error,errorExpected))
-        print(ErrorInfo())
-    else:
-        # OK!
-        print("Test OK: got expected exception: %s: %s" % (e.__class__.__name__,e))
-
diff --git a/Tools/PyJobTransformsCore/test/TransformConfig_test.py b/Tools/PyJobTransformsCore/test/TransformConfig_test.py
deleted file mode 100755
index 6b37abb631a5fa856a9be0bc10bd6284b2fa4073..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/TransformConfig_test.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import myunittest
-from PyJobTransformsCore.TransformConfig import *
-
-class TransformConfigTest(myunittest.TestCase):
-    def setUp(self):
-        self.config = TransformConfig()
-
-    def testAddAttribute(self):
-        """add a member to the config"""
-        self.config.addAttribute( String( "myString", "myStringDefault", "myStringDoc" ) )
-        print("")
-        print(self.config)
-        self.config.myString = "myStringValue"
-        print(self.config)
-
-
-if __name__ == "__main__":
-    myunittest.main()
-
diff --git a/Tools/PyJobTransformsCore/test/argstest_trf.py b/Tools/PyJobTransformsCore/test/argstest_trf.py
deleted file mode 100755
index 6c5b3cc883a227a8b53bf488fc52d98223bad920..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/argstest_trf.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-from PyJobTransformsCore.trf import *
-from PyJobTransformsCore.full_trfarg import *
-
-class TestArgsJobTransform( JobTransform ):
-      def __init__(self):
-        JobTransform.__init__( self,
-                               version='1.0.0',
-                               authors=Author('Martin Woudstra', 'Martin.Woudstra@cern.ch'),
-                               skeleton='skeleton.test.py',
-                               help="""Test all JobTransform Argument classes""" )
-        # add arguments
-        self.add( RunNumberArg() )
-        self.add( FirstEventArg() )
-        self.add( MaxEventsArg() )
-        self.add( SkipEventsArg() )
-        self.add( RandomSeedArg('Test of randomseed') )
-        self.add( JobOptionsArg() )
-        self.add( InputEvgenFileArg() )
-        self.add( OutputEvgenFileArg() )
-        self.add( InputHitsFileArg() )
-        self.add( OutputHitsFileArg() )
-        self.add( InputRDOFileArg() )
-        self.add( OutputRDOFileArg() )
-        self.add( InputESDFileArg() )
-        self.add( OutputESDFileArg() )
-        self.add( InputAODFileArg() )
-        self.add( OutputAODFileArg() )
-        self.add( GeometryVersionArg() )
-        self.add( JobConfigArg("theConfig") )
-        self.add( InputGeneratorFileArg(), 'NONE' )
-        self.add( HistogramFileArg(), 'NONE' )
-        self.add( NtupleFileArg(), 'NONE' )
-
-        # add utilities
-        self.add( SQLiteSupport() )
-        self.add( GetFiles( ["PDGTABLE.MeV"] ) )
-
-# make transformation object              
-trf = TestArgsJobTransform()
-# execute it if not imported
-if __name__ == '__main__': trf.exeSysArgs()
-
-
-
-
-        
diff --git a/Tools/PyJobTransformsCore/test/bad_DBRelease.tar.gz b/Tools/PyJobTransformsCore/test/bad_DBRelease.tar.gz
deleted file mode 100755
index 55bb632032afdf7fe33d24a85003a16c74051aec..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/bad_DBRelease.tar.gz
+++ /dev/null
@@ -1,3 +0,0 @@
-This is just a file claiming to be a tar.gz file, while it is not.
-It is used to test the error handling of the DBReleaseArg argument.
-
diff --git a/Tools/PyJobTransformsCore/test/basic_trfarg_test.py b/Tools/PyJobTransformsCore/test/basic_trfarg_test.py
deleted file mode 100755
index 6b6bcf8e4ac0b45d5a5fe1ceed3d30589a3787f1..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/basic_trfarg_test.py
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import os,sys
-
-import myunittest
-from PyJobTransformsCore.basic_trfarg import *
-from PyJobTransformsCore.trfutil import *
-
-class TestInputTarFileArg(myunittest.TestCase):
-    def setUp(self):
-        """setup temporary files to work with.
-        Strategy:
-        Create a temporary directory (given by system).
-        In this directory, create 2 subdirectories: 'src' and 'dest'.
-        Put all the source files (tars) into the 'src' subdir,
-        run the tests from the temporary dir, and give the 'dest'
-        subdir as a destination dir for the TarZipFile object.
-        The input tar files are given relative to the current dir,
-        so with the 'src' prefix.
-        """
-        myunittest.TestCase.setUp(self)
-        if self.debug: self.listTmpFiles(os.linesep + "BEFORE SETUP")
-        self.srcdir = 'src'
-        self.destdir = 'dest'
-        self.srcfulldir  = self.makeTmpDir( self.srcdir )
-        self.destfulldir = self.makeTmpDir( self.destdir )
-        self.arg = InputTarFileArg(help="Testing InputTarFileArg",destdir=self.destdir)
-        # setup temporary filenames
-        self.prefix = 'test'
-        self.tarfile = "%s.tar" % self.prefix
-        self.targzfile = "%s.tar.gz" % self.prefix
-        self.tgzfile = "%s.tgz" % self.prefix
-        self.files = [ "%s.dat" % (self.prefix), "%s.txt" % (self.prefix) ]
-        # create the files
-        for f in self.files: self.createTmpFile( os.path.join( self.srcdir, f ) )
-        if self.debug: self.listTmpFiles("AFTER SETUP")
-
-
-    def tearDown(self):
-        """remove temporary files"""
-        if self.debug: self.listTmpFiles("BEFORE TEARDOWN")
-        myunittest.TestCase.tearDown(self)
-        if self.debug: self.listTmpFiles("AFTER TEARDOWN")
-
-
-    def createTarFile(self):
-        """Creates a tarfile containing the files in self.files.
-        It returns the full path of the tarfile."""
-        subtar = os.path.join( self.srcdir, self.tarfile )
-        # remove existing tarfile
-        self.removeTmpFiles( subtar )
-        # make new one
-        fulltar = self.fullTmpFile( subtar )
-        tarargs = "%s" % (' '.join(self.files))
-        tardir = os.path.dirname( fulltar )
-        cmds = [ "cd %s ; tar -cf %s %s" % (tardir, self.tarfile, tarargs) ]
-        for cmd in cmds:
-            if self.debug: print(cmd)
-            if not self.fake:
-                stat = os.system( cmd )
-                if stat != 0:
-                    raise OSError( "shell command %s FAILED (exit=%d)" % (cmd,stat) )
-
-        return fulltar
-       
-
-    def createTarGzFile(self):
-        """Creates a zipped tarfile containing the files in self.files"""
-        # create the tarfile
-        fulltar = self.createTarFile()
-        # zip it
-        tardir = os.path.dirname( fulltar )
-        subtargz = os.path.join( self.srcdir, self.targzfile )
-        fulltargz = self.fullTmpFile( subtargz )
-        targzdir = os.path.dirname( fulltargz )
-        assert( tardir == targzdir )
-        cmds = [ "cd %s ; gzip %s" % (tardir, self.tarfile) ]
-        for cmd in cmds:
-            if self.debug: print(cmd)
-            if not self.fake:
-                stat = os.system( cmd )
-                if stat != 0:
-                    raise OSError( "shell command %s FAILED (exit=%d)" % (cmd,stat) )
-       
-        return fulltargz
-
-
-    def createTgzFile(self):
-        subtgz = os.path.join( self.srcdir, self.tgzfile )
-        # remove existing tarfile
-        self.removeTmpFiles( subtgz )
-        # make and register new one
-        fulltgz = self.fullTmpFile( subtgz )
-        tarargs = "%s" % (' '.join(self.files))
-        tardir = os.path.dirname( fulltgz )
-        cmds = [ "cd %s ; tar -czf %s %s" % (tardir, self.tgzfile, tarargs) ]
-        for cmd in cmds:
-            if self.debug: print(cmd)
-            if not self.fake:
-                stat = os.system( cmd )
-                if stat != 0:
-                    raise OSError( "shell command %s FAILED (exit=%d)" % (cmd,stat) )
-        
-        return fulltgz
-
-
-    def contentsAndExtraction(self,archive):
-        # setup & execute
-        arg = self.arg
-        self.cdTmpDir()
-        infile = os.path.join(self.srcdir,archive)
-        arg.setValue(infile)
-        found = arg.filelist()
-        arg.extract()
-        # test the contents
-        found.sort()
-        self.files.sort()
-        self.assertEqual( found, self.files )
-        # test the extraction
-        for f in self.files:
-            self.failUnless( os.path.exists( os.path.join( self.destfulldir, f ) ),
-                             "File %s not extracted from archive %s" % (f,infile) )
-
-
-    def testTarFile(self):
-        """Test a non-zipped tarfile"""
-        self.createTarFile()
-        self.contentsAndExtraction( self.tarfile )
-
-        
-    def testTarGzFile(self):
-        """Test a gzipped tarfile"""
-        self.createTarGzFile()
-        self.contentsAndExtraction( self.targzfile )
-
-
-    def testTgzFile(self):
-        """Test a tgz tarfile"""
-        self.createTgzFile()
-        self.contentsAndExtraction( self.tgzfile )
-
-
-
-
-
-class TestInputFileListArg(myunittest.TestCase):
-    def setUp(self):
-        self.arg = InputFileListArg("Testing InputFileListArg",'default',
-                                    FileType(type='root',contents='test|TEST') )
-
-    
-    def checkConversion(self,valIn,valOut):
-        self.assertEqual( self.arg.toPython(valIn), valOut )
-        
-    
-    def assertException(self,valsIn):
-        for valIn in valsIn:
-            self.assertRaises( TransformArgumentError, self.arg.toPython, valIn ) 
-
-
-
-class TestInputFileListArg_GoodInput(TestInputFileListArg):
-    def testSingleFilename(self):
-        """single filename (i.e. without [])"""
-        valIn = "prefix_only.suffix"
-        valOut = [ valIn ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testSimpleList(self):
-        """list of simple filenames"""
-        valIn = "prefix1_only.suffix1,prefix2_only.suffix2,prefix3_only.suffix3"
-        valOut = valIn.split(FileList.listSep)
-        self.checkConversion( valIn, valOut )
-
-
-    def testSingleNumber(self):
-        """Single number in []"""
-        valIn = "prefix.[001].suffix"
-        valOut = [ "prefix.001.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testSingleNumberFirstInList(self):
-        """Single number in [], first in list"""
-        valIn = "prefix.[001].suffix,filename2,filename3"
-        valOut = [ "prefix.001.suffix", "filename2", "filename3" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testSingleNumberMiddleInList(self):
-        """Single number in [], middle in list"""
-        valIn = "filename1,prefix.[001].suffix,filename2"
-        valOut = [ "filename1", "prefix.001.suffix", "filename2" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testSingleNumberLastInList(self):
-        """Single number in [], last in list"""
-        valIn = "filename1,filename2,prefix.[001].suffix"
-        valOut = [ "filename1", "filename2", "prefix.001.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testRangeSizeOne(self):
-        """Range of size 1"""
-        valIn = "prefix.[02-2].suffix"
-        valOut = [ "prefix.02.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-        
-    def testRange(self):
-        """Range of files"""
-        valIn = "prefix.[3-0005].suffix"
-        valOut = [ "prefix.0003.suffix", "prefix.0004.suffix", "prefix.0005.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testList(self):
-        """List of files"""
-        valIn = "prefix.[6,7,8].suffix"
-        valOut = [ "prefix.6.suffix" , "prefix.7.suffix", "prefix.8.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testListAndRange(self):
-        """List and Range of files"""
-        valIn = "prefix.[9,10,14-16].suffix"
-        valOut = [ "prefix.09.suffix", "prefix.10.suffix" ,"prefix.14.suffix", "prefix.15.suffix", "prefix.16.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testRangeAndList(self):
-        """Range and list of files"""
-        valIn = "prefix.[17-19,24,28].suffix"
-        valOut = [ "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix", "prefix.24.suffix", "prefix.28.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testRangeAndListFirstInList(self):
-        """Coded range and list of files, first in list"""
-        valIn = "prefix.[17-19,24,28].suffix,filename1,filename2"
-        valOut = [ "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix", "prefix.24.suffix",
-                   "prefix.28.suffix", "filename1", "filename2" ]
-        self.checkConversion( valIn, valOut )
-
-        
-    def testRangeAndListMiddleInList(self):
-        """Coded range and list of files, middle in list"""
-        valIn = "filename1,prefix.[17-19,24,28].suffix,filename2"
-        valOut = [ "filename1", "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix", "prefix.24.suffix",
-                   "prefix.28.suffix", "filename2" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testRangeAndListLastInList(self):
-        """Coded range and list of files, last in list"""
-        valIn = "filename1,filename2,prefix.[17-19,24,28].suffix"
-        valOut = [ "filename1", "filename2", "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix", "prefix.24.suffix",
-                   "prefix.28.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testAllTogether(self):
-        """All kinds of combinations"""
-        valIn = "filename1,prefix.[17-19].suffix,prefix.[24,28].suffix" + \
-        ",filename2,prefix.[32-34].suffix,filename3"
-        valOut = [ "filename1", "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix",
-                   "prefix.24.suffix", "prefix.28.suffix", "filename2",
-                   "prefix.32.suffix", "prefix.33.suffix", "prefix.34.suffix", "filename3" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testTwoRanges(self):
-        """Two ranges"""
-        valIn = "prefix.[30-32,35-36].suffix"
-        valOut = [ "prefix.30.suffix", "prefix.31.suffix" ,"prefix.32.suffix",
-                   "prefix.35.suffix", "prefix.36.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-
-class InputFileListArg_BadInput(TestInputFileListArg):
-    def testCommaOnly(self):
-        """comma only"""
-        badargs = [ "," ]
-        self.assertException( badargs )
-
-
-    def testEmptyFieldBegin(self):
-        """Empty field begin"""
-        badargs = [ ",file1" ]
-        self.assertException( badargs )
-
-
-    def testEmptyFieldMiddle(self):
-        """Empty field middle"""
-        badargs = [ "file1,,file2" ]
-        self.assertException( badargs )
-
-
-    def testEmptyFieldEnd(self):
-        """Empty field end"""
-        badargs = [ "file1," ]
-        self.assertException( badargs )
-       
-    
-    def testMissingOpenBracket(self):
-        """Missing ["""
-        badargs = [ "prefix.4-7].suffix" ]
-        self.assertException( badargs )
-
-
-    def testMissingCloseBracket(self):
-        """Missing ]"""
-        badargs = [ "prefix.[4-7.suffix" ]
-        self.assertException( badargs )
-
-
-    def testEmptyBrackets(self):
-        """Empty brackets: []"""
-        badargs = [ "prefix.[].suffix" ]
-        self.assertException( badargs )
-
-
-    def testSpuriousCharacter(self):
-        """Spurious character"""
-        badargs = [ "prefix.[4.7].suffix","prefix.[4x7].suffix",  "prefix.[4X7].suffix", "prefix.[4$7].suffix",
-                    "prefix.[4%7].suffix", "prefix.[4#7].suffix" ]
-        self.assertException( badargs )
-
-
-    def testMissingListEntryBegin(self):
-        """Missing number begin [,5]"""
-        badargs = [ "prefix.[,5].suffix", "prefix.[,5,7].suffix", "prefix.[,5,7-8].suffix" ]
-        self.assertException( badargs ) 
-
-
-    def testMissingListEntryEnd(self):
-        """Missing number end [8,]"""
-        badargs = [ "prefix.[8,].suffix", "prefix.[6,8,].suffix", "prefix.[4-6,8,].suffix" ]
-        self.assertException( badargs )
-
-
-    def testMissingListEntryMiddle(self):
-        """Missing number middle [7,,8]"""
-        badargs = [ "prefix.[7,,8].suffix", "prefix.[7,8,,10].suffix", "prefix.[7,,9,10].suffix",
-                    "prefix.[2-4,,8].suffix", "prefix.[7,,8-10].suffix" ]
-        self.assertException( badargs )
-
-
-    def testMissingRangeBegin(self):
-        """Missing start number of range"""
-        badargs = [ "prefix.[-8].suffix", "prefix.[-8].suffix", "prefix.[-8].suffix", "prefix.[-8,10].suffix",
-                    "prefix.[4,-8].suffix", "prefix.[4,-8,10].suffix" ]
-        self.assertException( badargs )
-
-
-    def testMissingRangeEnd(self):
-        """Missing end number of range"""
-        badargs = [ "prefix.[8-].suffix", "prefix.[8-,10].suffix", "prefix.[4,8-].suffix",
-                    "prefix.[4,8-,10].suffix", "prefix.[8-,10-12].suffix" ]
-        self.assertException( badargs )
-
-
-    def testRangeDoubleDash(self):
-        """Double dash: --"""
-        badargs = [ "prefix.[6--8].suffix", "prefix.[6--8,10].suffix", "prefix.[4,6--8].suffix", "prefix.[4,6--8,10].suffix" ]
-        self.assertException( badargs )
-        
-
-
-    def testRangeWrongOrder(self):
-        """Range has lower end than start"""
-        badargs = [ "prefix.[8-7].suffix", "prefix.[8-06].suffix" ]
-        self.assertException( badargs )
-
-
-
-    def testNegativeNumbers(self):
-        """Negative numbers"""
-        badargs = [ "prefix.[-4].suffix", "prefix.[-4,6].suffix", "prefix.[6,-4].suffix", "prefix.[-4-7].suffix",
-                    "prefix.[4--7].suffix", "prefix.[-7--4].suffix","prefix.[2,4--7].suffix","prefix.[4--7,2].suffix" ]   
-        self.assertException( badargs )
-
-
-    
-
-if __name__ == "__main__":
-    myunittest.main()
-
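
The filename syntax exercised by these tests expands a bracketed list or range into concrete names, zero-padding every number to the widest field that appears inside the brackets (so [3-0005] yields 0003..0005 and [9,10,14-16] yields 09, 10, 14, 15, 16). An illustrative re-implementation of just that expansion, without the error handling the bad-input tests demand:

    import re

    def expand_brackets(name):
        """Expand 'prefix.[3-0005,9].suffix' style names into a file list."""
        match = re.match(r'^(.*)\[([\d,-]+)\](.*)$', name)
        if not match:
            return [name]                    # nothing to expand
        head, spec, tail = match.groups()
        fields = spec.split(',')
        # zero-pad to the widest number appearing anywhere in the brackets
        width = max(len(part) for field in fields for part in field.split('-'))
        numbers = []
        for field in fields:
            if '-' in field:
                lo, hi = field.split('-')
                numbers.extend(range(int(lo), int(hi) + 1))
            else:
                numbers.append(int(field))
        return ['%s%0*d%s' % (head, width, n, tail) for n in numbers]
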
diff --git a/Tools/PyJobTransformsCore/test/envutil_test.py b/Tools/PyJobTransformsCore/test/envutil_test.py
deleted file mode 100755
index 696491c071d40d2e09522ad9451f97a7896adf80..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/envutil_test.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import myunittest
-import os
-
-from PyJobTransformsCore.envutil import *
-
-
-class PathEnvManips(myunittest.TestCase):
-
- 
-    def assertPath(self, pathIn, pathOut, *vargs ):
-        envName = 'TESTPATH'
-        if pathIn is None:
-            envStart=None
-            if envName in os.environ:
-                del os.environ[envName]
-        else:
-            os.environ[envName] = pathIn
-            envStart=pathIn
-
-        self.func(envName, *vargs)
-        if pathIn is None:
-            pathInMess = "Undefined environment variable"
-        else:
-            pathInMess = pathIn
-        pathNew = os.environ.get(envName)
-        errMess = 'Starting with \"%s\" gives \"%s\" instead of \"%s\"' % \
-                  ( pathInMess, pathNew, pathOut )
-        
-        self.assertEqual( pathNew, pathOut, errMess )
-
-
-    
-    def testAppendPathEnv(self):
-        """append_path_env"""
-        self.func = append_path_env
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir:mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'dir1:mydir', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mydir:dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:mydir:dir2:mydir', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'dir1:dir2:mydir:mydir', 'mydir' )
-                        
-
-    def testAppendPathEnvIf(self):
-        """append_path_env_if"""
-        self.func = append_path_env_if
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'dir1:mydir', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:mydir:dir2', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'dir1:dir2:mydir', 'mydir' )
-                        
-
-    def testAppendPathEnvForce(self):
-        """append_path_env_force"""
-        self.func = append_path_env_force
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'dir1:mydir', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:dir2:mydir', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'dir1:dir2:mydir', 'mydir' )
-                        
-
-    def testPrependPathEnv(self):
-        """prepend_path_env"""
-        self.func = prepend_path_env
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir:mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'mydir:dir1', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mydir:mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'mydir:dir1:mydir:dir2', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'mydir:dir1:dir2:mydir', 'mydir' )
-                        
-
-    def testPrependPathEnvIf(self):
-        """prepend_path_env_if"""
-        self.func = prepend_path_env_if
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'mydir:dir1', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:mydir:dir2', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'dir1:dir2:mydir', 'mydir' )
-                        
-
-    def testPrependPathEnvForce(self):
-        """prepend_path_env_force"""
-        self.func = prepend_path_env_force
-
-        self.assertPath( None, 'mydir', 'mydir' )
-        self.assertPath( 'mydir', 'mydir', 'mydir' )
-        self.assertPath( "", 'mydir', 'mydir' )
-        self.assertPath( 'dir1', 'mydir:dir1', 'mydir' )
-        self.assertPath( 'dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'dir1:mydir:dir2', 'mydir:dir1:dir2', 'mydir' )
-        self.assertPath( 'dir1:dir2:mydir', 'mydir:dir1:dir2', 'mydir' )
-
-
-    def testReplacePathEnv(self):
-        """replace_path_env"""
-        self.func = replace_path_env
-
-        self.assertPath(    None,       None,    'mydir', 'mynewdir' )
-        self.assertPath( 'mydir', 'mynewdir',    'mydir', 'mynewdir' )
-        self.assertPath( 'mydir',    'mydir', 'notmydir', 'mydir'    )
-        self.assertPath(      "",         "",    'mydir', 'mynewdir' )
-        self.assertPath(      "",         "",    '.*', 'mynewdir' )
-        self.assertPath( 'mydir:dir1', 'mynewdir:dir1', 'mydir', 'mynewdir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mynewdir:dir1:dir2', 'mydir', 'mynewdir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:mynewdir:dir2', 'mydir', 'mynewdir' )
-        self.assertPath( 'dir1:dir2:mydir', 'dir1:dir2:mynewdir', 'mydir', 'mynewdir' )
-        self.assertPath( 'mydir:dir1:dir2', 'mynewdir:newdir1:newdir2', 'dir', 'newdir' )
-        self.assertPath( 'mydir:dir1:keepthisone:dir2', 'mynewdir:newdir1:keepthisone:newdir2', 'dir', 'newdir' )
-
-
-
-    def testRemovePathEnv(self):
-        """remove_path_env"""
-        self.func = remove_path_env
-
-        testdir = '^mydir$'
-        self.assertPath( None, None, testdir )
-        self.assertPath( 'mydir', '', testdir ) 
-        self.assertPath( 'dir1:mydir', 'dir1',       testdir )
-        self.assertPath( 'mydir:dir1', 'dir1',       testdir )
-        self.assertPath( 'mydir:dir1', 'mydir:dir1', 'otherdir' )
-        self.assertPath( 'dir1:mydir', 'dir1',       testdir    )
-        self.assertPath( 'dir1:mydir', 'dir1:mydir', 'otherdir' )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:dir2',  testdir    )
-        self.assertPath( 'dir1:mydir:dir2', 'dir1:mydir:dir2', 'otherdir' )
-
-
-    def testRemovePathEnvRE(self):
-        """remove_path_env regular expression"""
-        self.func = remove_path_env
-        self.assertPath( 'dir1:mydir:mydir2:keep_mydir:dir2', 'dir1:dir2',  'mydir'    )
-        self.assertPath( 'dir1:mydir:mydir2:keep_mydir:dir2', 'dir1:keep_mydir:dir2',  '^mydir'    )
-        self.assertPath( 'dir1:mydir:mydir2:keep_mydir:dir2', '',  '.*'    )
-        self.assertPath( 'dir1:mydir:mydir2:keep_mydir:dir2', 'dir1:mydir:keep_mydir',  '2$'    )
-
-
-class TestFileFind(myunittest.TestCase):
-    def testIgnoreNoneExistingDir(self):
-        """Non-existing dir in path"""
-        found = find_file( '.', [ "not_existing_dir" ], depth=3 )
-        self.assertEqual( found, None )
-            
-                        
-if __name__ == "__main__":
-    myunittest.main()
-
-
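
The deleted cases above double as a behavioural specification for the `envutil` path helpers: the plain append/prepend variants add the entry unconditionally, the `_if` variants are no-ops when the entry is already present, the `_force` variants remove any existing occurrence before adding it, and `remove_path_env`/`replace_path_env` treat their argument as a regular expression applied to each path entry. A minimal sketch of those semantics, inferred from the test expectations rather than copied from the removed module (the prepend family mirrors the append family at the front of the list):

import os
import re

def _entries(env):
    # An unset or empty variable behaves as an empty path list.
    value = os.environ.get(env)
    return value.split(os.pathsep) if value else []

def _store(env, entries):
    os.environ[env] = os.pathsep.join(entries)

def append_path_env(env, entry):
    _store(env, _entries(env) + [entry])

def append_path_env_if(env, entry):
    entries = _entries(env)
    if entry not in entries:
        entries.append(entry)
    _store(env, entries)

def append_path_env_force(env, entry):
    # Drop existing occurrences, then append: the entry ends up last.
    _store(env, [e for e in _entries(env) if e != entry] + [entry])

def remove_path_env(env, pattern):
    if os.environ.get(env) is None:
        return                      # leave an unset variable unset
    regexp = re.compile(pattern)
    _store(env, [e for e in _entries(env) if not regexp.search(e)])

def replace_path_env(env, pattern, replacement):
    if os.environ.get(env) is None:
        return
    _store(env, [re.sub(pattern, replacement, e) for e in _entries(env)])
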
diff --git a/Tools/PyJobTransformsCore/test/eventCount_test.py b/Tools/PyJobTransformsCore/test/eventCount_test.py
deleted file mode 100755
index 730b6a735d7a0d8e4cf02b6d2d79556e5ee9220e..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/eventCount_test.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import os,sys
-
-testData = os.getenv('TESTDATA')
-if not testData:
-    print "ERROR: TESTDATA not set"
-    sys.exit(10)
-
-inputList = [os.path.join(testData,'DC3.007218.singlepart_mu20_ATLAS-DC3-07.aod.pool.root')]
-#inputList = [os.path.join(testData,'DC3.007218.singlepart_mu20_ATLAS-DC3-07.rdo.pool.root')]
-
-from PyJobTransformsCore.basic_trfarg import InputDataFileArg,PoolDataFile
-arg = InputDataFileArg("count events",PoolDataFile())
-arg.setLoggerLevel('ALL')
-arg.setValue( inputList )
-arg.getGUID()
-arg.eventCount()
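
This driver is Python 2 (bare `print` statements), which is presumably part of why it is being dropped. For reference, a hypothetical Python 3 rendering of the same event-count check, assuming `InputDataFileArg` and `PoolDataFile` keep the interface used above:

#!/usr/bin/env python3
# Hypothetical Python 3 rendering of the deleted eventCount_test.py;
# assumes InputDataFileArg/PoolDataFile keep the interface used above.
import os
import sys

from PyJobTransformsCore.basic_trfarg import InputDataFileArg, PoolDataFile

test_data = os.getenv('TESTDATA')
if not test_data:
    print("ERROR: TESTDATA not set")
    sys.exit(10)

input_list = [os.path.join(test_data,
                           'DC3.007218.singlepart_mu20_ATLAS-DC3-07.aod.pool.root')]

arg = InputDataFileArg("count events", PoolDataFile())
arg.setLoggerLevel('ALL')
arg.setValue(input_list)
arg.getGUID()
arg.eventCount()
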
diff --git a/Tools/PyJobTransformsCore/test/genMetadataXML.py b/Tools/PyJobTransformsCore/test/genMetadataXML.py
deleted file mode 100755
index 9a2fa0c0b3c671756e9f39cbb7a297209588821f..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/genMetadataXML.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-#
-#  genMetadataXML.py
-#  
-#
-#  Created by Alvin on 10/05/2010.
-#
-from __future__ import with_statement
-import sys
-import cPickle as pickle
-
-usage = "genMetadataXML.py JOB_REPORT_PICKLE [--new|--old]"
-if len( sys.argv ) < 2:
-    print usage
-    sys.exit( 1 )
-with open( sys.argv[ 1 ] ) as f:
-    r = pickle.load( f )
-try:
-    optParam = sys.argv[ 2 ]
-except IndexError:
-    optParam = '--old'
-if optParam == '--new':
-    r.writeMetaDataXML_new()
-elif optParam == '--old':
-    r.writeMetaDataXML_old()
-else:
-    print usage
-    sys.exit(1)
-sys.exit(0)
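
Porting this script to Python 3 would need two changes beyond `print()`: `cPickle` no longer exists (plain `pickle` replaces it), and pickle files must be opened in binary mode. A hypothetical port, assuming the unpickled job report keeps the `writeMetaDataXML_new`/`writeMetaDataXML_old` methods:

#!/usr/bin/env python3
# Hypothetical Python 3 port of the deleted genMetadataXML.py: cPickle is
# gone, and the pickle file must be opened in binary mode ('rb').
import pickle
import sys

USAGE = "genMetadataXML.py JOB_REPORT_PICKLE [--new|--old]"

if len(sys.argv) < 2:
    print(USAGE)
    sys.exit(1)

with open(sys.argv[1], 'rb') as f:   # binary mode is required by pickle
    report = pickle.load(f)

opt = sys.argv[2] if len(sys.argv) > 2 else '--old'
if opt == '--new':
    report.writeMetaDataXML_new()
elif opt == '--old':
    report.writeMetaDataXML_old()
else:
    print(USAGE)
    sys.exit(1)
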
diff --git a/Tools/PyJobTransformsCore/test/myunittest.py b/Tools/PyJobTransformsCore/test/myunittest.py
deleted file mode 100755
index a922ee329de8c965538f856f3d3ebc60974a397b..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/myunittest.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-import unittest,tempfile
-import sys,os
-
-__all__ = [ ]
-
-debug = False
-fake = False
-
-def main():
-    # remove options that unittest.main does not like
-    myopts = [ '-d', '-n' ]
-    testargv = [ arg for arg in sys.argv if not arg in myopts ]
-    unittest.main( argv=testargv, testRunner=unittest.TextTestRunner(verbosity=2) )
-
-
-class TestCase(unittest.TestCase):
-    def shortDescription(self):
-        return '%s: %s' % (self.__class__.__name__, unittest.TestCase.shortDescription(self))
-
-
-    def setUp(self):
-        """Prepare for temporary files"""
-        # handle some command line options
-        if '-d' in sys.argv:
-            self.debug = True
-        else:
-            self.debug = False
-        if '-n' in sys.argv:
-            self.fake = True
-        else:
-            self.fake = False
-
-        # prepare for temporary files
-        self.startupdir = os.getcwd()
-        self.tmpdir = tempfile.mkdtemp()
-
-
-    def tearDown(self):
-        """Clean up temporary files and go back to startup directory"""
-        if not hasattr(self,'tmpdir'): return
-        self.recursiveTmpRemove()
-        if os.path.isdir(self.tmpdir):
-            if self.debug: print "Removing directory %s" % self.tmpdir
-            os.rmdir(self.tmpdir)
-        os.chdir(self.startupdir)
-
-
-    def makeTmpDir(self,d=None):
-        """Create a temporary directory, with an optional subdirectory d.
-        Returns the full path of the directory."""
-        # no absolute paths
-        if d and os.path.isabs(d): raise IOError('Absolute path not allowed for temporary files')
-        if not d or d == os.curdir: return self.tmpdir
-        # create temporary subdir
-        fulldir = os.path.join(self.tmpdir,d)
-        if not os.path.isdir(fulldir):
-            if self.debug: print "Creating temporary directory %s" % fulldir
-            if not self.fake: os.makedirs(fulldir)
-
-        return fulldir
-
-
-    def recursiveTmpRemove(self,fileOrDir=None):
-        if not self.tmpdir or not os.path.isdir(self.tmpdir): return
-        if fileOrDir is None: fileOrDir = self.tmpdir
-        if fileOrDir.startswith(self.tmpdir):
-            full = fileOrDir
-        else:
-            if os.path.isabs(fileOrDir): raise IOError('Absolute path not allowed for temporary files')
-            full = os.path.join(self.tmpdir,fileOrDir)    
-
-        if os.path.isdir(full):
-            for f in os.listdir(full): self.recursiveTmpRemove(os.path.join(full,f))
-            if self.debug: print "Removing directory %s" % full
-            if not self.fake: os.rmdir(full)
-        else:
-            if self.debug: print "Removing file %s" % full
-            if not self.fake: os.remove(full)
-
-
-
-    def removeTmpDir(self,d):
-        """Default: remove all tmp dirs"""
-        if d and os.path.isabs(d): raise IOError('Absolute path not allowed for temporary files')
-        if d != os.curdir:
-            fulldir = os.path.join(self.tmpdir, d)
-            if os.path.isdir(fulldir):
-                if self.debug: print "Removing temporary directory %s" % fulldir
-                if not self.fake: os.rmdir(fulldir)
-
-
-    def cdTmpDir(self,d=None):
-        """Go to the subdirectory in the temporary area"""
-        if d and d != os.curdir:
-            fulldir = os.path.join( self.tmpdir, d )
-        else:
-            fulldir = self.tmpdir
-            
-        os.chdir(fulldir)
-
-
-    def fullTmpFile(self,filename):
-        if os.path.isabs(filename): raise IOError('Absolute path not allowed for temporary files')
-        return os.path.join( self.tmpdir, filename )
-        
-
-    def createTmpFile(self,filename):
-        """Create a dummy file in the temporary directory. Returns full path of the file."""
-        if os.path.isabs(filename): raise IOError('Absolute path not allowed for temporary files')
-        dirname = os.path.dirname( filename )
-        self.makeTmpDir(dirname)
-        fullpath = os.path.join( self.tmpdir, filename )
-        if not os.path.exists(fullpath):
-            if self.debug: print "Creating temporary file %s" % fullpath
-            if not self.fake:
-                nf = open(fullpath,'w')
-                nf.write( filename )
-                nf.close()
-        return fullpath
-
-    
-
-    def removeTmpFiles(self,files):
-        if not files: return
-        # for compatibility with single filename
-        if type(files) == type(''): files = [ files ]
-        for f in files:
-            fullfile = os.path.join( self.tmpdir, f )
-            if os.path.isfile(fullfile):
-                if self.debug: print "Removing temporary file %s" % fullfile
-                if not self.fake: os.remove(fullfile)
-
-
-    def recursiveListTmpFiles(self,dir=None,indent=''):
-        if dir is None: dir = self.tmpdir
-        if indent == '':
-            line = dir + os.sep
-        else:
-            line = indent + os.path.basename(dir) + os.sep
-        print line
-        newindent = ' '*(len(line)-1)
-        for f in os.listdir(dir):
-            full = os.path.join(dir,f)
-            if os.path.isdir(full):
-                self.recursiveListTmpFiles(full,newindent)
-            else:
-                print newindent + f
-        
-
-    def listTmpFiles(self,title):
-        # move starting newline to beginning of line
-        if title.startswith(os.linesep):
-            mess = "\n%s temporary files %s:" % (self.__class__.__name__, title[1:])
-        else:
-            mess = "%s temporary files %s:" % (self.__class__.__name__, title)
-        print mess
-        if not self.tmpdir or not os.path.isdir(self.tmpdir): return
-        self.recursiveListTmpFiles()
-
-
-       
-        
-
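
Most of the hand-rolled bookkeeping above (`recursiveTmpRemove` and friends, plus the Python 2 `print` statements) predates `shutil.rmtree` being the obvious choice. A sketch of the same per-test sandbox contract in modern form (not a drop-in replacement; the per-file helpers are omitted):

import os
import shutil
import sys
import tempfile
import unittest

class TestCase(unittest.TestCase):
    """Sketch: per-test temporary sandbox with debug/dry-run flags."""

    def setUp(self):
        self.debug = '-d' in sys.argv   # mirrors the deleted flag handling
        self.fake = '-n' in sys.argv
        self.startupdir = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        # Always return to the startup directory, then drop the sandbox.
        os.chdir(self.startupdir)
        if not self.fake:
            shutil.rmtree(self.tmpdir, ignore_errors=True)
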
diff --git a/Tools/PyJobTransformsCore/test/testErrorPattern.py b/Tools/PyJobTransformsCore/test/testErrorPattern.py
deleted file mode 100755
index eb2158e033bd5b26da06cd38bda15fd3ea1841f5..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/testErrorPattern.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-#
-#  testErrorPattern.py
-#  
-#
-#  Created by Alvin on 16/04/2009.
-#  
-#
-import sys, os
-from PyJobTransformsCore.trferr import AthenaLogChecker
-from PyJobTransformsCore.VTimer import vTimer
-
-usage = "testErrorPattern RELEASE ERROR_STRING"
-#print len(sys.argv)
-if len(sys.argv) != 3:
-    print usage
-    sys.exit(1)
-release = sys.argv[1]
-errorStr = sys.argv[2]
-l = AthenaLogChecker( release )
-if os.path.exists( os.path.expanduser( os.path.expandvars( errorStr ) ) ):
-    errorFile = open( errorStr,'r')
-else:
-    # treat as string
-    errorFile = [ errorStr ]
-
-noError = True
-vTimer.start()
-for line in errorFile:
-    result = l.processLine( line )
-    if result and result.severity in [ 'ERROR', 'FATAL' ]:
-        print "-->%s [%s: %s %s]\n" % ( line, result.severity, result.acronym, result.code )
-        noError = False
-vTimer.stop()
-vTimer.reset()
-if noError:
-    print "No errors found."
-
-sys.exit(0)
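
Note a latent bug in the driver above: it tests whether the expanded path exists but then opens the raw `errorStr`, so an argument containing `~` or an environment variable would fail to open. A hypothetical Python 3 port that also fixes this (timing via `vTimer` omitted), assuming `AthenaLogChecker.processLine` keeps the result interface used above:

#!/usr/bin/env python3
# Hypothetical Python 3 port of the deleted testErrorPattern.py driver;
# assumes AthenaLogChecker keeps the interface used above.
import os
import sys

from PyJobTransformsCore.trferr import AthenaLogChecker

USAGE = "testErrorPattern RELEASE ERROR_STRING"
if len(sys.argv) != 3:
    print(USAGE)
    sys.exit(1)

release, error_str = sys.argv[1], sys.argv[2]
checker = AthenaLogChecker(release)

# Open the *expanded* path (the original expanded it only for the existence
# check); otherwise treat the argument as a single log line.
path = os.path.expanduser(os.path.expandvars(error_str))
lines = open(path) if os.path.exists(path) else [error_str]

no_error = True
for line in lines:
    result = checker.processLine(line)
    if result and result.severity in ('ERROR', 'FATAL'):
        print("-->%s [%s: %s %s]\n" % (line, result.severity,
                                       result.acronym, result.code))
        no_error = False
if no_error:
    print("No errors found.")
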
diff --git a/Tools/PyJobTransformsCore/test/trfutil_test.py b/Tools/PyJobTransformsCore/test/trfutil_test.py
deleted file mode 100755
index 0b16ef693d6114221aadbd6ad3a668b14747742d..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/trfutil_test.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import myunittest
-import os
-
-
-from PyJobTransformsCore.trfutil import *
-
-
-class TestStringNumberList(myunittest.TestCase):
-    def setUp(self):
-        self.obj = StringNumberList()
-
-    
-    def checkConversion(self,valIn,valOut):
-        self.assertEqual( self.obj.convertStringList(valIn), valOut )
-        
-    
-    def assertNone(self,valsIn):
-        for valIn in valsIn:
-            self.assertEqual( self.obj.convertStringList(valIn), None ) 
-
-
-class StringNumberList_GoodInput(TestStringNumberList):
-    def testSingleFilename(self):
-        """single filename (i.e. without [])"""
-        valIn = "prefix_only.suffix"
-        valOut = [ valIn ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testSingleNumber(self):
-        """Single number in []"""
-        valIn = "prefix.[001].suffix"
-        valOut = [ "prefix.001.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-
-    def testRangeSizeOne(self):
-        """Range of size 1"""
-        valIn = "prefix.[02-2].suffix"
-        valOut = [ "prefix.02.suffix" ]
-        self.checkConversion( valIn, valOut )
-        
-        
-    def testRange(self):
-        """Range of files"""
-        valIn = "prefix.[3-0005].suffix"
-        valOut = [ "prefix.0003.suffix", "prefix.0004.suffix", "prefix.0005.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testList(self):
-        """List of files"""
-        valIn = "prefix.[6,7,8].suffix"
-        valOut = [ "prefix.6.suffix" , "prefix.7.suffix", "prefix.8.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testListAndRange(self):
-        """List and Range of files"""
-        valIn = "prefix.[9,10,14-16].suffix"
-        valOut = [ "prefix.09.suffix", "prefix.10.suffix" ,"prefix.14.suffix", "prefix.15.suffix", "prefix.16.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testRangeAndList(self):
-        """Range and list of files"""
-        valIn = "prefix.[17-19,24,28].suffix"
-        valOut = [ "prefix.17.suffix", "prefix.18.suffix" ,"prefix.19.suffix", "prefix.24.suffix", "prefix.28.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-    def testTwoRanges(self):
-        """Two ranges"""
-        valIn = "prefix.[30-32,35-36].suffix"
-        valOut = [ "prefix.30.suffix", "prefix.31.suffix" ,"prefix.32.suffix",
-                   "prefix.35.suffix", "prefix.36.suffix" ]
-        self.checkConversion( valIn, valOut )
-
-
-
-class StringNumberList_BadInput(TestStringNumberList):
-    def testMissingOpenBracket(self):
-        """Missing ["""
-        badargs = [ "prefix.4-7].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testMissingCloseBracket(self):
-        """Missing ]"""
-        badargs = [ "prefix.[4-7.suffix" ]
-        self.assertNone( badargs )
-
-
-    def testEmptyBrackets(self):
-        """Empty brackets: []"""
-        badargs = [ "prefix.[].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testSpuriousCharacter(self):
-        """Spurious character"""
-        badargs = [ "prefix.[4.7].suffix","prefix.[4x7].suffix",  "prefix.[4X7].suffix", "prefix.[4$7].suffix",
-                    "prefix.[4%7].suffix", "prefix.[4#7].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testMissingListEntryBegin(self):
-        """Missing number begin [,5]"""
-        badargs = [ "prefix.[,5].suffix", "prefix.[,5,7].suffix", "prefix.[,5,7-8].suffix" ]
-        self.assertNone( badargs ) 
-
-
-    def testMissingListEntryEnd(self):
-        """Missing number end [8,]"""
-        badargs = [ "prefix.[8,].suffix", "prefix.[6,8,].suffix", "prefix.[4-6,8,].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testMissingListEntryMiddle(self):
-        """Missing number middle [7,,8]"""
-        badargs = [ "prefix.[7,,8].suffix", "prefix.[7,8,,10].suffix", "prefix.[7,,9,10].suffix",
-                    "prefix.[2-4,,8].suffix", "prefix.[7,,8-10].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testMissingRangeBegin(self):
-        """Missing start number of range"""
-        badargs = [ "prefix.[-8].suffix", "prefix.[-8].suffix", "prefix.[-8].suffix", "prefix.[-8,10].suffix",
-                    "prefix.[4,-8].suffix", "prefix.[4,-8,10].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testMissingRangeEnd(self):
-        """Missing end number of range"""
-        badargs = [ "prefix.[8-].suffix", "prefix.[8-,10].suffix", "prefix.[4,8-].suffix",
-                    "prefix.[4,8-,10].suffix", "prefix.[8-,10-12].suffix" ]
-        self.assertNone( badargs )
-
-
-    def testRangeDoubleDash(self):
-        """Double dash: --"""
-        badargs = [ "prefix.[6--8].suffix", "prefix.[6--8,10].suffix", "prefix.[4,6--8].suffix", "prefix.[4,6--8,10].suffix" ]
-        self.assertNone( badargs )
-        
-
-
-    def testRangeWrongOrder(self):
-        """Ranga has lower end then start"""
-        badargs = [ "prefix.[8-7].suffix", "prefix.[8-06].suffix" ]
-        self.assertNone( badargs )
-
-
-
-    def testNegativeNumbers(self):
-        """Negative numbers"""
-        badargs = [ "prefix.[-4].suffix", "prefix.[-4,6].suffix", "prefix.[6,-4].suffix", "prefix.[-4-7].suffix",
-                    "prefix.[4--7].suffix", "prefix.[-7--4].suffix","prefix.[2,4--7].suffix","prefix.[4--7,2].suffix" ]   
-        self.assertNone( badargs )
-
-
-class TestLoadTransforms(myunittest.TestCase):
-    def setUp(self):
-        self.trfName = "argstest_trf.py"
-
-
-    def testLoadDefault(self):
-        """Load default JobTransform"""
-        self.failUnless( len( load_transforms(self.trfName) ) >= 1, "default does not load any" )
-
-
-    def testLoadAtLeastOne(self):
-        """Load at least one JobTransform"""
-        names = [ r".*test$" ]
-        for name in names:
-            self.failUnless(  len( load_transforms(self.trfName, name) ) >= 1, "name \"%s\" does not load any" % (name) )
-
-
-    def testLoadNone(self):
-        """Load no JobTransforms"""
-        names = [ "does not exist", r"^test.*" ]
-        for name in names:
-            self.failUnless(  len( load_transforms(self.trfName, name) ) == 0, "name \"%s\" should load nothing" % (name) )
-       
-
-    def testLoadFromSubDir(self):
-        """Load transform in a subdirectory"""
-        oldDir = os.getcwd()
-        subdir = os.path.basename(oldDir)
-        os.chdir(os.pardir)
-        try:
-            self.failUnless( len( load_transforms(os.path.join(subdir,self.trfName)) ) >= 1, "default in subdir does not load any" )
-        finally:
-            os.chdir(oldDir)
-
-
-
-    def testLoadFromSideDir(self):
-        """Load transform in a side-directory"""
-        oldDir = os.getcwd()
-        subdir = os.path.basename(oldDir)
-        os.chdir(os.path.join(os.pardir,'python'))
-        try:
-            self.failUnless( len( load_transforms(os.path.join(os.pardir,subdir,self.trfName)) ) >= 1, "default in side-dir does not load any" )
-        finally:
-            os.chdir(oldDir)
-
-
-
-    def testLoadFromPythonPath(self):
-        """Load transform from pythonpath"""
-        oldDir = os.getcwd()
-        subdir = os.path.basename(oldDir)
-        basedir = os.path.dirname(oldDir)
-        os.chdir(os.path.join(os.pardir,'python'))
-        try:
-            inserted = False
-            if not basedir in sys.path:
-                sys.path.insert(1,basedir)
-                inserted = True
-            self.failUnless( len( load_transforms(os.path.join(subdir,self.trfName)) ) >= 1, "default in pythonpath does not load any" )
-            if inserted: sys.path.remove(basedir)
-        finally:
-            os.chdir(oldDir)
-        
-
-        
-if __name__ == "__main__":
-    myunittest.main()
-
-
-
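
Taken together, the cases above pin down `StringNumberList.convertStringList`: a single bracketed block of comma-separated numbers and inclusive ranges is expanded, every number is zero-padded to the width of the widest literal in the block, and any malformed input (missing brackets, empty or negative entries, spurious characters, reversed ranges) yields None. A self-contained sketch of that contract, inferred from the expectations rather than taken from the removed trfutil:

import re

_ITEM = re.compile(r'^(\d+)(?:-(\d+))?$')

def convert_string_list(value):
    """Expand 'prefix.[1,3-5].suffix' into a list of filenames.

    Returns None for malformed input, mirroring the deleted tests.
    """
    if '[' not in value and ']' not in value:
        return [value]               # a single plain filename
    try:
        prefix, rest = value.split('[', 1)
        body, suffix = rest.split(']', 1)
    except ValueError:               # one of the brackets is missing
        return None
    if not body:
        return None                  # empty brackets: []
    numbers, width = [], 0
    for item in body.split(','):
        m = _ITEM.match(item)
        if not m:
            return None              # empty entry, negative number, etc.
        start, end = m.group(1), m.group(2) or m.group(1)
        if int(end) < int(start):
            return None              # range in the wrong order
        width = max(width, len(start), len(end))
        numbers.extend(range(int(start), int(end) + 1))
    return ['%s%0*d%s' % (prefix, width, n, suffix) for n in numbers]
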
diff --git a/Tools/PyJobTransformsCore/test/xmlutil_test.py b/Tools/PyJobTransformsCore/test/xmlutil_test.py
deleted file mode 100755
index 538e77a8a0109761dd431207716212374d7f83eb..0000000000000000000000000000000000000000
--- a/Tools/PyJobTransformsCore/test/xmlutil_test.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-import myunittest
-import os
-
-from PyJobTransformsCore.xmlutil import *
-
-class TestXMLNode(myunittest.TestCase):
-
-    def setUp(self):
-        self.node1 = XMLNode("TestName1")
-        self.node2 = XMLNode("TestName2")
-        self.toPrint = [ self.node1, self.node2 ]
-
-
-    def assertNames(self):
-        self.assertEqual(self.node1.name(), "TestName1")
-        self.assertEqual(self.node2.name(), "TestName2")
-        
-
-    def assertContents( self, cont1, cont2 ):
-        self.assertEqual( self.node1.contents(), cont1 )
-        self.assertEqual( self.node2.contents(), cont2 )
-
-
-    def assertAttributes( self, attr1, attr2 ):
-        self.assertEqual( self.node1.attributes(), attr1 )
-        self.assertEqual( self.node2.attributes(), attr2 )
-        
-
-
-    def printXML(self):
-        print ""
-        for node in self.toPrint:
-            print node.getXML()
-
-        
-    def testName(self):
-        """only a name"""
-        self.printXML()
-        self.assertNames()
-        self.assertContents(None,None)
-        self.assertAttributes({},{})
-
-
-    def testNameAndContents(self):
-        """name and contents"""
-        contents1 = 'contents1'
-        contents2 = 'contents2'
-        self.node1.setContents(contents1)
-        self.node2.setContents(contents2)
-        self.assertNames()
-        self.assertContents(contents1,contents2)
-        self.assertAttributes({},{})
-
-
-    def testNameAndAttribs(self):
-        """name and attributes"""
-        attribs1 = { 'name1':'value1' }
-        attribs2 = { 'name2':'value2' }
-        self.node1.setAttributes(**attribs1)
-        self.node2.setAttributes(**attribs2)
-        self.printXML()
-        self.assertNames()
-        self.assertContents(None,None)
-        self.assertAttributes(attribs1,attribs2)
-
-
-
-        
-if __name__ == "__main__":
-    myunittest.main()
-
-
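
For reference, the interface these tests exercise is small; a minimal stand-in that would satisfy them looks like the sketch below (the removed `xmlutil.XMLNode` did considerably more, e.g. child-node handling):

class XMLNode:
    """Minimal sketch of the interface exercised by the deleted tests."""

    def __init__(self, name):
        self._name = name
        self._contents = None
        self._attributes = {}

    def name(self):
        return self._name

    def contents(self):
        return self._contents

    def attributes(self):
        return self._attributes

    def setContents(self, contents):
        self._contents = contents
        return self

    def setAttributes(self, **kwargs):
        self._attributes.update(kwargs)
        return self

    def getXML(self):
        # Render as an empty element when there are no contents.
        attrs = ''.join(' %s="%s"' % kv for kv in self._attributes.items())
        if self._contents is None:
            return '<%s%s/>' % (self._name, attrs)
        return '<%s%s>%s</%s>' % (self._name, attrs, self._contents, self._name)
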