diff --git a/Tools/PyJobTransforms/CMakeLists.txt b/Tools/PyJobTransforms/CMakeLists.txt
index b9e00659b85fed8ff0c362995c3a86e53b4edb04..1611f37b482e6ec2c2ef44a15bf43ebd3cc93b76 100644
--- a/Tools/PyJobTransforms/CMakeLists.txt
+++ b/Tools/PyJobTransforms/CMakeLists.txt
@@ -10,10 +10,10 @@ find_package( cx_Oracle )
 find_package( future )
 
 # Install files from the package:
-atlas_install_python_modules( python/*.py )
+atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
 atlas_install_joboptions( share/*.py )
-atlas_install_runtime( scripts/*.py )
-atlas_install_scripts( scripts/HelloWorld_tf.py )
+atlas_install_runtime( scripts/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
+atlas_install_scripts( scripts/HelloWorld_tf.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
 atlas_install_generic( share/*.db
                        DESTINATION share
                        EXECUTABLE )
diff --git a/Tools/PyJobTransforms/python/transform.py b/Tools/PyJobTransforms/python/transform.py
index 3974b50dee45bfd4e1032b3247d2c3d26ecc39fd..ba69f1affb5f5b4c87e419b3cdf089e11a9cd28b 100644
--- a/Tools/PyJobTransforms/python/transform.py
+++ b/Tools/PyJobTransforms/python/transform.py
@@ -246,7 +246,7 @@ class transform(object):
 
     ## @brief Parse command line arguments for a transform
     def parseCmdLineArgs(self, args):
-        msg.info('Transform command line was: %s' % ' '.join(shQuoteStrings(sys.argv)))
+        msg.info('Transform command line was: %s', ' '.join(shQuoteStrings(sys.argv)))
 
         try:
             # Use the argparse infrastructure to get the actual command line arguments
@@ -363,7 +363,7 @@ class transform(object):
             setRootLoggerLevel(stdLogLevels['DEBUG'])
         elif 'loglevel' in self._argdict:
             if self._argdict['loglevel'] in stdLogLevels:
-                msg.info("Loglevel option found - setting root logger level to %s" % 
+                msg.info("Loglevel option found - setting root logger level to %s", 
                          logging.getLevelName(stdLogLevels[self._argdict['loglevel']]))
                 setRootLoggerLevel(stdLogLevels[self._argdict['loglevel']])
             else:
@@ -741,7 +741,7 @@ class transform(object):
             ('fileValidation' in self._argdict and self._argdict['fileValidation'].value is False) or
             ('inputFileValidation' in self._argdict and self._argdict['inputFileValidation'].value is False)
             ):
-            msg.info('Standard input file validation turned off for transform %s.' % self.name)
+            msg.info('Standard input file validation turned off for transform %s.', self.name)
         else:
             msg.info('Validating input files')
             if 'parallelFileValidation' in self._argdict:
@@ -762,7 +762,7 @@ class transform(object):
             ('fileValidation' in self._argdict and self._argdict['fileValidation'].value is False) or
             ('outputFileValidation' in self._argdict and self._argdict['outputFileValidation'].value is False)
             ):
-            msg.info('Standard output file validation turned off for transform %s.' % self.name)
+            msg.info('Standard output file validation turned off for transform %s.', self.name)
         else:
             msg.info('Validating output files')
             if 'parallelFileValidation' in self._argdict:
diff --git a/Tools/PyJobTransforms/python/trfAMI.py b/Tools/PyJobTransforms/python/trfAMI.py
index c0d31bae9cf4d0d2b157bb788824429182e03c7c..2c3fd4044963d5f9d076bc8c6f361ece88fda70a 100644
--- a/Tools/PyJobTransforms/python/trfAMI.py
+++ b/Tools/PyJobTransforms/python/trfAMI.py
@@ -4,7 +4,7 @@ from builtins import zip
 
 from builtins import object
 from builtins import range
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfAMI
 #  @brief Utilities for configuration of transforms via AMI tags
@@ -260,8 +260,8 @@ def getPANDAClient():
         
     try:
         cur = cx_Oracle.connect('atlas_grisli_r/panda_c10@adcr_panda').cursor()
-    except: 
-        msg.debug('An exception occurred while connecting to PANDA database: %s' % traceback.format_exc())
+    except Exception: 
+        msg.debug('An exception occurred while connecting to PANDA database: %s', traceback.format_exc())
         raise TransformAMIException(AMIerrorCode, 'Failed to get PANDA client connection (N.B. this does not work from outside CERN).')
         
     return cur
@@ -277,14 +277,14 @@ def ReadablePANDA(s):
 #  @returns list of PyJoCbTransforms.trfAMI.TRFConfig instances
 def getTrfConfigFromPANDA(tag):
     
-    msg.debug('Using PANDA to get info about tag %s' % tag)
+    msg.debug('Using PANDA to get info about tag %s', tag)
             
     try:
         pandaclient=getPANDAClient()
         pandaclient.execute("select trf,trfv,lparams,vparams,formats,cache from t_trf_config where tag='%s' and cid=%d" %(tag[:1],int(tag[1:]) ) )
         result=pandaclient.fetchone()
-    except:
-        msg.info('An exception occurred: %s' % traceback.format_exc())
+    except Exception:
+        msg.info('An exception occurred: %s', traceback.format_exc())
         raise TransformAMIException(AMIerrorCode, 'Getting tag info from PANDA failed.')
 
     if result is None:
@@ -293,17 +293,17 @@ def getTrfConfigFromPANDA(tag):
     msg.debug('Raw data returned from panda DB is:' + os.linesep + str(result))
     
     trfn=result[0].split(',')
-    msg.debug('List of transforms: %s' % trfn)
+    msg.debug('List of transforms: %s', trfn)
     trfv=result[1].split(',')
-    msg.debug('List of releases: %s' % trfv)
+    msg.debug('List of releases: %s', trfv)
     lparams=result[2].split(';')
-    msg.debug('List of arguments: %s' % lparams)
+    msg.debug('List of arguments: %s', lparams)
     vparams=result[3].split(';')
-    msg.debug('List of argument values: %s' % vparams)
+    msg.debug('List of argument values: %s', vparams)
     formats=result[4].split('.')
-    msg.debug('List of formats: %s' % formats)
+    msg.debug('List of formats: %s', formats)
     cache=result[5].split(',')
-    msg.debug('List of caches: %s' % formats)
+    msg.debug('List of caches: %s', cache)
 
 
     if not ( len(trfn) == len(trfv) == len(lparams) == len(vparams) ):
@@ -349,18 +349,18 @@ def getTrfConfigFromPANDA(tag):
         msg.debug("Checking for pseudo-argument internal to ProdSys...")
         if 'extraParameter' in physics:
             val=physics.pop('extraParameter')
-            msg.debug("Removed extraParamater=%s from arguments." % val)
+            msg.debug("Removed extraParamater=%s from arguments.", val)
 
         msg.debug("Checking for input/output file arguments...")
         for arg in list(physics):
             if arg.lstrip('-').startswith('input') and arg.endswith('File'):
                 value=physics.pop(arg)
-                msg.debug("Found input file argument %s=%s." % (arg,value) ) 
+                msg.debug("Found input file argument %s=%s.", arg, value ) 
                 fmt=arg.lstrip('-').replace('input','').replace('File','')
                 trf.inFiles[arg]=getInputFileName(arg)
             elif arg.lstrip('-').startswith('output') and arg.endswith('File'):
                 value=physics.pop(arg)
-                msg.debug("Found output file argument %s=%s." % (arg,value) )
+                msg.debug("Found output file argument %s=%s.", arg, value )
                 fmt=arg.lstrip('-').replace('output','').replace('File','')
                 trf.outFiles[arg]=getOutputFileName(fmt)
 
@@ -368,7 +368,7 @@ def getTrfConfigFromPANDA(tag):
         for arg,value in listitems(physics):
             if value=="NONE" or value=="none" or value==["NONE"]:
                 val=physics.pop(arg)
-                msg.debug("Removed %s=%s from arguments." % (arg, val) )
+                msg.debug("Removed %s=%s from arguments.", arg, val )
 
         trf.physics=physics
 
@@ -413,7 +413,7 @@ def remove_enclosing_quotes(s):
     try:
         if s[0] == s[-1] and s[0] in ('"', "'"):
             s = s[1:-1]
-    except:
+    except Exception:
         pass
     return s
 
@@ -421,7 +421,7 @@ def remove_enclosing_quotes(s):
 #  @param tag Tag for which information is requested
 #  @returns list of PyJoCbTransforms.trfAMI.TRFConfig instances
 def getTrfConfigFromAMI(tag, suppressNonJobOptions = True):
-    msg.debug('Using AMI to get info about tag %s' % tag)
+    msg.debug('Using AMI to get info about tag %s', tag)
 
     try:
 #        import pyAMI.atlas.api
@@ -460,14 +460,14 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True):
                     execStrList = [execStr for execStr in convertToStr(v).replace('" "', '"" ""').split('" "')]
                     physics[convertToStr(k)] = [remove_enclosing_quotes(execStr).replace('\\"', '"') for execStr in execStrList]
                 elif '" "' in v:
-                    msg.info('found a quoted space (" ") in parameter value for %s, converting to list' % k)
+                    msg.info('found a quoted space (" ") in parameter value for %s, converting to list', k)
                     subStrList = [subStr for subStr in convertToStr(v).replace('" "', '"" ""').split('" "')]
                     physics[convertToStr(k)] = [remove_enclosing_quotes(subStr).replace('\\"', '"') for subStr in subStrList]
                 else:
                     physics[convertToStr(k)] = convertToStr(remove_enclosing_quotes(v))
 
             msg.debug('Result from AMI after string cleaning:')
-            msg.debug('%s' % dumps(physics, indent = 4))
+            msg.debug('%s', dumps(physics, indent = 4))
 
             if suppressNonJobOptions:
                 for k in list(physics):
@@ -483,18 +483,18 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True):
             msg.debug("Checking for pseudo-argument internal to ProdSys...")
             if 'extraParameter' in physics:
                 val = physics.pop('extraParameter')
-                msg.debug("Removed extraParamater=%s from arguments." % val)
+                msg.debug("Removed extraParamater=%s from arguments.", val)
 
             msg.debug("Checking for input/output file arguments...")
             for arg in list(physics):
                 if arg.lstrip('-').startswith('input') and arg.endswith('File'):
                     value = physics.pop(arg)
-                    msg.debug("Found input file argument %s=%s." % (arg, value))
+                    msg.debug("Found input file argument %s=%s.", arg, value)
                     fmt = arg.lstrip('-').replace('input', '').replace('File', '')
                     trf.inFiles[arg] = getInputFileName(arg)
                 elif arg.lstrip('-').startswith('output') and arg.endswith('File'):
                     value = physics.pop(arg)
-                    msg.debug("Found output file argument %s=%s." % (arg, value))
+                    msg.debug("Found output file argument %s=%s.", arg, value)
                     fmt = arg.lstrip('-').replace('output', '').replace('File', '')
                     trf.outFiles[arg] = getOutputFileName(fmt)
 
@@ -502,7 +502,7 @@ def getTrfConfigFromAMI(tag, suppressNonJobOptions = True):
             for arg, value in listitems(physics):
                 if value == "NONE" or value == "none" or value == ["NONE"]:
                     val = physics.pop(arg)
-                    msg.debug("Removed %s=%s from arguments." % (arg, val))
+                    msg.debug("Removed %s=%s from arguments.", arg, val)
 
             trf.physics = physics
 
@@ -547,7 +547,7 @@ def deserialiseFromAMIString(amistring):
         msg.debug("Failed to decode {0} as JSON: {1}".format(amistring, e_json))
         try:
             result = ast.literal_eval(amistring)
-        except SyntaxError as e_ast:
+        except SyntaxError:
             errMsg = "Failed to deserialise AMI string '{0}' using JSON or eval".format(amistring)
             msg.error(errMsg)
             raise TransformAMIException(AMIerrorCode, errMsg)
diff --git a/Tools/PyJobTransforms/python/trfArgClasses.py b/Tools/PyJobTransforms/python/trfArgClasses.py
index 751c5368790ae96a0203d533e044f6ca61223222..e90b3e872f4e60839136a750ca933457e6cd2cf3 100644
--- a/Tools/PyJobTransforms/python/trfArgClasses.py
+++ b/Tools/PyJobTransforms/python/trfArgClasses.py
@@ -7,7 +7,7 @@ from past.builtins import basestring
 from builtins import object
 from builtins import int
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfArgClasses
 # @brief Transform argument class definitions
@@ -18,11 +18,9 @@ import argparse
 import bz2
 import copy
 import glob
-import io
 import os
 import re
 import subprocess
-import types
 import uuid
 
 import logging
@@ -31,7 +29,7 @@ msg = logging.getLogger(__name__)
 import PyJobTransforms.trfExceptions as trfExceptions
 
 from PyJobTransforms.trfFileUtils import athFileInterestingKeys, AthenaLiteFileInfo, NTUPEntries, HISTEntries, PRWEntries, urlType, ROOTGetSize
-from PyJobTransforms.trfUtils import call, cliToKey
+from PyJobTransforms.trfUtils import call
 from PyJobTransforms.trfExitCodes import trfExit as trfExit
 from PyJobTransforms.trfDecorators import timelimited
 from PyJobTransforms.trfAMI import getAMIClient
@@ -41,13 +39,13 @@ from PyJobTransforms.trfAMI import getAMIClient
 #  @brief Factory class used to generate argument class instances for argparse
 class argFactory(object):
     def __init__(self, genclass, *args, **kwargs):
-        msg.debug('Initialised class %s with args=%s; kwargs=%s' % (genclass, args, kwargs))
+        msg.debug('Initialised class %s with args=%s; kwargs=%s', genclass, args, kwargs)
         self._genclass = genclass
         self._args = args
         self._kwargs = kwargs
     
     def __call__(self, valueString=None):
-        msg.debug('Called class %s with value=%s; args=%s; kwargs=%s' % (self._genclass, valueString, self._args, self._kwargs))
+        msg.debug('Called class %s with value=%s; args=%s; kwargs=%s', self._genclass, valueString, self._args, self._kwargs)
         
         # Wrap this step in our own try/except because if this goes wrong we want to see the exception
         # instead of having it masked by the argparse module
@@ -1110,7 +1108,7 @@ class argFile(argList):
                     self._fileMetadata[fname]['integrity'] = True
                 except (OSError, IOError) as e:
                     msg.error('Got exception {0!s} raised while checking integrity of file {1}'.format(e, fname))
-                    self._fileMetadata[file]['integrity'] = False
+                    self._fileMetadata[fname]['integrity'] = False
                     
                     
     ## @brief Generate a GUID on demand - no intrinsic for this file type        
@@ -1808,7 +1806,7 @@ class argLHEFile(argFile):
                         lhecount = lines.find('/event')
 
                 self._fileMetadata[fname]['nentries'] = lhecount
-            except :
+            except Exception:
                 msg.debug('Entries is set to None - event count undefined for this LHE')
                 self._fileMetadata[fname]['nentries'] = 'UNDEFINED'
 
@@ -1833,7 +1831,7 @@ class argLHEFile(argFile):
                                     w = float(re.sub(' +',' ',line).split(" ")[2])
                                     if w > 0 : weightPos += w
                                     else : weightNeg += abs(w)
-                                except :
+                                except Exception:
                                     pass
                                 next = False
                             if "<event" in line :
@@ -1841,7 +1839,7 @@ class argLHEFile(argFile):
 
                 self._fileMetadata[fname]['lheSumOfPosWeights'] = weightPos
                 self._fileMetadata[fname]['lheSumOfNegWeights'] = weightNeg
-            except :
+            except Exception:
                 msg.debug('Entries is set to None - negative fraction count undefined for this LHE')
                 self._fileMetadata[fname]['lheSumOfPosWeights'] = 'UNDEFINED'
                 self._fileMetadata[fname]['lheSumOfNegWeights'] = 'UNDEFINED'
@@ -2146,7 +2144,7 @@ class argSubstepInt(argSubstep):
                 self._value = value
             else:
                 raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Setter value {0!s} (type {1}) for substep argument cannot be parsed'.format(value, type(value)))
-        except ValueError as e:
+        except ValueError:
             raise trfExceptions.TransformArgException(trfExit.nameToCode('TRF_ARG_CONV_FAIL'), 'Failed to convert substep value {0} to int'.format(value))
 
 
@@ -2157,7 +2155,6 @@ class argSubstepFloat(argSubstep):
     def __init__(self, value=None, min=None, max=None, runarg=True, name=None):
         self._min = min
         self._max = max
-        desc = {}
         super(argSubstepFloat, self).__init__(value = value, runarg = runarg, name=name)
         
     @property
@@ -2430,7 +2427,7 @@ class trfArgParser(argparse.ArgumentParser):
     def defineArgGroup(self, *args):
         # Get an argparse group
         if args[0] in self._argGroups:
-            msg.warning('Argument group %s already exists' % args[0])
+            msg.warning('Argument group %s already exists', args[0])
             return
         self._argGroups[args[0]] = self.add_argument_group(*args)
         
@@ -2466,13 +2463,13 @@ class trfArgParser(argparse.ArgumentParser):
         else:
             namespace = super(trfArgParser, self).parse_args(args = args)
         for k, v in iteritems(namespace.__dict__):
-            msg.debug('Treating key %s (%s)' % (k, v))
+            msg.debug('Treating key %s (%s)', k, v)
             if isinstance(v, list):
                 # We build on the v[0] instance as this contains the correct metadata
                 # and object references for this instance (shallow copying can 
                 # mess up object references and deepcopy thows exceptions!)
                 newValueObj = v[0] 
-                msg.debug('Started with: %s = %s' % (type(newValueObj), newValueObj))
+                msg.debug('Started with: %s = %s', type(newValueObj), newValueObj)
                 if isinstance(v[0], argSubstep):
                     # Make sure you do not have a reference to the original value - this is a deeper copy
                     newValues = dictSubstepMerge(v[0].value, {})
@@ -2483,7 +2480,7 @@ class trfArgParser(argparse.ArgumentParser):
                 else:
                     newValues = [v[0].value,]
                 for valueObj in v[1:]:
-                    msg.debug('Value Object: %s = %s' % (type(valueObj), valueObj))
+                    msg.debug('Value Object: %s = %s', type(valueObj), valueObj)
                     if isinstance(v[0], argSubstep):
                         # Special merger for lists attached to substeps
                         newValues = dictSubstepMerge(newValues, valueObj.value)
@@ -2497,7 +2494,7 @@ class trfArgParser(argparse.ArgumentParser):
                         newValues.append(valueObj.value)
                 newValueObj.value = newValues
                 namespace.__dict__[k] = newValueObj
-                msg.debug('Set to %s' % newValueObj.value)                
+                msg.debug('Set to %s', newValueObj.value)                
 
         return namespace
 
diff --git a/Tools/PyJobTransforms/python/trfDecorators.py b/Tools/PyJobTransforms/python/trfDecorators.py
index 9261a2f7622fc557d23808a7ef4da4e390534ab5..5095a4d94462439d61d21bb54a7bca6626258a22 100644
--- a/Tools/PyJobTransforms/python/trfDecorators.py
+++ b/Tools/PyJobTransforms/python/trfDecorators.py
@@ -1,6 +1,6 @@
 from future import standard_library
 standard_library.install_aliases()
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @Package PyJobTrasforms.trfDecorators
 #  @brief Some useful decorators used by the transforms
@@ -110,7 +110,7 @@ def sigUsrStackTrace(func):
         pass
     
     def sigHandler(signum, frame):
-        msg.info('Handling signal %d in sigHandler' % signum)
+        msg.info('Handling signal %d in sigHandler', signum)
         raise SigUsr1
     
     def signal_wrapper(*args, **kwargs):
@@ -153,11 +153,11 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N
             try:
                 result = func(*args, **kwargs)
                 queue.put((True, result))
-            except:
+            except Exception:
                 exc0=exc_info()[0]
                 exc1=exc_info()[1]
                 exc2=traceback.format_exc()
-                msg.warning('In time limited function %s an exception occurred' % (func.__name__))
+                msg.warning('In time limited function %s an exception occurred', func.__name__)
                 msg.warning('Original traceback:')
                 msg.warning(exc2)            
                 queue.put((False,(exc0, exc1, exc2))) 
@@ -187,7 +187,7 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N
                 
             n=0
             while n<=lretry:
-                msg.info('Try %i out of %i (time limit %s s) to call %s.' % (n+1, retry+1, ltimeout, func.__name__))
+                msg.info('Try %i out of %i (time limit %s s) to call %s.', n+1, retry+1, ltimeout, func.__name__)
                 starttime = time.time()
                 q=mp.Queue(maxsize=1)
                 nargs = (q,) + args
@@ -197,23 +197,23 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N
                     # Wait for function to run and return, but with a timeout
                     flag,result = q.get(block=True, timeout=ltimeout)
                     proc.join(60)
-                    msg.info('Executed call within %d s.' % (time.time()-starttime))
+                    msg.info('Executed call within %d s.', time.time()-starttime)
                     if flag:
                         return result
                     else:
-                        msg.warning('But an exception occurred in function %s.' % (func.__name__))
-                        msg.warning('Returning default return code %s.' % ldefaultrc)
+                        msg.warning('But an exception occurred in function %s.', func.__name__)
+                        msg.warning('Returning default return code %s.', ldefaultrc)
                         return ldefaultrc
                 except queue.Empty:
                     # Our function did not run in time - kill increase timeout
-                    msg.warning('Timeout limit of %d s reached. Kill subprocess and its children.' % ltimeout)
+                    msg.warning('Timeout limit of %d s reached. Kill subprocess and its children.', ltimeout)
                     parent=proc.pid
                     pids=[parent]
                     pids.extend(trfUtils.listChildren(parent=parent, listOrphans = False))
                     trfUtils.infanticide(pids)
                     proc.join(60) # Ensure cleanup
                     if n!=lretry:
-                        msg.info('Going to sleep for %d s.' % lsleeptime)                    
+                        msg.info('Going to sleep for %d s.', lsleeptime)                    
                         time.sleep(lsleeptime)
                     n+=1
                     ltimeout*=ltimefactor
@@ -223,7 +223,7 @@ def timelimited(timeout=None, retry=1, timefactor=1.5, sleeptime=10, defaultrc=N
                     msg.error(errMsg)
                     raise TransformInternalException(trfExit.nameToCode("TRF_EXTERNAL"), errMsg)
 
-            msg.warning('All %i tries failed!' % n)
+            msg.warning('All %i tries failed!', n)
             raise TransformTimeoutException(trfExit.nameToCode('TRF_EXEC_TIMEOUT'), 'Timeout in function %s' % (func.__name__))
             
         return funcWithTimeout
diff --git a/Tools/PyJobTransforms/python/trfEnv.py b/Tools/PyJobTransforms/python/trfEnv.py
index f3fa4eb7b88455ac9b6f8d7b80cb5a8e35fbff53..6a0aaf7e48a349269010a26f879368715d8c2d89 100644
--- a/Tools/PyJobTransforms/python/trfEnv.py
+++ b/Tools/PyJobTransforms/python/trfEnv.py
@@ -1,7 +1,7 @@
 from future.utils import iteritems
 
 from builtins import object
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @Package PyJobTransforms.trfEnv
 #  @brief Support for environemnt variable manipulation in the transforms
@@ -14,8 +14,6 @@ import os.path as path
 import logging
 msg = logging.getLogger(__name__)
 
-import PyJobTransforms.trfUtils as trfUtils
-
 ## @brief Class holding the update to an environment that will be passed on to 
 #  an executor
 class environmentUpdate(object):
diff --git a/Tools/PyJobTransforms/python/trfExe.py b/Tools/PyJobTransforms/python/trfExe.py
index 999b4a56ce74bf317c7d754fdf4f000f3dd7d4dc..48c3e46855126ef3806d4dea5b28620dee190f00 100755
--- a/Tools/PyJobTransforms/python/trfExe.py
+++ b/Tools/PyJobTransforms/python/trfExe.py
@@ -30,7 +30,6 @@ import signal
 import subprocess
 import sys
 import time
-import six
 
 import logging
 from fnmatch import fnmatch
@@ -39,7 +38,6 @@ msg = logging.getLogger(__name__)
 from PyJobTransforms.trfJobOptions import JobOptionsTemplate
 from PyJobTransforms.trfUtils import asetupReport, unpackDBRelease, setupDBRelease, cvmfsDBReleaseCheck, forceToAlphaNum
 from PyJobTransforms.trfUtils import ValgrindCommand, isInteractiveEnv, calcCpuTime, calcWallTime
-from PyJobTransforms.trfUtils import bind_port
 from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfLogger import stdLogLevels
 from PyJobTransforms.trfMPTools import detectAthenaMPProcs, athenaMPOutputHandler
@@ -449,26 +447,26 @@ class transformExecutor(object):
 
     def preExecute(self, input = set(), output = set()):
         self.setPreExeStart()
-        msg.info('Preexecute for %s' % self._name)
+        msg.info('Preexecute for %s', self._name)
         
     def execute(self):
         self._exeStart = os.times()
         msg.debug('exeStart time is {0}'.format(self._exeStart))
-        msg.info('Starting execution of %s' % self._name)
+        msg.info('Starting execution of %s', self._name)
         self._hasExecuted = True
         self._rc = 0
         self._errMsg = ''
-        msg.info('%s executor returns %d' % (self._name, self._rc))
+        msg.info('%s executor returns %d', self._name, self._rc)
         self._exeStop = os.times()
         msg.debug('preExeStop time is {0}'.format(self._exeStop))
         
     def postExecute(self):
-        msg.info('Postexecute for %s' % self._name)
+        msg.info('Postexecute for %s', self._name)
         
     def validate(self):
         self.setValStart()
         self._hasValidated = True        
-        msg.info('Executor %s has no validation function - assuming all ok' % self._name)
+        msg.info('Executor %s has no validation function - assuming all ok', self._name)
         self._isValidated = True
         self._errMsg = ''
         self._valStop = os.times()
@@ -490,7 +488,7 @@ class logscanExecutor(transformExecutor):
 
     def preExecute(self, input = set(), output = set()):
         self.setPreExeStart()
-        msg.info('Preexecute for %s' % self._name)
+        msg.info('Preexecute for %s', self._name)
         if 'logfile' in self.conf.argdict:
             self._logFileName = self.conf.argdict['logfile'].value
         
@@ -559,14 +557,14 @@ class echoExecutor(transformExecutor):
     def execute(self):
         self._exeStart = os.times()
         msg.debug('exeStart time is {0}'.format(self._exeStart))
-        msg.info('Starting execution of %s' % self._name)        
+        msg.info('Starting execution of %s', self._name)        
         msg.info('Transform argument dictionary now follows:')
         for k, v in iteritems(self.conf.argdict):
             print("%s = %s" % (k, v))
         self._hasExecuted = True
         self._rc = 0
         self._errMsg = ''
-        msg.info('%s executor returns %d' % (self._name, self._rc))
+        msg.info('%s executor returns %d', self._name, self._rc)
         self._exeStop = os.times()
         msg.debug('exeStop time is {0}'.format(self._exeStop))
 
@@ -581,7 +579,7 @@ class dummyExecutor(transformExecutor):
     def execute(self):
         self._exeStart = os.times()
         msg.debug('exeStart time is {0}'.format(self._exeStart))
-        msg.info('Starting execution of %s' % self._name)
+        msg.info('Starting execution of %s', self._name)
         for type in self._outData:
             for k, v in iteritems(self.conf.argdict):
                 if type in k:
@@ -590,7 +588,7 @@ class dummyExecutor(transformExecutor):
         self._hasExecuted = True
         self._rc = 0
         self._errMsg = ''
-        msg.info('%s executor returns %d' % (self._name, self._rc))
+        msg.info('%s executor returns %d', self._name, self._rc)
         self._exeStop = os.times()
         msg.debug('exeStop time is {0}'.format(self._exeStop))
 
@@ -734,7 +732,7 @@ class scriptExecutor(transformExecutor):
                 self._echologger.info(line.rstrip())
     
             self._rc = p.returncode
-            msg.info('%s executor returns %d' % (self._name, self._rc))
+            msg.info('%s executor returns %d', self._name, self._rc)
             self._exeStop = os.times()
             msg.debug('exeStop time is {0}'.format(self._exeStop))
         except OSError as e:
@@ -749,7 +747,7 @@ class scriptExecutor(transformExecutor):
                     while (not mem_proc.poll()) and countWait < 10:
                         time.sleep(0.1)
                         countWait += 1
-                except OSError as UnboundLocalError:
+                except OSError:
                     pass
         
         
@@ -912,6 +910,9 @@ class athenaExecutor(scriptExecutor):
         if self._inputDataTypeCountCheck is None:
             self._inputDataTypeCountCheck = input
         for dataType in self._inputDataTypeCountCheck:
+            if self.conf.dataDictionary[dataType].nentries == 'UNDEFINED':
+                continue
+
             thisInputEvents = self.conf.dataDictionary[dataType].nentries
             if thisInputEvents > inputEvents:
                 inputEvents = thisInputEvents
@@ -944,7 +945,7 @@ class athenaExecutor(scriptExecutor):
                     expectedEvents = min(inputEvents-mySkipEvents, myMaxEvents)
             else:
                 expectedEvents = inputEvents-mySkipEvents
-        except TypeError as e:
+        except TypeError:
             # catching type error from UNDEFINED inputEvents count
             msg.info('input event count is UNDEFINED, setting expectedEvents to 0')
             expectedEvents = 0
@@ -958,7 +959,7 @@ class athenaExecutor(scriptExecutor):
 
         # 2. One of the parallel command-line flags has been provided but ATHENA_CORE_NUMBER environment has not been set
         if (('multithreaded' in self.conf._argdict or 'multiprocess' in self.conf._argdict) and
-            (not 'ATHENA_CORE_NUMBER' in os.environ)):
+            ('ATHENA_CORE_NUMBER' not in os.environ)):
             raise trfExceptions.TransformExecutionException(trfExit.nameToCode('TRF_SETUP'),
                                                             'either --multithreaded nor --multiprocess command line option provided but ATHENA_CORE_NUMBER environment has not been set')
 
@@ -1313,7 +1314,7 @@ class athenaExecutor(scriptExecutor):
         # Add topoptions
         if self._skeleton or self._skeletonCA:
             self._cmd += self._topOptionsFiles
-            msg.info('Updated script arguments with topoptions: %s' % self._cmd)
+            msg.info('Updated script arguments with topoptions: %s', self._cmd)
 
 
     ## @brief Write a wrapper script which runs asetup and then Athena.
@@ -1489,7 +1490,7 @@ class athenaExecutor(scriptExecutor):
             os.remove(targetTGZName)
 
         import tarfile
-        fNameRE = re.compile("JiveXML\_\d+\_\d+.xml")
+        fNameRE = re.compile(r"JiveXML\_\d+\_\d+.xml")
 
         # force gz compression
         tar = tarfile.open(targetTGZName, "w:gz")
@@ -1497,11 +1498,11 @@ class athenaExecutor(scriptExecutor):
             matches = fNameRE.findall(fName)
             if len(matches) > 0:
                 if fNameRE.findall(fName)[0] == fName:
-                    msg.info('adding %s to %s' % (fName, targetTGZName))
+                    msg.info('adding %s to %s', fName, targetTGZName)
                     tar.add(fName)
 
         tar.close()
-        msg.info('JiveXML compression: %s has been written and closed.' % (targetTGZName))
+        msg.info('JiveXML compression: %s has been written and closed.', targetTGZName)
 
 
 ## @brief Athena executor where failure is not consisered fatal
diff --git a/Tools/PyJobTransforms/python/trfFileUtils-lite.py b/Tools/PyJobTransforms/python/trfFileUtils-lite.py
index e0b38527041a3964fb699da345317b314ffe955f..9cca048a095346d293bccdbf8fc2fa419ab1b580 100644
--- a/Tools/PyJobTransforms/python/trfFileUtils-lite.py
+++ b/Tools/PyJobTransforms/python/trfFileUtils-lite.py
@@ -2,7 +2,7 @@ from past.builtins import basestring
 
 from builtins import zip
 from builtins import range
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfFileUtils
 # @brief Transform utilities to deal with files.
@@ -111,7 +111,7 @@ def AthenaFileInfo(fileNames, retrieveKeys = athFileInterestingKeys):
                     msg.warning('Missing key in athFile info: {0}'.format(key))
             msg.debug('Found these metadata for {0}: {1}'.format(fname, list(metaDict[fname])))
         return metaDict
-    except ValueError as e:
+    except ValueError:
         msg.error('Problem in getting AthFile metadata for {0}'.format(fileNames))
         return None
 
@@ -187,13 +187,13 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys
                     try: 
                         metaDict[filename][key] = meta['metadata']['/Simulation/Parameters']['G4Version']
                         msg.debug('Setting G4Version to {0}'.format(meta['metadata']['/Simulation/Parameters']['G4Version']))
-                    except (KeyError, TypeError) as e:
+                    except (KeyError, TypeError):
                         msg.debug('Could not find G4Version information in metadata for file {0}'.format(filename))
                 else:
                     metaDict[filename][key] = meta[key]
             except KeyError:
                 msg.warning('Missing key in athFile info: {0}'.format(key))
-    except (CalledProcessError, ValueError, AssertionError, ReferenceError) as e:
+    except (CalledProcessError, ValueError, AssertionError, ReferenceError):
         msg.error('Problem in getting AthFile metadata for {0}'.format(filename))
         return None
     msg.debug('Returning {0}'.format(metaDict))
@@ -228,7 +228,7 @@ def HISTEntries(fileName):
         if name.startswith('run_') and name != 'run_multiple':
             
             if rundir is not None:
-                msg.warning('Found two run_ directories in HIST file %s: %s and %s' % ( fileName, rundir, name) )
+                msg.warning('Found two run_ directories in HIST file %s: %s and %s', fileName, rundir, name)
                 return None
             else:
                 rundir = name
@@ -236,11 +236,11 @@ def HISTEntries(fileName):
         del name
        
     if rundir is None:
-        msg.warning( 'Unable to find run directory in HIST file %s' % fileName )
+        msg.warning( 'Unable to find run directory in HIST file %s', fileName )
         fname.Close()
         return None
     
-    msg.info( 'Using run directory %s for event counting of HIST file %s. ' % ( rundir, fileName ) )
+    msg.info( 'Using run directory %s for event counting of HIST file %s. ', rundir, fileName )
     
     hpath = '%s/GLOBAL/DQTDataFlow/events_lb' % rundir
     possibleLBs = []
@@ -257,14 +257,14 @@ def HISTEntries(fileName):
         possibleLBs.append(hpath)
     nev = 0
     if len(possibleLBs) == 0:
-        msg.warning( 'Unable to find events_lb histogram in HIST file %s' % fileName )
+        msg.warning( 'Unable to find events_lb histogram in HIST file %s', fileName )
         fname.Close()
         return None
     for hpath in possibleLBs:
         h = fname.Get(hpath)
         
         if not isinstance( h, root.TH1 ):
-            msg.warning( 'Unable to retrieve %s in HIST file %s.' % ( hpath, fileName ) )
+            msg.warning( 'Unable to retrieve %s in HIST file %s.', hpath, fileName )
             fname.Close()
             return None
         
@@ -274,7 +274,7 @@ def HISTEntries(fileName):
         for i in range(1, nBinsX):
             
             if h[i] < 0:
-                msg.warning( 'Negative number of events for step %s in HIST file %s.' %( h.GetXaxis().GetBinLabel(i), fileName ) )
+                msg.warning( 'Negative number of events for step %s in HIST file %s.', h.GetXaxis().GetBinLabel(i), fileName )
                 fname.Close()
                 return None
             
@@ -286,7 +286,7 @@ def HISTEntries(fileName):
                 
             else:
                 if nevLoc != h[i]:
-                    msg.warning( 'Mismatch in events per step in HIST file %s; most recent step seen is %s.' % ( fileName, h.GetXaxis().GetBinLabel(i) ) )
+                    msg.warning( 'Mismatch in events per step in HIST file %s; most recent step seen is %s.', fileName, h.GetXaxis().GetBinLabel(i) )
                     fname.Close()
                     return None
         nev += nevLoc        
@@ -332,11 +332,11 @@ def NTUPEntries(fileName, treeNames):
         num = tree.GetEntriesFast()
 
         if not num>=0:
-            msg.warning('GetEntriesFast returned non positive value for tree %s in NTUP file %s.' % ( treeName, fileName ))
+            msg.warning('GetEntriesFast returned non positive value for tree %s in NTUP file %s.', treeName, fileName )
             return None
                 
         if prevNum is not None and prevNum != num:
-            msg.warning( "Found diffferent number of entries in tree %s and tree %s of file %s." % ( treeName, prevTree, fileName  ))
+            msg.warning( "Found different number of entries in tree %s and tree %s of file %s.", treeName, prevTree, fileName )
             return None
         
         numberOfEntries=num
@@ -372,7 +372,7 @@ def ROOTGetSize(filename):
                     extraparam = '&filetype=raw'
                 else:
                     extraparam = '?filetype=raw'
-            except:
+            except Exception:
                 extraparam = '?filetype=raw'
         fname = root.TFile.Open(filename + extraparam, 'READ')
         fsize = fname.GetSize()
diff --git a/Tools/PyJobTransforms/python/trfFileUtils.py b/Tools/PyJobTransforms/python/trfFileUtils.py
index 92304f0e94c6947909a122810fd5110d7e273d0f..65bb75072cc3a775063cc3f74c987d3201b981ce 100644
--- a/Tools/PyJobTransforms/python/trfFileUtils.py
+++ b/Tools/PyJobTransforms/python/trfFileUtils.py
@@ -1,6 +1,6 @@
 
 from builtins import range
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfFileUtils
 # @brief Transform utilities to deal with files.
@@ -18,7 +18,6 @@ msg = logging.getLogger(__name__)
 import PyUtils.Decorators as _decos
 
 from PyUtils.RootUtils import import_root
-from PyJobTransforms.trfDecorators import timelimited
 
 # Use a stripped down key list, as we retrieve only 'fast' metadata  
 athFileInterestingKeys = ['file_size', 'file_guid', 'file_type', 'nentries']
@@ -52,13 +51,13 @@ def AthenaLiteFileInfo(filename, filetype, retrieveKeys = athFileInterestingKeys
                     try: 
                         metaDict[filename][key] = meta['metadata']['/Simulation/Parameters']['G4Version']
                         msg.debug('Setting G4Version to {0}'.format(meta['metadata']['/Simulation/Parameters']['G4Version']))
-                    except (KeyError, TypeError) as e:
+                    except (KeyError, TypeError):
                         msg.debug('Could not find G4Version information in metadata for file {0}'.format(filename))
                 else:
                     metaDict[filename][key] = meta[key]
             except KeyError:
                 msg.warning('Missing key in athFile info: {0}'.format(key))
-    except (CalledProcessError, ValueError, AssertionError, ReferenceError) as e:
+    except (CalledProcessError, ValueError, AssertionError, ReferenceError):
         msg.error('Problem in getting AthFile metadata for {0}'.format(filename))
         return None
     msg.debug('Returning {0}'.format(metaDict))
@@ -93,7 +92,7 @@ def HISTEntries(fileName):
         if name.startswith('run_') and name != 'run_multiple':
             
             if rundir is not None:
-                msg.warning('Found two run_ directories in HIST file %s: %s and %s' % ( fileName, rundir, name) )
+                msg.warning('Found two run_ directories in HIST file %s: %s and %s', fileName, rundir, name)
                 return None
             else:
                 rundir = name
@@ -101,11 +100,11 @@ def HISTEntries(fileName):
         del name
        
     if rundir is None:
-        msg.warning( 'Unable to find run directory in HIST file %s' % fileName )
+        msg.warning( 'Unable to find run directory in HIST file %s', fileName )
         fname.Close()
         return None
     
-    msg.info( 'Using run directory %s for event counting of HIST file %s. ' % ( rundir, fileName ) )
+    msg.info( 'Using run directory %s for event counting of HIST file %s. ', rundir, fileName )
     
     hpath = '%s/GLOBAL/DQTDataFlow/events_lb' % rundir
     possibleLBs = []
@@ -122,14 +121,14 @@ def HISTEntries(fileName):
         possibleLBs.append(hpath)
     nev = 0
     if len(possibleLBs) == 0:
-        msg.warning( 'Unable to find events_lb histogram in HIST file %s' % fileName )
+        msg.warning( 'Unable to find events_lb histogram in HIST file %s', fileName )
         fname.Close()
         return None
     for hpath in possibleLBs:
         h = fname.Get(hpath)
         
         if not isinstance( h, root.TH1 ):
-            msg.warning( 'Unable to retrieve %s in HIST file %s.' % ( hpath, fileName ) )
+            msg.warning( 'Unable to retrieve %s in HIST file %s.', hpath, fileName )
             fname.Close()
             return None
         
@@ -139,7 +138,7 @@ def HISTEntries(fileName):
         for i in range(1, nBinsX):
             
             if h[i] < 0:
-                msg.warning( 'Negative number of events for step %s in HIST file %s.' %( h.GetXaxis().GetBinLabel(i), fileName ) )
+                msg.warning( 'Negative number of events for step %s in HIST file %s.', h.GetXaxis().GetBinLabel(i), fileName )
                 fname.Close()
                 return None
             
@@ -151,7 +150,7 @@ def HISTEntries(fileName):
                 
             else:
                 if nevLoc != h[i]:
-                    msg.warning( 'Mismatch in events per step in HIST file %s; most recent step seen is %s.' % ( fileName, h.GetXaxis().GetBinLabel(i) ) )
+                    msg.warning( 'Mismatch in events per step in HIST file %s; most recent step seen is %s.', fileName, h.GetXaxis().GetBinLabel(i) )
                     fname.Close()
                     return None
         nev += nevLoc        
@@ -197,11 +196,11 @@ def NTUPEntries(fileName, treeNames):
         num = tree.GetEntriesFast()
 
         if not num>=0:
-            msg.warning('GetEntriesFast returned non positive value for tree %s in NTUP file %s.' % ( treeName, fileName ))
+            msg.warning('GetEntriesFast returned non positive value for tree %s in NTUP file %s.', treeName, fileName )
             return None
                 
         if prevNum is not None and prevNum != num:
-            msg.warning( "Found diffferent number of entries in tree %s and tree %s of file %s." % ( treeName, prevTree, fileName  ))
+            msg.warning( "Found different number of entries in tree %s and tree %s of file %s.", treeName, prevTree, fileName )
             return None
         
         numberOfEntries=num
@@ -279,7 +278,7 @@ def ROOTGetSize(filename):
                     extraparam = '&filetype=raw'
                 else:
                     extraparam = '?filetype=raw'
-            except:
+            except Exception:
                 extraparam = '?filetype=raw'
         fname = root.TFile.Open(filename + extraparam, 'READ')
         fsize = fname.GetSize()
diff --git a/Tools/PyJobTransforms/python/trfJobOptions.py b/Tools/PyJobTransforms/python/trfJobOptions.py
index dafbaed30d670e474ff8d8d097af9c62620968b4..26ff604b17763b6fe0600fa848298300b57bb85f 100644
--- a/Tools/PyJobTransforms/python/trfJobOptions.py
+++ b/Tools/PyJobTransforms/python/trfJobOptions.py
@@ -55,7 +55,7 @@ class JobOptionsTemplate(object):
 
     ## @brief Write the runArgs Job Options file
     def writeRunArgs(self, input = dict(), output = dict()):
-        msg.info('Writing runArgs to file \"%s\"' % self._runArgsFile)
+        msg.info('Writing runArgs to file \"%s\"', self._runArgsFile)
 
         ## Check consistency btw --CA flag and provided skeletons:
         if 'CA' in self._exe.conf.argdict:
@@ -225,13 +225,13 @@ class JobOptionsTemplate(object):
             msg.warning('No runArgs available')
 
         if not findFile(os.environ["JOBOPTSEARCHPATH"], self._runArgsFile):
-            msg.warning('Could not find runArgs file %s' % self._runArgsFile)
+            msg.warning('Could not find runArgs file %s', self._runArgsFile)
 
         # Check the skeleton(s):
         if  self._exe._skeleton:
             for skeleton in self._exe._skeleton:
                 if not findFile(os.environ["JOBOPTSEARCHPATH"], skeleton):
-                    msg.warning('Could not find job options skeleton file %s' % skeleton)
+                    msg.warning('Could not find job options skeleton file %s', skeleton)
 
   
     ## @brief Get the runArgs and skeleton joboptions, Master function
diff --git a/Tools/PyJobTransforms/python/trfMPTools.py b/Tools/PyJobTransforms/python/trfMPTools.py
index f2e8e80bd308dc60fd3f5a39974ddf8e608c488a..8ec23d42fff4cb52a840dd186338d2d1f7d3c12e 100644
--- a/Tools/PyJobTransforms/python/trfMPTools.py
+++ b/Tools/PyJobTransforms/python/trfMPTools.py
@@ -1,6 +1,6 @@
 from future.utils import iteritems
 from builtins import zip
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfMPTools
 #
@@ -13,7 +13,6 @@ __version__ = '$Revision'
 
 import os
 import os.path as path
-import re
 
 import logging
 msg = logging.getLogger(__name__)
diff --git a/Tools/PyJobTransforms/python/trfReports.py b/Tools/PyJobTransforms/python/trfReports.py
index 63e009b90dee27578a4e4cf724e79ee38e8bccd5..b59cf374b7cff1eaf56a643307b666407568e6ca 100644
--- a/Tools/PyJobTransforms/python/trfReports.py
+++ b/Tools/PyJobTransforms/python/trfReports.py
@@ -519,8 +519,6 @@ class trfFileReport(object):
             raise trfExceptions.TransformReportException(trfExit.nameToCode('TRF_INTERNAL_REPORT_ERROR'),
                                                          'Unknown file ({0}) in the file report for {1}'.format(filename, self._fileArg))
         tree = ElementTree.Element('File', ID = str(self._fileArg.getSingleMetadata(fname = filename, metadataKey = 'file_guid', populate = not fast)))
-        logical = ElementTree.SubElement(tree, 'logical')
-        lfn = ElementTree.SubElement(logical, 'lfn', name = filename)
         for myKey, classicKey in iteritems(self._internalToClassicMap):
             # beam_type is tricky - we return only the first list value,
             # (but remember, protect against funny stuff!)
@@ -601,7 +599,7 @@ class machineReport(object):
             with open('/proc/cpuinfo') as cpuinfo:
                 for line in cpuinfo:
                     try:
-                        k, v = [ e.strip() for e in line.split(':') ]
+                        k, v = [ entry.strip() for entry in line.split(':') ]
                         if k == 'cpu family' and 'cpu_family' not in machine:
                             machine['cpu_family'] = v
                         elif k == 'model' and 'model' not in machine:
@@ -615,7 +613,7 @@ class machineReport(object):
         try:
             with open('/etc/machinefeatures/hs06') as hs:
                 machine['hepspec'] = hs.readlines()[0].strip()
-        except IOError as e:
+        except IOError:
             pass
         return machine
 
diff --git a/Tools/PyJobTransforms/python/trfSignal.py b/Tools/PyJobTransforms/python/trfSignal.py
index dba8a09fbc70b14f18a492b20d28833a9173a958..44672b3ee1480e9cafacee9f79ed405ae3fc2be5 100755
--- a/Tools/PyJobTransforms/python/trfSignal.py
+++ b/Tools/PyJobTransforms/python/trfSignal.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfSignal
 #
@@ -28,10 +28,10 @@ _defaultSignalList = ['SIGABRT', 'SIGFPE', 'SIGBUS', 'SIGHUP', 'SIGILL', 'SIGIO'
 def setTrfSignalHandlers(handler):
     for s in _defaultSignalList:
         try:
-            msg.debug("Setting signalhandler for %s to %s" % (s, handler))
+            msg.debug("Setting signalhandler for %s to %s", s, handler)
             _savedSignalHandlerDict[s] =  signal.signal(getattr(signal, s), handler)
         except Exception as e:
-            msg.error("Unable to attach custom signal handler to %s: %s" % (s, e))
+            msg.error("Unable to attach custom signal handler to %s: %s", s, e)
             continue
 
 ## @brief Restore signal handlers to the default ones
@@ -42,5 +42,5 @@ def resetTrfSignalHandlers():
         try:
             signal.signal(getattr(signal, s), _savedSignalHandlerDict.get(s, signal.SIG_DFL))
         except Exception as e:
-            msg.error("Unable to attach custom signal handler to %s: %s" % (s, e))
+            msg.error("Unable to attach custom signal handler to %s: %s", s, e)
             continue
diff --git a/Tools/PyJobTransforms/python/trfUtils.py b/Tools/PyJobTransforms/python/trfUtils.py
index 230ba083f46d6d90aaba259f18aa500b40236c40..ebd62f2ab30f663ae6dd32f0e122f182034dc4e9 100644
--- a/Tools/PyJobTransforms/python/trfUtils.py
+++ b/Tools/PyJobTransforms/python/trfUtils.py
@@ -30,7 +30,7 @@ import multiprocessing
 import base64
 
 from datetime import datetime
-from subprocess import Popen, STDOUT, PIPE, CalledProcessError
+from subprocess import Popen, STDOUT, PIPE
 from xml.dom import minidom
 from xml.parsers.expat import ExpatError
 from xml.etree import ElementTree
@@ -75,12 +75,12 @@ def getAncestry(listMyOrphans = False):
     psCmd = ['ps', 'ax', '-o', 'pid,ppid,pgid,args', '-m']
 
     try:
-        msg.debug('Executing %s' % psCmd)
+        msg.debug('Executing %s', psCmd)
         p = Popen(psCmd, stdout=PIPE, stderr=PIPE)
         stdout = p.communicate()[0]
         psPID = p.pid
     except OSError as e:
-        msg.error('Failed to execute "ps" to get process ancestry: %s' % repr(e))
+        msg.error('Failed to execute "ps" to get process ancestry: %s', repr(e))
         raise
 
     childDict = {}
@@ -123,7 +123,7 @@ def listChildren(psTree = None, parent = os.getpid(), listOrphans = False):
     if psTree is None:
         psTree = getAncestry(listMyOrphans = listOrphans)
 
-    msg.debug("List children of %d (%s)" % (parent, psTree.get(parent, [])))
+    msg.debug("List children of %d (%s)", parent, psTree.get(parent, []))
     children = []
     if parent in psTree:
         children.extend(psTree[parent])
@@ -189,27 +189,27 @@ def call(args, bufsize=0, executable=None, stdin=None, preexec_fn=None, close_fd
         loglevel=logging.DEBUG
 
     if timeout is None or timeout<=0: # no timeout set
-        msg.info('Executing %s...' % args)
+        msg.info('Executing %s...', args)
         starttime = time.time()
         p=Popen(args=args, bufsize=bufsize, executable=executable, stdin=stdin, stdout=PIPE, stderr=STDOUT, preexec_fn=preexec_fn, close_fds=close_fds, shell=shell, cwd=cwd, env=env, universal_newlines=universal_newlines, startupinfo=startupinfo, creationflags=creationflags)
         while p.poll() is None:
             logProc(p)
         flushProc(p)
         if timeout is not None:
-            msg.info('Executed call within %d s.' % (time.time()-starttime))
+            msg.info('Executed call within %d s.', time.time()-starttime)
         return p.returncode
 
     else: #timeout set
         n=0
         while n<=retry:
-            msg.info('Try %i out of %i (time limit %ss) to call %s.' % (n+1, retry+1, timeout, args))
+            msg.info('Try %i out of %i (time limit %ss) to call %s.', n+1, retry+1, timeout, args)
             starttime = time.time()
             endtime=starttime+timeout
             p=Popen(args=args, bufsize=bufsize, executable=executable, stdin=stdin, stdout=PIPE, stderr=STDOUT, preexec_fn=preexec_fn, close_fds=close_fds, shell=shell, cwd=cwd, env=env, universal_newlines=universal_newlines, startupinfo=startupinfo, creationflags=creationflags)
             while p.poll() is None and time.time()<endtime:
                 logProc(p)
             if p.poll() is None:
-                msg.warning('Timeout limit of %d s reached. Kill subprocess and its children.' % timeout)
+                msg.warning('Timeout limit of %d s reached. Kill subprocess and its children.', timeout)
                 parent=p.pid
                 pids=[parent]
                 pids.extend(listChildren(parent=parent))
@@ -217,17 +217,17 @@ def call(args, bufsize=0, executable=None, stdin=None, preexec_fn=None, close_fd
                 msg.info('Checking if something is left in buffer.')
                 flushProc(p)
                 if n!=retry:
-                    msg.info('Going to sleep for %d s.' % sleeptime)
+                    msg.info('Going to sleep for %d s.', sleeptime)
                     time.sleep(sleeptime)
                 n+=1
                 timeout*=timefactor
                 sleeptime*=timefactor
             else:
                 flushProc(p)
-                msg.info('Executed call within %d s.' % (time.time()-starttime))
+                msg.info('Executed call within %d s.', time.time()-starttime)
                 return p.returncode
 
-        msg.warning('All %i tries failed!' % n)
+        msg.warning('All %i tries failed!', n)
         raise Exception
 
 
@@ -500,7 +500,7 @@ def setupDBRelease(setup):
         sys.path.insert(0, dbdir)
         from setup import Setup
         # Instansiate the Setup module, which activates the customisation
-        setupObj = Setup(dbdir)
+        Setup(dbdir)
         sys.path = opath
         msg.debug('DBRelease setup module was initialised successfully')
     except ImportError as e:
diff --git a/Tools/PyJobTransforms/python/trfValidateRootFile.py b/Tools/PyJobTransforms/python/trfValidateRootFile.py
index 05562df6273bafbac95f4a8207e35cccc3446a79..cd74fc0411cae5674fe3a94a5321f51c90b7768f 100755
--- a/Tools/PyJobTransforms/python/trfValidateRootFile.py
+++ b/Tools/PyJobTransforms/python/trfValidateRootFile.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @Package PyJobTransforms.trfValidateRootFile
 # @brief Functionality to test a Root file for corruption
@@ -16,31 +16,31 @@ import logging
 
 from PyUtils import RootUtils
 ROOT = RootUtils.import_root()
-from ROOT import TFile, TTree, TKey, TDirectory, TClass, TList, TObjArray, TStopwatch, TBasket
+from ROOT import TFile, TTree, TDirectory, TStopwatch
 
 msg = logging.getLogger(__name__)
 
 def checkBranch(branch):
 
-    msg.debug('Checking branch %s...' % branch.GetName())
+    msg.debug('Checking branch %s...', branch.GetName())
 
-    nBaskets=branch.GetWriteBasket();
+    nBaskets=branch.GetWriteBasket()
 
-    msg.debug('Checking %s baskets...' % nBaskets)
+    msg.debug('Checking %s baskets...', nBaskets)
 
     for iBasket in range(nBaskets):
-        basket=branch.GetBasket(iBasket);
+        basket=branch.GetBasket(iBasket)
         if not basket:
-            msg.warning('Basket %s of branch %s is corrupted.' % (iBasket, branch.GetName() ))
+            msg.warning('Basket %s of branch %s is corrupted.', iBasket, branch.GetName() )
             return 1
 
-    listOfSubBranches=branch.GetListOfBranches();
-    msg.debug('Checking %s subbranches...' % listOfSubBranches.GetEntries())
+    listOfSubBranches=branch.GetListOfBranches()
+    msg.debug('Checking %s subbranches...', listOfSubBranches.GetEntries())
     for subBranch in listOfSubBranches:
         if checkBranch(subBranch)==1:
-            return 1;
+            return 1
 
-    msg.debug('Branch %s looks ok.' % branch.GetName())
+    msg.debug('Branch %s looks ok.', branch.GetName())
     return 0    
 
 
@@ -48,11 +48,11 @@ def checkTreeBasketWise(tree):
 
     listOfBranches=tree.GetListOfBranches()
 
-    msg.debug('Checking %s branches ...' % listOfBranches.GetEntries())
+    msg.debug('Checking %s branches ...', listOfBranches.GetEntries())
 
     for branch in listOfBranches:
         if checkBranch(branch)==1:
-            msg.warning('Tree %s is corrupted (branch %s ).' % (tree.GetName(), branch.GetName()))
+            msg.warning('Tree %s is corrupted (branch %s ).', tree.GetName(), branch.GetName())
             return 1
 
     return 0
@@ -62,11 +62,11 @@ def checkTreeEventWise(tree):
 
     nEntries=tree.GetEntries()
 
-    msg.debug('Checking %s entries...' % nEntries)
+    msg.debug('Checking %s entries...', nEntries)
 
     for i in range(nEntries):
         if tree.GetEntry(i)<0:
-            msg.warning('Event %s of tree %s is corrupted.' % (i, tree.GetName()))
+            msg.warning('Event %s of tree %s is corrupted.', i, tree.GetName())
             return 1
 
     return 0
@@ -74,29 +74,29 @@ def checkTreeEventWise(tree):
 
 def checkDirectory(directory, the_type, requireTree):
 
-    msg.debug('Checking directory %s...' % directory.GetName())
+    msg.debug('Checking directory %s...', directory.GetName())
 
     listOfKeys=directory.GetListOfKeys()
 
-    msg.debug('Checking %s keys... ' % listOfKeys.GetEntries())
+    msg.debug('Checking %s keys... ', listOfKeys.GetEntries())
 
     for key in listOfKeys:
 
-        msg.debug('Looking at key %s...' % key.GetName())
-        msg.debug('Key is of class %s.' % key.GetClassName())
+        msg.debug('Looking at key %s...', key.GetName())
+        msg.debug('Key is of class %s.', key.GetClassName())
 
         the_object=directory.Get(key.GetName())
         if not the_object:
-            msg.warning("Can't get object of key %s." % key.GetName())
+            msg.warning("Can't get object of key %s.", key.GetName())
             return 1
 
         if requireTree and not isinstance(the_object, TTree):
-            msg.warning("Object %s is not of class TTree!" % the_object.GetName())
+            msg.warning("Object %s is not of class TTree!", the_object.GetName())
             return 1
 
         if isinstance(the_object,TTree):
 
-            msg.debug('Checking tree %s ...' % the_object.GetName())
+            msg.debug('Checking tree %s ...', the_object.GetName())
             
             if the_type=='event':
                 if checkTreeEventWise(the_object)==1:
@@ -105,47 +105,47 @@ def checkDirectory(directory, the_type, requireTree):
                 if checkTreeBasketWise(the_object)==1:
                     return 1
 
-            msg.debug('Tree %s looks ok.' % the_object.GetName())    
+            msg.debug('Tree %s looks ok.', the_object.GetName())    
             
         if isinstance(the_object, TDirectory):
             if checkDirectory(the_object, the_type, requireTree)==1:
                 return 1
 
-    msg.debug('Directory %s looks ok.' % directory.GetName())
+    msg.debug('Directory %s looks ok.', directory.GetName())
     return 0
 
 
 def checkFile(fileName, the_type, requireTree):
 
-    msg.info('Checking file %s.' % fileName)
+    msg.info('Checking file %s.', fileName)
 
     file_handle=TFile.Open(fileName)
 
     if not file_handle:
-        msg.warning("Can't access file %s." % fileName)
+        msg.warning("Can't access file %s.", fileName)
         return 1
 
     if not file_handle.IsOpen():
-        msg.warning("Can't open file %s." % fileName)
+        msg.warning("Can't open file %s.", fileName)
         return 1
 
     if file_handle.IsZombie():
-        msg.warning("File %s is a zombie." % fileName)
-        file.Close()
+        msg.warning("File %s is a zombie.", fileName)
+        file_handle.Close()
         return 1
 
     if file_handle.TestBit(TFile.kRecovered):
-        msg.warning("File %s needed to be recovered." % fileName)
+        msg.warning("File %s needed to be recovered.", fileName)
         file_handle.Close()
         return 1
 
     if checkDirectory(file_handle, the_type, requireTree)==1:
-        msg.warning("File %s is corrupted." % fileName)
+        msg.warning("File %s is corrupted.", fileName)
         file_handle.Close()
         return 1
 
-    file_handle.Close();
-    msg.info("File %s looks ok." % fileName)
+    file_handle.Close()
+    msg.info("File %s looks ok.", fileName)
     return 0
 
 
@@ -187,10 +187,10 @@ def main(argv):
         return usage()
   
     rc=checkFile(fileName,the_type, requireTree)
-    msg.debug('Returning %s' % rc)
+    msg.debug('Returning %s', rc)
     
-    clock.Stop();
-    clock.Print();
+    clock.Stop()
+    clock.Print()
 
     return rc
 
diff --git a/Tools/PyJobTransforms/python/trfValidation.py b/Tools/PyJobTransforms/python/trfValidation.py
index a90de04f92a92673420f3a1137d5acba0e93dea9..7f89dbd4c80dcbbee33e0ca99f1fcc458172ee07 100644
--- a/Tools/PyJobTransforms/python/trfValidation.py
+++ b/Tools/PyJobTransforms/python/trfValidation.py
@@ -1,5 +1,4 @@
 from future.utils import iteritems
-from future.utils import listitems
 
 from past.builtins import basestring
 from builtins import zip
@@ -7,7 +6,7 @@ from builtins import object
 from builtins import range
 from builtins import int
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## @package PyJobTransforms.trfValidation
 #
@@ -43,17 +42,15 @@ import PyJobTransforms.trfUtils as trfUtils
 # @brief Check a Pool file for corruption, return N events or -1 if access problem, -2 if corruption
 def corruptionTestPool(filename, verbose=False):
     if not os.access(filename, os.R_OK):
-        msg.info("ERROR can't access file %s" % filename)
+        msg.info("ERROR can't access file %s", filename)
         return -1
 
     ROOT = RootUtils.import_root()
-    from ROOT import TFile, TTree
-    import cppyy
 
     try:
-        f = TFile.Open(filename)
-    except:
-        msg.info("Can't open file %s" % filename)
+        f = ROOT.TFile.Open(filename)
+    except Exception:
+        msg.info("Can't open file %s", filename)
         return -1
 
     nEvents = None
@@ -63,24 +60,24 @@ def corruptionTestPool(filename, verbose=False):
         try:
             tn = k.GetName()
             t = f.Get(tn)
-            if not isinstance(t, TTree): return
-        except:
-            msg.info("Can't get tree %s from file %s" % (tn, filename))
+            if not isinstance(t, ROOT.TTree): return
+        except Exception:
+            msg.info("Can't get tree %s from file %s", tn, filename)
             f.Close()
             return -1
 
-        if (verbose): msg.info("Working on tree %s" % tn)
+        if (verbose): msg.info("Working on tree %s", tn)
         n = t.GetEntriesFast()
         for i in range(n):
             s = t.GetEntry(i)
             if s <= 0:
-                msg.info("Tree %s: Found corruption in event %i" % (i, n))
+                msg.info("Tree %s: Found corruption in event %i", i, n)
                 f.Close()
                 return -2
             else:
                 if verbose and i > 0 and i % 100 == 0:
-                    msg.info("Checking event %s" % i)
-        msg.info("Tree %s: %i event(s) ok" % (tn, n))
+                    msg.info("Checking event %s", i)
+        msg.info("Tree %s: %i event(s) ok", tn, n)
 
         # Use CollectionTree determine the number of events
         if tn == 'CollectionTree':
@@ -88,9 +85,9 @@ def corruptionTestPool(filename, verbose=False):
         pass  # end of loop over trees
 
     f.Close()
-    msg.info("ROOT file %s looks ok" % filename)
+    msg.info("ROOT file %s looks ok", filename)
     if n is None:
-        msg.info("Failed to determine number of events in file %s. No tree named 'CollectionTree'" % filename)
+        msg.info("Failed to determine number of events in file %s. No tree named 'CollectionTree'", filename)
         return 0
     return nEvents
 
@@ -102,7 +99,7 @@ def corruptionTestBS(filename):
     while p.poll() is None:
         line = p.stdout.readline()
         if line:
-            msg.info("AtlListBSEvents Report: %s" % line.strip())
+            msg.info("AtlListBSEvents Report: %s", line.strip())
     rc = p.returncode
     return rc
 
@@ -154,7 +151,6 @@ class ignorePatterns(object):
                                 # Blank means match anything, so make it so...
                                 who = "."
                             reWho = re.compile(who)
-                            reLevel = level # level is not a regexp (for now)
                             reMessage = re.compile(message)
                         except ValueError:
                             msg.warning('Could not parse this line as a valid error pattern: {0}'.format(line))
@@ -358,7 +354,7 @@ class athenaLogFileReport(logFileReport):
                         seenNonStandardError = line
                         continue
 
-                    msg.debug('Non-standard line in %s: %s' % (log, line))
+                    msg.debug('Non-standard line in %s: %s', log, line)
                     self._levelCounter['UNKNOWN'] += 1
                     continue
 
@@ -488,7 +484,7 @@ class athenaLogFileReport(logFileReport):
 
                 #Lookup: 'EventID: [Run,Evt,Lumi,Time,BunchCross,DetMask] = [267599,7146597,1,1434123751:0,0,0x0,0x0,0x0]'
                 if 'EventID' in line:
-                    match = re.findall('\[.*?\]', line)
+                    match = re.findall(r'\[.*?\]', line)
                     if match and match.__len__() >= 2:      # Assuming the line contains at-least one key-value pair.
                         brackets = "[]"
                         commaDelimer = ','
@@ -573,7 +569,7 @@ class athenaLogFileReport(logFileReport):
     def g494ExceptionParser(self, lineGenerator, firstline, firstLineCount):
         g4Report = firstline
         g4lines = 1
-        if not 'Aborting execution' in g4Report:
+        if 'Aborting execution' not in g4Report:
             for line, linecounter in lineGenerator:
                 g4Report += os.linesep + line
                 g4lines += 1
@@ -755,10 +751,10 @@ def performStandardFileValidation(dictionary, io, parallelMode = False):
             if arg.auxiliaryFile:
                 continue
             
-            msg.info('Validating data type %s...' % key)
+            msg.info('Validating data type %s...', key)
     
             for fname in arg.value:
-                msg.info('Validating file %s...' % fname)
+                msg.info('Validating file %s...', fname)
     
                 if io == "output":
                     msg.info('{0}: Testing corruption...'.format(fname))
@@ -792,7 +788,7 @@ def performStandardFileValidation(dictionary, io, parallelMode = False):
                 elif arg.getSingleMetadata(fname, 'file_guid') == 'UNDEFINED':
                     msg.info('Guid not defined.')
                 else:
-                    msg.info('Guid is %s' % arg.getSingleMetadata(fname, 'file_guid'))
+                    msg.info('Guid is %s', arg.getSingleMetadata(fname, 'file_guid'))
         msg.info('Stopping legacy (serial) file validation')
     if parallelMode is True:
         msg.info('Starting parallel file validation')
diff --git a/Tools/PyJobTransforms/scripts/Archive_tf.py b/Tools/PyJobTransforms/scripts/Archive_tf.py
index 83753708fc520e3bfd8145a1f736edd4453ad54d..a3e3e0e3a177a91f05098329ce3a5744679a7562 100755
--- a/Tools/PyJobTransforms/scripts/Archive_tf.py
+++ b/Tools/PyJobTransforms/scripts/Archive_tf.py
@@ -1,19 +1,16 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## Archiving transform which will tar or zip input files to an output file
 # @version $Id: Archive_tf.py 659213 2015-04-07 13:20:39Z graemes $ 
 
-import os.path
 import sys
 import time
 
-import logging
-
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
 from PyJobTransforms.trfExe import archiveExecutor
@@ -25,14 +22,14 @@ import PyJobTransforms.trfArgClasses as trfArgClasses
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
 
     trf = getTransform() 
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 def getTransform():
diff --git a/Tools/PyJobTransforms/scripts/Athena_tf.py b/Tools/PyJobTransforms/scripts/Athena_tf.py
index db1d17334e71634bd5a461d435ec9814e87a8f82..d0644198467462e0edcb72a8671e6584a228072f 100755
--- a/Tools/PyJobTransforms/scripts/Athena_tf.py
+++ b/Tools/PyJobTransforms/scripts/Athena_tf.py
@@ -1,43 +1,36 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## A simple athena transform. 
 # @version $Id: Athena_tf.py 557865 2013-08-12 21:54:36Z graemes $ 
 
-import argparse
-import os.path
 import sys
 import time
-import traceback
-
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.transform import transform
 from PyJobTransforms.trfExe import athenaExecutor
 from PyJobTransforms.trfArgs import addAthenaArguments, addDetectorArguments
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
-import PyJobTransforms.trfExceptions as trfExceptions
 import PyJobTransforms.trfArgClasses as trfArgClasses
 
 @stdTrfExceptionHandler
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
     
     trf = getTransform()    
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 ## Get the base transform with all arguments added
diff --git a/Tools/PyJobTransforms/scripts/Cat_tf.py b/Tools/PyJobTransforms/scripts/Cat_tf.py
index 229acbf4a6e81733d79e1d479e993a4f81733796..d1be6a9c43744e03cc4477156e5271e9c905a79c 100755
--- a/Tools/PyJobTransforms/scripts/Cat_tf.py
+++ b/Tools/PyJobTransforms/scripts/Cat_tf.py
@@ -1,43 +1,34 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## A simple 'cat' transform which just cats some input files
 # $Id: Cat_tf.py 529035 2012-12-05 15:45:24Z graemes $
 
-import argparse
-import os
-import os.path
 import sys
 import time
-import traceback
-
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfExe import scriptExecutor
-import PyJobTransforms.trfArgs as trfArgs
 import PyJobTransforms.trfArgClasses as trfArgClasses
-import PyJobTransforms.trfExceptions as trfExceptions
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
 @stdTrfExceptionHandler
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
         
     trf = getTransform()
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 def getTransform():
diff --git a/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py b/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py
index 20154437b3771d839b95addec3c8d208157b2aa0..f2d58784ad9f38a38232743bc3aec1bc856629e9 100755
--- a/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py
+++ b/Tools/PyJobTransforms/scripts/EVNTMerge_tf.py
@@ -1,6 +1,6 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## AODMerge_tf.py - AOD merger with optional TAG and DPD outputs
 #  N.B. Do need clarification as to if AODtoDPD is ever run in parallel with AOD merging 
@@ -10,11 +10,9 @@ from __future__ import print_function
 import sys
 import time
 
-import logging
-
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
 from PyJobTransforms.trfExe import athenaExecutor
@@ -27,14 +25,14 @@ import PyJobTransforms.trfArgClasses as trfArgClasses
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
 
     trf = getTransform()
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 def getTransform():
diff --git a/Tools/PyJobTransforms/scripts/Echo_tf.py b/Tools/PyJobTransforms/scripts/Echo_tf.py
index 3a11eff64dfc20199aa213f53e66c2ceeacbae12..5114a18a50049c4b2e4f443107e7d90e7dc087c1 100755
--- a/Tools/PyJobTransforms/scripts/Echo_tf.py
+++ b/Tools/PyJobTransforms/scripts/Echo_tf.py
@@ -1,31 +1,23 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## A simple 'echo' transform which merely prints its arguments and exits
 # $Id: Echo_tf.py 532364 2013-01-09 15:51:55Z graemes $
 
-import argparse
-import os
-import os.path
 import sys
 import time
-import traceback
-
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfExe import echoExecutor
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
 import PyJobTransforms.trfArgs as trfArgs
 import PyJobTransforms.trfArgClasses as trfArgClasses
-import PyJobTransforms.trfExceptions as trfExceptions
 
 # Always embed your transform inside a top level exception
 # handler. This ensures that uncaught exceptions are handled
@@ -36,7 +28,7 @@ import PyJobTransforms.trfExceptions as trfExceptions
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
         
     trf = getTransform()
 
@@ -44,7 +36,7 @@ def main():
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, transform exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, transform exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 def getTransform():
diff --git a/Tools/PyJobTransforms/scripts/ExeWrap_tf.py b/Tools/PyJobTransforms/scripts/ExeWrap_tf.py
index fa67b30d03572323c6795ebffa6860b7a7a2a715..ad5daf27e98615707a084a01a95513a79654c4e2 100755
--- a/Tools/PyJobTransforms/scripts/ExeWrap_tf.py
+++ b/Tools/PyJobTransforms/scripts/ExeWrap_tf.py
@@ -6,32 +6,23 @@
 #  Mainly used to test core infrastructure
 # $Id: ExeWrap_tf.py 634752 2014-12-09 15:01:52Z graemes $
 
-import argparse
-import os
-import os.path
 import sys
 import time
-import traceback
-
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfExe import scriptExecutor
-import PyJobTransforms.trfArgs as trfArgs
 import PyJobTransforms.trfArgClasses as trfArgClasses
-import PyJobTransforms.trfExceptions as trfExceptions
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
 @stdTrfExceptionHandler
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
         
     trf = getTransform()
     trf.parseCmdLineArgs(sys.argv[1:])
@@ -44,7 +35,7 @@ def main():
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", (sys.argv[0], time.asctime(), trf.exitCode))
     sys.exit(trf.exitCode)
 
 def getTransform():
diff --git a/Tools/PyJobTransforms/scripts/GetTfCommand.py b/Tools/PyJobTransforms/scripts/GetTfCommand.py
index 4f4e0690af82ff3730c280622570ed67388f376e..0063ae619430cef1728c00b7b24482241b48a32f 100755
--- a/Tools/PyJobTransforms/scripts/GetTfCommand.py
+++ b/Tools/PyJobTransforms/scripts/GetTfCommand.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## GetTfCommand.py - prints the job transform command accociated with an AMI tag.
 # $Id$
@@ -10,8 +10,8 @@ import sys
 import argparse
 
 from PyJobTransforms.trfLogger import msg, stdLogLevels
-if not '--printOnlyCmdLine' in sys.argv:
-    msg.info('logging set in %s' % sys.argv[0])
+if '--printOnlyCmdLine' not in sys.argv:
+    msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.trfAMI import TagInfo
 from PyJobTransforms.trfExceptions import TransformAMIException
@@ -40,7 +40,7 @@ def main():
         print('Note that you need both suitable credentials to access AMI and access to the panda database (only works from inside CERN) for GetTfCommand.py to work.')
         sys.exit(1)
 
-    if not 'printOnlyCmdLine' in args:
+    if 'printOnlyCmdLine' not in args:
         print(tag)
 
         if 'argdict' in args:
diff --git a/Tools/PyJobTransforms/scripts/HelloWorld_tf.py b/Tools/PyJobTransforms/scripts/HelloWorld_tf.py
index 5bf035d1b3f994328eb871f3b10a383590313f02..caa2a6afff4b2c79c6b9fd18909a836f1d23ba83 100755
--- a/Tools/PyJobTransforms/scripts/HelloWorld_tf.py
+++ b/Tools/PyJobTransforms/scripts/HelloWorld_tf.py
@@ -4,39 +4,30 @@
 
 ## A simple tranform running HelloWorld 
 
-import argparse
-import os.path
 import sys
 import time
-import traceback
-
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.transform import transform
 from PyJobTransforms.trfExe import athenaExecutor
 from PyJobTransforms.trfArgs import addAthenaArguments, addDetectorArguments
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
-import PyJobTransforms.trfExceptions as trfExceptions
-import PyJobTransforms.trfArgClasses as trfArgClasses
-
 @stdTrfExceptionHandler
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
     
     trf = getTransform()    
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 ## Get the base transform with all arguments added
diff --git a/Tools/PyJobTransforms/scripts/Merge_tf.py b/Tools/PyJobTransforms/scripts/Merge_tf.py
index f93a3ad79ce89aa7e40afd25d8890edfde59f96e..38e180005331be10aaef4073b3dbe2d3293e927b 100755
--- a/Tools/PyJobTransforms/scripts/Merge_tf.py
+++ b/Tools/PyJobTransforms/scripts/Merge_tf.py
@@ -1,18 +1,15 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## Merge_tf.py - Transform for merging any data type
 
-import os.path
 import sys
 import time
 
-import logging
-
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
 from PyJobTransforms.trfExe import athenaExecutor, hybridPOOLMergeExecutor
@@ -22,7 +19,6 @@ from RecJobTransforms.recTransformUtils import addCommonRecTrfArgs
 from PyJobTransforms.trfExe import DQMergeExecutor
 from PyJobTransforms.trfExe import tagMergeExecutor
 from PyJobTransforms.trfExe import bsMergeExecutor
-from PyJobTransforms.trfExe import NTUPMergeExecutor
 from PyJobTransforms.trfArgs import addD3PDArguments, addExtraDPDTypes
 from PATJobTransforms.PATTransformUtils import addNTUPMergeSubsteps, addPhysValidationMergeFiles
 from PATJobTransforms.PATTransformUtils import addDAODArguments, addDAODMergerSubsteps
@@ -33,14 +29,14 @@ import PyJobTransforms.trfArgClasses as trfArgClasses
 @sigUsrStackTrace
 def main():
 
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
 
     trf = getTransform()
     trf.parseCmdLineArgs(sys.argv[1:])
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", sys.argv[0], time.asctime(), trf.exitCode)
     sys.exit(trf.exitCode)
 
 def getTransform():
@@ -75,7 +71,6 @@ def getTransform():
 
     # Add HITSMerge only if SimuJobTransforms is available
     try:
-        from SimuJobTransforms.simTrfArgs import addForwardDetTrfArgs
         from SimuJobTransforms.SimTransformUtils import addHITSMergeArguments
         addHITSMergeArguments(trf.parser)
         simStepSet = set()
diff --git a/Tools/PyJobTransforms/scripts/Sleep_tf.py b/Tools/PyJobTransforms/scripts/Sleep_tf.py
index f0d8d15cf673caacd8c12fb61fbea7a33dc624a8..e90c2ffa0a4cb853c9c86b3d2de1e172884b6ab1 100755
--- a/Tools/PyJobTransforms/scripts/Sleep_tf.py
+++ b/Tools/PyJobTransforms/scripts/Sleep_tf.py
@@ -1,37 +1,29 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## A simple 'sleep' transform which just sleeps
 # Useful for testing behaviour with signals and stuff
 # $Id: Sleep_tf.py 534178 2013-01-21 19:04:08Z graemes $
 
-import argparse
-import os
-import os.path
 import sys
 import time
-import traceback
 
-import logging
 
 # Setup core logging here
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfExe import scriptExecutor
-import PyJobTransforms.trfArgs as trfArgs
 import PyJobTransforms.trfArgClasses as trfArgClasses
-import PyJobTransforms.trfExceptions as trfExceptions
 from PyJobTransforms.trfDecorators import stdTrfExceptionHandler, sigUsrStackTrace
 
 @stdTrfExceptionHandler
 @sigUsrStackTrace
 def main():
     
-    msg.info('This is %s' % sys.argv[0])
+    msg.info('This is %s', sys.argv[0])
         
 
     trf = getTransform()
@@ -39,7 +31,7 @@ def main():
     trf.execute()
     trf.generateReport()
 
-    msg.info("%s stopped at %s, trf exit code %d" % (sys.argv[0], time.asctime(), trf.exitCode))
+    msg.info("%s stopped at %s, trf exit code %d", (sys.argv[0], time.asctime(), trf.exitCode))
     sys.exit(trf.exitCode)
 
 
diff --git a/Tools/PyJobTransforms/scripts/ValidateFiles_tf.py b/Tools/PyJobTransforms/scripts/ValidateFiles_tf.py
index 9362052e0b16b0631cf3902125fa0ac7c7f284ef..d28d49f0b462aaec17b7e46717c314857fcddb08 100755
--- a/Tools/PyJobTransforms/scripts/ValidateFiles_tf.py
+++ b/Tools/PyJobTransforms/scripts/ValidateFiles_tf.py
@@ -1,23 +1,15 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ## A transform just getting some default file arguments (only to test file validation )
 
-import argparse
-import os
-import os.path
 import sys
-import time
-import traceback
-
-import logging
 
 from PyJobTransforms.trfLogger import msg
-msg.info('logging set in %s' % sys.argv[0])
+msg.info('logging set in %s', sys.argv[0])
 
 from PyJobTransforms.transform import transform
-from PyJobTransforms.trfExitCodes import trfExit
 from PyJobTransforms.trfExe import logscanExecutor
 import PyJobTransforms.trfArgs as trfArgs
 import PyJobTransforms.trfArgClasses as trfArgClasses
diff --git a/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py b/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py
index c95495ba6723c7ee03610ed80c019b70d5fb39a9..7f5f5461636d735acc17d2adb73fae24b8ee609a 100755
--- a/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py
+++ b/Tools/PyJobTransforms/scripts/makeTrfJSONSignatures.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 ##############################################################################
 
@@ -150,7 +150,7 @@ def main():
     for transform_path in transforms_path_list:
         ######################################################################
 
-        if transform_path.endswith('_tf.py') == False:
+        if not transform_path.endswith('_tf.py'):
             continue
 
         ######################################################################
diff --git a/Tools/PyJobTransforms/scripts/makeTrfSignatures.py b/Tools/PyJobTransforms/scripts/makeTrfSignatures.py
index ae57d80cbcbb503b03de0f7fed8a1f0160e5dc9e..2ce23e42503c13b2a6cb188e85d57d6986579526 100755
--- a/Tools/PyJobTransforms/scripts/makeTrfSignatures.py
+++ b/Tools/PyJobTransforms/scripts/makeTrfSignatures.py
@@ -1,6 +1,6 @@
 #! /usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 #
 # $Id: makeTrfSignatures.py 630078 2014-11-21 11:20:16Z graemes $
 #
@@ -53,7 +53,7 @@ def main():
         transform = trfModule.getTransform()
         args = transform.parser.allArgs
 
-        logging.debug('Trf %s: %s' % (trf, args))
+        logging.debug('Trf %s: %s', trf, args)
         processedTrfs.append(trf)
         myTrfSigs[trf] = args
         myTrfSigDesc[trf] = transform.parser.getProdsysDesc
@@ -62,10 +62,10 @@ def main():
         sigFile = open(cliargs['output'], 'wb')
         json.dump(myTrfSigDesc, sigFile, indent=4)
     except (OSError, IOError) as e:
-        logging.error('Failed to dump pickled signatures to %s: %s' % (cliargs['output'], e))
+        logging.error('Failed to dump pickled signatures to %s: %s', cliargs['output'], e)
         sys.exit(1)
         
-    logging.info('Successfully generated signature file "%s" for transforms %s' % (cliargs['output'], processedTrfs))
+    logging.info('Successfully generated signature file "%s" for transforms %s', cliargs['output'], processedTrfs)
     sys.exit(0)
 
 if __name__ == '__main__':