diff --git a/Control/PerformanceMonitoring/PerfMonComps/CMakeLists.txt b/Control/PerformanceMonitoring/PerfMonComps/CMakeLists.txt
index e2512aedb0f2e8677ac158ab46ebc8e22920425f..7e335590382b9b60a73475aa6421e1a6d38e7565 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/CMakeLists.txt
+++ b/Control/PerformanceMonitoring/PerfMonComps/CMakeLists.txt
@@ -23,5 +23,5 @@ atlas_add_component( PerfMonComps
    AthDSoCallBacks nlohmann_json::nlohmann_json)
 
 # Install files from the package:
-atlas_install_python_modules( python/*.py )
-atlas_install_joboptions( share/*.py )
+atlas_install_python_modules( python/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
+atlas_install_joboptions( share/*.py POST_BUILD_CMD ${ATLAS_FLAKE8} )
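
The two install rules above attach a flake8 check that runs once the build
step has copied the files. A rough Python equivalent of what such a
post-build hook amounts to (sketch only, assuming flake8 is installed; the
directory names mirror the package layout):

    # Sketch: run flake8 over the package's python/ and share/ sources and
    # fail on any finding, as POST_BUILD_CMD ${ATLAS_FLAKE8} is meant to do.
    import subprocess
    import sys

    def check_sources(*dirs):
        """Return True if flake8 reports no findings for the given dirs."""
        result = subprocess.run([sys.executable, "-m", "flake8", *dirs])
        return result.returncode == 0

    if __name__ == "__main__":
        sys.exit(0 if check_sources("python", "share") else 1)
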
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/DomainsRegistry.py b/Control/PerformanceMonitoring/PerfMonComps/python/DomainsRegistry.py
index ad3867be4e2205256f7770d38541d23299d3985a..57deaf97bb05f7d02dad2a29f3aa9fe8764494f7 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/DomainsRegistry.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/DomainsRegistry.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file PerfMonComps/python/DomainsRegistry.py
 # @purpose hold a registry of alg names and their association w/ domain
@@ -158,8 +158,6 @@ class Registry(object):
         """
         if registry is None:
             registry=self._registry
-        start_alg = None
-        idx = None
         for ielmt, elmt in enumerate(registry):
             if elmt[0] == name:
                 return ielmt, elmt[1]
@@ -328,7 +326,7 @@ class Registry(object):
         if not self._dirty_db:
             return dict(self._d2a_db)
         # side-effect of calling self.algs: will build  self._d2a_db
-        a2d = self.algs
+        a2d = self.algs  # noqa: F841
         return dict(self._d2a_db)
 
     @property
@@ -443,9 +441,6 @@ def _test_main():
         print("    ref: ",ref[d])
         assert algs == ref[d]
 
-    db = pdr.a2d_db()
-    db = pdr.d2a_db()
-    
     print("OK")
     return 0
 
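
Two kinds of flake8 F841 ("local variable is assigned to but never used")
fixes appear above: genuinely dead assignments are deleted outright, while
'a2d = self.algs' is kept with a '# noqa: F841' because the property is
evaluated purely for its side effect of rebuilding self._d2a_db. A minimal
illustration of that pattern (invented class, not ATLAS code):

    class Cache:
        def __init__(self):
            self._db = None

        @property
        def algs(self):
            # building the list caches the mapping as a side effect
            self._db = {"core": ["AlgA", "AlgB"]}
            return [a for algs in self._db.values() for a in algs]

        def d2a_db(self):
            a2d = self.algs  # noqa: F841 -- wanted only for its side effect
            return dict(self._db)

    print(Cache().d2a_db())
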
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/JobOptCfg.py b/Control/PerformanceMonitoring/PerfMonComps/python/JobOptCfg.py
index 639ead38e6495143ce8af05700b4d71258c157e4..bd31661d30beef3f6c8a67591cdc33305ce8e699 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/JobOptCfg.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/JobOptCfg.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file: JobOptCfg.py
 # @purpose: a customized Configurable class for the PerfMonSvc
@@ -145,11 +145,11 @@ class PerfMonSvc( _PerfMonSvc ):
                     ioLabels = [ "streamRDO","streamESD",
                                  "streamAOD","streamTAG",
                                  "inputBackNav","inputFile" ]
-                    for l in ioLabels:
+                    for z in ioLabels:
                         try:
-                            ioContainers.extend(keystore[l].list())
+                            ioContainers.extend(keystore[z].list())
                         except AttributeError:
-                            for k,v in keystore[l].items():
+                            for k,v in keystore[z].items():
                                 ioContainers += [ "%s#%s" % (k,c) for c in v ]
                     pass
                 ## collect everything
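
The l -> z renames in this file (and several below) address flake8's E741
check, which flags 'l' as an ambiguous name easily confused with '1' or
'I'. Any unambiguous name satisfies the check; a descriptive one would
read better still, e.g. (illustrative values):

    io_labels = ["streamRDO", "streamESD", "streamAOD", "streamTAG"]
    for label in io_labels:   # clearer than 'for z in io_labels'
        print(label)
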
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/MTJobOptCfg.py b/Control/PerformanceMonitoring/PerfMonComps/python/MTJobOptCfg.py
index a366480824bf6c0be85767e778913f24be9a6de6..3562eb45f3c73b1809f8c74d1092f1ecea522e92 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/MTJobOptCfg.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/MTJobOptCfg.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
  
 # Job options configuration file for PerfMonMTSvc
 
@@ -26,8 +26,6 @@ class PerfMonMTSvc ( _PerfMonMTSvc  ):
         if not isinstance(handle, PerfMonMTSvc):
             return
 
-        from AthenaCommon import CfgMgr
-
         ## Enable the auditors
         from AthenaCommon.AppMgr import theApp
         theApp.AuditAlgorithms = True
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PMonSD.py b/Control/PerformanceMonitoring/PerfMonComps/python/PMonSD.py
index ec3404dbe2d75206484881a6f67429823d5fbb22..cd47b4052bcaaeb029913c434e6eca9b399ffbd2 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PMonSD.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PMonSD.py
@@ -1,5 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
-from __future__ import print_function
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 __doc__   ='Module for parsing and basic analysis of Semi-Detailed PerfMon (PMonSD) output. More info at https://twiki.cern.ch/twiki/bin/viewauth/Atlas/PerfMonSD'
 __author__='Thomas Kittelmann <thomas.kittelmann@cern.ch>'
@@ -15,7 +14,8 @@ def pmonsd_version():
 def parse(infile,outfile=None):
     """Parse PMonSD output and return list of dictionaries. Optionally save output in pickle file."""
     p=__smart_parse(infile)
-    if p==None: return None
+    if p is None:
+        return None
     if outfile:
         __save_output(p,outfile,'.psd',infile)
     return p
@@ -25,14 +25,15 @@ def deparse(infile):
     identical to the ones it was parsed from"""
     out=[]
     p=__smart_parse(infile)
-    if p==None: return []
+    if p is None:
+        return []
     for e in p:
         out+=__deparse_single(e)
     return out
 
-def need_line(l):
+def need_line(z):
     """To identify lines which PMonSD needs for parsing"""
-    return l.startswith(_appname)
+    return z.startswith(_appname)
 
 def print_ascii(infile):
     """Print parsed PMonSD info to stdout"""
@@ -52,13 +53,16 @@ def _validate_identical(infile1,infile2):
     #For validation
     p1=__smart_parse(infile1)
     p2=__smart_parse(infile2)
-    if p1==None or p2==None: return False
+    if p1 is None or p2 is None:
+        return False
     return p1==p2
 
 def force_share(obj):
     """Dive into lists and dictionaries and make sure strings with similar content gets shared"""
-    if type(obj)==list: __fs_list(obj)
-    elif type(obj)==dict: __fs_dict(obj)
+    if type(obj)==list:
+        __fs_list(obj)
+    elif type(obj)==dict:
+        __fs_dict(obj)
 
 def get_shared_string(s): return __get_shared_string(s)
 
@@ -94,7 +98,7 @@ def __save_output(data,outfile,prefix,infile=None):
         fh=gzip.open(outfile,'w')
     else:
         fh=open(outfile,'w')
-    if infile!=None and outfile==infile:
+    if infile is not None and outfile==infile:
         print("%s.parse WARNING: output file %s equals input file. Won't dump."%(_appname,outfile))
     else:
         import cPickle
@@ -146,8 +150,10 @@ def __smart_parse(infile):
     else:
         #in case this is already parsed info, make sure we just return it as it is:
         if type(infile)==list:
-            if len(infile)==0: return infile
-            if type(infile[0])==dict and 'steps_comps' in infile[0].keys(): return infile
+            if len(infile)==0:
+                return infile
+            if type(infile[0])==dict and 'steps_comps' in infile[0].keys():
+                return infile
         #Hopefully this is something we can iterate through (like a list of strings or a file-handle):
         return __actual_parse(infile)
 
@@ -168,13 +174,13 @@ def __actual_parse(filehandle):
         return (float(v),int(i))
     d=new_dict()
     stepcount={}#for keeping track of in what order within each step a component is listed
-    for l in filehandle:
-        if not l.startswith(_prefix):
+    for z in filehandle:
+        if not z.startswith(_prefix):
             continue
         #ensure the first thing we pick up is the version:
-        if version==None:
-            if intro_version in l:
-                vstr=l.split(intro_version)[1].split()[0]
+        if version is None:
+            if intro_version in z:
+                vstr=z.split(intro_version)[1].split()[0]
                 full_info=vstr[-1]=='f'
                 v_major,v_minor=vstr[:-1].split('.')
                 version=(int(v_major),int(v_minor))
@@ -185,22 +191,23 @@ def __actual_parse(filehandle):
                     print("WARNING: Using PMonSD of version %f to parse output made with version %f"%(pmonsd_version(),version))
             continue
         #remove prefix:
-        l=l[len(_prefix):].strip()
-        if l.startswith('WARNING'): continue
-        if l.startswith('=='):
+        z=z[len(_prefix):].strip()
+        if z.startswith('WARNING'):
+            continue
+        if z.startswith('=='):
             #This is a comment/separator. Look for end marker:
-            if end_marker in l:
+            if end_marker in z:
                 #found. Grab parsed info and make room for more (in case of concatenated logs)
                 output+=[d]
                 d=new_dict()
                 version=None#reset
-            elif 'Full output inside:' in l:
-                filename=l.split('Full output inside:')[1].split('===')[0].strip()
+            elif 'Full output inside:' in z:
+                filename=z.split('Full output inside:')[1].split('===')[0].strip()
                 d['fulloutput_file']=filename
             continue
-        if not l.startswith('['):
+        if not z.startswith('['):
             continue#ignore column headers
-        f=l.split()
+        f=z.split()
         if f[0]=='[---]' and '=' in f[1]:
             for valfield in f[1:]:
                 n,vstr=valfield.split('=',1)
@@ -226,15 +233,17 @@ def __actual_parse(filehandle):
                 d['special']['snapshots'][comp]={'n':n,'cpu':float(f[0]),'wall':float(f[1]),
                                                  'vmem':float(f[2]),'malloc':float(f[3])}
         else:
-            if not step in d['steps_comps'].keys():
+            if step not in d['steps_comps'].keys():
                 d['steps_comps'][step]={}
                 d['steps_totals'][step]={}
                 stepcount[step]=0
             iorder=stepcount[step]
             stepcount[step]+=1
                 #workaround situation where two collapsed or total lines have same form (nentries is always different):
-            if is_collapsed and comp in d['steps_comps'][step].keys(): comp+=':n=%i'%n
-            if is_total and comp in d['steps_totals'][step].keys(): comp+=':n=%i'%n
+            if is_collapsed and comp in d['steps_comps'][step].keys():
+                comp+=':n=%i'%n
+            if is_total and comp in d['steps_totals'][step].keys():
+                comp+=':n=%i'%n
             if len(f)==6:
                 #has max@evt info
                 d['steps_comps'][step][comp]={'order':iorder,'n':n,'cpu':float(f[0]),'vmem':float(f[2]),'malloc':float(f[4])}
@@ -242,8 +251,10 @@ def __actual_parse(filehandle):
             else:
                 #doesn't have max@evt info (step!='evt' or 'evt' but collapsed or total)
                 nfo={'order':iorder,'n':n,'cpu':float(f[0]),'vmem':float(f[1]),'malloc':float(f[2])}
-                if is_total: d['steps_totals'][step][comp]=nfo
-                else: d['steps_comps'][step][comp]=nfo
+                if is_total:
+                    d['steps_totals'][step][comp]=nfo
+                else:
+                    d['steps_comps'][step][comp]=nfo
     force_share(output)#make sure we register shared strings
     return output
 
@@ -251,14 +262,18 @@ def __deparse_single(d):
     _prefix=_appname+' '
     out=[]
     assert type(d)==dict
-    def header(l,s,center=True):
-        if center: s=(' %s '%s).center(82,'=')
-        else: s=(' %s '%s).ljust(82,'=')
-        l+=[ _prefix+'==='+s+'===']
+    def header(z,s,center=True):
+        if center:
+            s=(' %s '%s).center(82,'=')
+        else:
+            s=(' %s '%s).ljust(82,'=')
+        z+=[ _prefix+'==='+s+'===']
         
     full_info=d['full_info']
-    if full_info: fullstr='f'
-    else: fullstr='c'
+    if full_info:
+        fullstr='f'
+    else:
+        fullstr='c'
     header(out,'semi-detailed perfmon info v%i.%i%s / start'%(d['version'][0],d['version'][1],fullstr))
     header(out,'Documentation: https://twiki.cern.ch/twiki/bin/viewauth/Atlas/PerfMonSD',center=False)
     header(out,'Note that documentation includes recipe for easy parsing from python.  ',center=False)
@@ -268,7 +283,7 @@ def __deparse_single(d):
     stdsteps=['ini','1st','cbk','evt','fin']
     steps=[]
     for step in d['steps_comps'].keys():
-        if not step in stdsteps and not step in steps:
+        if step not in stdsteps and step not in steps:
             steps+=[step]
     steps.sort()
     steps=stdsteps+steps
@@ -284,20 +299,25 @@ def __deparse_single(d):
         is_evt=step=='evt'
         header(out,'step %s'%step)
         entries=[]
-        if not step in d['steps_comps'].keys(): continue
+        if step not in d['steps_comps'].keys():
+            continue
         for comp,compdata in d['steps_comps'][step].items():
-            if '_comps]:n=' in comp: comp=comp.split('_comps]:n=')[0]+'_comps]'
+            if '_comps]:n=' in comp:
+                comp=comp.split('_comps]:n=')[0]+'_comps]'
             if is_evt and comp in d['evt_max_info'].keys():
                 s=format_evt_withmax%(compdata['n'],compdata['cpu'],format_max(d['evt_max_info'][comp]['cpu']),
                                                  compdata['vmem'],format_max(d['evt_max_info'][comp]['vmem']),
                                                  compdata['malloc'],format_max(d['evt_max_info'][comp]['malloc']),comp)
             else:
-                if is_evt: format=format_evt_nomax
-                else: format=format_notevt
+                if is_evt:
+                    format=format_evt_nomax
+                else:
+                    format=format_notevt
                 s=format%(compdata['n'],compdata['cpu'],compdata['vmem'],compdata['malloc'],comp)
             entries+=[(compdata['order'],comp,s)]
         for comp,compdata in d['steps_totals'][step].items():
-            if '_comps]:n=' in comp: comp=comp.split('_comps]:n=')[0]+'_comps]'
+            if '_comps]:n=' in comp:
+                comp=comp.split('_comps]:n=')[0]+'_comps]'
             format='%4i %6i %7i %7i %s'
             if is_evt:
                 format='%4i %6i            %7i            %7i             %s'
@@ -305,8 +325,10 @@ def __deparse_single(d):
             entries+=[(compdata['order'],comp,s)]
         if entries:
             entries.sort()
-            if is_evt: out+=[ _prefix+' '*len(step)+colheader_evt]
-            else: out+=[ _prefix+' '*len(step)+colheader_std]
+            if is_evt:
+                out+=[ _prefix+' '*len(step)+colheader_evt]
+            else:
+                out+=[ _prefix+' '*len(step)+colheader_std]
             for _,_,s in entries:
                 out+=[ '%s[%s] %s'%(_prefix,step,s)]
     header(out,'special info')
@@ -323,8 +345,6 @@ def __deparse_single(d):
     for leak in leaks:
         dl=d['special']['leaks'][leak]
         out+=[ '%s[---] %4i        -        - %8i %8i %s'%(_prefix,dl['n'],dl['vmem'],dl['malloc'],leak)]
-    specialvals=d['special']['values'].keys()
-    svs=[]
     order=[['vmem_peak','vmem_mean','rss_mean'],
            ['jobcfg_walltime','jobstart'],
            ['cpu_bmips','cpu_res','release'],
@@ -335,8 +355,10 @@ def __deparse_single(d):
         lineformat=[]
         for sv in lineorder:
             v=d['special']['values'][sv]
-            if type(v)==float: v_str='%i'%v
-            else: v_str=v
+            if type(v)==float:
+                v_str='%i'%v
+            else:
+                v_str=v
             lineformat+=['%s=%s'%(sv,v_str)]
         out+=['%s[---] %s'%(_prefix,' '.join(lineformat))]
     header(out,'semi-detailed perfmon info / end')
@@ -352,17 +374,18 @@ def _validate_deparsing(f):
         fh=gzip_fastopen(f)
     else:
         fh=open(f)
-    for l in fh:
-        if l.startswith(_prefix):
-            if l.startswith(_prefix+'WARNING'):
+    for z in fh:
+        if z.startswith(_prefix):
+            if z.startswith(_prefix+'WARNING'):
                 continue
-            if l.endswith('\n'): l=l[0:-1]
-            lines+=[l]
+            if z.endswith('\n'):
+                z=z[0:-1]
+            lines+=[z]
     if len(lines)==0:
         print("File does not have %s lines!"%_appname)
         return False
     d=__smart_parse(lines)
-    if d==None:
+    if d is None:
         return False
     lines2=deparse(d)
     if len(lines)!=len(lines2):
@@ -388,7 +411,8 @@ def _validate_deparsing(f):
 def __actual_diff(infile1,infile2):
     d1=__smart_parse(infile1)
     d2=__smart_parse(infile2)
-    if d1==None or d2==None: return False
+    if d1 is None or d2 is None:
+        return False
     #Gymnastics to accept separate types:
     if type(d1)==list and type(d2)==list:
         if len(d1)!=len(d2):
@@ -422,13 +446,15 @@ def __actual_diff(infile1,infile2):
         anycollapsed=False
         for comp,data in compdata.items():
             n=data['n']
-            if not n in nentries2ncomps.keys(): nentries2ncomps[n]=0
+            if n not in nentries2ncomps.keys():
+                nentries2ncomps[n]=0
             if comp.startswith('[collapsed_'):
                 anycollapsed=True
-                nc=int(comp.split('_')[1])
+                #nc=int(comp.split('_')[1])
             else:
-                nc=1
-            nentries2ncomps[n]+=1
+                pass
+                #nc=1
+            nentries2ncomps[n]+=1  # FIXME: should this have been += nc (the collapsed-component count)?
         return nentries2ncomps,anycollapsed
 
 
@@ -455,7 +481,7 @@ def __actual_diff(infile1,infile2):
         if not anycollapsed1 and not anycollapsed2:
             #awesome, we can check all comps completely before vs. after
             for comp,compdata in d1['steps_comps'][step].items():
-                if not comp in d2['steps_comps'][step].keys():
+                if comp not in d2['steps_comps'][step].keys():
                     print("Difference: Component %s only present in one input in step %s"%(comp,step))
                     return False
                 check+=[(comp,compdata,d2['steps_comps'][step][comp])]
@@ -477,14 +503,17 @@ def __get_shared_string(s):
     global __allstrings
     return __allstrings.setdefault(s,s)
 
-def __fs_list(l):
-    i=len(l)
+def __fs_list(z):
+    i=len(z)
     while i:
         i-=1
-        t=type(l[i])
-        if t==str: l[i]=__get_shared_string(l[i])
-        elif t==list: __fs_list(l[i])
-        elif t==dict: __fs_dict(l[i])
+        t=type(z[i])
+        if t==str:
+            z[i]=__get_shared_string(z[i])
+        elif t==list:
+            __fs_list(z[i])
+        elif t==dict:
+            __fs_dict(z[i])
 
 def __fs_dict(d):
     keys=d.keys()
@@ -492,7 +521,10 @@ def __fs_dict(d):
         o=d[k]
         del d[k]
         t=type(o)
-        if t==str: o=__get_shared_string(o)
-        elif t==list: __fs_list(o)
-        elif t==dict: __fs_dict(o)
+        if t==str:
+            o=__get_shared_string(o)
+        elif t==list:
+            __fs_list(o)
+        elif t==dict:
+            __fs_dict(o)
         d[__get_shared_string(k)]=o
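
The repeated '== None' -> 'is None' rewrites in this file fix flake8's
E711: equality goes through __eq__, which a class may override, whereas
'is' tests identity and cannot be fooled. A short demonstration with an
invented class:

    class AlwaysEqual:
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    print(obj == None)   # True, misleading (and flagged as E711)
    print(obj is None)   # False, the intended test
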
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonFlags.py b/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonFlags.py
index 565bb21ad05de13700d44ffabe18b094df03554a..150ecf02ad8d4192369a5c2d8c44233abd49f458 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonFlags.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonFlags.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file: PerfMonFlags.py
 # @purpose: a container of flags for Performance Monitoring
@@ -58,8 +58,6 @@ class doPersistencyMonitoring(JobProperty):
         if not jobproperties.PerfMonFlags.doMonitoring():
             jobproperties.PerfMonFlags.doMonitoring = True
             pass
-        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
-        #svcMgr.PerfMonSvc.MonLvl = -1
         return
 # 
 class doDetailedMonitoring(JobProperty):
@@ -105,8 +103,6 @@ class doFullMon(JobProperty):
         jobproperties.PerfMonFlags.doFastMon = False
         jobproperties.PerfMonFlags.doMonitoring = True
         # setup values
-        from AthenaCommon.AppMgr import ServiceMgr as svcMgr
-        #svcMgr.PerfMonSvc.MonLvl = -1
         # enable DSO monitoring
         jobproperties.PerfMonFlags.doDsoMonitoring = True
         # activate persistency monitoring too
@@ -410,7 +406,7 @@ def _decode_pmon_opts(opts):
         elif opt.startswith('+'):
             val = True
             flag_name = flag_name[1:]
-        if not flag_name in dispatch:
+        if flag_name not in dispatch:
             raise ValueError(
                 '[%s] is not a valid PerfMonFlag (allowed: %r)' %
                 (flag_name, dispatch.keys())
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonSerializer.py b/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonSerializer.py
index 94cc5df2b969fcdcde4f5282c799a4536498b6b3..ab092b64cc30a40beadc4d77f3aa2aeb78c46324 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonSerializer.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PerfMonSerializer.py
@@ -1,9 +1,7 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file PerfMonComps/python/PerfMonSerializer
 
-from __future__ import with_statement, print_function
-
 __version__ = "$Revision: 524466 $"
 __doc__ = "various utils to encode/decode perfmon (meta)data with base64"
 __author__ = "Sebastien Binet, Thomas Kittlemann"
@@ -161,23 +159,22 @@ def iextract_pmon_data(fname):
     else:
         raise ValueError("expect a xyz.pmon.gz or xyz.stream file (got [%s])"%(fname,))
     
-    from collections import defaultdict
     import numpy as np
     out = _init_pmon_data()
         
     with open(stream_fname, 'r') as f:
-        for l in f:
+        for z in f:
             data, step, idx, comp = (None, ) * 4
-            if l.startswith('#'):
+            if z.startswith('#'):
                 continue
             #print("[%s]" % l.strip())
             # handle things like:
             # /io/std::vector<unsigned int>#L1CaloUnpackingErrors ...
             # /io/std::map<std::string,std::vector<int> >#mapdata ...
-            l = l.replace('unsigned int', 'unsigned-int')\
+            z = z.replace('unsigned int', 'unsigned-int')\
                  .replace('> >', '>->')
             
-            fields = l.split()
+            fields = z.split()
             #print("##",repr(l))
             if fields[0].startswith(('/ini/','/evt/','/fin/',
                                      '/cbk/','/usr/',
@@ -345,7 +342,7 @@ def iextract_pmon_data(fname):
                 pass
             else:
                 print("warning: unhandled field [%s]" % (fields[0],))
-                print(repr(l))
+                print(repr(z))
 
             # yields what we got so far
             yield step, idx, comp, out
@@ -391,7 +388,8 @@ def encode(data, use_base64=True):
 def decode(s):
     """decode a (compressed) string into a python object
     """
-    if not s: return None
+    if not s:
+        return None
     import zlib
     import cPickle as pickle
     if s[0]=='B':
@@ -399,7 +397,6 @@ def decode(s):
         s=base64.b64decode(s[1:])
     else:
         s=s[1:]
-    d=pickle.loads(zlib.decompress(s))
     return pickle.loads(zlib.decompress(s))
 
 def build_callgraph(fname):
@@ -417,9 +414,7 @@ def build_callgraph(fname):
     current_step = 'ini'
     local_ctx = None
     
-    out = None
     for step, idx, comp, table in iextract_pmon_data(fname):
-        out = table
         if idx is None:
             if comp == 'PerfMonSliceIo':
                 # ignore this component for now...
@@ -493,7 +488,7 @@ def build_callgraph(fname):
                 # push the stack of contexes
                 parent_ctx = local_ctx
                 local_ctx = GraphNode(comp, parent=parent_ctx)
-                if not step in graph.keys():
+                if step not in graph.keys():
                     local_ctx.ctype = step
                 parent_ctx.children.append(local_ctx)
             elif idx == 1:
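
Dropping the duplicated 'd=pickle.loads(...)' line above also halves the
work in decode(): the payload was decompressed and unpickled twice, with
the first result thrown away. For reference, a round-trip sketch of the
base64 branch of this scheme (using Python 3's pickle in place of cPickle):

    import base64
    import pickle
    import zlib

    def encode(obj):
        # 'B' marks a base64-encoded, zlib-compressed pickle
        return 'B' + base64.b64encode(zlib.compress(pickle.dumps(obj))).decode('ascii')

    def decode(s):
        if not s:
            return None
        return pickle.loads(zlib.decompress(base64.b64decode(s[1:])))

    print(decode(encode({'vmem_peak': 1234.5})))
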
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PyComps.py b/Control/PerformanceMonitoring/PerfMonComps/python/PyComps.py
index 305c6a877a770203437560d99871a0fe5e10b5e2..0646fbafff59c9aa1eda8d7e875c55b50c94b151 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PyComps.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PyComps.py
@@ -1,15 +1,13 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file:    PerfMonComps/python/PyComps.py
 # @purpose: a set of python components to perform performance monitoring
 # @author:  Sebastien Binet <binet@cern.ch>
-from __future__ import print_function
 
 __doc__     = 'a set of python components to perform performance monitoring'
 __version__ = '$Revision: 298807 $'
 __author__  = 'Sebastien Binet <binet@cern.ch>'
 
-import AthenaCommon.SystemOfUnits as Units
 import AthenaPython.PyAthena as PyAthena
 from AthenaPython.PyAthena import StatusCode
 
@@ -105,8 +103,8 @@ class PyStorePayloadMon (PyAthena.Svc):
             tp_name = clid2name(p.clID())
             print(fmt, (mem_0, mem_1, mem_0 - mem_1, tp_name, p.name()), file=fd)
             pass
-        mem_store_0 = long(mem_store_0)
-        mem_store_1 = long(mem_store_1)
+        mem_store_0 = int(mem_store_0)
+        mem_store_1 = int(mem_store_1)
         
         print(fmt, (
             mem_store_0, mem_store_1, mem_store_0 - mem_store_1,
@@ -133,7 +131,7 @@ class PyStorePayloadMon (PyAthena.Svc):
         ##     mem_0, mem_1, mem_1 - mem_0, ncalls_0, ncalls_1,
         ##     p.clID(), p.name()
         ##     ))
-        return (p, long(mem_0), long(mem_1))
+        return (p, int(mem_0), int(mem_1))
     
     def finalize(self):
         self.msg.info('==> finalize...')
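
The long() -> int() changes above are straight Python 3 migration: the
long builtin is gone and int is arbitrary-precision, so int(x) is a
drop-in replacement. For example:

    value = 2 ** 80                 # still a plain int in Python 3
    print(isinstance(value, int))   # True
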
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PyMonUtils.py b/Control/PerformanceMonitoring/PerfMonComps/python/PyMonUtils.py
index 78bd444393e970e36d22d259f3a810c2eadde1fc..488d9145455de5945dadb1dfdf2dbe5303747f98 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PyMonUtils.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PyMonUtils.py
@@ -1,8 +1,7 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file: PyMonUtils.py
 # @author: Sebastien Binet <binet@cern.ch>
-from __future__ import print_function
 
 __author__  = "Sebastien Binet <binet@cern.ch>"
 __version__ = "$Revision: 1.3 $"
@@ -44,7 +43,7 @@ def mon_push_back (sgname='StoreGateSvc'):
         # no double counting from symlinks
         # FIXME: it is actually valid to have 2 different collections
         #        (=/= CLIDs) with the same key...
-        if wasted.has_key(k):
+        if k in wasted:
             continue
         clid = dp.clID()
         klass = "%s" % cl.typename(clid)
@@ -81,18 +80,22 @@ def mon_push_back (sgname='StoreGateSvc'):
 
 def dump_smaps (fname=None):
     import os,sys
-    if not (fname is None): o = open (fname, 'w')
-    else:                   o = sys.stdout
-    for l in open('/proc/%d/smaps'%os.getpid()):
-        print(l, file=o)
+    if not (fname is None):
+        o = open (fname, 'w')
+    else:
+        o = sys.stdout
+    for z in open('/proc/%d/smaps'%os.getpid()):
+        print(z, file=o)
     if not (fname is None):
         o.close()
     return
 
 def loaded_libs (fname=None, pid=None, show=False):
     import os,sys,re
-    if not (fname is None): o = open (fname, 'w')
-    else:                   o = sys.stdout
+    if not (fname is None):
+        o = open (fname, 'w')
+    else:
+        o = sys.stdout
     pat = re.compile(r'(?P<addr_beg>\w*?)\-(?P<addr_end>\w*?)\s'\
                      r'(?P<perm>.{4})\s(?P<offset>\w*?)\s'\
                      r'(?P<devmajor>\d{2}):(?P<devminor>\d{2})\s'\
@@ -102,13 +105,13 @@ def loaded_libs (fname=None, pid=None, show=False):
     if pid is None:
         pid = os.getpid()
     for line in open('/proc/%s/smaps'%pid):
-        l = line.strip()
-        res = re.match(pat,l)
+        z = line.strip()
+        res = re.match(pat,z)
         if res:
             g = res.group
             libname = g('libname').strip()
             libs.add(_realpath(libname))
+    libs = sorted(libs, reverse=True)
+    libs = sorted([z for z in libs], reverse=True)
     if show:
         for libname in libs:
             print(libname, file=o)
@@ -117,8 +120,6 @@ def loaded_libs (fname=None, pid=None, show=False):
 import sys
 if sys.platform == 'darwin':
     def pymon():
-        from os import getpid,sysconf
-        from sys import platform
         from resource import getrusage, RUSAGE_SELF
         cpu = getrusage(RUSAGE_SELF)
         cpu = (cpu.ru_utime+cpu.ru_stime) * 1e3 # in milliseconds
@@ -138,7 +139,6 @@ if sys.platform == 'darwin':
 else:
     def pymon():
         from os import getpid,sysconf
-        from sys import platform
         from resource import getrusage, RUSAGE_SELF
         cpu = getrusage(RUSAGE_SELF)
         cpu = (cpu.ru_utime+cpu.ru_stime) * 1e3 # in milliseconds
@@ -152,13 +152,17 @@ else:
 def lshosts_infos():
     import socket,commands
     hostname = '<unknown>'
-    try: hostname = socket.gethostname()
-    except Exception: pass
+    try:
+        hostname = socket.gethostname()
+    except Exception:
+        pass
     sc,out = commands.getstatusoutput('which lshosts')
-    if sc != 0: return ('no lshosts command',0.) # no lshosts could be found
+    if sc != 0:
+        return ('no lshosts command',0.) # no lshosts could be found
     cmd = out
     sc,out = commands.getstatusoutput("%s %s"%(cmd,hostname))
-    if sc != 0: return ('host not in db', 0.)
+    if sc != 0:
+        return ('host not in db', 0.)
     cpu_infos = {}
     try:
         title,data = out.splitlines()
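
The wasted.has_key(k) -> 'k in wasted' change earlier in this file is the
standard Python 3 fix: dict.has_key() was removed, and membership is
tested with the 'in' operator (key name invented for illustration):

    wasted = {"EventInfo#McEventInfo": 1}
    print("EventInfo#McEventInfo" in wasted)   # replaces has_key(...)
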
diff --git a/Control/PerformanceMonitoring/PerfMonComps/python/PyPerfMon.py b/Control/PerformanceMonitoring/PerfMonComps/python/PyPerfMon.py
index 60d0dc473fbcddc9be1cde13dffcee6ccd3595ae..90ef5b7bdbd1b5a7fa980de4cb4e1cc205162875 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/python/PyPerfMon.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/python/PyPerfMon.py
@@ -2,7 +2,6 @@
 
 # @file: PyPerfMon.py
 # @author: Sebastien Binet <binet@cern.ch>
-from __future__ import with_statement
 
 __author__  = "Sebastien Binet <binet@cern.ch>"
 __version__ = "$Revision: 1.51 $"
@@ -10,17 +9,12 @@ __doc__     = """python module holding a python service to monitor athena perfor
 """
 
 import os,sys
-from time import time
-import resource
-from resource import getrusage as resource_getrusage
-import string
 
-import array
 import AthenaCommon.Logging as L
 
 _perfMonStates = ('ini','evt','fin')
 
-from PerfMonComps.PyMonUtils import *
+from PerfMonComps.PyMonUtils import Units, pymon
 
 from PyUtils.Decorators import memoize, forking
 
@@ -54,7 +48,6 @@ class Svc(object):
     instances = {}
 
     def __init__(self, name, properties = None):
-        import AthenaCommon.Logging as L
         ## init base class
         super(Svc,self).__init__()
         self.name  = name
@@ -94,9 +87,12 @@ class Svc(object):
                 cfg_module = 'PerfMonComps'
             elif c in cfgs:
                 cfg = cfgs[c]
-                if isinstance(cfg, ConfigurableAlgorithm): cfg_type = 'alg'
-                elif isinstance(cfg, ConfigurableAlgTool): cfg_type = 'algtool'
-                elif isinstance(cfg, ConfigurableService): cfg_type = 'svc'
+                if isinstance(cfg, ConfigurableAlgorithm):
+                    cfg_type = 'alg'
+                elif isinstance(cfg, ConfigurableAlgTool):
+                    cfg_type = 'algtool'
+                elif isinstance(cfg, ConfigurableService):
+                    cfg_type = 'svc'
                 cfg_class  = cfg.__class__.__name__
                 cfg_module = cfg.__class__.__module__
             else:
@@ -139,7 +135,7 @@ class Svc(object):
 
         ## perfmon domains
         try:
-            import DomainsRegistry as pdr
+            import PerfMonComps.DomainsRegistry as pdr
             self.meta['domains_a2d'] = pdr.a2d_db()
         except Exception:
             _msg.info('problem retrieving domains-registry...')
@@ -159,7 +155,9 @@ class Svc(object):
                 'rt':       (0.,0.),
                 }
 
-        import gc; gc.collect(); del gc
+        import gc
+        gc.collect()
+        del gc
         return
 
     def domains_db(self):
@@ -170,7 +168,6 @@ class Svc(object):
     
     @property
     def msg(self):
-        import AthenaCommon.Logging as L
         return L.logging.getLogger(self.name)
 
     def _set_stats(self, name,
@@ -227,7 +224,7 @@ class Svc(object):
             self._do_malloc_mon = False
         _msg.info('installing pmon-malloc hooks: %s', self._do_malloc_mon)
         import AthenaPython.PyAthena as PyAthena
-        lib = PyAthena.load_library('PerfMonEventDict')
+        PyAthena.load_library('PerfMonEventDict')
         memstats = PyAthena.PerfMon.MemStats
         memstats.enable(bool(self._do_malloc_mon))
         _msg.info('pmon-malloc hooks enabled: %s', bool(memstats.enabled()))
@@ -308,10 +305,10 @@ class Svc(object):
             statm = {}
             from sys import platform
             if platform != 'darwin' :
-                for l in open('/proc/self/status', 'r'):
+                for z in open('/proc/self/status', 'r'):
                     # lines are of the form:
                     # VmPeak: some value
-                    ll = list(map(str.strip, l.split(':')))
+                    ll = list(map(str.strip, z.split(':')))
                     k = ll[0]
                     v = ' '.join(ll[1:])
                     statm[k] = v
@@ -343,9 +340,9 @@ class Svc(object):
             for evtstr,fitn,fitted_slope in self._slope_data['fits']:
                 maxfitn=max(maxfitn,fitn)
             for evtstr,fitn,fitted_slope in self._slope_data['fits']:
-                _msg.info( '  evt %s fitted vmem-slope (%s points): %s'%
-                           (evtstr,str(fitn).rjust(len(str(maxfitn))),
-                            '%7.1f kb/evt'%fitted_slope if fitn>=2 else 'N/A') )
+                _msg.info( '  evt %s fitted vmem-slope (%s points): %s',
+                           evtstr,str(fitn).rjust(len(str(maxfitn))),
+                            '%7.1f kb/evt'%fitted_slope if fitn>=2 else 'N/A' )
             summary['job']['vmem_slope'] = self._slope_data
         else:
             _msg.info('vmem-leak estimation: [N/A]')
@@ -353,8 +350,10 @@ class Svc(object):
             
         ## try to recoup some memory by flushing out ROOT stuff...
         headerFile = os.path.splitext(self.outFileName)[0]+".dat"
-        if os.path.exists(headerFile):       os.remove(headerFile)
-        if os.path.exists(self.outFileName): os.remove(self.outFileName)
+        if os.path.exists(headerFile):
+            os.remove(headerFile)
+        if os.path.exists(self.outFileName):
+            os.remove(self.outFileName)
 
         ## build the callgraph...
         #import PerfMonComps.PerfMonSerializer as pmon_ser
@@ -431,12 +430,15 @@ class Svc(object):
         ## write out meta-data
         import PyUtils.dbsqlite as dbs
         meta = dbs.open(headerFile, 'n')
-        for k,v in six.iteritems (self.meta): meta[k] = v
+        for k,v in six.iteritems (self.meta):
+            meta[k] = v
         meta['version_id'] = '0.4.0' # stream-format + header file
         meta['pmon_tuple_files'] = map( os.path.basename, outFiles[1:] )
         import socket
-        try:   meta['hostname'] = socket.gethostname()
-        except Exception: meta['hostname'] = '<unknown>'
+        try:
+            meta['hostname'] = socket.gethostname()
+        except Exception:
+            meta['hostname'] = '<unknown>'
         meta.close()
 
         
@@ -447,8 +449,10 @@ class Svc(object):
         try:
             for outFile in outFiles:
                 outFileDirName = os.path.dirname(outFile)
-                try: os.chdir(outFileDirName)
-                except OSError as err: pass
+                try:
+                    os.chdir(outFileDirName)
+                except OSError:
+                    pass
                 outFile = os.path.basename(outFile)
                 _msg.info(' --> [%s] => %8.3f kb',
                           outFile,
@@ -517,10 +521,12 @@ class PoolMonTool(object):
         from AthenaCommon import CfgMgr
         from AthenaCommon.Configurable import Configurable
         for c in list(Configurable.allConfigurables.values()):
-            if not isinstance(c, CfgMgr.AthenaOutputStream): continue
+            if not isinstance(c, CfgMgr.AthenaOutputStream):
+                continue
             try:
                 outFile = c.properties()["OutputFile"]
-            except KeyError: continue
+            except KeyError:
+                continue
             if outFile.startswith("ROOTTREE:"):
                 outFile = outFile[len("ROOTTREE:"):]
             outFiles.add( outFile )
@@ -530,7 +536,6 @@ class PoolMonTool(object):
         
     @property
     def msg(self):
-        import AthenaCommon.Logging as L
         return L.logging.getLogger(self.name)
 
     def initialize(self):
@@ -620,7 +625,8 @@ class PoolMonTool(object):
                     self.msg.info( "Could not run checkFile on [%s] !!",
                                    inFileName )
                     self.msg.info( "Reason: %s", err )
-                    if 'inFile' in dir(): del inFile               
+                    if 'inFile' in dir():
+                        del inFile
                 _msg.unMute()
         if len(self.outputPoolFiles)>0:
             self.msg.info( "Content of output POOL files:" )
@@ -651,7 +657,8 @@ class PoolMonTool(object):
                     self.msg.info( "Could not run checkFile on [%s] !!",
                                    outFileName )
                     self.msg.info( "Reason: %s", err )
-                    if 'outFile' in dir(): del outFile               
+                    if 'outFile' in dir():
+                        del outFile
                 _msg.unMute()
                 
         return
@@ -678,13 +685,13 @@ class HephaestusMonTool(object):
         # during our finalize.
         self._heph_has_checkPoint = False 
         import sys
-        if not 'Hephaestus.atexit' in sys.modules.keys():
+        if 'Hephaestus.atexit' not in sys.modules.keys():
             self.msg.warning('Hephaestus was not correctly initialized !')
             self.msg.warning('Final report may be inaccurate...')
             self.msg.warning('(to fix this, run athena with --leak-check)')
 
         import dl, Hephaestus.MemoryTracker as m
-        _hephLib = dl.open (m.__file__, dl.RTLD_GLOBAL | dl.RTLD_NOW)
+        dl.open (m.__file__, dl.RTLD_GLOBAL | dl.RTLD_NOW)
         memtrack = m
 
         from os.path import splitext
@@ -742,7 +749,7 @@ class HephaestusMonTool(object):
         
         # consolidate last events with end-of-job leak report
         _clearCheckPoint = self.memtrack.CheckPoints.clearCheckPoint
-        for _ in xrange(self.lag):
+        for _ in range(self.lag):
             _clearCheckPoint( 0 )
 
         # put the per-evt leaks into a different file
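
The _msg.info change in this file passes format arguments to the logger
instead of pre-formatting with '%', deferring string construction until
the record is actually emitted. Stdlib sketch of the same pattern (names
and values illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    _msg = logging.getLogger("PerfMon")
    fitn, fitted_slope = 5, 12.3
    _msg.info('  evt fitted vmem-slope (%s points): %s',
              fitn, '%7.1f kb/evt' % fitted_slope if fitn >= 2 else 'N/A')
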
diff --git a/Control/PerformanceMonitoring/PerfMonComps/share/FastMon.py b/Control/PerformanceMonitoring/PerfMonComps/share/FastMon.py
index bead3f619f1d913b46b7992b552ffd468eaedc6a..6c175942e6a6276d83dac82269c506ab36c6fffd 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/share/FastMon.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/share/FastMon.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
 # @file: PerfMonComps/FastMon.py
 # @author: Sebastien Binet
 # $Id: FastMon.py,v 1.2 2007-12-03 19:07:38 binet Exp $
diff --git a/Control/PerformanceMonitoring/PerfMonComps/share/FullMon.py b/Control/PerformanceMonitoring/PerfMonComps/share/FullMon.py
index 8cf777264c1c95ab5e2614279fdbaf7c149a95ab..0f5ab9cf19d244f7b09704f5d32c58436e0ca5ae 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/share/FullMon.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/share/FullMon.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
 # @file: PerfMonComps/FullMon.py
 # @author: Sebastien Binet
 # $Id$
diff --git a/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonMTSvc_jobOptions.py b/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonMTSvc_jobOptions.py
index b8adada555d2b950e92575b9950033913c317ff6..b3cb87c5506ecd7ae7223eb80f87d625a24c3763 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonMTSvc_jobOptions.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonMTSvc_jobOptions.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
 ###############################
 # Print what we're doing
 ###############################
diff --git a/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonSvc_jobOptions.py b/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonSvc_jobOptions.py
index 8b732fc34a69275448fde4fc548b82bf08bb8437..5f546a8b942a677fe02312986b99e0f3ff0ad962 100644
--- a/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonSvc_jobOptions.py
+++ b/Control/PerformanceMonitoring/PerfMonComps/share/PerfMonSvc_jobOptions.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
 # @file: PerfMonComps/PerfMonSvc_jobOptions.py
 # @author: Sebastien Binet
 # $Id: PerfMonSvc_jobOptions.py,v 1.3 2007-08-01 20:58:52 binet Exp $