diff --git a/Control/AthenaCommon/python/ChapPy.py b/Control/AthenaCommon/python/ChapPy.py
index 2bc329b2522dc78691bc8254b3c591bb04d7e1cd..dfa81fd723794888e8fe9f7b2477a8c5b1e3e3d1 100755
--- a/Control/AthenaCommon/python/ChapPy.py
+++ b/Control/AthenaCommon/python/ChapPy.py
@@ -276,7 +276,7 @@ class AthenaApp(object):
     def __init__(self, cmdlineargs=None):
 
         import tempfile
-        self._jobo = tempfile.NamedTemporaryFile(suffix='-jobo.py')
+        self._jobo = tempfile.NamedTemporaryFile(suffix='-jobo.py', mode='w+')
         if cmdlineargs is None:
             cmdlineargs = []
         if isinstance(cmdlineargs, basestring):
@@ -292,7 +292,7 @@ class AthenaApp(object):
     def __lshift__(self, o):
         if isinstance(o, str):
             import textwrap
-            self._jobo.write(textwrap.dedent(o).encode())
+            self._jobo.write(textwrap.dedent(o))
             self._jobo.flush()
             return
         raise TypeError('unexpected type %s'%type(o))
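
Note on the ChapPy.py hunks: they go together. On Python 3, tempfile.NamedTemporaryFile defaults to binary mode ('w+b'), which is why __lshift__ previously had to .encode() its string; opening the job-options file in text mode ('w+') lets it accept str directly. A minimal standalone sketch of the difference (the job-options payload is illustrative):

    import tempfile

    with tempfile.NamedTemporaryFile(suffix='-jobo.py') as f:   # default 'w+b': binary
        try:
            f.write('theApp.EvtMax = 5\n')       # str into a binary-mode file
        except TypeError:
            print('binary mode rejects str; bytes required')
        f.write('theApp.EvtMax = 5\n'.encode())  # the old workaround

    with tempfile.NamedTemporaryFile(suffix='-jobo.py', mode='w+') as f:
        f.write('theApp.EvtMax = 5\n')           # text mode accepts str
        f.flush()                                # as __lshift__ does after each write
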
diff --git a/Control/AthenaConfiguration/share/confTool.py b/Control/AthenaConfiguration/share/confTool.py
index 9b5b97204d9ee500d6f211152bfd0dd600b15a51..58140100e629cbb158c4743524f683a72f9015db 100755
--- a/Control/AthenaConfiguration/share/confTool.py
+++ b/Control/AthenaConfiguration/share/confTool.py
@@ -170,20 +170,22 @@ def __compareConfig(configRef, configChk, args):
 
     for component in allComps:
 
-        if component not in configRef and not args.ignoreMissing:
-            print(
-                "\n\033[91m Component ",
-                component,
-                " \033[94m exists only in Chk \033[0m \033[0m \n",
-            )
+        if component not in configRef:
+            if not args.ignoreMissing:
+                print(
+                    "\n\033[91m Component ",
+                    component,
+                    " \033[94m exists only in Chk \033[0m \033[0m \n",
+                )
             continue
 
-        if component not in configChk and not args.ignoreMissing:
-            print(
-                "\n\033[91m Component",
-                component,
-                " \033[92m exists only in Ref \033[0m  \033[0m \n",
-            )
+        if component not in configChk:
+            if not args.ignoreMissing:
+                print(
+                    "\n\033[91m Component",
+                    component,
+                    " \033[92m exists only in Ref \033[0m  \033[0m \n",
+                )
             continue
         refValue = configRef[component]
         chkValue = configChk[component]
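
Note on the confTool.py hunk: the restructure is a behaviour fix, not a reformat. In the old combined test, passing --ignoreMissing made the whole condition false, so the continue never ran and a component present on only one side fell through to the configRef[component] / configChk[component] lookups below, raising KeyError. Nesting the flag check makes the skip unconditional and only the printout optional. A reduced sketch with hypothetical data:

    configRef = {'CompA': {'Prop': 1}}
    configChk = {'CompA': {'Prop': 1}, 'CompB': {'Prop': 2}}
    ignoreMissing = True

    for component in sorted(set(configRef) | set(configChk)):
        # old: if component not in configRef and not ignoreMissing: ...; continue
        # with ignoreMissing=True that branch (and its continue) never ran,
        # and configRef['CompB'] below raised KeyError
        if component not in configRef:
            if not ignoreMissing:
                print('Component', component, 'exists only in Chk')
            continue                      # always skip one-sided components
        if component not in configChk:
            if not ignoreMissing:
                print('Component', component, 'exists only in Ref')
            continue
        print(component, 'compared:', configRef[component] == configChk[component])
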
diff --git a/Control/AthenaMP/python/IoUtils.py b/Control/AthenaMP/python/IoUtils.py
index aa60d8af878f38d039467b64af3f004822b80f22..91714fb0d2732f5b728a4b2dca34142a9fa9fef4 100644
--- a/Control/AthenaMP/python/IoUtils.py
+++ b/Control/AthenaMP/python/IoUtils.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file AthenaMP.IoUtils
 # @purpose I/O utils 
@@ -10,6 +10,7 @@ __author__  = "Mous Tatarkhanov <tmmous@berkeley.edu>"
 
 from AthenaCommon.Logging import log as msg     #logging handle
 from GaudiMP.IoRegistry import IoRegistry
+import six
 _debug = msg.debug
 _info = msg.info
 
@@ -50,10 +51,10 @@ def update_io_registry(wkdir, mpid, iocomp_types=None):
     
     pfc = PoolFileCatalog()
 
-    ioreg_items = IoRegistry.instances.iteritems()
+    ioreg_items = six.iteritems(IoRegistry.instances)
     for iocomp,iodata in ioreg_items:
         #print "--iocomp,len(iodata)",iocomp, len(iodata)
-        io_items = iodata.iteritems()
+        io_items = six.iteritems(iodata)
         for ioname,ioval in io_items:
             # handle logical filenames...
             #ioname=pfc(ioname)
@@ -76,7 +77,7 @@ def update_io_registry(wkdir, mpid, iocomp_types=None):
                     os.symlink(src, dst)
                     msg.debug( "update_io_registry:<input> created symlink %s for" % dst)
             else:
-                raise ValueError, "unexpected iomode value: %r"%iomode
+                raise ValueError("unexpected iomode value: %r" % iomode)
             ioreg[iocomp][ioname][1] = newname
             pass
         pass
@@ -86,6 +87,7 @@ def update_io_registry(wkdir, mpid, iocomp_types=None):
 def redirect_log(wkdir):
     """redirect stdout and stderr of forked worker to tmp wkdir"""
     import os, sys
+    import multiprocessing as mp
     # define stdout and stderr names
     
     stdout = os.path.join(wkdir, 'stdout')
@@ -124,7 +126,7 @@ def reopen_fds(wkdir=""):
     _fds = IoRegistry.fds_dict
     _fds.create_symlinks(wkdir)
 
-    for k, v in _fds.iteritems():
+    for k, v in six.iteritems(_fds):
         fd = k; 
         (real_name, iomode, flags) = v
         if not os.path.isfile (real_name):
@@ -139,7 +141,7 @@ def reopen_fds(wkdir=""):
             try:
                 new_fd = os.open (_join(wkdir, os.path.basename(real_name)), flags)
                 os.lseek(new_fd, pos, os.SEEK_SET)
-            except Exception, err:         
+            except Exception as err:         
                 msg.warning("Exception caught handling OUTPUT file %s: %s" %  (real_name, err) )
                 msg.warning(" ...ignoring file FIXME!")
                 continue
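
Note on the IoUtils.py hunks: these are the standard six shims. dict.iteritems() is gone on Python 3, and the `raise E, msg` / `except E, err` forms are syntax errors there; six.iteritems() yields the lazy items iterator on both interpreters. A minimal sketch with made-up registry contents:

    import six

    # stand-in for IoRegistry.instances: {component: {filename: [iomode, newname]}}
    instances = {'EventSelector': {'data.root': ['<input>', None]}}

    for iocomp, iodata in six.iteritems(instances):   # py2 iteritems / py3 items
        for ioname, ioval in six.iteritems(iodata):
            iomode = ioval[0]
            if iomode not in ('<input>', '<output>'):
                raise ValueError('unexpected iomode value: %r' % iomode)
            print(iocomp, ioname, iomode)
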
diff --git a/Control/AthenaMP/python/PyComps.py b/Control/AthenaMP/python/PyComps.py
index a66e5044e27827b3271345b30409db0ab8f609ba..6a2a659779abdd8d40addea2d08c0ff200a18896 100644
--- a/Control/AthenaMP/python/PyComps.py
+++ b/Control/AthenaMP/python/PyComps.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 #-----Python imports---#
 import os, sys, time, shutil
@@ -23,7 +23,7 @@ class MpEvtLoopMgr(AthMpEvtLoopMgr):
         os.putenv('XRD_ENABLEFORKHANDLERS','1')
         os.putenv('XRD_RUNFORKHANDLER','1')
 
-        from AthenaMPFlags import jobproperties as jp
+        from .AthenaMPFlags import jobproperties as jp
         self.WorkerTopDir = jp.AthenaMPFlags.WorkerTopDir()
         self.OutputReportFile = jp.AthenaMPFlags.OutputReportFile()
         self.CollectSubprocessLogs = jp.AthenaMPFlags.CollectSubprocessLogs()
@@ -56,7 +56,7 @@ class MpEvtLoopMgr(AthMpEvtLoopMgr):
         self.configureStrategy(self.Strategy,self.IsPileup,self.EventsBeforeFork)
         
     def configureStrategy(self,strategy,pileup,events_before_fork):
-        from AthenaMPFlags import jobproperties as jp
+        from .AthenaMPFlags import jobproperties as jp
         from AthenaCommon.ConcurrencyFlags import jobproperties as jp
         event_range_channel = jp.AthenaMPFlags.EventRangeChannel()
         if (jp.AthenaMPFlags.ChunkSize() > 0):
@@ -106,10 +106,10 @@ class MpEvtLoopMgr(AthMpEvtLoopMgr):
                 from AthenaCommon.AppMgr import ServiceMgr as svcMgr
                 from AthenaIPCTools.AthenaIPCToolsConf import AthenaSharedMemoryTool
                 svcMgr.EventSelector.SharedMemoryTool = AthenaSharedMemoryTool("EventStreamingTool")
-                if sys.modules.has_key('AthenaPoolCnvSvc.ReadAthenaPool'):
+                if 'AthenaPoolCnvSvc.ReadAthenaPool' in sys.modules:
                     svcMgr.AthenaPoolCnvSvc.InputStreamingTool = AthenaSharedMemoryTool("InputStreamingTool")
             if use_shared_writer:
-                if sys.modules.has_key('AthenaPoolCnvSvc.WriteAthenaPool'):
+                if 'AthenaPoolCnvSvc.WriteAthenaPool' in sys.modules:
                     from AthenaCommon.AppMgr import ServiceMgr as svcMgr
                     from AthenaIPCTools.AthenaIPCToolsConf import AthenaSharedMemoryTool
                     svcMgr.AthenaPoolCnvSvc.OutputStreamingTool += [ AthenaSharedMemoryTool("OutputStreamingTool_0") ]
@@ -176,12 +176,12 @@ def setupEvtSelForSeekOps():
    #import sys
    #from AthenaCommon.Logging import log as msg
    msg.debug("setupEvtSelForSeekOps:")
-   if sys.modules.has_key('AthenaRootComps.ReadAthenaRoot'):
+   if 'AthenaRootComps.ReadAthenaRoot' in sys.modules:
        # athenarootcomps has seeking enabled by default
        msg.info('=> Seeking enabled.')
        return
    
-   if not sys.modules.has_key('AthenaPoolCnvSvc.ReadAthenaPool'):
+   if 'AthenaPoolCnvSvc.ReadAthenaPool' not in sys.modules:
       ## user did not import that module so we give up
       msg.info( "Cannot enable 'seeking' b/c module " + \
                  "[AthenaPoolCnvSvc.ReadAthenaPool] hasn't been imported..." )
diff --git a/Control/AthenaMP/python/Utils.py b/Control/AthenaMP/python/Utils.py
index 6f335098b24966393213fb8a2bcd6d08655c967f..954ab638aa2eff6b4c6daff18d3e1c9cb5cca74c 100644
--- a/Control/AthenaMP/python/Utils.py
+++ b/Control/AthenaMP/python/Utils.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file: AthenaMP.Utils
 # @purpose: a set of tools to handle various aspects of AthenaMP
@@ -11,6 +11,10 @@ __author__  = "Sebastien Binet <binet@cern.ch>"
 
 import os
 
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 #-----Helper tools for AthenaMP--#
 
 def get_mp_root(msg=""):
@@ -38,6 +42,7 @@ def hack_copy(srcDir, destDir):
     if srcDir == '': 
         srcDir = os.curdir
     if srcDir == destDir:
+        from AthenaCommon.Logging import log as msg
         msg.warning("hack_copy called with srcDir = destDir = "+srcDir)
         return
 
@@ -63,7 +68,7 @@ def slice_it(iterable, cols=2):
     chunksz,extra = divmod (len(iterable), cols)
     if extra:
         chunksz += 1
-    for i in xrange(cols):
+    for i in range(cols):
         yield islice (iterable, start, start+chunksz)
         start += chunksz
 
@@ -112,9 +117,9 @@ def _get_mem_stats(pid='self'):
             pss_adjust=0.5 #add 0.5KiB as this average error due to trunctation
             Pss=sum([float(line.split()[1])+pss_adjust for line in pss_lines])
             shared = Pss - private
-    elif (2,6,1) <= kv <= (2,6,9):
-        shared=0 #lots of overestimation, but what can we do?
-        private = rss
+    #elif (2,6,1) <= kv <= (2,6,9):
+    #    shared=0 #lots of overestimation, but what can we do?
+    #    private = rss
     else:
         shared=int(open(statm_name).readline().split()[2])
         shared*=PAGESIZE
@@ -125,11 +130,10 @@ def _get_mem_stats(pid='self'):
 #---- CPU-Proc affinty setting tools---#
 if 'linux' in sys.platform:
     def get_cpu(pid):
-        import commands
         """get core nbr where the proc-pid resides at that moment"""
         cmd = "ps --pid %i -o psr" % pid
         #print ">%s" % cmd
-        out = commands.getoutput(cmd)
+        out = subprocess.getoutput(cmd)
         cpu = int(out.splitlines()[1].split()[0])
         #print "pid: [%i] has cpu: [%i]" % (pid, cpu)
         return cpu
@@ -140,10 +144,9 @@ else:
     
 def set_proc_affinity(pid, cpu):
     """set pid to cpu affinity for process"""
-    import commands
     cmd = "taskset -pc %i %i" % (cpu, pid)
     #print "> taskset -pc %i %i" % (cpu, pid)                                                                                                       
-    st,out = commands.getstatusoutput(cmd)
+    st,out = subprocess.getstatusoutput(cmd)
     return st
 
 
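
Note on the Utils.py hunks: commands.getoutput/getstatusoutput moved into subprocess in Python 3, and the python-future package's install_aliases() backports those names onto subprocess for Python 2, so one import serves both interpreters. A sketch of the pattern (Linux-only, like the module itself; the commands mirror get_cpu and set_proc_affinity above, and the pid is just a demo value):

    from future import standard_library
    standard_library.install_aliases()   # py2: adds getoutput/getstatusoutput to subprocess
    import subprocess
    import os

    pid = os.getpid()                    # demo pid; any live process works
    out = subprocess.getoutput('ps --pid %i -o psr' % pid)
    print('runs on core:', out.splitlines()[-1].strip())

    status, _ = subprocess.getstatusoutput('taskset -pc 0 %i' % pid)
    print('taskset exit status:', status)
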
diff --git a/Control/AthenaMP/python/tests/mjMonTools.py b/Control/AthenaMP/python/tests/mjMonTools.py
index cc459976d346839b08f1f328c745d9d8a2ae05ab..9db47f643db44bbf35e85e879ec83219ab1c9ef7 100644
--- a/Control/AthenaMP/python/tests/mjMonTools.py
+++ b/Control/AthenaMP/python/tests/mjMonTools.py
@@ -1,22 +1,26 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file:    mpMonTools.py
 # @purpose: Library for mp performance monitoring of AthenaMP
 # @author:  Mous Tatarkhanov <tmmous@cern.ch>
 # @date:    December 2009
 
+from __future__ import print_function
+
 __version__ = "$Revision: 276791 $"
 __author__  = "Mous Tatarkhanov <tmmous@cern.ch>"
 
 import sys
 import os
-import subprocess
-import commands
 import signal
 import time
 import array
 import copy
-    
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 
 TIME_STEP = 10
 KB = (1 << 10)
@@ -68,8 +72,8 @@ def init_mp_stat():
     init_numa = list(get_numastat())
     init_numa[0] = numa_T0
     
-    print "initial_mem=%s" % init_mem
-    print "initial_numa=%s" % list(init_numa)
+    print ("initial_mem=%s" % init_mem)
+    print ("initial_numa=%s" % list(init_numa))
     
     
 class ProcDict(dict):
@@ -116,52 +120,52 @@ class ProcDict(dict):
             global T0
             if grepExist(self.out, "'start processing'"):
                 self.init_time = time.time()- T0
-                print "pid-%i: init_time=%s"% (self.pid, self.init_time)
+                print ("pid-%i: init_time=%s"% (self.pid, self.init_time))
 
         private = shared = -1
         try:
             pass
             #private, shared = _get_shared_private_mem(self.pid)
-        except Exception, e:
-            print "## Caught exception [%s] !!" % str(e.__class__)
-            print "## What:", e
-            print sys.exc_info()[0]
-            print sys.exc_info()[1]
+        except Exception as e:
+            print ("## Caught exception [%s] !!" % str(e.__class__))
+            print ("## What:", e)
+            print (sys.exc_info()[0])
+            print (sys.exc_info()[1])
         self["private"].append(private)
         self["shared"].append(shared)  
     
     def proc_ps_stat(self):
         """ ps statistics for this process of pid """
-        out = commands.getoutput("ps --pid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % self.pid)
+        out = subprocess.getoutput("ps --pid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % self.pid)
         lines = out.splitlines()
         if len(lines) > 1:
             self.add_ps_line(lines[1])
         else:
-            print "there is no process with pid: [%i]", self.pid
+            print ("there is no process with pid: [%i]", self.pid)
             return False
         return True  
     
     def children_exist(self):
         """ figures out weather the np kids were spawned for mother mpid""" 
-        sc, out = commands.getstatusoutput("ps --ppid %i -o pid,start" % self.pid)
+        sc, out = subprocess.getstatusoutput("ps --ppid %i -o pid,start" % self.pid)
         if sc is not 0:
-            #print "   children_exist: Error, sc=%i" % sc
+            #print ("   children_exist: Error, sc=%i" % sc)
             return False
  
         ps_lines = out.splitlines()
         nc = len(ps_lines)-1
-        print "  children_exist().nbr of children = %i" % nc
+        print ("  children_exist().nbr of children = %i" % nc)
         if nc > 0 :
-            print "%i children workers exist. Creating ProcDicts..." % nc
+            print ("%i children workers exist. Creating ProcDicts..." % nc)
             ps_lines.pop(0)
             for line in ps_lines:
                 ps_str = line.split()
                 cpid = int(ps_str[0])
                 ProcDict(cpid, start_time = _seconds(ps_str[1]))
-                print "..... child [%i] added" %  cpid
+                print ("..... child [%i] added" %  cpid)
             return nc
         else:
-            #print "no children exist for parent: %s " % self.pid
+            #print ("no children exist for parent: %s " % self.pid)
             return False
 
 
@@ -210,7 +214,7 @@ class SPSummary(dict):
     def extract_summary(self, dir):
         self.spid_list = mp_stat["pid"].keys()
         for pid in  mp_stat["pid"].keys():
-            print "extract_summary: pid %i" % pid
+            print ("extract_summary: pid %i" % pid)
             self['pid'].append(pid)
             self['init_time_x'].append(mp_stat['pid'][pid].init_time)
             self['elap_time_x'].append(mp_stat['pid'][pid].elap_time)
@@ -221,7 +225,7 @@ class SPSummary(dict):
                                                             sumList(mp_stat['mem']['kbbuffers'],
                                                                 mp_stat['mem']['kbcached']) )))
         for pid in self.cpid_list:
-            print "  %s/%s exists ->" % (dir,pid), os.path.exists(os.path.join(dir,"%s" % pid)) #FIX: add the extraction from cpid's logs.
+            print ("  %s/%s exists ->" % (dir,pid), os.path.exists(os.path.join(dir,"%s" % pid))) #FIX: add the extraction from cpid's logs.
             out_path = os.path.join(dir,  'stdout')
             err_path = os.path.join(dir,  'stderr')
 
@@ -279,22 +283,22 @@ def _seconds(time_str): #handles time in "H:M:S" and "M:S" format
         return 3600*int(time_nums[0])+60*int(time_nums[1]) + int(time_nums[2])
     elif (len(time_nums)==2):
         return 60*int(time_nums[0]) + int(time_nums[1])
-    print "ERROR: _seconds() returning - 0"
+    print ("ERROR: _seconds() returning - 0")
     return 0
 
 def show_numactl():
-    sc,out=commands.getstatusoutput("numactl --show")
+    sc,out=subprocess.getstatusoutput("numactl --show")
     if sc==256:
-        print "mjMonTools.show_numactl: numastat is not working! zeroes will be returned"
+        print ("mjMonTools.show_numactl: numastat is not working! zeroes will be returned")
         return False
     else:
-        print "mjMonTools.show_numactl: \n %s" % out
+        print ("mjMonTools.show_numactl: \n %s" % out)
         return True
 
 def get_numastat():
-    sc,out=commands.getstatusoutput("numastat")
+    sc,out=subprocess.getstatusoutput("numastat")
     if sc==256:
-        print "mjMonTools.get_numastat: numastat is not working! zeroes will be returned"
+        print ("mjMonTools.get_numastat: numastat is not working! zeroes will be returned")
         return (0,0,0,0,0,0,0)
     else:
         lines = out.splitlines()
@@ -309,7 +313,7 @@ def get_numastat():
         
 def save_numastat():
     current_numa = get_numastat()
-    #print "current_numa=%s" % list(current_numa)
+    #print ("current_numa=%s" % list(current_numa))
 
     _numa_stat = (
         mp_stat['numa']['Time'],
@@ -322,18 +326,18 @@ def save_numastat():
         )
     
     change_numa = subList(current_numa,init_numa)
-    print "NUMA_CHANGE=%s" % change_numa
+    print ("NUMA_CHANGE=%s" % change_numa)
     return [_numa_stat[i].append(change_numa[i]) for i in range(len(change_numa))]
     
 def print_memstat(msg =""):
     mem = get_memstat()
     t = time.time() - T0;
     save_numastat()
-    print msg + " [T=%i sec]" % t + " USED[%i Mb][change: %i Mb] - FREE[%i Mb][change: %i Mb]" % ( 
-        mem["USED"], mem["USED"]-init_mem["USED"], mem["FREE"], mem["FREE"]-init_mem["FREE"])
+    print (msg + " [T=%i sec]" % t + " USED[%i Mb][change: %i Mb] - FREE[%i Mb][change: %i Mb]" % (
+        mem["USED"], mem["USED"]-init_mem["USED"], mem["FREE"], mem["FREE"]-init_mem["FREE"]))
 
 def get_memstat():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem = dict()
     lines = out.splitlines()
     mem_strs = lines[1].split()
@@ -344,7 +348,7 @@ def get_memstat():
     mem_strs = lines[2].split()
     mem['USED'] = int(mem_strs[2])
     mem['FREE'] = int(mem_strs[3])
-    #print "mem: [%s Mbs]" %  mem
+    #print ("mem: [%s Mbs]" %  mem)
     return mem
 
 init_mem = get_memstat()
@@ -352,46 +356,46 @@ init_mem = get_memstat()
 def meanList(num_list):
     """finds average value of the number list"""
     if len(num_list) == 0:
-        print "meanList: WARNING - empty list, returning 0.0"
+        print ("meanList: WARNING - empty list, returning 0.0")
         return 0.0
     return float(sum(num_list)) / len(num_list)
     
 def sumList(l1, l2):
     """sum up values of two lists l1 + l2"""
     if len(l1) is not len(l2):
-        print "sumList: WARNING: len(l1) not equals len(l2)"
+        print ("sumList: WARNING: len(l1) not equals len(l2)")
         n = len(l1) if len(l2) > len(l1) else len(l2)
     else:
         n = len(l1)
 
     sum = list()
-    for i  in xrange(n):
+    for i  in range(n):
         sum.append(l1[i] + l2[i])
     return sum
 
 def subList(l1, l2): 
     """subtract values of two lists: l1 - l2"""
     if len(l1) is not len(l2):
-        print "subList: WARNING: len(l1) not equals len(l2)"
+        print ("subList: WARNING: len(l1) not equals len(l2)")
         n = len(l1) if len(l2) > len(l1) else len(l2)
     else:
         n = len(l1)
 
     sub = list()
-    for i  in xrange(n):
+    for i  in range(n):
         sub.append(l1[i] - l2[i])
     return sub
         
 def get_spike(l):
-    #print " get_spike:",
-    #print " e0 = ",  l[0]/1024, "Mb", 
-    #print " eN = ",  l[-1]/1024, "Mb",
-    #print " max = ", max(l)/1024, "Mb", 
-    #print " min = ", min(l)/1024, "Mb", 
-    #print " e0 - eN = ",  (l[0] - l[-1])/1024, "Mb",
-    #print " e0 - min = ", (l[0] - min(l))/1024, "Mb",
-    #print " eN - min = ", (l[-1] - min(l))/1024, "Mb",
-    #print " return  max - min =", (max(l) - min(l))/1024, "Mb"
+    #print (" get_spike:", end='')
+    #print (" e0 = ",  l[0]/1024, "Mb",  end='')
+    #print (" eN = ",  l[-1]/1024, "Mb", end='')
+    #print (" max = ", max(l)/1024, "Mb",  end='')
+    #print (" min = ", min(l)/1024, "Mb",  end='')
+    #print (" e0 - eN = ",  (l[0] - l[-1])/1024, "Mb", end='')
+    #print (" e0 - min = ", (l[0] - min(l))/1024, "Mb", end='')
+    #print (" eN - min = ", (l[-1] - min(l))/1024, "Mb", end='')
+    #print (" return  max - min =", (max(l) - min(l))/1024, "Mb")
     return max(l) - min(l)   
 
 def prepare_mp_stat():
@@ -413,34 +417,34 @@ def prepare_mp_stat():
 
 
 def print_summary():
-    print "===== SUB PROCESS SUMMARY ====="
+    print ("===== SUB PROCESS SUMMARY =====")
     for (k, v) in mp_stat['sp_summary'].items():
-        print "sp_summary['%s']=%s " % (k, v)
+        print ("sp_summary['%s']=%s " % (k, v))
         
 ################## children tools ######################
 def launched_processes_working(ppid):
     """ ps statistics for children of ppid. returns False if no children exist """
-    out = commands.getoutput("ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % ppid)
+    out = subprocess.getoutput("ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % ppid)
     ps_lines = out.splitlines()
     ps_lines.pop(0)
     
     exist = False # switch for existance of launched processes (not any processes)
     
     if len(ps_lines) > 0:
-        print "Subprocesses exist:"
+        print ("Subprocesses exist:")
         for line in ps_lines:
             ps_str = line.split()
             pid = int(ps_str[0])
-            #print "subprocess pid=%i" % pid
+            #print ("subprocess pid=%i" % pid)
             if pid in mp_stat["pid"].keys():
                 exist = True
                 mp_stat["pid"][pid].add_ps_line(line)
-                print "pid-%i: ps-stat appended" % pid
+                print ("pid-%i: ps-stat appended" % pid)
             else:
-                print "pid-%i: secondary proc" % pid
+                print ("pid-%i: secondary proc" % pid)
         return exist 
     else:
-        print "No subprocesses exist for parent: %i" % ppid
+        print ("No subprocesses exist for parent: %i" % ppid)
         return  exist #False
     return exist #False
 
@@ -460,30 +464,30 @@ def summarize_proc_stat():
 
 def children_born(log, mpid, np):
     """ figures out weather the np kids were spawned for mother mpid""" 
-    sc,out = commands.getstatusoutput("ps --ppid %i -o pid,start" % mpid)
+    sc,out = subprocess.getstatusoutput("ps --ppid %i -o pid,start" % mpid)
     if sc is not 0:
-        print "   mpMonTools.children_born: no kids yet... Error, sc=%i" % sc
+        print ("   mpMonTools.children_born: no kids yet... Error, sc=%i" % sc)
         return False
 
     ps_lines = out.splitlines()
-    #print "ps_lines=", ps_lines
+    #print ("ps_lines=", ps_lines)
     nc = len(ps_lines)-1
     
-    print " children_exist: nbr of children = [%i]" % nc
+    print (" children_exist: nbr of children = [%i]" % nc)
     if grepValue(log, "FIRSTEVENT_ELAP_TIME") is None:
         return False        
     
     if nc==np : #nbr of children is equal to nbr of procs required 
-        print "%i children workers forked! Registering them (creating ProcDicts) ..." % np
+        print ("%i children workers forked! Registering them (creating ProcDicts) ..." % np)
         ps_lines.pop(0)
         for line in ps_lines:
             ps_str = line.split()
             pid = int(ps_str[0])
             ProcDict(pid, start_time = _seconds(ps_str[1]))
-            print "..... child [%i] added" %  pid
+            print ("..... child [%i] added" %  pid)
         return True
     else:
-        print "no children exist for parent: %s " % mpid
+        print ("no children exist for parent: %s " % mpid)
     return False
 
 
@@ -492,26 +496,26 @@ def children_born(log, mpid, np):
 def grepExist(log, field):
     """grep check for the existance of the unique field in the log 
     """
-    print "grep %s %s" % (field, log),
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    print ("grep %s %s" % (field, log),)
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc==256:
-        print " FALSE:  sc=%i" % sc
+        print (" FALSE:  sc=%i" % sc)
         return False
     line = out.splitlines()[0]
-    print " TRUE: sc=%i \n  grepped-line=%s" % (sc,line)
+    print (" TRUE: sc=%i \n  grepped-line=%s" % (sc,line))
     return True
 
 def grepExist2(log, field):
     """grep check for the existance of the unique field in the log 
     """
-    print "grep %s %s" % (field, log)
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    print ("grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        print "grepping %s in %s failed with sc=%i" % (field, log, sc) 
+        print ("grepping %s in %s failed with sc=%i" % (field, log, sc) )
         return False
     line = out.splitlines()[0]
-    print "grepped-line=%s" % line
-    print "sc=", sc
+    print ("grepped-line=%s" % line)
+    print ("sc=", sc)
 
     return True
                                                 
@@ -520,13 +524,13 @@ def grepValue(log, field, sep='='):
        Example: out = 'Py:EventLoopMgr      INFO EvtMax  =  123456  something'      
        grepValue(log, "EvtMax", sep="=") = '123456' 
     """
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        #print "grepping %s in %s failed" % (field, log)
+        #print ("grepping %s in %s failed" % (field, log))
         return None
     line = out.splitlines()[0]
     import re 
-    vexpr = '\s*'+ sep+ '\s*(\d+)'
+    vexpr = '\\s*'+ sep+ '\\s*(\\d+)'
     m = re.search( field + vexpr, line)
     value = m.group(1)
     return value
@@ -536,13 +540,13 @@ def grepPath(log, field, sep=':'):
        Example: out = 'Py:EventLoopMgr      INFO master workdir: /tmp/athena-mp-tmp-tmmous/22590-1261097934  smthng'      
        grepPath(log, "workdir", sep=":") = '/tmp/athena-mp-tmp-tmmous/22590-1261097934' 
     """
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        print "grepping %s in %s failed" % (field, log)
+        print ("grepping %s in %s failed" % (field, log))
         return None
     line = out.splitlines()[0]
     import re 
-    vexpr = '\s*'+ sep+ '\s*([^\s]+)'
+    vexpr = '\\s*'+ sep+ '\\s*([^\\s]+)'
     m = re.search( field + vexpr, line)
     path = m.group(1)
     return path
@@ -550,14 +554,14 @@ def grepPath(log, field, sep=':'):
 
 ############# related to  athena-mp #########################
 def launch_athena(jobo, ne, se, np, output_dir, numa_set=None):
-    """"launching cmd: athena.py -c EvtMax=$ne $jobo  1> mp.output/stdout_$jobo.$np.$ne   2> mp.output/stderr_$jobo.$np.$ne""" 
+    """launching cmd: athena.py -c EvtMax=$ne $jobo  1> mp.output/stdout_$jobo.$np.$ne   2> mp.output/stderr_$jobo.$np.$ne""" 
     
     if not os.path.isdir(output_dir):
         os.mkdir(output_dir)
         
     numa_args = list()
     
-    print "job command and options as template: %s" % jobo
+    print ("job command and options as template: %s" % jobo)
     from string import Template
     arg_template= Template(jobo)
     arg_str = arg_template.substitute(MAXEVT=ne, SKIPEVT=se)
@@ -570,7 +574,7 @@ def launch_athena(jobo, ne, se, np, output_dir, numa_set=None):
     STDERR_FILE = open(stderr_name,  "w")
     
     #proc_args = ["athena.py",  "-c", "EvtMax=%i; SkipEvents=%i" % (ne, se) ,  "../%s" % jobo]
-    print "job command and options after template processing: %s" % proc_args
+    print ("job command and options after template processing: %s" % proc_args)
     
     if numa_set != None:
         numa_args = [ "numactl"]
@@ -579,11 +583,11 @@ def launch_athena(jobo, ne, se, np, output_dir, numa_set=None):
         elif ( numa_set[0]=='f' and numa_set[1]!='f'):
             numa_args.append( "--membind=%i"  % numa_set[1])
         elif ( numa_set[0]!='f' and numa_set[1]=='f'):                    
-            numa_args.append( "--cpubind=%i"  % numa-set[0])
+            numa_args.append( "--cpubind=%i"  % numa_set[0])
         elif (numa_set[0]!='f' and numa_set[1]!='f'):
             numa_args += ["--membind=%s" % numa_set[0], "--cpubind=%s" % numa_set[1] ]
         else:
-            print "SOMETHING WRONG: numa_set=%s" % numa_set
+            print ("SOMETHING WRONG: numa_set=%s" % numa_set)
 
         
     #proc_args = [   "numactl",  
@@ -593,7 +597,7 @@ def launch_athena(jobo, ne, se, np, output_dir, numa_set=None):
     
     proc_args = numa_args + proc_args
     
-    print "<<<LAUNCH>>>: %s" % proc_args
+    print ("<<<LAUNCH>>>: %s" % proc_args)
     mproc = subprocess.Popen( proc_args, 
                              stdout=STDOUT_FILE, 
                              stderr=STDERR_FILE,
@@ -619,30 +623,30 @@ def stop_proc(proc):
             pid = proc.pid 
             if proc.poll() is None: os.kill(pid, signal.SIGKILL); 
             proc.wait();
-        print "process %s  terminated" % pid 
-    except Exception, e:
-        print "## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e
-        print sys.exc_info()[0], sys.exc_info()[1]
+        print ("process %s  terminated" % pid )
+    except Exception as e:
+        print ("## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e)
+        print (sys.exc_info()[0], sys.exc_info()[1])
         return False
     pid_list.remove(pid)
     return True
 
 def stop_proc_tree(pid):
     """ Terminate/kill recursively process tree by pid. Be precautious using this!"""
-    out = commands.getoutput("ps --ppid %i" % pid)
+    out = subprocess.getoutput("ps --ppid %i" % pid)
     lines = out.splitlines(); lines.pop(0) #remove header
     try:
         if len(lines) > 0: 
             for line in lines:
                 cpid = int(line.split()[0])
-                print "child [%i:%i] being terminated..." % (pid, cpid)
+                print ("child [%i:%i] being terminated..." % (pid, cpid))
                 stop_proc_tree(cpid)
         if  pid in pid_list: pid_list.remove(pid) 
         os.kill(pid, signal.SIGKILL); #os.waitpid(pid, 0);
-        print "[%i] - terminated." % pid 
-    except Exception, e:
-        print "[%i] - dead #while killing caught exception [%s] !!" % (pid, str(e.__class__)),"  ## What:",e
-        #print sys.exc_info()[0], sys.exc_info()[1]
+        print ("[%i] - terminated." % pid )
+    except Exception as e:
+        print ("[%i] - dead #while killing caught exception [%s] !!" % (pid, str(e.__class__)),"  ## What:",e)
+        #print (sys.exc_info()[0], sys.exc_info()[1])
         return False
     return True
 
@@ -658,12 +662,12 @@ def stop_athenaMP(mproc):
             pid = mproc.pid;
             if mproc.poll() is None: os.kill(pid, signal.SIGKILL); 
             mproc.wait();
-        print "process %s  terminated" % pid 
+        print ("process %s  terminated" % pid )
         return True
-    except Exception, e:
-        print "## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e
-        print sys.exc_info()[0], sys.exc_info()[1]
-        return False
+    except Exception as e:
+        print ("## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e)
+        print (sys.exc_info()[0], sys.exc_info()[1])
+        return False
     return False
 
 
@@ -677,10 +681,10 @@ def launch_sar(log, time_step):
      `sar -bBcdqrRuvwWy -I SUM -I XALL -n ALL -P ALL` = `sar -A`
     """
     sar_args = [ "sar", "-bBrvwu", "-o", log, "%i" % time_step, "0" ]
-    print "launching: %s %s %s %s %s %s" % tuple(sar_args)    
-    sc,out = commands.getstatusoutput('sar -b 1 1')
+    print ("launching: %s %s %s %s %s %s" % tuple(sar_args)    )
+    sc,out = subprocess.getstatusoutput('sar -b 1 1')
     if sc!=0:
-        print 'launching failed - sar do not work on this system - please install if available!'
+        print ('launching failed - sar does not work on this system - please install if available!')
         return None
     FNULL = open('/dev/null', 'w')
     proc = subprocess.Popen(sar_args, 
@@ -706,32 +710,32 @@ def _num(str):
         
 def get_sar_stat(log, key):
     """ get statistics by issueing this cmd: `sar -key $log`"""
-    print 'launching cmd: sar %s -f %s' % (key, log)        
-    sc,out = commands.getstatusoutput("sar %s -f %s" % (key,log) )
+    print ('launching cmd: sar %s -f %s' % (key, log))
+    sc,out = subprocess.getstatusoutput("sar %s -f %s" % (key,log) )
     if sc!=0:
-        print "launching failed - either file %s does not exist or sar does not work on this system - please check!" % log
+        print ("launching failed - either file %s does not exist or sar does not work on this system - please check!" % log)
         return None
     sar_dict = dict()
-    #print"##################################"; print "out=\n", out; print "################################################"
+    #print("##################################"); print ("out=\n", out; print "################################################")
 
     lines = out.splitlines()
-    print "trim1=", lines.pop(0)#trimming output
-    print "trim2=", lines.pop(0)#trimming output
+    print ("trim1=", lines.pop(0))#trimming output
+    print ("trim2=", lines.pop(0))#trimming output
 
     avg_line = lines.pop(); #trimming avg line at the end 
-    print "avg_line1=", avg_line
+    print ("avg_line1=", avg_line)
     
     hstrs = lines.pop(0).replace('%', 'p').replace('/', 'p').split() #trimming header strings and replacing '%' and '/' to satisfy ROOT 
     hstrs[0] = "Time"
-    print "Sar statistics fields found: ", hstrs
+    print ("Sar statistics fields found: ", hstrs)
 
-    #print"##################################"; print "lines=\n", lines; print "################################################"
+    #print"(##################################"; print "lines=\n", lines; print "################################################")
     
     for hstr in hstrs:
         sar_dict[hstr] = list()
     for line in lines:
         lstrs = line.split()
-        #print "lstrs=", lstrs
+        #print ("lstrs=", lstrs)
         for i,hstr in enumerate(hstrs):
             if i!=0:
                 sar_dict[hstr].append( _num(lstrs[i]) )
@@ -752,53 +756,55 @@ def get_full_sar_stat(log):
   ##############sysstat and other linux commands wrappers########
 
 def _meminfo():
-    out=commands.getoutput("cat /proc/meminfo")
+    out=subprocess.getoutput("cat /proc/meminfo")
     lines = out.splitlines()
     mem=dict()
+    Kb = 1024
     mem['total']= int(lines[0].split()[1]) / Kb
     mem['free'] = int(lines[1].split()[1]) / Kb
     mem['buffers']= int(lines[2].split()[1]) / Kb
     mem['cached'] = int(lines[3].split()[1]) / Kb
-    print "meminfo.real_total: [%i Mb]", mem['total'] 
-    print "meminfo.free: [%i Mb]", mem['free']
-    print "meminfo.cached: [%i Mb]", mem['cached'] 
-    print "meminfo.buffers: [%i Mb]", mem['buffers']
+    print ("meminfo.real_total: [%i Mb]", mem['total'] )
+    print ("meminfo.free: [%i Mb]", mem['free'])
+    print ("meminfo.cached: [%i Mb]", mem['cached'] )
+    print ("meminfo.buffers: [%i Mb]", mem['buffers'])
     return mem
 
 def _get_iostat():
-    out=commands.getoutput("iostat")
+    out=subprocess.getoutput("iostat")
     io = dict()
     lines = out.splitlines()
     strs = lines[1].split()
     io['used'] = int(strs[2])
+    mem=dict()
     mem['free'] = int(strs[3])
     mem['cached'] = int(strs[5])
     mem['buffers'] = int(strs[6])
     mem_strs = lines[2].split()
     mem['USED'] = int(strs[2])
     mem['FREE'] = int(strs[3])
-    #print "mem: [%s Mbs]" %  mem
+    #print ("mem: [%s Mbs]" %  mem)
     return io
 def _used_mem():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem_strs = out.splitlines()[2].split()
     used_mem = int(mem_strs[2]) 
-    print "used_mem: [%i Mb]" % used_mem
+    print ("used_mem: [%i Mb]" % used_mem)
     return used_mem            
 def _free_mem():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem_strs = out.splitlines()[2].split()
     free_mem  = int(mem_strs[3]) 
-    print "free_mem: [%i Mb]" % free_mem
+    print ("free_mem: [%i Mb]" % free_mem)
     return free_mem
 
 def _launch_iostat(log, time_step):
-    print 'launching cmd: iostat $TIME_STEP -d -x > iostat.$jobo.$np.$ne &'
-    sc,out = commands.getstatusoutput( "iostat" )
+    print ('launching cmd: iostat $TIME_STEP -d -x > iostat.$jobo.$np.$ne &')
+    sc,out = subprocess.getstatusoutput( "iostat" )
     if sc!=0:
-        print 'launching failed - iostat do not work on this system'
+        print ('launching failed - iostat does not work on this system')
         return None
-    file = open(log, "w")
+    f_iostat = open(log, "w")
     iostat_proc = subprocess.Popen(
         [ "iostat",  "%i" % time_step, "-d", "-x"], 
         executable="iostat", 
@@ -806,13 +812,13 @@ def _launch_iostat(log, time_step):
         shell=False, 
         close_fds = True)
 
-    file.close()
+    f_iostat.close()
     return iostat_proc  
 def _launch_vmstat(log, time_step):
-    print 'launching cmd: vmstat $TIME_STEP -n > vmstat.$jobo.$np.$ne &'        
-    sc,out = commands.getstatusoutput( "vmstat -V" )
+    print ('launching cmd: vmstat $TIME_STEP -n > vmstat.$jobo.$np.$ne &')
+    sc,out = subprocess.getstatusoutput( "vmstat -V" )
     if sc!=0:
-        print 'launching failed - vmstat do not work on this system'
+        print ('launching failed - vmstat does not work on this system')
         return None
     file = open(log, "w")
     proc = subprocess.Popen([ "vmstat", "%i" % time_step, "-n" ], 
@@ -824,7 +830,7 @@ def _launch_vmstat(log, time_step):
     return proc
 def __create_childProcDicts(ppid):
     """ creates stats dictionary with """
-    out = commands.getoutput("ps --ppid %i -o pid, start" % ppid)
+    out = subprocess.getoutput("ps --ppid %i -o pid, start" % ppid)
     ps_lines = out.splitlines()
     ps_lines.pop(0)
     
@@ -834,19 +840,19 @@ def __create_childProcDicts(ppid):
             ps_str = line.split()
             pid = int(ps_str[0])
             ProcDict(pid, start_time = _seconds(ps_str[1]))
-            print "ppid: [%i]: child [%i] added" % (ppid, pid)
+            print ("ppid: [%i]: child [%i] added" % (ppid, pid))
     else: 
-        print "no children exist for parent: %s " % ppid
+        print ("no children exist for parent: %s " % ppid)
 
 
 #######  adopted from AthenaMP/PyComps ###################
 def print_shared_private(pid):
-    print "CPROC-SHARED_PRIVATE_MEM for pid: [%i]" % pid
+    print ("CPROC-SHARED_PRIVATE_MEM for pid: [%i]" % pid)
     for line in open("/proc/%i/status" % pid):
         if line.startswith('Vm'):
                 print(line.strip())
     private,shared=_get_shared_private_mem()
-    print "pid:[%i] ===> private: %s MB | shared: %s MB" % (pid, private/1024., shared /1024.)
+    print ("pid:[%i] ===> private: %s MB | shared: %s MB" % (pid, private/1024., shared /1024.))
 def _get_shared_private_mem(pid='self'):
     """ Finds proc's shared and private memory size from /proc/pid/statm  and /proc/pid/smaps dir
        Coppied from AthenaMP/PyComps.py"""
@@ -874,9 +880,9 @@ def _get_shared_private_mem(pid='self'):
             pss_adjust=0.5 #add 0.5KiB as this average error due to trunctation
             Pss=sum([float(line.split()[1])+pss_adjust for line in pss_lines])
             shared = Pss - private
-    elif (2,6,1) <= kv <= (2,6,9):
-        shared=0 #lots of overestimation, but what can we do?
-        private = rss
+    #elif (2,6,1) <= kv <= (2,6,9):
+    #    shared=0 #lots of overestimation, but what can we do?
+    #    private = rss
     else:
         shared=int(open(statm_name).readline().split()[2])
         shared*=PAGESIZE
@@ -889,7 +895,7 @@ def _get_shared_private_mem(pid='self'):
 
 def _createRootFile(outName):
     """creating carcasus of report ROOT file"""
-    print "create ROOT file..."
+    print ("create ROOT file...")
     from PerfMonAna.PyRootLib import importRoot
     from ROOT import TTree
     import array
@@ -989,19 +995,19 @@ def _createRootFile(outName):
     outFile.cd()
     outFile.Write()
     outFile.Close()
-    print "create ROOT file... [DONE]" 
+    print ("create ROOT file... [DONE]" )
     return
 
 def createRootFile(outName, np):
     """creating structure of ROOT-report file from mp_stat dictionary """
-    print "create ROOT file..."
+    print ("create ROOT file...")
 
     from PerfMonAna.PyRootLib import importRoot
     from ROOT import TTree
     import array
     ROOT = importRoot( batch = True )
     outFile = ROOT.fopen( outName, 'RECREATE' )
-    print "ROOT.fopened"
+    print ("ROOT.fopened")
 
     outFile.cd("/")
     
@@ -1012,31 +1018,31 @@ def createRootFile(outName, np):
         tree =  TTree( t, "%s stat tree" % t)
         tree.Branch('np', i, 'int/I') # each tree will have 'np' branch
         for b in mp_stat[t].keys():
-            #print "tree=%s, branch=%s" % (t,b)
+            #print ("tree=%s, branch=%s" % (t,b))
             if isinstance(mp_stat[t][b][0], int):
                 tree.Branch(b, i, 'int/I')
             elif isinstance(mp_stat[t][b][0], float):
                 tree.Branch(b, d,'float/F')
             else:
-                #print "branch [%s] is not int or float type" % b
+                #print ("branch [%s] is not int or float type" % b)
                 tree.Branch(b, i, 'int/I')
         tree.Write()
     outFile.Write()
     outFile.Close()
-    print "create ROOT file... [DONE]"
+    print ("create ROOT file... [DONE]")
 
 
 def fillRootTree(tree, stat, np):
-    #print "writing %s statistics Tree:" % tree.GetName(),
+    #print ("writing %s statistics Tree:" % tree.GetName(),)
     branches = stat.keys()
-    #print "    branches=", branches, "...", 
+    #print ("    branches=", branches, "...", )
     nbr  = len(branches)
     array_list = list()
 
     np_array = array.array('i', [np])
     tree.SetBranchAddress('np', np_array) #putting 'np' into each tree.
     for branch in branches:
-        #print "fillRT: branch=%s" % branch
+        #print ("fillRT: branch=%s" % branch)
         if isinstance(stat[branch][0], float):
             f = stat[branch][0]
             nums = array.array('f', [0.0])
@@ -1046,64 +1052,62 @@ def fillRootTree(tree, stat, np):
             nums =  array.array('i', [0])
             array_list.append(nums)
         else:
-            #print "branch [%s] is not int or float type" % branch
+            #print ("branch [%s] is not int or float type" % branch)
             nums = array.array('i', [-1])
             array_list.append(nums)
         tree.SetBranchAddress(branch, array_list[-1]);
         
-    for index in xrange(len(stat[branches[0]])):
+    for index in range(len(stat[branches[0]])):
         for array_index, branch in enumerate(branches):
-            #print "stat[branch=%s][index=%i] array_index=%i " % (branch, index, array_index)
+            #print ("stat[branch=%s][index=%i] array_index=%i " % (branch, index, array_index))
             array_list[array_index][0] = stat[branch][index] if array_list[array_index][0] is not -1 else -1
         tree.Fill()
-    #print "[DONE]"
+    #print ("[DONE]")
 
 
 
 def writeRootFile(outName, np):
     """writes statistics into ROOT file"""
-    print "write ROOT file %s...", outName 
+    print ("write ROOT file %s...", outName )
     createRootFile(outName, np)
     from ROOT import TFile, TTree
     import array
     outFile = TFile( outName, 'update' )
 
     stat_keys = mp_stat.keys()
-    #print "mp_stat.keys()", stat_keys
+    #print ("mp_stat.keys()", stat_keys)
     for key in stat_keys:
-        #print " writing [%s]" % key
+        #print (" writing [%s]" % key)
         tree = outFile.Get( "%s" %   key )
         fillRootTree(tree, mp_stat[key], np)
         tree.Write()
 
     outFile.Write()
     outFile.Close()        
-    print "write ROOT file... [DONE]" 
+    print ("write ROOT file... [DONE]" )
     return
 
 def mergeRootFiles(file, ne):
     import glob
     file_list = glob.glob1(os.getcwd(), "%s.*.%i.root" % (file, ne) )
-    import commands
     cmd = "hadd -f6 mp_stat.%s.ne%i" % (file, ne)
     for f in file_list:
         cmd = cmd + ' ' + f 
-    sc, out = commands.getstatusoutput(cmd)
+    sc, out = subprocess.getstatusoutput(cmd)
 
 def mergeRootOutput(output_file, jobo, np_list, ne):
-    import commands
     from ROOT import TFile, TTree
     #output_file = "merged.%s.ne%i.root" % (jobo, ne)
     cmd = "hadd -f6 %s" % output_file
     for np in np_list:
         # here we copy mp_summary and cp_summary trees in each root file from /$np dir into root dir for further merging
         file = "mj.%s.%i.%i.root" % (jobo, np, ne)
-        print " ---> processing file = %s" % file
+        print (" ---> processing file = %s" % file)
         #here we form the command for merging
         cmd = cmd + " %s" % file
 
-    print "issueing root files merging command:[%s]" % cmd
-    sc, out = commands.getstatusoutput(cmd)
+    print ("issueing root files merging command:[%s]" % cmd)
+    sc, out = subprocess.getstatusoutput(cmd)
     return #output_file
 
 def _createGlobalRootFile(file, ne):
@@ -1112,13 +1116,13 @@ def _createGlobalRootFile(file, ne):
     file_list = glob.glob1(os.getcwd(), "%s.*.%i.root" % (file, ne) )
     outFile = TFile ("%s.%i.root" % (file, ne), 'RECREATE' )    
     for f in file_list:
-        print "Copying trees from [%s]" % f
+        print ("Copying trees from [%s]" % f)
         tf = TFile (f, 'READ' )
         mpt = tf.Get("mp_summary")
         cpt = tf.Get("cp_summary")
         outFile.cd('/')
         dir = "%s" % f.replace(file, "").split(".")[1]
-        print "   creating dir for np = %s" % dir
+        print ("   creating dir for np = %s" % dir)
         outFile.mkdir(dir) # creating dir for np
         outFile.cd(dir)
         mpTree = mpt.CloneTree(); mpTree.Write()
@@ -1140,7 +1144,7 @@ def _createGlobalRootFile(file, ne):
 
 
 def report2(root_file, ne = 0, comments=""):
-    print'  mpMonTools.report(): root_file=', root_file
+    print('  mpMonTools.report(): root_file=', root_file)
     from ROOT import TFile, TTree, TBranch, TCanvas, TPad, TGraph, TLegend, TMultiGraph, gStyle, TLatex, TPaveLabel, TPaveText, TH2I, TMath
 
     def getTreeList(tree, column, condition):
@@ -1170,11 +1174,11 @@ def report2(root_file, ne = 0, comments=""):
                     formula = param[0]
                     condition = param[1]
                 else:
-                    print "MakeMG: ", formula, condition
+                    print ("MakeMG: ", formula, condition)
                     formula = param
                     condition = ""
 
-                print "name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) 
+                print ("name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) )
            
     
                 #g = makeGraph(tree, name, formula, condition, color=clr)
@@ -1190,7 +1194,7 @@ def report2(root_file, ne = 0, comments=""):
 
         if graph_data['type'] is 'list':
             for name, (lx,ly) in graph_data['data'].items():
-                print "name=%s" % name; print lx; print ly
+                print ("name=%s" % name); print (lx); print (ly)
                 clr+=1
                 g = TGraph( len(lx), array.array('f', lx), array.array('f', ly) )
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr); 
@@ -1201,9 +1205,9 @@ def report2(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'array':
             clr = 1
             g_list = list()
-            data = arrayd['data']
+            data = graph_data['data']
             for name,(x,y) in graph_data['data'].items():
-                print x; print y
+                print (x); print (y)
                 clr+=1;
                 g = TGraph(len(x), x, y)
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr) 
@@ -1213,7 +1217,7 @@ def report2(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'text':
             title.DrawPaveLabel(0.1,0.93,0.9,0.99, graph_data['title'], "brNDC")
             for s in graph_data['data']:
-                print "graph_data['data']=%s" % s
+                print ("graph_data['data']=%s" % s)
                 sp_pt.AddText(s)             
             sp_pt.SetTextAlign(12);
             sp_pt.SetTextSize(0.04)
@@ -1258,7 +1262,7 @@ def report2(root_file, ne = 0, comments=""):
     c = TCanvas("mpr", "AthenaMP-mp-scaling-charts", 10, 10, 800, 1024)
     c.SetFillColor(17);  c.SetBorderSize(1); c.cd()
  
-    tfile = TFile(root_file, "READ"); print "   root compression factor = ", tfile.GetCompressionFactor()
+    tfile = TFile(root_file, "READ"); print ("   root compression factor = ", tfile.GetCompressionFactor())
     spSumTree = tfile.Get("sp_summary")
     #cpSumTree = tfile.Get("cp_summary")
     ioTree = tfile.Get("io")
@@ -1267,7 +1271,7 @@ def report2(root_file, ne = 0, comments=""):
     
     if ne is 0:
         ne = int(root_file.split('.')[-2].replace('ne', ''))
-        print "extracted ne=[%i]" % ne
+        print ("extracted ne=[%i]" % ne)
 
 ##### FORMING THE DATA FOR ROOT Graphing-Charting-Histogramming #####    
     np_list = list(set(getTreeList(spSumTree, 'np', ''))); np_list.sort() #uniqeify and sort np_list
@@ -1348,8 +1352,8 @@ def report2(root_file, ne = 0, comments=""):
             txt_dict[s] += "%10.1f" % getTreeList(spSumTree, s, "np==%i" % int(np) )[0]
             ltxt_dict[s].append( "%10.1f" % getTreeList(spSumTree, s, "np==%i" % int(np))[0] )
 
-    print "np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev)
-    print "elap-cpu=%s" % (elap_cpu_time)
+    print ("np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev))
+    print ("elap-cpu=%s" % (elap_cpu_time))
 
     from socket import gethostname
     import platform
@@ -1615,33 +1619,33 @@ def report2(root_file, ne = 0, comments=""):
     gStyle.SetMarkerStyle(21)
     gStyle.SetMarkerColor(2)
     gStyle.SetMarkerSize(0.4)
-    print "gStyle.Set done"
+    print ("gStyle.Set done")
 
     title = TPaveLabel(0.1,0.98,0.9,1, "Athena MP Plots");
     title.SetFillColor(42); title.SetTextFont(40); 
-    #title.Draw();print "title Drawn"
+    #title.Draw();print ("title Drawn")
 
     mgs =  list()  #List of TMultiGraphs
     ls =   list()  #List of TLegends
     gs =   list()  #List of TGraph
 
-    for j in xrange(ppc):
+    for j in range(ppc):
         y_factor = 0.99;   x1 = 0.01; x2 = 0.99;  y1 = y_factor - (y_factor-0.01)*(j+1)/float(ppc); y2 = y_factor - (y_factor-0.01)*j/float(ppc)
-        print "x1,y1,x2,y2",  x1, y1, x2, y2 
+        print ("x1,y1,x2,y2",  x1, y1, x2, y2 )
         pad = TPad("pad%i" % j, "pad%i" % j,   x1, y1, x2, y2,   33); pad.Draw()
         pads.append(pad);
     
     num_cans = len(graph_list) /(cpp*ppc) if len(graph_list) % (cpp*ppc)==0 else len(graph_list)/(cpp*ppc) + 1 
     graph_list += [None,]* (num_cans*cpp*ppc - len(graph_list))
-    print "number of pages/canvases in report = ", num_cans
+    print ("number of pages/canvases in report = ", num_cans)
     
     pdf_file = root_file
     for s in ['merged.', '.py', '.root']:
         pdf_file = pdf_file.replace(s, '')
     pdf_file ="%s.pdf" % pdf_file
 
-    for i in xrange(num_cans):
-        for j in xrange(ppc):
+    for i in range(num_cans):
+        for j in range(ppc):
             graph = graph_list[ppc*i+j]
             if graph is None:
                 continue
@@ -1656,25 +1660,25 @@ def report2(root_file, ne = 0, comments=""):
             pads[j].SetRightMargin(0.2)
             l = TLegend(0.82,0.20,0.99,0.89); ls.append(l) 
             mg = TMultiGraph(); mgs.append(mg)
-            print "graph=", graph
+            print ("graph=", graph)
             gs.append(MakeMultiGraph(graph, mg, l))
 
         c.Update()
         if i == 0:
-            print "pdf.start"
+            print ("pdf.start")
             c.Print(pdf_file+'(', 'pdf') #start page
         elif i < num_cans-1:
-            print "pdf.body"
+            print ("pdf.body")
             c.Print(pdf_file, 'pdf')    #body pages
         else:
-            print "pdf.end"
+            print ("pdf.end")
             c.Print(pdf_file + ')', 'pdf') #end page
         c.SaveAs("%s.%i.png" % (pdf_file, i))
         for pad in pads:
             pad.Clear()
 
 def report(root_file, ne = 0, comments=""):
-    print'  mpMonTools.report(): root_file=', root_file
+    print('  mpMonTools.report(): root_file=', root_file)
     from ROOT import TFile, TTree, TBranch, TCanvas, TPad, TGraph, TLegend, TMultiGraph, gStyle, TLatex, TPaveLabel, TPaveText, TH2I, TMath
 
     def getTreeList(tree, column, condition):
@@ -1707,20 +1711,20 @@ def report(root_file, ne = 0, comments=""):
                     formula = param[0]
                     condition = param[1]
                 else:
-                    print "MakeMG: ", formula, condition
+                    print ("MakeMG: ", formula, condition)
                     formula = param
                     condition = ""
 
-                print "name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) 
+                print ("name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) )
            
                 tree.Draw(formula, condition, "goff")
                 
                 selection_size = tree.GetSelectedRows()
                 if selection_size==-1:
-                    print "-> SKIPPED (DO NOT EXIST): SELECTION_SIZE=%i" % selection_size 
+                    print ("-> SKIPPED (DO NOT EXIST): SELECTION_SIZE=%i" % selection_size )
                     continue
                 else:
-                    print "-> SELECTION_SIZE=%i" % selection_size 
+                    print ("-> SELECTION_SIZE=%i" % selection_size )
                     pass
 
                 g = TGraph(selection_size, tree.GetV2(), tree.GetV1()); gl.append(g)
@@ -1734,7 +1738,7 @@ def report(root_file, ne = 0, comments=""):
 
         if graph_data['type'] is 'list':
             for name, (lx,ly) in graph_data['data'].items():
-                print "name=%s" % name; print lx; print ly
+                print ("name=%s" % name); print (lx); print (ly)
                 clr+=1
                 g = TGraph( len(lx), array.array('f', lx), array.array('f', ly) )
                 g.SetName(name); g.SetLineColor(clr*line_blank); g.SetLineWidth(1); g.SetMarkerColor(clr); 
@@ -1745,9 +1749,9 @@ def report(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'array':
             clr = 1
             g_list = list()
-            data = arrayd['data']
+            data = graph_data['data']
             for name,(x,y) in graph_data['data'].items():
-                print x; print y
+                print (x); print (y)
                 clr+=1;
                 g = TGraph(len(x), x, y)
                 g.SetName(name); g.SetLineColor(clr*line_blank); g.SetLineWidth(1); g.SetMarkerColor(clr) 
@@ -1757,7 +1761,7 @@ def report(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'text':
             title.DrawPaveLabel(0.1,0.93,0.9,0.99, graph_data['title'], "brNDC")
             for s in graph_data['data']:
-                print "graph_data['data']=%s" % s
+                print ("graph_data['data']=%s" % s)
                 sp_pt.AddText(s)             
             sp_pt.SetTextAlign(12);
             sp_pt.SetTextSize(0.04)
@@ -1802,7 +1806,7 @@ def report(root_file, ne = 0, comments=""):
     c = TCanvas("mpr", "AthenaMJ-mp-scaling-charts", 1, 1, 800, 1024)
     c.SetFillColor(0);  c.SetBorderSize(1); c.cd()
  
-    tfile = TFile(root_file, "READ"); print "   root compression factor = ", tfile.GetCompressionFactor()
+    tfile = TFile(root_file, "READ"); print ("   root compression factor = ", tfile.GetCompressionFactor())
     spSumTree = tfile.Get("sp_summary")
     #cpSumTree = tfile.Get("cp_summary")
     ioTree = tfile.Get("io")
@@ -1812,7 +1816,7 @@ def report(root_file, ne = 0, comments=""):
 
     if ne is 0:
         ne = int(root_file.split('.')[-2].replace('ne', ''))
-        print "extracted ne=[%i]" % ne
+        print ("extracted ne=[%i]" % ne)
 
 ##### FORMING THE DATA FOR ROOT Graphing-Charting-Histogramming #####    
     np_list = list(set(getTreeList(spSumTree, 'np', ''))); np_list.sort() #uniqeify and sort np_list
@@ -1894,9 +1898,9 @@ def report(root_file, ne = 0, comments=""):
         np_txt += "%10s" % np
         for s in sp_lb:
             gtl = getTreeList(spSumTree, s, "np==%i" % int(np) )
-            print "%s: getTreeList: %s" % (s,gtl), 
+            print ("%s: getTreeList: %s" % (s,gtl), end='')
             gtl_avg = meanList(gtl)
-            print " avg=%10.1f" % gtl_avg
+            print (" avg=%10.1f" % gtl_avg)
             txt_dict[s] += "%10.1f" % gtl_avg
             ltxt_dict[s].append( "%10.1f" % gtl_avg)
         ltxt_dict["total_rate"].append("%10.1f" % 
@@ -1904,8 +1908,8 @@ def report(root_file, ne = 0, comments=""):
         ltxt_dict["proc_rate_avg"].append("%10.1f" % 
                 ( 60.0*float(ne)/( float(ltxt_dict["elap_time_x"][-1]) - float(ltxt_dict["init_time_x"][-1]) ) ) )
         
-    print "np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev)
-    print "elap-cpu=%s" % (elap_cpu_time)
+    print ("np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev))
+    print ("elap-cpu=%s" % (elap_cpu_time))
 
     from socket import gethostname
     import platform
@@ -2185,33 +2189,33 @@ def report(root_file, ne = 0, comments=""):
     gStyle.SetMarkerStyle(21)
     gStyle.SetMarkerColor(2)
     gStyle.SetMarkerSize(0.5)
-    print "gStyle.Set done"
+    print ("gStyle.Set done")
 
     title = TPaveLabel(0.1,0.98,0.9,1, "Athena MJ Plots");
     title.SetFillColor(0); title.SetTextFont(40); 
-    #title.Draw();print "title Drawn"
+    #title.Draw();print ("title Drawn")
 
     mgs =  list()  #List of TMultiGraphs
     ls =   list()  #List of TLegends
     gs =   list()  #List of TGraph
 
-    for j in xrange(ppc):
+    for j in range(ppc):
         y_factor = 0.99;   x1 = 0.01; x2 = 0.99;  y1 = y_factor - (y_factor-0.01)*(j+1)/float(ppc); y2 = y_factor - (y_factor-0.01)*j/float(ppc)
-        print "x1,y1,x2,y2",  x1, y1, x2, y2 
+        print ("x1,y1,x2,y2",  x1, y1, x2, y2 )
         pad = TPad("pad%i" % j, "pad%i" % j,   x1, y1, x2, y2,   0); pad.Draw()
         pads.append(pad);
     
     num_cans = len(graph_list) /(cpp*ppc) if len(graph_list) % (cpp*ppc)==0 else len(graph_list)/(cpp*ppc) + 1 
     graph_list += [None,]* (num_cans*cpp*ppc - len(graph_list))
-    print "number of pages/canvases in report = ", num_cans
+    print ("number of pages/canvases in report = ", num_cans)
     
     pdf_file = root_file
     for s in ['merged.', '.py', '.root']:
         pdf_file = pdf_file.replace(s, '')
     pdf_file ="%s.pdf" % pdf_file
 
-    for i in xrange(num_cans):
-        for j in xrange(ppc):
+    for i in range(num_cans):
+        for j in range(ppc):
             graph = graph_list[ppc*i+j]
             if graph is None:
                 continue
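
Note on this hunk: the unchanged line `num_cans = len(graph_list) /(cpp*ppc) ...` still uses `/`, which floors on Python 2 but returns a float on Python 3, so the converted `range(num_cans)` would raise TypeError. A minimal sketch of the floor-division spelling that keeps the Python 2 semantics (the sizes are illustrative; the same pattern recurs in report() below and in mpMonTools.py):

    graph_list = [None] * 10    # illustrative stand-in for the real graph list
    cpp, ppc = 2, 3
    # '//' floors on both Python 2 and 3, so range() always receives an int
    num_cans = len(graph_list) // (cpp*ppc) if len(graph_list) % (cpp*ppc) == 0 \
               else len(graph_list) // (cpp*ppc) + 1
    for i in range(num_cans):
        pass                    # one canvas/page per iteration
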
@@ -2226,18 +2230,18 @@ def report(root_file, ne = 0, comments=""):
             pads[j].SetRightMargin(0.2)
             l = TLegend(0.82,0.20,0.99,0.89); ls.append(l) 
             mg = TMultiGraph(); mgs.append(mg)
-            print "graph=", graph
+            print ("graph=", graph)
             gs.append(MakeMultiGraph(graph, mg, l))
 
         c.Update()
         if i == 0:
-            print "pdf.start"
+            print ("pdf.start")
             c.Print(pdf_file+'(', 'pdf') #start page
         elif i < num_cans-1:
-            print "pdf.body"
+            print ("pdf.body")
             c.Print(pdf_file, 'pdf')    #body pages
         else:
-            print "pdf.end"
+            print ("pdf.end")
             c.Print(pdf_file + ')', 'pdf') #end page
         #c.SaveAs("%s.%i.png" % (pdf_file, i))
         c.SaveAs("%s.%i.C" % (pdf_file, i))
diff --git a/Control/AthenaMP/python/tests/mpMonTools.py b/Control/AthenaMP/python/tests/mpMonTools.py
index 5d33def7341e57c35944e87b30c1001fa2cf9a35..9d59eb9fff18ec136f68f65cc47f168a1753e935 100644
--- a/Control/AthenaMP/python/tests/mpMonTools.py
+++ b/Control/AthenaMP/python/tests/mpMonTools.py
@@ -1,22 +1,27 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file:    mpMonTools.py
 # @purpose: Library for mp performance monitoring of AthenaMP
 # @author:  Mous Tatarkhanov <tmmous@cern.ch>
 # @date:    December 2009
 
+from __future__ import print_function
+
 __version__ = "$Revision: 329336 $"
 __author__  = "Mous Tatarkhanov <tmmous@cern.ch>"
 
 import sys
 import os
-import subprocess
-import commands
 import signal
 import time
 import array
 import copy
-    
+import six
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 
 T0 = time.time()
 numa_T0 = T0
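
Note on the import changes: the `commands` module was removed in Python 3; `subprocess.getoutput` and `subprocess.getstatusoutput` are the replacements, and `standard_library.install_aliases()` from the `future` package backports those names to Python 2 so a single spelling runs everywhere. One behavioural difference remains: Python 2's `commands.getstatusoutput` returned the raw wait status (exit code 1 showed up as 256), while the Python 3 function returns the exit code itself, so checks like `if sc==256:` below deserve a second look. A minimal sketch of the call pattern (the `ps` command is illustrative):

    from future import standard_library
    standard_library.install_aliases()
    import subprocess

    # returns (exit_code, combined_stdout_and_stderr) on Python 3
    sc, out = subprocess.getstatusoutput("ps --ppid 1 -o pid,start")
    if sc != 0:
        print("ps failed with status %i" % sc)
    else:
        print(out.splitlines()[0])   # header line of the ps output
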
@@ -68,8 +73,8 @@ def init_mp_stat():
     init_numa = list(get_numastat())
     init_numa[0] = numa_T0
     
-    print "initial_mem=%s" % init_mem
-    print "initial_numa=%s" % list(init_numa)
+    print ("initial_mem=%s" % init_mem)
+    print ("initial_numa=%s" % list(init_numa))
     
     init_mem = get_memstat()
 
@@ -112,46 +117,46 @@ class ProcDict(dict):
         try:
             pass
             #private, shared = _get_shared_private_mem(self.pid)
-        except Exception, e:
-            print "## Caught exception [%s] !!" % str(e.__class__)
-            print "## What:", e
-            print sys.exc_info()[0]
-            print sys.exc_info()[1]
+        except Exception as e:
+            print ("## Caught exception [%s] !!" % str(e.__class__))
+            print ("## What:", e)
+            print (sys.exc_info()[0])
+            print (sys.exc_info()[1])
         self["private"].append(private)
         self["shared"].append(shared)  
     
     def proc_ps_stat(self):
         """ ps statistics for this process of pid """
-        out = commands.getoutput("ps --pid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % self.pid)
+        out = subprocess.getoutput("ps --pid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % self.pid)
         lines = out.splitlines()
         if len(lines) > 1:
             self.add_ps_line(lines[1])
         else:
-            print "there is no process with pid: [%i]", self.pid
+            print ("there is no process with pid: [%i]", self.pid)
             return False
         return True  
     
     def children_exist(self):
         """ figures out weather the np kids were spawned for mother mpid""" 
-        sc, out = commands.getstatusoutput("ps --ppid %i -o pid,start" % self.pid)
+        sc, out = subprocess.getstatusoutput("ps --ppid %i -o pid,start" % self.pid)
         if sc is not 0:
-            #print "   children_exist: Error, sc=%i" % sc
+            #print ("   children_exist: Error, sc=%i" % sc)
             return False
  
         ps_lines = out.splitlines()
         nc = len(ps_lines)-1
-        print "  children_exist().nbr of children = %i" % nc
+        print ("  children_exist().nbr of children = %i" % nc)
         if nc > 0 :
-            print "%i children workers exist. Creating ProcDicts..." % nc
+            print ("%i children workers exist. Creating ProcDicts..." % nc)
             ps_lines.pop(0)
             for line in ps_lines:
                 ps_str = line.split()
                 cpid = int(ps_str[0])
                 ProcDict(cpid, start_time = _seconds(ps_str[1]))
-                print "..... child [%i] added" %  cpid
+                print ("..... child [%i] added" %  cpid)
             return nc
         else:
-            #print "no children exist for parent: %s " % self.pid
+            #print ("no children exist for parent: %s " % self.pid)
             return False
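
A caveat the conversion leaves in place: identity tests against literals, such as `if sc is not 0:` above and `if ne is 0:` or `graph_data['type'] is 'list'` further down, compare object identity rather than value. They only happen to work because CPython caches small integers and interns short string literals, and CPython 3.8+ emits a SyntaxWarning for them. A sketch of the value-based spelling:

    sc = 256
    if sc != 0:                # rather than: if sc is not 0:
        print("non-zero status")
    graph_type = 'list'
    if graph_type == 'list':   # rather than: ... is 'list'
        print("list-type graph")
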
 
 
@@ -207,7 +212,7 @@ class CPSummary(dict):
             self['vmem'].append(mp_stat['cpid'][pid].vmem)
             self['rss'].append(mp_stat['cpid'][pid].rss)
         for pid in self.cpid_list:
-            print "  %s/%s exists ->" % (dir,pid), os.path.exists(os.path.join(dir,"%s" % pid)) #FIX: add the extraction from cpid's logs.
+            print ("  %s/%s exists ->" % (dir,pid), os.path.exists(os.path.join(dir,"%s" % pid))) #FIX: add the extraction from cpid's logs.
             out_path = os.path.join(dir, "%s" % pid, 'stdout')
             err_path = os.path.join(dir, "%s" % pid, 'stderr')
 
@@ -270,13 +275,13 @@ def _seconds(time_str): #handles time in "H:M:S" and "M:S" format
         return 3600*int(time_nums[0])+60*int(time_nums[1]) + int(time_nums[2])
     elif (len(time_nums)==2):
         return 60*int(time_nums[0]) + int(time_nums[1])
-    print "ERROR: _seconds() returning - 0"
+    print ("ERROR: _seconds() returning - 0")
     return 0
     
 def get_numastat():
-    sc,out=commands.getstatusoutput("numastat")
+    sc,out=subprocess.getstatusoutput("numastat")
     if sc==256:
-        print "mjMonTools.get_numastat: numastat is not working! zeroes will be returned"
+        print ("mjMonTools.get_numastat: numastat is not working! zeroes will be returned")
         return (0,0,0,0,0,0,0)
     else:
         lines = out.splitlines()
@@ -291,7 +296,7 @@ def get_numastat():
         
 def save_numastat():
     current_numa = get_numastat()
-    #print "current_numa=%s" % list(current_numa)
+    #print ("current_numa=%s" % list(current_numa))
 
     _numa_stat = (
         mp_stat['numa']['Time'],
@@ -304,18 +309,18 @@ def save_numastat():
         )
     
     change_numa = subList(current_numa,init_numa)
-    print "NUMA_CHANGE=%s" % change_numa
+    print ("NUMA_CHANGE=%s" % change_numa)
     return [_numa_stat[i].append(change_numa[i]) for i in range(len(change_numa))]
     
 def print_memstat(msg =""):
     mem = get_memstat()
     t = time.time() - T0;
     save_numastat()
-    print msg + " [T=%i sec]" % t + " USED[%i Mb][change: %i Mb] - FREE[%i Mb][change: %i Mb]" % ( 
-        mem["USED"], mem["USED"]-init_mem["USED"], mem["FREE"], mem["FREE"]-init_mem["FREE"])
+    print (msg + " [T=%i sec]" % t + " USED[%i Mb][change: %i Mb] - FREE[%i Mb][change: %i Mb]" % ( 
+        mem["USED"], mem["USED"]-init_mem["USED"], mem["FREE"], mem["FREE"]-init_mem["FREE"]))
 
 def get_memstat():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem = dict()
     lines = out.splitlines()
     mem_strs = lines[1].split()
@@ -326,7 +331,7 @@ def get_memstat():
     mem_strs = lines[2].split()
     mem['USED'] = int(mem_strs[2])
     mem['FREE'] = int(mem_strs[3])
-    #print "mem: [%s Mbs]" %  mem
+    #print ("mem: [%s Mbs]" %  mem)
     return mem
 
 init_mem = get_memstat()
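
Note: get_memstat() (like _used_mem and _free_mem below) reads fixed rows and columns out of `free -m`, so the hard-coded indices are an assumption about the installed procps version; newer `free` releases drop the `-/+ buffers/cache` row, in which case lines[2] is the `Swap:` row and the USED/FREE extraction silently reads swap figures. A minimal sketch of the parsing pattern:

    import subprocess   # Python 3, or after install_aliases() on Python 2
    out = subprocess.getoutput("free -m")
    lines = out.splitlines()
    print(lines[0])                  # column headers
    mem_strs = lines[1].split()      # 'Mem:' row
    print("total=%s used=%s free=%s" % (mem_strs[1], mem_strs[2], mem_strs[3]))
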
@@ -334,46 +339,46 @@ init_mem = get_memstat()
 def meanList(num_list):
     """finds average value of the number list"""
     if len(num_list) == 0:
-        print "meanList: WARNING - empty list, returning 0.0"
+        print ("meanList: WARNING - empty list, returning 0.0")
         return 0.0
     return float(sum(num_list)) / len(num_list)
     
 def sumList(l1, l2):
     """sum up values of two lists l1 + l2"""
     if len(l1) is not len(l2):
-        print "sumList: WARNING: len(l1) not equals len(l2)"
+        print ("sumList: WARNING: len(l1) not equals len(l2)")
         n = len(l1) if len(l2) > len(l1) else len(l2)
     else:
         n = len(l1)
 
     sum = list()
-    for i  in xrange(n):
+    for i  in range(n):
         sum.append(l1[i] + l2[i])
     return sum
 
 def subList(l1, l2): 
     """subtract values of two lists: l1 - l2"""
     if len(l1) is not len(l2):
-        print "subList: WARNING: len(l1) not equals len(l2)"
+        print ("subList: WARNING: len(l1) not equals len(l2)")
         n = len(l1) if len(l2) > len(l1) else len(l2)
     else:
         n = len(l1)
 
     sub = list()
-    for i  in xrange(n):
+    for i  in range(n):
         sub.append(l1[i] - l2[i])
     return sub
         
 def get_spike(l):
-    #print " get_spike:",
-    #print " e0 = ",  l[0]/1024, "Mb", 
-    #print " eN = ",  l[-1]/1024, "Mb",
-    #print " max = ", max(l)/1024, "Mb", 
-    #print " min = ", min(l)/1024, "Mb", 
-    #print " e0 - eN = ",  (l[0] - l[-1])/1024, "Mb",
-    #print " e0 - min = ", (l[0] - min(l))/1024, "Mb",
-    #print " eN - min = ", (l[-1] - min(l))/1024, "Mb",
-    #print " return  max - min =", (max(l) - min(l))/1024, "Mb"
+    #print (" get_spike:",)
+    #print (" e0 = ",  l[0]/1024, "Mb",  end='')
+    #print (" eN = ",  l[-1]/1024, "Mb", end='')
+    #print (" max = ", max(l)/1024, "Mb", end='' )
+    #print (" min = ", min(l)/1024, "Mb",  end='')
+    #print (" e0 - eN = ",  (l[0] - l[-1])/1024, "Mb", end='')
+    #print (" e0 - min = ", (l[0] - min(l))/1024, "Mb", end='')
+    #print (" eN - min = ", (l[-1] - min(l))/1024, "Mb", end='')
+    #print (" return  max - min =", (max(l) - min(l))/1024, "Mb")
     return max(l) - min(l)   
 
 def prepare_mp_stat():
@@ -395,19 +400,19 @@ def prepare_mp_stat():
 
 
 def print_summary():
-    print "===== MOTHER PROCESS SUMMARY ====="
+    print ("===== MOTHER PROCESS SUMMARY =====")
     for (k, v) in mp_stat['mp_summary'].items():
-        print "mp_summary['%s']=%s " % (k, v)
+        print ("mp_summary['%s']=%s " % (k, v))
 
-    print "===== CHILDREN PROCESS SUMMARY ==="
+    print ("===== CHILDREN PROCESS SUMMARY ===")
     for (k, v) in mp_stat['cp_summary'].items():
-        print "cp_summary['%s']=%s " % (k, v)
+        print ("cp_summary['%s']=%s " % (k, v))
 
         
 ################## children tools ######################
 def children_working(ppid):
     """ ps statistics for children of ppid. returns False if no children exist """
-    out = commands.getoutput("ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % ppid)
+    out = subprocess.getoutput("ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime" % ppid)
     ps_lines = out.splitlines()
     ps_lines.pop(0)
         
@@ -417,10 +422,10 @@ def children_working(ppid):
             pid = int(ps_str[0])
             if pid in mp_stat["cpid"].keys():
                 mp_stat["cpid"][pid].add_ps_line(line)
-            #print "child_stat.appended for kid: %i" % pid       
+            #print ("child_stat.appended for kid: %i" % pid       )
         return True #ps returns something -> children still exist   
     else:
-        print " mpMonTools.children_working: no children exist for parent: %i" % ppid
+        print (" mpMonTools.children_working: no children exist for parent: %i" % ppid)
         return False #ps returns nothing -> children either weren't born or died.           
     return False
 
@@ -439,34 +444,34 @@ def summarize_proc_stat():
 
 def children_born(log, mpid, np):
     """ figures out weather the np kids were spawned for mother mpid""" 
-    sc,out = commands.getstatusoutput("ps --ppid %i -o pid,start" % mpid)
+    sc,out = subprocess.getstatusoutput("ps --ppid %i -o pid,start" % mpid)
     if sc is not 0:
-        print "   mpMonTools.children_born: no kids yet... Error, sc=%i" % sc
+        print ("   mpMonTools.children_born: no kids yet... Error, sc=%i" % sc)
         return False
 
     ps_lines = out.splitlines()
-    #print "ps_lines=", ps_lines
+    #print ("ps_lines=", ps_lines)
     nc = len(ps_lines)-1
     
-    print " children_exist: nbr of children = [%i]" % nc
+    print (" children_exist: nbr of children = [%i]" % nc)
     if grepValue(log, "FIRSTEVENT_ELAP_TIME") is None:
         return False
     else:
         pass
 
     if nc>=np : #nbr of children is equal to nbr of procs required 
-        print "%i children workers forked! Registering them (creating ProcDicts) ..." % np
+        print ("%i children workers forked! Registering them (creating ProcDicts) ..." % np)
         ps_lines.pop(0)
         for line in ps_lines:
             ps_str = line.split()
             pid = int(ps_str[0])
-            print "child [%i] born" % pid, 
+            print ("child [%i] born" % pid, )
             if grepExist(log, "%i-%i" % (mpid, pid)):
                 ProcDict(pid, start_time = _seconds(ps_str[1]))
-                print "..... child WORKER [%i] added" %  pid
+                print ("..... child WORKER [%i] added" %  pid)
         return True
     else:
-        print "no children exist for parent: %s " % mpid
+        print ("no children exist for parent: %s " % mpid)
     return False
 
 
@@ -474,13 +479,13 @@ def children_born(log, mpid, np):
 def grepExist(log, field):
     """grep check for the existance of the unique field in the log 
     """
-    #print "grepping %s in %s" % (field, log)
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    #print ("grepping %s in %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc==256:
-        print "grepExist: FALSE: grep %s %s failed with sc=%i" % (field, log, sc)
+        print ("grepExist: FALSE: grep %s %s failed with sc=%i" % (field, log, sc))
         return False
     line = out.splitlines()[0]
-    print "grepExist: TRUE: sc=%i grepped-line=%s" % (sc,line)
+    print ("grepExist: TRUE: sc=%i grepped-line=%s" % (sc,line))
     return True
     
 def grepValue(log, field, sep='='):
@@ -488,17 +493,17 @@ def grepValue(log, field, sep='='):
        Example: out = 'Py:EventLoopMgr      INFO EvtMax  =  123456  something'      
        grepValue(log, "EvtMax", sep="=") = '123456' 
     """
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        print "grepping %s in %s failed" % (field, log)
+        print ("grepping %s in %s failed" % (field, log))
         return None
     line = out.splitlines()[0]
-    print "grepped-line=%s" % line
+    print ("grepped-line=%s" % line)
     import re 
-    vexpr = '\s*'+ sep+ '\s*(\d*\.?\d+)' #vexpr = '\s*'+ sep+ '\s*(\d+)'
+    vexpr = '\\s*'+ sep+ '\\s*(\\d*\\.?\\d+)' #vexpr = '\\s*'+ sep+ '\\s*(\\d+)'
     m = re.search( field + vexpr, line)
     value = m.group(1)
-    print "grepValue:[%s], line=%s" % (value,line)
+    print ("grepValue:[%s], line=%s" % (value,line))
     return value
 
 def grepValueList(log, search_str, field ='', sep='='):
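
The regex literals are rewritten with doubled backslashes because `'\s'` in an ordinary string is an invalid escape sequence that Python 3.6+ warns about. Raw strings express the same pattern more idiomatically; a sketch of the equivalence using the docstring's example line:

    import re
    sep = '='
    vexpr = '\\s*' + sep + '\\s*(\\d*\\.?\\d+)'    # spelling used in the patch
    vexpr_raw = r'\s*' + sep + r'\s*(\d*\.?\d+)'   # same regex, raw strings
    line = 'Py:EventLoopMgr      INFO EvtMax  =  123456  something'
    print(re.search('EvtMax' + vexpr, line).group(1))      # -> 123456
    print(re.search('EvtMax' + vexpr_raw, line).group(1))  # -> 123456
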
@@ -506,9 +511,9 @@ def grepValueList(log, search_str, field ='', sep='='):
        Example: out = 'Py:EventLoopMgr      INFO EvtMax  =  123456  something'      
        grepValue(log, "EvtMax", sep="=") = '123456' 
     """
-    sc,out = commands.getstatusoutput( "grep %s %s" % (search_str, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (search_str, log))
     if sc!=0:
-        print "grepping %s in %s failed with sc=%s" % (search_str, log, sc), "out=%s" % out
+        print ("grepping %s in %s failed with sc=%s" % (search_str, log, sc), "out=%s" % out)
         return []
 
     if field =='':
@@ -516,15 +521,15 @@ def grepValueList(log, search_str, field ='', sep='='):
 
     value_list = []
     
-    #print "grepped lines = %s" % out
+    #print ("grepped lines = %s" % out)
 
     import re 
-    vexpr = '\s*'+ sep+ '\s*(\d*\.?\d+)'
+    vexpr = '\\s*'+ sep+ '\\s*(\\d*\\.?\\d+)'
     for line in out.splitlines():
-        print "grepped-line=%s" % line
+        print ("grepped-line=%s" % line)
         m = re.search( field + vexpr, line)
         value = m.group(1)
-        print "grepValue:[%s], line=%s" % (value,line)
+        print ("grepValue:[%s], line=%s" % (value,line))
         value_list.append(value)
     return value_list
 
@@ -533,22 +538,22 @@ def grepPath(log, field, sep=':'):
        Example: out = 'Py:EventLoopMgr      INFO master workdir: /tmp/athena-mp-tmp-tmmous/22590-1261097934  smthng'      
        grepPath(log, "workdir", sep=":") = '/tmp/athena-mp-tmp-tmmous/22590-1261097934' 
     """
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        print "grepping %s in %s failed" % (field, log)
+        print ("grepping %s in %s failed" % (field, log))
         return None
     line = out.splitlines()[0]
     import re 
-    vexpr = '\s*'+ sep+ '\s*([^\s]+)'
+    vexpr = '\\s*'+ sep+ '\\s*([^\\s]+)'
     m = re.search( field + vexpr, line)
     path = m.group(1)
     return path
 
 
 def grepWorkerPathes(log, field = "WORKER_STDOUT", sep=':'):
-    sc,out = commands.getstatusoutput( "grep %s %s" % (field, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (field, log))
     if sc!=0:
-        print "grepping %s in %s failed" % (field, log)
+        print ("grepping %s in %s failed" % (field, log))
         return None
 
     workers = dict()
@@ -558,11 +563,11 @@ def grepWorkerPathes(log, field = "WORKER_STDOUT", sep=':'):
     #    return []
 
     import re
-    vexpr = '\s*'+ sep+ '\s*([^\s]+)'
+    vexpr = '\\s*'+ sep+ '\\s*([^\\s]+)'
     
     for  line in out.splitlines():
         rout = re.search( field + vexpr, line)
-        rpid = re.search( 'WORKER_PID'  +  '\s*'+ '=' + '\s*([^\s]+)', line)
+        rpid = re.search( 'WORKER_PID'  +  '\\s*'+ '=' + '\\s*([^\\s]+)', line)
         path = rout.group(1)
         pid = rpid.group(1)
 
@@ -571,9 +576,9 @@ def grepWorkerPathes(log, field = "WORKER_STDOUT", sep=':'):
     return workers
 
 def grepWorkerStat(log, search_str = "WORKER_EVENT_STAT", fields=['elap_time',],  sep='='):
-    sc,out = commands.getstatusoutput( "grep %s %s" % (search_str, log))
+    sc,out = subprocess.getstatusoutput( "grep %s %s" % (search_str, log))
     if sc!=0:
-        print "grepping %s in %s failed" % (search_str, log)
+        print ("grepping %s in %s failed" % (search_str, log))
         return None
 
     worker_stat = dict()
@@ -585,7 +590,7 @@ def grepWorkerStat(log, search_str = "WORKER_EVENT_STAT", fields=['elap_time',],
     #    return []
 
     import re
-    vexpr = '\s*'+ sep+ '\s*([^\s]+)'
+    vexpr = '\\s*'+ sep+ '\\s*([^\\s]+)'
     
     for  line in out.splitlines():
         for field in fields:
@@ -599,7 +604,7 @@ def extractWorkersStat(mlog):
     """extract event based statistics of the WORKER using methods implemented above"""
     paths_dict = grepWorkerPathes(mlog)
     worker_stat = dict()
-    for pid,path in paths_dict.iteritems():
+    for pid,path in six.iteritems(paths_dict):
         worker_stat[pid] = grepWorkerStat(path, fields=['evt', 'cpu', 'elap_time', 'elap_os_time', 'system_time', 'user_time'])
     return worker_stat
 
@@ -616,12 +621,12 @@ def writeOutWorkersStat(mlog):
     os.mkdir("stat-%s" % ppid )
     os.chdir("stat-%s" % ppid )
 
-    for pid,stat in ws.iteritems():
+    for pid,stat in six.iteritems(ws):
         rows = list() 
         for i in range( 1 + len(stat['evt'])):
             rows.append('');
 
-        for field,values in stat.iteritems():
+        for field,values in six.iteritems(stat):
             rows[0] = "%s\t%s" % (rows[0], field)
             i=1
             for value in values:
@@ -643,7 +648,7 @@ def writeOutWorkersStat(mlog):
 def launch_athenaMP2(cmd, job, np, ne):
     """"launching cmd: athena.py --nprocs=$np -c EvtMax=$ne $jobo  1> mp.output/stdout_$jobo.$np.$ne   2> mp.output/stderr_$jobo.$np.$ne""" 
     
-    print "job command and options as template: %s" % cmd
+    print ("job command and options as template: %s" % cmd)
     from string import Template
     arg_template= Template(cmd)
     arg_str = arg_template.substitute(MAXEVT=np*ne, NPROCS=np, JOBO=job)
@@ -657,7 +662,7 @@ def launch_athenaMP2(cmd, job, np, ne):
     stderr_name = os.path.join(output_dir, "stderr.mp.%s.%i.%i"  % (job,np,ne))
 
     if not os.path.exists(job):
-        print "job options file %s doesn't exist" % job
+        print ("job options file %s doesn't exist" % job)
         return None
     import shutil
     shutil.copy(job, output_dir)
@@ -666,7 +671,7 @@ def launch_athenaMP2(cmd, job, np, ne):
     STDOUT_FILE = open(stdout_name , "w")
     STDERR_FILE = open(stderr_name,  "w")
     
-    print "<<<LAUNCH>>>: %s" % proc_args
+    print ("<<<LAUNCH>>>: %s" % proc_args)
     mproc = subprocess.Popen( proc_args, 
                              stdout=STDOUT_FILE, 
                              stderr=STDERR_FILE,
@@ -684,11 +689,11 @@ def launch_athenaMP(jobo, np, ne):
     output_dir = "mp.output"
     stdout_name = os.path.join(output_dir, "stdout.mp.%s.%i.%i"  % (jobo,np,ne))
     stderr_name = os.path.join(output_dir, "stderr.mp.%s.%i.%i"  % (jobo,np,ne))
-    print "launching: athena.py --nprocs=%i -c EvtMax=%i %s \
-        1> %s   2> %s" % (np, np*ne, jobo, stdout_name, stderr_name)
+    print ("launching: athena.py --nprocs=%i -c EvtMax=%i %s \
+        1> %s   2> %s" % (np, np*ne, jobo, stdout_name, stderr_name))
 
     if not os.path.exists(jobo):
-        print "job options file doesn't exist"
+        print ("job options file doesn't exist")
         return None
 
     if not os.path.isdir(output_dir):
@@ -720,30 +725,30 @@ def stop_proc(proc):
             pid = proc.pid 
             if proc.poll() is None: os.kill(pid, signal.SIGKILL); 
             proc.wait();
-        print "process %s  terminated" % pid 
-    except Exception, e:
-        print "## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e
-        print sys.exc_info()[0], sys.exc_info()[1]
+        print ("process %s  terminated" % pid )
+    except Exception as e:
+        print ("## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e)
+        print (sys.exc_info()[0], sys.exc_info()[1])
         return False
     pid_list.remove(pid)
     return True
 
 def stop_proc_tree(pid):
     """ Terminate/kill recursively process tree by pid. Be precautious using this!"""
-    out = commands.getoutput("ps --ppid %i" % pid)
+    out = subprocess.getoutput("ps --ppid %i" % pid)
     lines = out.splitlines(); lines.pop(0) #remove header
     try:
         if len(lines) > 0: 
             for line in lines:
                 cpid = int(line.split()[0])
-                print "child [%i:%i] being terminated..." % (pid, cpid)
+                print ("child [%i:%i] being terminated..." % (pid, cpid))
                 stop_proc_tree(cpid)
         if  pid in pid_list: pid_list.remove(pid) 
         os.kill(pid, signal.SIGKILL); #os.waitpid(pid, 0);
-        print "[%i] - terminated." % pid 
-    except Exception, e:
-        print "[%i] - dead #while killing caught exception [%s] !!" % (pid, str(e.__class__)),"  ## What:",e
-        #print sys.exc_info()[0], sys.exc_info()[1]
+        print ("[%i] - terminated." % pid )
+    except Exception as e:
+        print ("[%i] - dead #while killing caught exception [%s] !!" % (pid, str(e.__class__)),"  ## What:",e)
+        #print (sys.exc_info()[0], sys.exc_info()[1])
         return False
     return True
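
`except Exception, e:` is Python 2-only syntax and a SyntaxError on Python 3; `except Exception as e:` parses on both 2.6+ and 3. A sketch of the handler pattern used in these functions:

    import sys
    try:
        raise OSError("no such process")   # illustrative failure
    except Exception as e:
        print("## Caught exception [%s] !!" % str(e.__class__), "  ## What:", e)
        print(sys.exc_info()[0], sys.exc_info()[1])
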
 
@@ -759,11 +764,11 @@ def stop_athenaMP(mproc):
             pid = mproc.pid;
             if mproc.poll() is None: os.kill(pid, signal.SIGKILL); 
             mproc.wait();
-        print "process %s  terminated" % pid 
+        print ("process %s  terminated" % pid )
         return True
-    except Exception, e:
-        print "## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e
-        print sys.exc_info()[0], sys.exc_info()[1]
+    except Exception as e:
+        print ("## Caught exception [%s] !!" % str(e.__class__),"  ## What:",e)
+        print (sys.exc_info()[0], sys.exc_info()[1])
         return False
     return False
 
@@ -778,10 +783,10 @@ def launch_sar(log, time_step):
      `sar -bBcdqrRuvwWy -I SUM -I XALL -n ALL -P ALL` = `sar -A`
     """
     sar_args = [ "sar", "-bBrvwu", "-o", log, "%i"% time_step, "0" ]
-    print "launching: %s %s %s %s %s %s" % tuple(sar_args)    
-    sc,out = commands.getstatusoutput('sar -b 1 1')
+    print ("launching: %s %s %s %s %s %s" % tuple(sar_args)    )
+    sc,out = subprocess.getstatusoutput('sar -b 1 1')
     if sc!=0:
-        print 'launching failed - sar do not work on this system - please install if available!'
+        print ('launching failed - sar does not work on this system - please install if available!')
         return None
     FNULL = open('/dev/null', 'w')
     proc = subprocess.Popen(sar_args, 
@@ -793,8 +798,8 @@ def launch_sar(log, time_step):
                             )
 
     FNULL.close()
-    print "sc=%i" % sc
-    print "out=%s" % out
+    print ("sc=%i" % sc)
+    print ("out=%s" % out)
 
     pid_list.append(proc.pid)
     return proc
@@ -812,32 +817,32 @@ def _num(str):
         
 def get_sar_stat(log, key):
     """ get statistics by issueing this cmd: `sar -key $log`"""
-    print 'launching cmd: sar %s -f %s' % (key, log)        
-    sc,out = commands.getstatusoutput("sar %s -f %s" % (key,log) )
+    print ('launching cmd: sar %s -f %s' % (key, log)        )
+    sc,out = subprocess.getstatusoutput("sar %s -f %s" % (key,log) )
     if sc!=0:
-        print "launching failed - either file %s does not exist or sar does not work on this system - please check!" % log
+        print ("launching failed - either file %s does not exist or sar does not work on this system - please check!" % log)
         return None
     sar_dict = dict()
-    #print"##################################"; print "out=\n", out; print "################################################"
+    #print"(##################################"; print "out=\n", out; print "################################################")
 
     lines = out.splitlines()
-    print "trim1=", lines.pop(0)#trimming output
-    print "trim2=", lines.pop(0)#trimming output
+    print ("trim1=", lines.pop(0))#trimming output
+    print ("trim2=", lines.pop(0))#trimming output
 
     avg_line = lines.pop(); #trimming avg line at the end 
-    print "avg_line1=", avg_line
+    print ("avg_line1=", avg_line)
     
     hstrs = lines.pop(0).replace('%', 'p').replace('/', 'p').split() #trimming header strings and replacing '%' and '/' to satisfy ROOT 
     hstrs[0] = "Time"
-    print "Sar statistics fields found: ", hstrs
+    print ("Sar statistics fields found: ", hstrs)
 
-    #print"##################################"; print "lines=\n", lines; print "################################################"
+    #print("##################################"); print ("lines=\n", lines; print "################################################)"
     
     for hstr in hstrs:
         sar_dict[hstr] = list()
     for line in lines:
         lstrs = line.split()
-        print "lstrs=", lstrs
+        print ("lstrs=", lstrs)
         for i,hstr in enumerate(hstrs):
             if i!=0:
                 sar_dict[hstr].append( _num(lstrs[i]) )
@@ -859,53 +864,55 @@ def get_full_sar_stat(log):
   ##############sysstat and other linux commands wrappers########
 
 def _meminfo():
-    out=commands.getoutput("cat /proc/meminfo")
+    out=subprocess.getoutput("cat /proc/meminfo")
     lines = out.splitlines()
     mem=dict()
+    Kb = 1024
     mem['total']= int(lines[0].split()[1]) / Kb
     mem['free'] = int(lines[1].split()[1]) / Kb
     mem['buffers']= int(lines[2].split()[1]) / Kb
     mem['cached'] = int(lines[3].split()[1]) / Kb
-    print "meminfo.real_total: [%i Mb]", mem['total'] 
-    print "meminfo.free: [%i Mb]", mem['free']
-    print "meminfo.cached: [%i Mb]", mem['cached'] 
-    print "meminfo.buffers: [%i Mb]", mem['buffers']
+    print ("meminfo.real_total: [%i Mb]", mem['total'] )
+    print ("meminfo.free: [%i Mb]", mem['free'])
+    print ("meminfo.cached: [%i Mb]", mem['cached'] )
+    print ("meminfo.buffers: [%i Mb]", mem['buffers'])
     return mem
 
 def _get_iostat():
-    out=commands.getoutput("iostat")
+    out=subprocess.getoutput("iostat")
     io = dict()
     lines = out.splitlines()
     strs = lines[1].split()
     io['used'] = int(strs[2])
+    mem = dict()
     mem['free'] = int(strs[3])
     mem['cached'] = int(strs[5])
     mem['buffers'] = int(strs[6])
     mem_strs = lines[2].split()
     mem['USED'] = int(strs[2])
     mem['FREE'] = int(strs[3])
-    #print "mem: [%s Mbs]" %  mem
+    #print ("mem: [%s Mbs]" %  mem)
     return io
 def _used_mem():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem_strs = out.splitlines()[2].split()
     used_mem = int(mem_strs[2]) 
-    print "used_mem: [%i Mb]" % used_mem
+    print ("used_mem: [%i Mb]" % used_mem)
     return used_mem            
 def _free_mem():
-    out=commands.getoutput("free -m")
+    out=subprocess.getoutput("free -m")
     mem_strs = out.splitlines()[2].split()
     free_mem  = int(mem_strs[3]) 
-    print "free_mem: [%i Mb]" % free_mem
+    print ("free_mem: [%i Mb]" % free_mem)
     return free_mem
 
 def _launch_iostat(log, time_step):
-    print 'launching cmd: iostat $TIME_STEP -d -x > iostat.$jobo.$np.$ne &'
-    sc,out = commands.getstatusoutput( "iostat" )
+    print ('launching cmd: iostat $TIME_STEP -d -x > iostat.$jobo.$np.$ne &')
+    sc,out = subprocess.getstatusoutput( "iostat" )
     if sc!=0:
-        print 'launching failed - iostat do not work on this system'
+        print ('launching failed - iostat does not work on this system')
         return None
-    file = open(log, "w")
+    f_iostat = open(log, "w")
     iostat_proc = subprocess.Popen(
         [ "iostat",  "%i" % time_step, "-d", "-x"], 
         executable="iostat", 
@@ -913,13 +920,13 @@ def _launch_iostat(log, time_step):
         shell=False, 
         close_fds = True)
 
-    file.close()
+    f_iostat.close()
     return iostat_proc  
 def _launch_vmstat(log, time_step):
-    print 'launching cmd: vmstat $TIME_STEP -n > vmstat.$jobo.$np.$ne &'        
-    sc,out = commands.getstatusoutput( "vmstat -V" )
+    print ('launching cmd: vmstat $TIME_STEP -n > vmstat.$jobo.$np.$ne &'        )
+    sc,out = subprocess.getstatusoutput( "vmstat -V" )
     if sc!=0:
-        print 'launching failed - vmstat do not work on this system'
+        print ('launching failed - vmstat does not work on this system')
         return None
     file = open(log, "w")
     proc = subprocess.Popen([ "vmstat", "%i" % time_step, "-n" ], 
@@ -931,7 +938,7 @@ def _launch_vmstat(log, time_step):
     return proc
 def __create_childProcDicts(ppid):
     """ creates stats dictionary with """
-    out = commands.getoutput("ps --ppid %i -o pid, start" % ppid)
+    out = subprocess.getoutput("ps --ppid %i -o pid, start" % ppid)
     ps_lines = out.splitlines()
     ps_lines.pop(0)
     
@@ -941,19 +948,19 @@ def __create_childProcDicts(ppid):
             ps_str = line.split()
             pid = int(ps_str[0])
             ProcDict(pid, start_time = _seconds(ps_str[1]))
-            print "ppid: [%i]: child [%i] added" % (ppid, pid)
+            print ("ppid: [%i]: child [%i] added" % (ppid, pid))
     else: 
-        print "no children exist for parent: %s " % ppid
+        print ("no children exist for parent: %s " % ppid)
 
 
 #######  adopted from AthenaMP/PyComps ###################
 def print_shared_private(pid):
-    print "CPROC-SHARED_PRIVATE_MEM for pid: [%i]" % pid
+    print ("CPROC-SHARED_PRIVATE_MEM for pid: [%i]" % pid)
     for line in open("/proc/%i/status" % pid):
         if line.startswith('Vm'):
                 print(line.strip())
     private,shared=_get_shared_private_mem()
-    print "pid:[%i] ===> private: %s MB | shared: %s MB" % (pid, private/1024., shared /1024.)
+    print ("pid:[%i] ===> private: %s MB | shared: %s MB" % (pid, private/1024., shared /1024.))
 def _get_shared_private_mem(pid='self'):
     """ Finds proc's shared and private memory size from /proc/pid/statm  and /proc/pid/smaps dir
        Coppied from AthenaMP/PyComps.py"""
@@ -981,9 +988,9 @@ def _get_shared_private_mem(pid='self'):
             pss_adjust=0.5 #add 0.5KiB as this average error due to trunctation
             Pss=sum([float(line.split()[1])+pss_adjust for line in pss_lines])
             shared = Pss - private
-    elif (2,6,1) <= kv <= (2,6,9):
-        shared=0 #lots of overestimation, but what can we do?
-        private = rss
+    #elif (2,6,1) <= kv <= (2,6,9):
+    #    shared=0 #lots of overestimation, but what can we do?
+    #    private = rss
     else:
         shared=int(open(statm_name).readline().split()[2])
         shared*=PAGESIZE
@@ -996,7 +1003,7 @@ def _get_shared_private_mem(pid='self'):
 
 def _createRootFile(outName):
     """creating carcasus of report ROOT file"""
-    print "create ROOT file..."
+    print ("create ROOT file...")
     from PerfMonAna.PyRootLib import importRoot
     from ROOT import TTree
     import array
@@ -1096,12 +1103,12 @@ def _createRootFile(outName):
     outFile.cd()
     outFile.Write()
     outFile.Close()
-    print "create ROOT file... [DONE]" 
+    print ("create ROOT file... [DONE]" )
     return
 
 def createRootFile(outName, np):
     """creating structure of ROOT-report file from mp_stat dictionary """
-    print "create ROOT file..."
+    print ("create ROOT file...")
 
     from PerfMonAna.PyRootLib import importRoot
     from ROOT import TTree
@@ -1122,18 +1129,18 @@ def createRootFile(outName, np):
             elif isinstance(mp_stat[t][b][0], float):
                 tree.Branch(b, d,'float/F')
             else:
-                #print "branch [%s] is not int or float type" % b
+                #print ("branch [%s] is not int or float type" % b)
                 tree.Branch(b, i, 'int/I')
         tree.Write()
     outFile.Write()
     outFile.Close()
-    print "create ROOT file... [DONE]"
+    print ("create ROOT file... [DONE]")
 
 
 def fillRootTree(tree, stat, np):
-    #print "writing %s statistics Tree:" % tree.GetName(),
+    #print ("writing %s statistics Tree:" % tree.GetName(), end='')
     branches = stat.keys()
-    #print "    branches=", branches, "...", 
+    #print ("    branches=", branches, "...", end='')
     nbr  = len(branches)
     array_list = list()
 
@@ -1149,63 +1156,61 @@ def fillRootTree(tree, stat, np):
             nums =  array.array('i', [0])
             array_list.append(nums)
         else:
-            #print "branch [%s] is not int or float type" % branch
+            #print ("branch [%s] is not int or float type" % branch)
             nums = array.array('i', [-1])
             array_list.append(nums)
         tree.SetBranchAddress(branch, array_list[-1]);
         
-    for index in xrange(len(stat[branches[0]])):
+    for index in range(len(stat[branches[0]])):
         for array_index, branch in enumerate(branches):
             array_list[array_index][0] = stat[branch][index] if array_list[array_index][0] is not -1 else -1
         tree.Fill()
-    #print "[DONE]"
+    #print ("[DONE]")
 
 
 
 def writeRootFile(outName, np):
     """writes statistics into ROOT file"""
-    print "write ROOT file %s...", outName 
+    print ("write ROOT file %s...", outName )
     createRootFile(outName, np)
     from ROOT import TFile, TTree
     import array
     outFile = TFile( outName, 'update' )
 
     stat_keys = mp_stat.keys()
-    #print "mp_stat.keys()", stat_keys
+    #print ("mp_stat.keys()", stat_keys)
     for key in stat_keys:
-        #print " writing [%s]" % key
+        #print (" writing [%s]" % key)
         tree = outFile.Get( "%s" %   key )
         fillRootTree(tree, mp_stat[key], np)
         tree.Write()
 
     outFile.Write()
     outFile.Close()        
-    print "write ROOT file... [DONE]" 
+    print ("write ROOT file... [DONE]" )
     return
 
 def mergeRootFiles(file, ne):
     import glob
     file_list = glob.glob1(os.getcwd(), "%s.*.%i.root" % (file, ne) )
-    import commands
     cmd = "hadd -f6 mp_stat.%s.ne%i" % (file, ne)
     for f in file_list:
         cmd = cmd + ' ' + f 
-    sc, out = commands.getstatusoutput(cmd)
+    sc, out = subprocess.getstatusoutput(cmd)
 
 def mergeRootOutput(output_file, jobo, np_list, ne):
-    import commands
     from ROOT import TFile, TTree
     #output_file = "merged.%s.ne%i.root" % (jobo, ne)
     cmd = "hadd -f6 %s" % output_file
     for np in np_list:
         # here we copy mp_summary and cp_summary trees in each root file from /$np dir into root dir for further merging
         file = "mp.%s.%i.%i.root" % (jobo, np, ne)
-        print " ---> processing file = %s" % file
+        print (" ---> processing file = %s" % file)
         #here we form the command for merging
         cmd = cmd + " %s" % file
 
-    print "issueing root files merging command:[%s]" % cmd
-    sc, out = commands.getstatusoutput(cmd)
+    print ("issuing root files merging command:[%s]" % cmd)
+    sc, out = subprocess.getstatusoutput(cmd)
     return #output_file
 
 def _createGlobalRootFile(file, ne):
@@ -1214,13 +1219,13 @@ def _createGlobalRootFile(file, ne):
     file_list = glob.glob1(os.getcwd(), "%s.*.%i.root" % (file, ne) )
     outFile = TFile ("%s.%i.root" % (file, ne), 'RECREATE' )    
     for f in file_list:
-        print "Copying trees from [%s]" % f
+        print ("Copying trees from [%s]" % f)
         tf = TFile (f, 'READ' )
         mpt = tf.Get("mp_summary")
         cpt = tf.Get("cp_summary")
         outFile.cd('/')
         dir = "%s" % f.replace(file, "").split(".")[1]
-        print "   creating dir for np = %s" % dir
+        print ("   creating dir for np = %s" % dir)
         outFile.mkdir(dir) # creating dir for np
         outFile.cd(dir)
         mpTree = mpt.CloneTree(); mpTree.Write()
@@ -1242,7 +1247,7 @@ def _createGlobalRootFile(file, ne):
 
 
 def report2(root_file, ne = 0, comments=""):
-    print'  mpMonTools.report(): root_file=', root_file
+    print('  mpMonTools.report(): root_file=', root_file)
     from ROOT import TFile, TTree, TBranch, TCanvas, TPad, TGraph, TLegend, TMultiGraph, gStyle, TLatex, TPaveLabel, TPaveText, TH2I, TMath
 
     def getTreeList(tree, column, condition):
@@ -1272,11 +1277,11 @@ def report2(root_file, ne = 0, comments=""):
                     formula = param[0]
                     condition = param[1]
                 else:
-                    print "MakeMG: ", formula, condition
+                    print ("MakeMG: ", formula, condition)
                     formula = param
                     condition = ""
 
-                print "name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) 
+                print ("name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) )
            
                 #g = makeGraph(tree, name, formula, condition, color=clr)
                 tree.Draw(formula, condition, "goff")
@@ -1291,7 +1296,7 @@ def report2(root_file, ne = 0, comments=""):
 
         if graph_data['type'] is 'list':
             for name, (lx,ly) in graph_data['data'].items():
-                print "name=%s" % name; print lx; print ly
+                print ("name=%s" % name); print (lx); print (ly)
                 clr+=1
                 g = TGraph( len(lx), array.array('f', lx), array.array('f', ly) )
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr); 
@@ -1302,9 +1307,9 @@ def report2(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'array':
             clr = 1
             g_list = list()
-            data = arrayd['data']
+            data = graph_data['data']
             for name,(x,y) in graph_data['data'].items():
-                print x; print y
+                print (x); print (y)
                 clr+=1;
                 g = TGraph(len(x), x, y)
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr) 
@@ -1314,7 +1319,7 @@ def report2(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'text':
             title.DrawPaveLabel(0.1,0.93,0.9,0.99, graph_data['title'], "brNDC")
             for s in graph_data['data']:
-                print "graph_data['data']=%s" % s
+                print ("graph_data['data']=%s" % s)
                 mp_pt.AddText(s)             
             mp_pt.SetTextAlign(12);
             mp_pt.SetTextSize(0.04)
@@ -1359,7 +1364,7 @@ def report2(root_file, ne = 0, comments=""):
     c = TCanvas("mpr", "AthenaMP-mp-scaling-charts", 10, 10, 800, 1024)
     c.SetFillColor(17);  c.SetBorderSize(1); c.cd()
  
-    tfile = TFile(root_file, "READ"); print "   root compression factor = ", tfile.GetCompressionFactor()
+    tfile = TFile(root_file, "READ"); print ("   root compression factor = ", tfile.GetCompressionFactor())
     mpSumTree = tfile.Get("mp_summary")
     cpSumTree = tfile.Get("cp_summary")
     ioTree = tfile.Get("io")
@@ -1368,7 +1373,7 @@ def report2(root_file, ne = 0, comments=""):
 
     if ne is 0:
         ne = int(root_file.split('.')[-2].replace('ne', ''))
-        print "extracted ne=[%i]" % ne
+        print ("extracted ne=[%i]" % ne)
 
 ##### FORMING THE DATA FOR ROOT Graphing-Charting-Histogramming #####    
     np_list = list(set(getTreeList(cpSumTree, 'np', ''))); np_list.sort() #uniqeify and sort np_list
@@ -1446,8 +1451,8 @@ def report2(root_file, ne = 0, comments=""):
             txt_dict[s] += "%10.1f" % getTreeList(mpSumTree, s, "np==%i" % int(np) )[0]
             ltxt_dict[s].append( "%10.1f" % getTreeList(mpSumTree, s, "np==%i" % int(np))[0] )
 
-    print "np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev)
-    print "elap-cpu=%s" % (elap_cpu_time)
+    print ("np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev))
+    print ("elap-cpu=%s" % (elap_cpu_time))
     
     #mn = 7; mt=10000
     from socket import gethostname
@@ -1690,33 +1695,33 @@ def report2(root_file, ne = 0, comments=""):
     gStyle.SetMarkerStyle(21)
     gStyle.SetMarkerColor(2)
     gStyle.SetMarkerSize(0.4)
-    print "gStyle.Set done"
+    print ("gStyle.Set done")
 
     title = TPaveLabel(0.1,0.98,0.9,1, "Athena MP Plots");
     title.SetFillColor(42); title.SetTextFont(40); 
-    #title.Draw();print "title Drawn"
+    #title.Draw();print ("title Drawn")
 
     mgs =  list()  #List of TMultiGraphs
     ls =   list()  #List of TLegends
     gs =   list()  #List of TGraph
 
-    for j in xrange(ppc):
+    for j in range(ppc):
         y_factor = 0.99;   x1 = 0.01; x2 = 0.99;  y1 = y_factor - (y_factor-0.01)*(j+1)/float(ppc); y2 = y_factor - (y_factor-0.01)*j/float(ppc)
-        print "x1,y1,x2,y2",  x1, y1, x2, y2 
+        print ("x1,y1,x2,y2",  x1, y1, x2, y2 )
         pad = TPad("pad%i" % j, "pad%i" % j,   x1, y1, x2, y2,   33); pad.Draw()
         pads.append(pad);
     
     num_cans = len(graph_list) /(cpp*ppc) if len(graph_list) % (cpp*ppc)==0 else len(graph_list)/(cpp*ppc) + 1 
     graph_list += [None,]* (num_cans*cpp*ppc - len(graph_list))
-    print "number of pages/canvases in report = ", num_cans
+    print ("number of pages/canvases in report = ", num_cans)
     
     pdf_file = root_file
     for s in ['merged.', '.py', '.root']:
         pdf_file = pdf_file.replace(s, '')
     pdf_file ="%s.pdf" % pdf_file
 
-    for i in xrange(num_cans):
-        for j in xrange(ppc):
+    for i in range(num_cans):
+        for j in range(ppc):
             graph = graph_list[ppc*i+j]
             if graph is None:
                 continue
@@ -1731,18 +1736,18 @@ def report2(root_file, ne = 0, comments=""):
             pads[j].SetRightMargin(0.2)
             l = TLegend(0.82,0.20,0.99,0.89); ls.append(l) 
             mg = TMultiGraph(); mgs.append(mg)
-            print "graph=", graph
+            print ("graph=", graph)
             gs.append(MakeMultiGraph(graph, mg, l))
 
         c.Update()
         if i == 0:
-            print "pdf.start"
+            print ("pdf.start")
             c.Print(pdf_file+'(', 'pdf') #start page
         elif i < num_cans-1:
-            print "pdf.body"
+            print ("pdf.body")
             c.Print(pdf_file, 'pdf')    #body pages
         else:
-            print "pdf.end"
+            print ("pdf.end")
             c.Print(pdf_file + ')', 'pdf') #end page
         c.SaveAs("%s.%i.png" % (pdf_file, i))
         for pad in pads:
@@ -1750,7 +1755,7 @@ def report2(root_file, ne = 0, comments=""):
 
 
 def report(root_file, ne = 0, comments=""):
-    print'  mpMonTools.report(): root_file=', root_file
+    print('  mpMonTools.report(): root_file=', root_file)
     from ROOT import TFile, TTree, TBranch, gPad, TCanvas, TPad,TProfile, TGraph, TLegend, TLegendEntry, TMultiGraph, gStyle, TLatex, TPaveLabel, TPaveText, TH2I, TMath
 
     def getTreeList(tree, column, condition):
@@ -1783,11 +1788,11 @@ def report(root_file, ne = 0, comments=""):
                     formula = param[0]
                     condition = param[1]
                 else:
-                    print "MakeMG: ", formula, condition
+                    print ("MakeMG: ", formula, condition)
                     formula = param
                     condition = ""
 
-                print "PROFILE: name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) 
+                print ("PROFILE: name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition) )
                 
                 hprof = TProfile(
                         "%s" % name, 
@@ -1802,7 +1807,7 @@ def report(root_file, ne = 0, comments=""):
                     tpl[-1].Draw()
                     draw_option="PSAME"
                 else:
-                    print "PROFILE: %s 'PSAME' clr=%i " % (name, clr)
+                    print ("PROFILE: %s 'PSAME' clr=%i " % (name, clr))
                     tpl[-1].Draw("PSAME")
                 le = l.AddEntry(tpl[-1], name)
                 le.SetFillColor(0)
@@ -1819,23 +1824,23 @@ def report(root_file, ne = 0, comments=""):
                     formula = param[0]
                     condition = param[1]
                 else:
-                    print "MakeMG: ", formula, condition
+                    print ("MakeMG: ", formula, condition)
                     formula = param
                     condition = ""
                 
                 if tree is None:
-                    print "name=%s -> TTree DOESN't EXIST" % name
+                    print ("name=%s -> TTree DOESN't EXIST" % name)
                     continue
 
-                print "name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition), 
+                print ("name=%s, tree=%s, formula=%s, condition=%s" % (name, tree.GetName(), formula, condition), end='')
                 tree.Draw(formula, condition,   "goff")
                 
                 selection_size = tree.GetSelectedRows()
                 if selection_size==-1:
-                    print "-> SKIPPED (DO NOT EXIST): SELECTION_SIZE=%i" % selection_size 
+                    print ("-> SKIPPED (DO NOT EXIST): SELECTION_SIZE=%i" % selection_size )
                     continue
                 else:
-                    print "-> SELECTION_SIZE=%i" % selection_size 
+                    print ("-> SELECTION_SIZE=%i" % selection_size )
                     pass
 
                 g = TGraph(selection_size, tree.GetV2(), tree.GetV1()); gl.append(g)
@@ -1851,7 +1856,7 @@ def report(root_file, ne = 0, comments=""):
                 
         if graph_data['type'] is 'list':
             for name, (lx,ly) in graph_data['data'].items():
-                print "name=%s" % name; print lx; print ly
+                print ("name=%s" % name); print (lx); print (ly)
                 clr+=1
                 g = TGraph( len(lx), array.array('f', lx), array.array('f', ly) )
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr); 
@@ -1862,9 +1867,9 @@ def report(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'array':
             clr = 1
             g_list = list()
-            data = arrayd['data']
+            data = graph_data['data']
             for name,(x,y) in graph_data['data'].items():
-                print x; print y
+                print (x); print (y)
                 clr+=1;
                 g = TGraph(len(x), x, y)
                 g.SetName(name); g.SetLineColor(clr); g.SetLineWidth(1); g.SetMarkerColor(clr) 
@@ -1874,7 +1879,7 @@ def report(root_file, ne = 0, comments=""):
         if graph_data['type'] is 'text':
             title.DrawPaveLabel(0.1,0.93,0.9,0.99, graph_data['title'], "brNDC")
             for s in graph_data['data']:
-                print "graph_data['data']=%s" % s
+                print ("graph_data['data']=%s" % s)
                 mp_pt.AddText(s)             
             mp_pt.SetTextAlign(12);
             mp_pt.SetTextSize(0.04)
@@ -1903,8 +1908,8 @@ def report(root_file, ne = 0, comments=""):
             return []
         
         if mg.GetListOfGraphs() is None:
-            print "MultiGraph: Empty",
-            print "mg=%s" % mg.GetName()
+            print ("MultiGraph: Empty",)
+            print ("mg=%s" % mg.GetName())
             return[]
 
         if 'goptions' in graph_data.keys():
@@ -1925,18 +1930,18 @@ def report(root_file, ne = 0, comments=""):
     c = TCanvas("mp_can", "AthenaMP-mp-scaling-charts", 1, 1, 800, 1024)
     c.SetFillColor(0);  c.SetBorderSize(1); c.cd()
  
-    tfile = TFile(root_file, "READ"); print "   root compression factor = ", tfile.GetCompressionFactor()
+    tfile = TFile(root_file, "READ"); print ("   root compression factor = ", tfile.GetCompressionFactor())
     mpSumTree = tfile.Get("mp_summary")
     cpSumTree = tfile.Get("cp_summary")
     ioTree = tfile.Get("io")
     cpuTree = tfile.Get("cpu")
     memTree = tfile.Get("mem")
     numaTree = tfile.Get("numa")
-    print "numaTree=%s" % numaTree
+    print ("numaTree=%s" % numaTree)
 
     if ne is 0:
         ne = int(root_file.split('.')[-2].replace('ne', ''))
-        print "extracted ne=[%i]" % ne
+        print ("extracted ne=[%i]" % ne)
 
 ##### FORMING THE DATA FOR ROOT Graphing-Charting-Histogramming #####    
     np_list = list(set(getTreeList(cpSumTree, 'np', ''))); np_list.sort() #uniqeify and sort np_list
@@ -2014,17 +2019,17 @@ def report(root_file, ne = 0, comments=""):
         np_txt += "%10s" % np
         for s in mp_lb:
             gtl = getTreeList(mpSumTree, s, "np==%i" % int(np) )
-            print "%s: getTreeList: %s" % (s,gtl), 
+            print ("%s: getTreeList: %s" % (s,gtl), end='')
             gtl_avg = meanList(gtl)
-            print " avg=%10.1f" % gtl_avg
+            print (" avg=%10.1f" % gtl_avg)
             txt_dict[s] += "%10.1f" % gtl_avg
             ltxt_dict[s].append( "%10.1f" % gtl_avg)
         ltxt_dict["total_rate"].append("%10.1f" % 
                     ( 60.0*float(np)*float(ne)/ float(ltxt_dict["m_par_time"][-1]) )
                     ) 
 
-    print "np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev)
-    print "elap-cpu=%s" % (elap_cpu_time)
+    print ("np_list=%s\n etime_stdev=%s \n cpu_time_stdev=%s" % (np_list, elap_time_stdev, cpu_time_stdev))
+    print ("elap-cpu=%s" % (elap_cpu_time))
     
     #mn = 7; mt=10000
     from socket import gethostname
@@ -2323,33 +2328,33 @@ def report(root_file, ne = 0, comments=""):
     gStyle.SetMarkerStyle(21)
     gStyle.SetMarkerColor(2)
     gStyle.SetMarkerSize(0.3)
-    print "gStyle.Set done"
+    print ("gStyle.Set done")
 
     title = TPaveLabel(0.1,0.98,0.9,1, "Athena MP Plots");
     title.SetFillColor(0); title.SetTextFont(40); 
-    #title.Draw();print "title Drawn"
+    #title.Draw();print ("title Drawn")
 
     mgs =  list()  #List of TMultiGraphs
     ls =   list()  #List of TLegends
     gs =   list()  #List of TGraph
 
-    for j in xrange(ppc):
+    for j in range(ppc):
         y_factor = 0.99;   x1 = 0.01; x2 = 0.99;  y1 = y_factor - (y_factor-0.01)*(j+1)/float(ppc); y2 = y_factor - (y_factor-0.01)*j/float(ppc)
-        print "x1,y1,x2,y2",  x1, y1, x2, y2 
+        print ("x1,y1,x2,y2",  x1, y1, x2, y2 )
         pad = TPad("pad%i" % j, "pad%i" % j,   x1, y1, x2, y2,   10); pad.Draw()
         pads.append(pad);
     
-    num_cans = len(graph_list) /(cpp*ppc) if len(graph_list) % (cpp*ppc)==0 else len(graph_list)/(cpp*ppc) + 1
+    num_cans = len(graph_list) // (cpp*ppc) if len(graph_list) % (cpp*ppc) == 0 else len(graph_list) // (cpp*ppc) + 1
     graph_list += [None,]* (num_cans*cpp*ppc - len(graph_list))
-    print "number of pages/canvases in report = ", num_cans
+    print ("number of pages/canvases in report = ", num_cans)
     
     pdf_file = root_file
     for s in ['merged.', '.py', '.root']:
         pdf_file = pdf_file.replace(s, '')
     pdf_file ="%s.pdf" % pdf_file
     tpl = list()
-    for i in xrange(num_cans):
-        for j in xrange(ppc):
+    for i in range(num_cans):
+        for j in range(ppc):
             graph = graph_list[ppc*i+j]
             if graph is None:
                 continue
@@ -2365,18 +2370,18 @@ def report(root_file, ne = 0, comments=""):
             l = TLegend(0.82,0.20,0.99,0.89); ls.append(l) 
             #tpl = list()#TProfile list    
             mg = TMultiGraph(); mgs.append(mg)
-            print "graph=", graph
+            print ("graph=", graph)
             gs.append(MakeMultiGraph(c, pads[j],graph, mg, l, tpl))
 
         c.Update()
         if i == 0:
-            print "pdf.start"
+            print ("pdf.start")
             c.Print(pdf_file+'(', 'pdf') #start page
         elif i < num_cans-1:
-            print "pdf.body"
+            print ("pdf.body")
             c.Print(pdf_file, 'pdf')    #body pages
         else:
-            print "pdf.end"
+            print ("pdf.end")
             c.Print(pdf_file + ')', 'pdf') #end page
         #c.SaveAs("%s.%i.png" % (pdf_file, i))
         #c.SaveAs("%s.%i.C" % (pdf_file, i))
@@ -2389,7 +2394,7 @@ def report(root_file, ne = 0, comments=""):
     pads[0].SetRightMargin(0.2)
     draw_option=""
     for tp in tpl:
-        print " TProfile: %s" % tp.GetName()
+        print (" TProfile: %s" % tp.GetName())
         if draw_option=="":
             tp.Draw()
             draw_option="PSAME"
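
A note on the trailing-comma prints converted in the hunks above: in Python 2 the comma suppresses the newline, so the faithful Python 3 spelling uses an explicit end='' (as done for the getTreeList print above); merely keeping the comma inside the parentheses leaves a stray argument separator and drops the stay-on-the-line behaviour. A minimal sketch of the equivalence:

    from __future__ import print_function

    name = "mg0"  # hypothetical graph name, for illustration only
    # py2: print "MultiGraph: Empty",   (trailing comma = no newline)
    print("MultiGraph: Empty", end='')
    print(" mg=%s" % name)
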
diff --git a/Control/AthenaMP/share/tests/extract_mp_stat.py b/Control/AthenaMP/share/tests/extract_mp_stat.py
index f0a9e406e0a6de55a469d1d01b40ac4ce8cafa64..8d77ccc29283e38a3e16c136ea9eaa00f7217528 100755
--- a/Control/AthenaMP/share/tests/extract_mp_stat.py
+++ b/Control/AthenaMP/share/tests/extract_mp_stat.py
@@ -10,6 +10,7 @@
 # @endcode
 #
 
+from __future__ import print_function
 
 __version__ = "$Revision: 285809 $"
 __author__  = "Mous Tatarkhanov <tmmous@cern.ch>"
@@ -39,9 +40,9 @@ if __name__ == "__main__":
         
     log_file = options.log_file
 
-    print "log_file = %s" % log_file
+    print ("log_file = %s" % log_file)
     if not os.path.exists(log_file):
-        print "log_file doesn't exist. Please give valid parent process log file"
+        print ("log_file doesn't exist. Please give valid parent process log file")
         str(parser.print_help() or "")
         sys.exit(1)
     
diff --git a/Control/AthenaMP/share/tests/flush_cache.py b/Control/AthenaMP/share/tests/flush_cache.py
index a164bdba4dff1c657e5bea9bf8a78a5bfb4bd396..efeab70276392bef71eb70d6951fd87e6b854572 100755
--- a/Control/AthenaMP/share/tests/flush_cache.py
+++ b/Control/AthenaMP/share/tests/flush_cache.py
@@ -1,52 +1,57 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import os, sys, time
 import multiprocessing
 from multiprocessing import Pool
 
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 def flush_cache(n):
     l = list()
-    for i in xrange(n):
+    for i in range(n):
         l.append("a" * (1 << 30))
-    print "%i gb of memory eaten" % n
+    print ("%i gb of memory eaten" % n)
     time.sleep(10)
     return n
 
 if __name__ == '__main__':
-    import commands
-    out = commands.getoutput('free -m')
-    print ">free -m \n", out
+    out = subprocess.getoutput('free -m')
+    print (">free -m \n", out)
 
     total_mem_mb =int( out.splitlines()[1].split()[1])
     cached_mem_mb = int( out.splitlines()[1].split()[6])
-    print "CACHED [%i Mb] - before flush" % cached_mem_mb
+    print ("CACHED [%i Mb] - before flush" % cached_mem_mb)
     
     if cached_mem_mb < 200:
-        print "no need to flush the cache... bye!"
+        print ("no need to flush the cache... bye!")
         sys.exit(0)
 
     gb = 1 + (total_mem_mb >> 10) 
     
     ncpus = multiprocessing.cpu_count() 
-    print "ncpus= [%i]" % ncpus
-    print "total available memory [%i Mb] [%i Gb]" % (total_mem_mb, gb)
+    print ("ncpus= [%i]" % ncpus)
+    print ("total available memory [%i Mb] [%i Gb]" % (total_mem_mb, gb))
 
     nprocs = 2*ncpus
-    ngb = 1 + gb / nprocs
+    ngb = 1 + gb // nprocs
 
     
-    print "Nbr of Procs to bite on memory [%i] " % nprocs
-    print "Nbr of Gb to flush per process  [%i Gb]" % ngb
+    print ("Nbr of Procs to bite on memory [%i] " % nprocs)
+    print ("Nbr of Gb to flush per process  [%i Gb]" % ngb)
 
 
     pool = Pool(processes = nprocs)
     result = pool.map(flush_cache, [ngb,]*nprocs)
-    print "Total memory eaten: [%i Gb]" % sum(result)
+    print ("Total memory eaten: [%i Gb]" % sum(result))
 
-    out = commands.getoutput('free -m')        
-    print ">free -m \n", out
+    out = subprocess.getoutput('free -m')        
+    print (">free -m \n", out)
     cached_mem_mb = int( out.splitlines()[1].split()[6])    
-    print "CACHED [%i Mb] - after flush" % cached_mem_mb
-    print "Your machine's memory cache is  flushed" 
+    print ("CACHED [%i Mb] - after flush" % cached_mem_mb)
+    print ("Your machine's memory cache is flushed")
     
     time.sleep(5)
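
The commands-to-subprocess move in this file works on both interpreter lines because the "future" package's standard_library.install_aliases() backports subprocess.getoutput to Python 2, while Python 3 provides it natively. A sketch of the same free-output parsing, assuming the Linux `free -m` column layout (the chunk divisor stands in for nprocs):

    import subprocess

    out = subprocess.getoutput('free -m')               # replaces commands.getoutput
    total_mem_mb = int(out.splitlines()[1].split()[1])  # second line, second column: total
    gb = 1 + (total_mem_mb >> 10)
    ngb = 1 + gb // 8                                   # floor division keeps an int on py3
    print("total [%i Mb] [%i Gb], chunk [%i Gb]" % (total_mem_mb, gb, ngb))
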
diff --git a/Control/AthenaMP/share/tests/mjMon.py b/Control/AthenaMP/share/tests/mjMon.py
index 79bff46d520e663ef7c60e23fd6c7ca80ec9e8eb..28697e436aa873261770450111e5072aec3453d5 100755
--- a/Control/AthenaMP/share/tests/mjMon.py
+++ b/Control/AthenaMP/share/tests/mjMon.py
@@ -13,6 +13,7 @@
 # @endcode
 #
 
+from __future__ import print_function
 
 __version__ = "$Revision: 276792 $"
 __author__  = "Mous Tatarkhanov <tmmous@cern.ch>"
@@ -25,7 +26,7 @@ import sys
 class Writer:
     def __init__(self, stdout, filename):
         self.stdout = stdout
-        self.logfile = file(filename, 'a')
+        self.logfile = open(filename, 'a')
 
     def write(self, text):
         self.stdout.write(text)
@@ -111,26 +112,26 @@ if __name__ == "__main__":
         numa_list = None
 
     if isinstance(numa_list, list):
-        print "numa_list=%s" % numa_list
+        print ("numa_list=%s" % numa_list)
     elif numa_list is not None:
-        print "Please input correct numa_list"
+        print ("Please input correct numa_list")
         str(parser.print_help() or "")
         sys.exit(1)
 
     ne = int(options.nbrEvts)
     jobo = options.jobo
-    print "np_list = ", np_list
-    print "ne = ", ne
-    print "jobo = ", jobo
+    print ("np_list = ", np_list)
+    print ("ne = ", ne)
+    print ("jobo = ", jobo)
     job = jobo.split()[0] 
-    print "mpMon.log =", options.outFileName
-    print "doFluchCache=", options.doFlushCache, type(options.doFlushCache)
+    print ("mpMon.log =", options.outFileName)
+    print ("doFlushCache=", options.doFlushCache, type(options.doFlushCache))
     if options.doFlushCache:
         options.commentsStr += ".doFlushCache"
-    print "numa_list=%s" % numa_list
+    print ("numa_list=%s" % numa_list)
 
     def cleanup():
-        print ' Cleaning...Goodbye!'
+        print (' Cleaning...Goodbye!')
         for pid in mpt.pid_list:
             mpt.stop_proc_tree(pid)
     
@@ -146,7 +147,6 @@ if __name__ == "__main__":
     import subprocess
     import signal
     import time
-    import commands
     
     
     for np in np_list:
@@ -160,7 +160,7 @@ if __name__ == "__main__":
             os.remove(sar_log)
                
         if options.doFlushCache:
-            print subprocess.call(['flush_cache.py',])
+            print (subprocess.call(['flush_cache.py',]))
             
         #time.sleep(TIME_STEP) 
         
@@ -184,16 +184,16 @@ if __name__ == "__main__":
         
         if numa_list is not None:
             if len(numa_list) < np:
-                print "len(numa_list) < np:  need to append [('f','f'),]"
+                print ("len(numa_list) < np:  need to append [('f','f'),]")
                 numa_list += [('f','f'),] * (np - len(numa_list))
             else:
-                print "len(numa_list)==len(range(np)): there are enough numa settings defined"
+                print ("len(numa_list)==len(range(np)): there are enough numa settings defined")
             
             iterator = zip(range(np), numa_list)
         else:
             iterator = zip(range(np), range(np))
 
-        print "numa_list=%s" % numa_list
+        print ("numa_list=%s" % numa_list)
 
         se = 0 # skip events
         numa_set = None
@@ -207,7 +207,7 @@ if __name__ == "__main__":
             (proc, proc_out, proc_err) = mpt.launch_athena(jobo, ne, se, np, "ne%i.ni%i" % (ne,i), numa_set ); #launching athena
             proc_list.append(proc)
             proc_dict[proc.pid] = (proc_out, proc_err)
-            #print "%s athena %i.%i.%i proc launched ...[pid %i]  out:%s err:%s" % (numa_str, ne, np, i, proc.pid, proc_out, proc_err )
+            #print ("%s athena %i.%i.%i proc launched ...[pid %i]  out:%s err:%s" % (numa_str, ne, np, i, proc.pid, proc_out, proc_err ))
             se +=ne
         time.sleep(TIME_STEP); 
 
@@ -224,24 +224,24 @@ if __name__ == "__main__":
             
         t1=time.time()
         
-        print "athena processes finished:"
+        print ("athena processes finished:")
         
         #SUMMARY
         mpt.summarize_proc_stat()
-        for i in xrange(2):
+        for i in range(2):
             _print_mem()
             time.sleep(TIME_STEP)
         
-        print "FINISHED MONITORING:"
+        print ("FINISHED MONITORING:")
         mpt.stop_proc(sar_proc)
         
-        print "COLLECTING STATISTICS..."
+        print ("COLLECTING STATISTICS...")
         mpt.get_full_sar_stat(sar_log)
-        print "FINISHED COLLECTING STATISTICS"
+        print ("FINISHED COLLECTING STATISTICS")
         
-        print "START ANALYSIS..."
+        print ("START ANALYSIS...")
         
-        print " ELAPSED TIMES: \n Time: dt1=[%i sec]" % (t1-t0)
+        print (" ELAPSED TIMES: \n Time: dt1=[%i sec]" % (t1-t0))
         
     
         _mp_stat['sp_summary']=mpt.SPSummary(np)
@@ -260,15 +260,15 @@ if __name__ == "__main__":
         
         mpt.print_summary()
         
-        print "FINISHED ANALYSIS"
+        print ("FINISHED ANALYSIS")
         
-        print "START REPORT..."
+        print ("START REPORT...")
         mpt.prepare_mp_stat() # preparing mp_stat dictionary for ROOT
         import pickle
-        pickle.dump(_mp_stat, open("pickle.%s.f" % suffix,  "w"))        
+        pickle.dump(_mp_stat, open("pickle.%s.f" % suffix,  "wb"))
 
         mpt.writeRootFile("%s.root" % suffix, np)
-        print "FINISHED REPORT."
+        print ("FINISHED REPORT.")
         
         cleanup()
 
@@ -282,6 +282,6 @@ if __name__ == "__main__":
         mpt.report(merged_root_file, ne, comments = options.commentsStr)
     
     cleanup()
-    print "The End"
+    print ("The End")
     sys.exit(0)
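
The pickle mode change above ("w" to "wb") is mandatory on Python 3, where pickle streams are bytes; the reading side needs "rb" symmetrically. A self-contained sketch:

    import pickle

    stats = {'np': 4, 'm_par_time': 12.5}      # stand-in for the _mp_stat dictionary
    with open('pickle.demo.f', 'wb') as f:     # binary mode for dump
        pickle.dump(stats, f)
    with open('pickle.demo.f', 'rb') as f:     # and for load
        assert pickle.load(f) == stats
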
 
diff --git a/Control/AthenaMP/share/tests/mpMon.py b/Control/AthenaMP/share/tests/mpMon.py
index 141bc501702e23c26df496fcf978de4c6c5f8326..92400de6aaf920c6a2645634857e51ae44a65b75 100755
--- a/Control/AthenaMP/share/tests/mpMon.py
+++ b/Control/AthenaMP/share/tests/mpMon.py
@@ -13,6 +13,8 @@
 # @endcode
 #
 
+from __future__ import print_function
+
 
 """
 PROCESS STATE CODES
@@ -38,7 +40,7 @@ import sys
 class Writer:
     def __init__(self, stdout, filename):
         self.stdout = stdout
-        self.logfile = file(filename, 'a')
+        self.logfile = open(filename, 'a')
 
     def write(self, text):
         self.stdout.write(text)
@@ -114,17 +116,17 @@ if __name__ == "__main__":
 
     ne = int(options.nbrEvts)
     jobo = options.jobo
-    print "np_list = ", np_list
-    print "ne = ", ne
-    print "jobo = ", jobo
-    print "mpMon.log =", options.outFileName
-    print "doFluchCache=", options.doFlushCache, type(options.doFlushCache)
+    print ("np_list = ", np_list)
+    print ("ne = ", ne)
+    print ("jobo = ", jobo)
+    print ("mpMon.log =", options.outFileName)
+    print ("doFlushCache=", options.doFlushCache, type(options.doFlushCache))
     if options.doFlushCache:
         options.commentsStr += ".doFlushCache"
     
     
     def cleanup():
-        print ' Cleaning...Goodbye!'
+        print (' Cleaning...Goodbye!')
         for pid in mpt.pid_list:
             mpt.stop_proc_tree(pid)
 
@@ -137,7 +139,6 @@ if __name__ == "__main__":
     import subprocess
     import signal
     import time
-    import commands
     
     for np in np_list:
         writer.flush()
@@ -151,7 +152,7 @@ if __name__ == "__main__":
             mpt.print_memstat("<np%i.ne%i>:" % (np, ne))
        
         if options.doFlushCache:
-            print subprocess.call(['flush_cache.py',])
+            print (subprocess.call(['flush_cache.py',]))
             time.sleep(TIME_STEP) 
         
         mpt.init_mp_stat()
@@ -167,10 +168,10 @@ if __name__ == "__main__":
         t0=time.time()
         mproc = mpt.launch_athenaMP(jobo, np, ne); #launching athena-MP
         mpid = mproc.pid #mother process pid
-        print "parent launched ...[ %i]" % mpid       
+        print ("parent launched ...[ %i]" % mpid)
         
         mp_log = os.path.join("mp.output", "stdout.%s" % suffix)
-        #print "mpid_log = ", mp_log
+        #print ("mpid_log = ", mp_log)
         
         _mproc = mpt.ProcDict(mpid, child=False)        
        
@@ -192,7 +193,7 @@ if __name__ == "__main__":
         
         _print_mem()
         
-        print "children processes finished:"
+        print ("children processes finished:")
         
         #SERIAL: Mother-Finalize stage
         while mproc.poll() is None:
@@ -203,31 +204,31 @@ if __name__ == "__main__":
         
         mpt.summarize_proc_stat()
        
-        #print "EXIT, thus have to terminate all created processes:"
+        #print ("EXIT, thus have to terminate all created processes:")
         try:
-            mproc.wait(); print "mproc joined-finished"
-        except Exception, e:
-            print "## while waiting mother process caught exception [%s] !!" % str(e.__class__), "## What:",e,
-            print sys.exc_info()[0], sys.exc_info()[1]
+            mproc.wait(); print ("mproc joined-finished")
+        except Exception as e:
+            print ("## while waiting mother process caught exception [%s] !!" % str(e.__class__), "## What:", e, end='')
+            print (sys.exc_info()[0], sys.exc_info()[1])
             sc = 1
             pass
         
-        for i in xrange(3):
+        for i in range(3):
             _print_mem()
             time.sleep(TIME_STEP)
         
-        print "FINISHED MONITORING:"
+        print ("FINISHED MONITORING:")
         mpt.stop_proc(sar_proc)
         
-        print "COLLECTING STATISTICS..."
+        print ("COLLECTING STATISTICS...")
         mpt.get_full_sar_stat(sar_log)
-        print "FINISHED COLLECTING STATISTICS"
+        print ("FINISHED COLLECTING STATISTICS")
         
-        print "START ANALYSIS..."
+        print ("START ANALYSIS...")
         
         cp_dir = mpt.grepPath(mp_log, "workdir", sep=':')
-        #print "worker master cpid_dir = ", cp_dir
-        print " ELAPSED TIMES: \n MotherInit: dt1=[%i sec] \n Parallel dt2=[%i sec] \n MotherFinalize dt3=[%i sec]" % (t1-t0, t2-t1, t3-t2)
+        #print ("worker master cpid_dir = ", cp_dir)
+        print (" ELAPSED TIMES: \n MotherInit: dt1=[%i sec] \n Parallel dt2=[%i sec] \n MotherFinalize dt3=[%i sec]" % (t1-t0, t2-t1, t3-t2))
         
         _mp_stat['cp_summary']=mpt.CPSummary(np)
         _mp_stat['mp_summary']=mpt.MPSummary(np)
@@ -248,15 +249,15 @@ if __name__ == "__main__":
         
         mpt.print_summary()
         
-        print "FINISHED ANALYSIS"
+        print ("FINISHED ANALYSIS")
         
-        print "START REPORT..."
+        print ("START REPORT...")
         mpt.prepare_mp_stat() # preparing mp_stat dictionary for ROOT
         import pickle
-        pickle.dump(_mp_stat, open("pickle.%s.f" % suffix,  "w"))        
+        pickle.dump(_mp_stat, open("pickle.%s.f" % suffix,  "wb"))
 
         mpt.writeRootFile("%s.root" % suffix, np)
-        print "FINISHED REPORT."
+        print ("FINISHED REPORT.")
         
         cleanup()
 
@@ -270,6 +271,6 @@ if __name__ == "__main__":
         mpt.report(merged_root_file, ne, comments = options.commentsStr)
     
     cleanup()
-    print "The End"
+    print ("The End")
     sys.exit(0)
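
The `except Exception, e` form converted in this file is, like the bare print statement, a hard syntax error under Python 3; the `as` form parses on both lines (2.6 and later). In miniature:

    from __future__ import print_function
    import sys

    try:
        raise RuntimeError("demo")
    except Exception as e:        # py2-only form was: except Exception, e:
        print("## caught exception [%s] !!" % str(e.__class__), "## What:", e, end='')
        print(sys.exc_info()[0], sys.exc_info()[1])
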
 
diff --git a/Control/AthenaMP/share/tests/mp_basic_test.py b/Control/AthenaMP/share/tests/mp_basic_test.py
index 369878c7d64ceea9587eb0bcd4643a0f429925c2..91ac6f92eff939379e4e9dde81377924dc26c62f 100644
--- a/Control/AthenaMP/share/tests/mp_basic_test.py
+++ b/Control/AthenaMP/share/tests/mp_basic_test.py
@@ -3,6 +3,8 @@
 # @file mp_basic_test.py
 # @purpose: simple file to create a few elephantino events with athena-mp
 
+from __future__ import print_function
+
 input_file_name = 'my.data.pool'
 output_file_name= 'reaccessed.my.data.pool'
 
@@ -18,11 +20,11 @@ OUTPUT='%(input_file_name)s'
 """ % globals()
 app.include('AthExThinning/AthExThinning_makeData.py')
 
-print "=== create an elephantino file..."
+print ("=== create an elephantino file...")
 rc = app.run(stdout=os.devnull)
 if rc:
     raise RuntimeError(rc)
-print "=== create an elephantino file... [ok]"
+print ("=== create an elephantino file... [ok]")
 
 
 app = accp.AthenaApp(cmdlineargs=['--nprocs=-1'])
@@ -35,31 +37,29 @@ OUTPUT='%(output_file_name)s'
 app.include('AthExThinning/ReadNonThinnedData_jobOptions.py')
 
 mp_logfile = open('mp.elephantino.readback.logfile.txt', 'w+')
-print "=== read the elephantino file back (with athena-mp)... (logfile=%s)" % (mp_logfile.name,)
+print ("=== read the elephantino file back (with athena-mp)... (logfile=%s)" % (mp_logfile.name,))
 rc = app.run(stdout=mp_logfile)
 if rc:
     raise RuntimeError(rc)
-print "=== read the elephantino file back (with athena-mp)... [ok]"
+print ("=== read the elephantino file back (with athena-mp)... [ok]")
 
 input_file  = af.fopen(input_file_name).infos
 output_file = af.fopen(output_file_name).infos
 
-print ":"*80
-print "::: results:"
+print (":"*80)
+print ("::: results:")
 
-print """\
+print ("""\
 input_file: [%s]
   nentries: %s""" % (
   input_file['file_name'],
-  input_file['nentries'],
-  )
+  input_file['nentries']))
 
-print """\
+print ("""\
 output_file: [%s]
    nentries: %s""" % (
    output_file['file_name'],
-   output_file['nentries'],
-   )
+   output_file['nentries']))
 
-print "::: bye."
-print ":"*80
+print ("::: bye.")
+print (":"*80)
diff --git a/Control/AthenaMP/share/tests/mp_genevt_test.py b/Control/AthenaMP/share/tests/mp_genevt_test.py
index b6fdff252dfe78f7e9182f133b045cefae7b20ac..05a7e125c3ccd5ea2825a178f30f6590bcdc9ccc 100644
--- a/Control/AthenaMP/share/tests/mp_genevt_test.py
+++ b/Control/AthenaMP/share/tests/mp_genevt_test.py
@@ -4,6 +4,8 @@
 # @purpose: simple file to create a few ttbar events and read them back
 #           with athena-mp
 
+from __future__ import print_function
+
 input_file_name = 'mc.event.pool'
 output_file_name= 'reaccessed.mc.event.pool'
 
@@ -19,11 +21,12 @@ OUTPUT='%(input_file_name)s'
 """ % globals()
 app.include('McParticleTests/iotest_WriteGenEvent_jobOptions.py')
 
-print "=== create an EVGEN file..."
-rc = app.run(stdout=os.devnull)
+evt_logfile = open('mp.evgen.logfile.txt', 'w+')
+print ("=== create an EVGEN file...")
+rc = app.run(stdout=evt_logfile)
 if rc:
     raise RuntimeError(rc)
-print "=== create an EVGEN file... [ok]"
+print ("=== create an EVGEN file... [ok]")
 
 
 app = accp.AthenaApp(cmdlineargs=['--nprocs=-1'])
@@ -36,20 +39,20 @@ OUTPUT='%(output_file_name)s'
 app.include('McParticleTests/iotest_ReadGenEvent_jobOptions.py')
 
 mp_logfile = open('mp.readback.logfile.txt', 'w+')
-print "=== read the EVGEN file back (with athena-mp)... (logfile=%s)" % (mp_logfile.name,)
+print ("=== read the EVGEN file back (with athena-mp)... (logfile=%s)" % (mp_logfile.name,))
 rc = app.run(stdout=mp_logfile)
 if rc:
     raise RuntimeError(rc)
-print "=== read the EVGEN file back (with athena-mp)... [ok]"
+print ("=== read the EVGEN file back (with athena-mp)... [ok]")
 
-print ":"*80
-print "::: results:"
+print (":"*80)
+print ("::: results:")
 input_file = af.fopen(input_file_name).infos
-print "input_file: [%s]\n nentries: %s" % (input_file['file_name'],
-                                           input_file['nentries'],)
+print ("input_file: [%s]\n nentries: %s" % (input_file['file_name'],
+                                           input_file['nentries'],))
 
 output_file = af.fopen('reaccessed.mc.event.pool').infos
-print "output_file: [%s]\n nentries: %s" % (output_file['file_name'],
-                                            output_file['nentries'],)
-print "::: bye."
-print ":"*80
+print ("output_file: [%s]\n nentries: %s" % (output_file['file_name'],
+                                             output_file['nentries'],))
+print ("::: bye.")
+print (":"*80)
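
Beyond the print conversions, this file also redirects the EVGEN step to a named logfile instead of os.devnull, so a failing generation job leaves something to inspect. Sketched with the AthenaApp helper these tests already use (filenames illustrative):

    import AthenaCommon.ChapPy as accp

    app = accp.AthenaApp()
    evt_logfile = open('mp.evgen.logfile.txt', 'w+')   # was: stdout=os.devnull
    rc = app.run(stdout=evt_logfile)
    if rc:
        raise RuntimeError(rc)
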
diff --git a/Control/AthenaMP/share/tests/smem_mon.py b/Control/AthenaMP/share/tests/smem_mon.py
index c9db0940477349fac903841d3c7fe087c4753d58..79d95923b325eb383bd3c35fbc07ad9d3b769771 100755
--- a/Control/AthenaMP/share/tests/smem_mon.py
+++ b/Control/AthenaMP/share/tests/smem_mon.py
@@ -7,6 +7,8 @@
 # @endcode
 #
 
+from __future__ import print_function
+
 
 __version__ = "$Revision: 000001 $"
 __author__  = "Mous Tatarkhanov <tmmous@cern.ch>"
@@ -15,7 +17,10 @@ from optparse import OptionParser
 
 import sys, os
 import time, operator
-import commands 
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
 
 smem_exe = "/afs/cern.ch/user/t/tmmous/smem-0.9/smem" 
 smem_log = "smem_log"
@@ -30,12 +35,12 @@ def smem(ppid = None, message = None):
 
     if message is not None:
         cmd = "echo %s >> %s" % (message, smem_log)
-        out = commands.getoutput(cmd)
+        out = subprocess.getoutput(cmd)
 
     cmd = "%s -P athena.py -s pid >> %s" % (smem_exe, smem_log) 
-    out += commands.getoutput(cmd)
+    out = subprocess.getoutput(cmd)
     
-    print "smem: %s" % out
+    print ("smem: %s" % out)
     
     if ps_line_nbr(ppid) > 0:
         return True
@@ -43,38 +48,35 @@ def smem(ppid = None, message = None):
         return False
 
 def ps_line_nbr(ppid):
-    import commands
     cmd = "ps --ppid %s -o pid,state,vsize,rss,sz,start,cputime,etime " % ppid
-    (sc, out) = commands.getstatusoutput(cmd)
+    (sc, out) = subprocess.getstatusoutput(cmd)
     
     if (sc != 0):
-        print "%s\n" % cmd
-        print " PS> ERRROR... sc=%i" % sc
-        print " out=%s" % out 
+        print ("%s\n" % cmd)
+        print (" PS> ERROR... sc=%i" % sc)
+        print (" out=%s" % out)
         return 0
     
-    print ">PS sc=%i" % sc
-    print "%s" % out
+    print (">PS sc=%i" % sc)
+    print ("%s" % out)
 
     
     ln = len(out.splitlines()) - 1
-    print "line_nbr=", ln
+    print ("line_nbr=", ln)
     return ln
 
 def get_cpu(pid):
-    import commands
     cmd = "ps --pid %i -o psr" % pid
-    #print ">%s" % cmd
-    out = commands.getoutput(cmd)
+    #print (">%s" % cmd)
+    out = subprocess.getoutput(cmd)
     cpu = int(out.splitlines()[1].split()[0])
-    #print "pid: [%i] has cpu: [%i]" % (pid, cpu)
+    #print ("pid: [%i] has cpu: [%i]" % (pid, cpu))
     return cpu
 
 def set_proc_affinity(pid, cpu):
-    import commands
     cmd = "taskset -pc %i %i" % (cpu, pid)
-    #print "> taskset -pc %i %i" % (cpu, pid)                                                                                                       
-    st,out = commands.getstatusoutput(cmd)
+    #print ("> taskset -pc %i %i" % (cpu, pid))
+    st,out = subprocess.getstatusoutput(cmd)
     return st
 
 time_list = list()
@@ -140,13 +142,13 @@ if __name__ == "__main__":
     smem_ppid = options.ppid
     smem_time_step = float(options.time_step)
 
-    print "smem log_file = [%s]" % smem_log
-    print "smem exe_file = [%s]" % smem_exe
-    print "smem ppid = [%s]" % smem_ppid
-    print "smem time_step = [%.1f]" % smem_time_step
+    print ("smem log_file = [%s]" % smem_log)
+    print ("smem exe_file = [%s]" % smem_exe)
+    print ("smem ppid = [%s]" % smem_ppid)
+    print ("smem time_step = [%.1f]" % smem_time_step)
 
     if os.path.exists(smem_log):
-        print "  given smem_log name %s exists.. renaming it to old.%s" % (smem_log, smem_log)
+        print ("  given smem_log name %s exists.. renaming it to old.%s" % (smem_log, smem_log))
         os.rename(smem_log, "OLD.%s" % smem_log)
     
     t0 = time.time()
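
subprocess.getstatusoutput keeps the old commands.getstatusoutput contract, an (exit-status, output) pair, so the unpacking call sites above survive unchanged; on Python 2 the "future" aliasing supplies the name. A sketch, assuming a Unix ps:

    import subprocess

    sc, out = subprocess.getstatusoutput("ps --ppid 1 -o pid,state")
    if sc != 0:
        print(" PS> ERROR... sc=%i" % sc)
    else:
        print("line_nbr=%i" % (len(out.splitlines()) - 1))
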
diff --git a/Control/AthenaServices/src/AthDictLoaderSvc.h b/Control/AthenaServices/src/AthDictLoaderSvc.h
index e02fc2fd5e3132211de09c8613d220ccafb86a67..c4fe32e6d14fc386bd91e34dd2f83624802f28e9 100644
--- a/Control/AthenaServices/src/AthDictLoaderSvc.h
+++ b/Control/AthenaServices/src/AthDictLoaderSvc.h
@@ -31,7 +31,7 @@
 class ISvcLocator;
 template <class TYPE> class SvcFactory;
 
-class AthDictLoaderSvc
+class ATLAS_CHECK_THREAD_SAFETY AthDictLoaderSvc
   : virtual public ::IDictLoaderSvc,
             public ::AthService
 { 
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/countGuidsClient.py b/Database/AthenaPOOL/AtlasCollectionTools/python/countGuidsClient.py
index 68ddb1abaf53aa4858ca5690e2c03890afd91704..b33d75226a8dd417df1cf11f28561f4eb2074af3 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/countGuidsClient.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/countGuidsClient.py
@@ -1,4 +1,6 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
 
 import urllib, re, string, os, time
 from eventLookupClient import eventLookupClient
@@ -64,7 +66,7 @@ class countGuidsClient(eventLookupClient):
       tokpat = re.compile(r'([0-9A-F]{8}-([0-9A-F]{4}-){3}[0-9A-F]{12})')
       for line in self.output:
          if re.search(self.errorPattern, line, re.I):
-            #print " -- Error line matched: " + line
+            #print (" -- Error line matched: " + line)
             return None
          if stage == "readGuids":
             try:
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/eventLookupClient.py b/Database/AthenaPOOL/AtlasCollectionTools/python/eventLookupClient.py
index 68eff1fd73212af1186e9745dd527e09da6f3fb5..e7752f9123606385ac0e9233c7e1f5a3d834f4d9 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/eventLookupClient.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/eventLookupClient.py
@@ -1,6 +1,12 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
-import urllib, re, string, os, time, commands
+from __future__ import print_function
+
+import urllib, re, string, os, time
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
 
 # client for eventLookup TAG service
 # author:  Marcin.Nowak@cern.ch
@@ -31,7 +37,7 @@ class eventLookupClient:
       except KeyError:
          self.certProxyFileName = '/tmp/x509up_u%s' % os.getuid()
       if not os.access(self.certProxyFileName, os.R_OK):
-         print 'EventLookup could not locate user GRID certificate proxy! (do voms-proxy-init)'
+         print ('EventLookup could not locate user GRID certificate proxy! (do voms-proxy-init)')
          return
       proxy = open(self.certProxyFileName)
       try:
@@ -58,20 +64,20 @@ class eventLookupClient:
             self.certDir = os.environ['X509_CERT_DIR']
          except KeyError:
             self.certDir = '/etc/grid-security/certificates'
-         rc, response = commands.getstatusoutput( 'uname -r' )
+         rc, response = subprocess.getstatusoutput( 'uname -r' )
          if 'el6' in response.split('.'):
-            if self.debug:  print "detected SLC6 for curl"
+            if self.debug:  print ("detected SLC6 for curl")
             self.curlCmd += ' --capath ' + self.certDir + ' --cacert ' + self.certProxyFileName
          else:
             self.curlCmd += ' -k '
          return self.curlCmd         
 
  
-   def doLookup(self, inputEvents, async=None, stream="", tokens="",
+   def doLookup(self, inputEvents, asyncFlag=None, stream="", tokens="",
                 amitag="", extract=False):
       """ contact the server and return a list of GUIDs
       inputEvents  - list of run-event pairs
-      async - request query procesing in a separate process, client will poll for results
+      asyncFlag - request query processing in a separate process, client will poll for results
       stream - stream
       tokens - token names
       amitag - used to select reprocessing pass (default empty means the latest)
@@ -87,10 +93,10 @@ class eventLookupClient:
          sep = "\n"
          runs.add(run_ev[0]);
 
-      if async is None:
+      if asyncFlag is None:
          if len(runs) > 50 or len(inputEvents) > 1000:
-            async = True
-      if async:
+            asyncFlag = True
+      if asyncFlag:
          asyncStr = "true"
       else:
          asyncStr = "false"
@@ -108,11 +114,11 @@ class eventLookupClient:
          query_args['extract'] = "true"
 
       self.talkToServer(self.serverURL + self.lookupPage, query_args)
-      if not async:
+      if not asyncFlag:
          for line in self.output:
             if re.search("502 Bad Gateway", line):
                # usually signifies a timeout on the J2EE server
-               print "Timeout detected. Retrying in asynchronous mode"
+               print ("Timeout detected. Retrying in asynchronous mode")
                query_args['async'] = "true"
                self.talkToServer(self.serverURL + self.lookupPage, query_args)
                break
@@ -129,8 +135,8 @@ class eventLookupClient:
    def talkToServer(self, url, args):
       encoded_args = urllib.urlencode(args)
       if self.debug:
-         print "Contacting URL: " + url
-         print encoded_args
+         print ("Contacting URL: " + url)
+         print (encoded_args)
 
       for _try in range(1,6):
          response = urllib.urlopen(url, encoded_args)
@@ -142,7 +148,7 @@ class eventLookupClient:
                retry = True
          if retry:
             if self.debug:
-               print "Failed to connect to the server, try " + str(_try)
+               print ("Failed to connect to the server, try " + str(_try))
             time.sleep(self.connectionRefusedSleep)
          else:
             break
@@ -157,7 +163,7 @@ class eventLookupClient:
       mcarlo - if True ask for MC TAGs only
       """
 
-      if isinstance(inputEvents, basestring):
+      if isinstance(inputEvents, str):
          #events from a file
          runs_events = "<" + inputEvents
       else:
@@ -188,10 +194,10 @@ class eventLookupClient:
       cmd += args
       
       if self.debug:
-         print "Executing command: " + cmd
+         print ("Executing command: " + cmd)
 
       for _try in range(1,6):
-         self.rc, response = commands.getstatusoutput( cmd )
+         self.rc, response = subprocess.getstatusoutput( cmd )
          self.output = []
          retry = False
          for line in response.split('\n'):
@@ -200,7 +206,7 @@ class eventLookupClient:
                retry = True
          if retry:
             if self.debug:
-               print "Failed to connect to the server, try " + str(_try)
+               print ("Failed to connect to the server, try " + str(_try))
             time.sleep(self.connectionRefusedSleep)
          else:
             break
@@ -219,7 +225,7 @@ class eventLookupClient:
       tokpat = re.compile(r'[[]DB=(?P<FID>.*?)[]]')
       for line in self.output:
          if re.search(self.errorPattern, line, re.I):
-            #print " -- Error line matched: " + line
+            #print (" -- Error line matched: " + line)
             return None
          if stage == "readTags":
             if line[0:1] == ":":
@@ -235,7 +241,7 @@ class eventLookupClient:
                continue
             else:
                return (self.tagAttributes, self.tags)
-         if re.match("\{.*\}$", line):
+         if re.match("\\{.*\\}$", line):
             guids = eval(line)
             if type(guids).__name__!='dict':
                return None
@@ -260,33 +266,33 @@ class eventLookupClient:
       if type(output) == type('str'):  output = output.split('\n')
       for line in output:
          if re.search("certificate expired", line):
-            print "Your CA certificate proxy may have expired. The returned error is:\n" + line
+            print ("Your CA certificate proxy may have expired. The returned error is:\n" + line)
             return 2
          if re.search("SSL connect error", line):
-            print line
+            print (line)
             checkcmd = 'voms-proxy-info -exists -file '+self.certProxyFileName
-            rc, out = commands.getstatusoutput(checkcmd)
+            rc, out = subprocess.getstatusoutput(checkcmd)
             if rc==0:
                return 20  # reason not known
             if rc==1:
-               print "Certificate Proxy is NOT valid. Check with " + checkcmd
+               print ("Certificate Proxy is NOT valid. Check with " + checkcmd)
                return 21
-            print "Check if your Certificate Proxy is still valid: " + checkcmd
+            print ("Check if your Certificate Proxy is still valid: " + checkcmd)
             return 25
          if re.search("unable to use client certificate", line):
-            print line
+            print (line)
             return 22
          if self.remoteFile and re.match("NOT EXISTING", line):
-            print "File '" + self.remoteFile + "' not found on " + self.workerHost
+            print ("File '" + self.remoteFile + "' not found on " + self.workerHost)
             return 3
          if( re.search("AthenaeumException: No response from server", line)
              or re.search("ConnectException: Connection refused", line) ):
-            print "ERROR contacting " + self.workerHost
-            print error1
+            print ("ERROR contacting " + self.workerHost)
+            print (error1)
             return 4
          if re.search("AthenaeumException: Can't execute commad", line):
-            print "ERROR processing request on " + self.workerHost
-            print error1
+            print ("ERROR processing request on " + self.workerHost)
+            print (error1)
             return 5
       return None
       
@@ -302,7 +308,7 @@ class eventLookupClient:
                      }
       self.remoteFile = file
       if self.debug:
-         print "EventLookup waiting for server.  Remote file=" + file
+         print ("EventLookup waiting for server.  Remote file=" + file)
 
       ready = False  
       while not ready:
@@ -311,7 +317,7 @@ class eventLookupClient:
          for line in self.output:
             if re.match("NOT READY", line):
                if self.debug:
-                  print "received NOT READY"
+                  print ("received NOT READY")
                time.sleep(1)
                ready = False
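
Two Python 3 points from this file in miniature: `async` became a reserved word in 3.7, so the keyword argument is renamed at the definition and every call site; and doubling the backslashes keeps regex literals free of invalid-escape warnings (a raw string is the equivalent idiom). Names below are hypothetical:

    import re

    def do_lookup(events, asyncFlag=None):   # was: def doLookup(..., async=None, ...)
        return {'n_events': len(events), 'async': bool(asyncFlag)}

    print(do_lookup([(1, 2)], asyncFlag=True))
    print(bool(re.match(r"\{.*\}$", "{'A1B2': 1}")))   # r"..." equals "\\{.*\\}$"
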
 
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/importCheck.py b/Database/AthenaPOOL/AtlasCollectionTools/python/importCheck.py
index 4b63913c38133b33165e1251bb99f024d4482e96..f0006e6b42b6e86d9f71b961cc489d1f574f4420 100644
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/importCheck.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/importCheck.py
@@ -1,13 +1,14 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
 
 # listDatasets.py - from an input list of GUIDs, gets a corresponding list of DQ2 datasets.
 import cx_Oracle
 import os, getopt
 import re
 import sys, signal
-import commands
 import random
 
 try:
@@ -19,13 +20,13 @@ try:
    from dq2.common.DQException import *
    from dq2.location.client.LocationClient import LocationClient
 except ImportError:
-   print "Environment not set [error importing DQ2 dependencies]!"
-   print "Try setting PYTHONPATH to the dq2-client directory."
+   print ("Environment not set [error importing DQ2 dependencies]!")
+   print ("Try setting PYTHONPATH to the dq2-client directory.")
    sys.exit(1)
 
 # Usage summary
 def _usage(exit = None):
-   print """Command line options: [-h | --help], [-n | --nevnt], [-l | --lfn], [-g | --guid], [-s | --schema] <schema name>, [-c | --collection] <collection name (comma separated)>."""
+   print ("""Command line options: [-h | --help], [-n | --nevnt], [-l | --lfn], [-g | --guid], [-s | --schema] <schema name>, [-c | --collection] <collection name (comma separated)>.""")
    if exit != None:
       sys.exit()
 
@@ -59,11 +60,11 @@ def main():
    try:
       optlist, args = getopt.getopt( sys.argv[1:], _useropts, _userlongopts )
-   except getopt.error:
-      print sys.exc_value
+   except getopt.error as e:
+      print (e)
       _usage( 2 )
 
    if args:
-      print "Unhandled arguments:", args
+      print ("Unhandled arguments:", args)
       _usage( 2 )
 
    for opt, arg in optlist:
@@ -82,8 +83,8 @@ def main():
          countNumber = True
 
    if (len(Collections)==0):
-      print "No collections specified on relational DB"
-      print "--> The python variable Collections is not set"
+      print ("No collections specified on relational DB")
+      print ("--> The python variable Collections is not set")
       sys.exit()
 
    #newTuple = {}
@@ -91,7 +92,7 @@ def main():
    numberDict = {}
    for coll in Collections:
       level = "  COLL (TAB="+coll+") "
-      if (debugFlag): print "  BEGIN ", level
+      if (debugFlag): print ("  BEGIN ", level)
 
       attributes = "COLLECTION_NAME,DATA_TABLE_NAME,LINKS_TABLE_NAME"
       if (schema !=""):
@@ -99,23 +100,23 @@ def main():
       else:
          querySql = "SELECT "+attributes+" from pool_collections where collection_name=\'" + coll + "\'"
 
-      #print level, "query = ", querySql
+      #print (level, "query = ", querySql)
       cursor.execute(querySql)
       returnSql = cursor.fetchall()
-      #print level, "Resultset = ", returnSql
+      #print (level, "Resultset = ", returnSql)
       #make sure we only get one table name back
       if len(returnSql) == 0:
-         print level, "problem executing query " + querySql
-         print level, "no table names returned in output:" + str(returnSql)
-         print level, "--> Skipping coll"
+         print (level, "problem executing query " + querySql)
+         print (level, "no table names returned in output:" + str(returnSql))
+         print (level, "--> Skipping coll")
          continue
       if len(returnSql) > 1:
-         print level, "problem executing query " + querySql
-         print level, "too many table names in output:" + str(returnSql)
-         print level, "--> Skipping coll"
+         print (level, "problem executing query " + querySql)
+         print (level, "too many table names in output:" + str(returnSql))
+         print (level, "--> Skipping coll")
          continue
  
-      #print returnSql
+      #print (returnSql)
       dataTableName = returnSql[0][1]
       linksTableName = returnSql[0][2]
 
@@ -146,19 +147,19 @@ def main():
          
          guidList =[]
          linkIdDict = {}
-         #print queryID
-         #print returnSqlId
+         #print (queryID)
+         #print (returnSqlId)
          for element in returnSqlId:
             linkId = element[0]
             element = element[1].strip()
             element = element.split("][")[0].split("=")[1]
             guidList.append(element)
             linkIdDict[element] = linkId
-         print ""
-         print str(len(guidList)) + " unique guids found in collection " + coll
-         #print guidList
-         #print linkIdDict
-         if (debugFlag): print guidList
+         print ("")
+         print (str(len(guidList)) + " unique guids found in collection " + coll)
+         #print (guidList)
+         #print (linkIdDict)
+         if (debugFlag): print (guidList)
          
             
                                                                                                                                            
@@ -168,12 +169,12 @@ def main():
          fileList = []
 
          for guid in guidList:
-            if (debugFlag): print "Processing GUID = " + guid
+            if (debugFlag): print ("Processing GUID = " + guid)
             vuid = dq.contentClient.queryDatasetsWithFileByGUID(guid)
-            if (debugFlag): print "VUID(S) = " + str(vuid)
+            if (debugFlag): print ("VUID(S) = " + str(vuid))
             if len(vuid) == 0:
-               print "Error: guid " + guid + " returned by query is not registered in any DQ2 dataset!"
-               print "Skipping to next file..."
+               print ("Error: guid " + guid + " returned by query is not registered in any DQ2 dataset!")
+               print ("Skipping to next file...")
                continue
             else: 
                #for each dataset vuid
@@ -181,7 +182,7 @@ def main():
                   #get the dataset name
                   dataset = dq.repositoryClient.resolveVUID(v)
                   name = dataset.get('dsn')
-                  if (debugFlag): print "dataset name = " + str(name)
+                  if (debugFlag): print ("dataset name = " + str(name))
                   #if the dataset isn't a _tid dataset, ignore, else do stuff.
                   if (re.search(r'tid',name)):
                      continue
@@ -193,13 +194,13 @@ def main():
                         dataset_names.append(name)
          #it appears that you need to send the vuid to queryFilesInDataset as a list-type
          #"files" contains a tuple of the lfns for all of the (one) dataset
-         #print "guids = " +str(guidList)
-         print "Getting files for VUIDs"
-         #print "getting files for VUIDs=" + str(keptVuids) 
+         #print ("guids = " +str(guidList))
+         print ("Getting files for VUIDs")
+         #print ("getting files for VUIDs=" + str(keptVuids) )
          files = dq.contentClient.queryFilesInDataset(keptVuids)
-         #print files
+         #print (files)
          #now just need to get the corresponding lfn for a given guid
-         print str(len(guidList)) + " GUID:LFN pairs found:"
+         print (str(len(guidList)) + " GUID:LFN pairs found:")
          dataDict = {}
          for guid in guidList:
             dataDict[guid]= files[0][guid]['lfn']
@@ -209,54 +210,54 @@ def main():
                countGuids = "SELECT count(OID_1) from " + dataTableName + " where OID_1=\'" + str(linkIdDict[guid]) + "\'"
                cursor.execute(countGuids)
                returnCountGuids = cursor.fetchall()
-               #print returnCountGuids[0][0]
+               #print (returnCountGuids[0][0])
                numberDict[guid] = returnCountGuids[0][0]
          masterDict[coll] = dataDict
     
-      except DQException, e:
-         print "Error", e
+      except DQException as e:
+         print ("Error", e)
 
    for x in masterDict.keys():
-      print ""
-      print "######################################################"
+      print ("")
+      print ("######################################################")
       if (viewLfn) and not (viewGuid):
-         print "LFNs found for collection " + x
-         print "######################################################"
+         print ("LFNs found for collection " + x)
+         print ("######################################################")
          total = 0
          if (countNumber):
             for y in masterDict[x].keys():
-               print masterDict[x][y] + "\t" + str(numberDict[y])
+               print (masterDict[x][y] + "\t" + str(numberDict[y]))
                total = total + numberDict[y]
          else:
             for y in masterDict[x].keys():
-               print masterDict[x][y]
+               print (masterDict[x][y])
       elif (viewLfn) and (viewGuid):
-         print "LFNs & GUIDs found for collection " + x
-         print "######################################################"
+         print ("LFNs & GUIDs found for collection " + x)
+         print ("######################################################")
          total = 0
          if (countNumber):
             for y in masterDict[x].keys():
-               print y + "\t" + masterDict[x][y] + "\t" + str(numberDict[y])
+               print (y + "\t" + masterDict[x][y] + "\t" + str(numberDict[y]))
                total = total + numberDict[y]
          else:
             for y in masterDict[x].keys():
-               print y + "\t" + masterDict[x][y]
+               print (y + "\t" + masterDict[x][y])
       elif (viewGuid) and not (viewLfn):
-         print "GUIDs found for collection " + x
-         print "######################################################"
+         print ("GUIDs found for collection " + x)
+         print ("######################################################")
          total = 0
          if (countNumber):
             for y in masterDict[x].keys():
-               print y + "\t" + str(numberDict[y])
+               print (y + "\t" + str(numberDict[y]))
                total = total + numberDict[y]
          else:
             for y in masterDict[x].keys():
-               print y 
+               print (y)
          
-      print "#########################################"
-      print "Total number of events loaded = " + str(total)
-      print "#########################################"
-      print " "
+      print ("#########################################")
+      print ("Total number of events loaded = " + str(total))
+      print ("#########################################")
+      print (" ")
          
 
 if __name__ == '__main__':
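
The getopt handler above needed more than parentheses: sys.exc_value was removed in Python 3, so the handler binds the exception with `as` instead (sys.exc_info() remains available on both lines). A minimal reproduction:

    from __future__ import print_function
    import sys, getopt

    try:
        getopt.getopt(['--bogus'], '', ['colls='])
    except getopt.error as e:      # replaces: print sys.exc_value
        print(e)
        print(sys.exc_info()[0], sys.exc_info()[1])
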
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/listDatasets.py b/Database/AthenaPOOL/AtlasCollectionTools/python/listDatasets.py
index 9489d240a361903b1c7e349ba5db37a6d7f3c42c..4af45bb275e31f6cc5a8382db5ccbd712b75d78e 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/listDatasets.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/listDatasets.py
@@ -1,16 +1,21 @@
 #!/usr/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # listDatasets.py - from an input list of GUIDs, gets a corresponding list of DQ2 datasets.
 
+from __future__ import print_function
+
 import os
 import re
 import sys, signal
-import commands
 import random
 import getopt
 
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 try:
    import dq2.clientapi.cli.cliutil
    from dq2.common.DQDashboardTool import DQDashboardTool
@@ -20,15 +25,15 @@ try:
    from dq2.common.DQException import *
    from dq2.location.client.LocationClient import LocationClient
 except ImportError:
-   print "Environment not set [error importing DQ2 dependencies]!"
-   print "Try setting PYTHONPATH to the dq2-client directory."
+   print ("Environment not set [error importing DQ2 dependencies]!")
+   print ("Try setting PYTHONPATH to the dq2-client directory.")
    sys.exit(1)
 
 
 def usage():
    """
    listDatasets.py --colls <list of collections> OR listDatasets.py --guids <list of guids>  """
-   print usage.__doc__
+   print (usage.__doc__)
 
 def fillMaps(guids):
 
@@ -71,10 +76,10 @@ def fillMaps(guids):
       for guid in dsguids[ds]:
          try: 
             info = filelist[guid]
-            #print guid,info['lfn']
+            #print (guid,info['lfn'])
             lfnmap[guid] = info['lfn']
-            #print "DS="+ds+"; GUID="+guid+"; LFN="+info['lfn']
-            #print "(" + str(cnt) + ") LFN=" + info['lfn'] + "; DS=" + ds
+            #print ("DS="+ds+"; GUID="+guid+"; LFN="+info['lfn'])
+            #print ("(" + str(cnt) + ") LFN=" + info['lfn'] + "; DS=" + ds)
          except:
             pass
 
@@ -90,8 +95,8 @@ def main():
    try:
       longopts = ['guids=','colls=']
       opts,args=getopt.getopt(sys.argv[1:],'',longopts)
-   except getopt.GetoptError,e:
-      print e
+   except getopt.GetoptError as e:
+      print (e)
       usage()
       sys.exit(0)
 
@@ -118,7 +123,7 @@ def main():
         for collection in collections:
           coll_name = collection[:-5]
           command_string = "CollListFileGUID.exe -src " + coll_name + " RootCollection"
-          guid_string = commands.getoutput(command_string)
+          guid_string = subprocess.getoutput(command_string)
           clfgout = guid_string.split('\n')
           for line in clfgout:
             words = line.split()
@@ -131,11 +136,11 @@ def main():
             guids += [words[1]]
  
         if len(guids) == 0:
-          print "Error: query returned no files"
+          print ("Error: query returned no files")
           sys.exit(1)
 
       elif len(guids) == 0:
-        print "Error: Did not specify properly input"
+        print ("Error: Did not specify properly input")
         usage()
         sys.exit(1)
 
@@ -143,19 +148,19 @@ def main():
          dsets = {}
          (dsets,lfnmap) = fillMaps(refguids[ref])
          cnt = 0
-         print "\nINFORMATION FOR REF = ",ref,"\n"
+         print ("\nINFORMATION FOR REF = ",ref,"\n")
          for guid in refguids[ref]:
             dslist = ""
             for ds in dsets[guid]:
                if dslist != "" : dslist += ", "
                dslist += ds
             if dslist == "" : dslist = "NONE FOUND"
-            print "(" + str(guid) + ") LFN=" + lfnmap[guid] + "; DS=(" + str(len(dsets[guid])) + ") " + dslist
+            print ("(" + str(guid) + ") LFN=" + lfnmap[guid] + "; DS=(" + str(len(dsets[guid])) + ") " + dslist)
  
       sys.exit(0)
 
-   except DQException, e:
-      print "Error", e
+   except DQException as e:
+      print ("Error", e)
 
 if __name__ == '__main__':
     main()
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/runEventLookup.py b/Database/AthenaPOOL/AtlasCollectionTools/python/runEventLookup.py
index d5cf15c3666a4a07b9f318036499b43eb0888675..06ab07a54c0c4c3d160b721a16708e46e10ee940 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/runEventLookup.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/runEventLookup.py
@@ -1,11 +1,14 @@
 #!/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
 
 from optparse import OptionParser
-from eventLookupClient import eventLookupClient
+from .eventLookupClient import eventLookupClient
 import sys, re, os, time
 from sys import exit
+import six
 
 PandaSys = '/afs/cern.ch/atlas/offline/external/GRID/DA/panda-client/latest'
 
@@ -15,7 +18,7 @@ PandaSys = '/afs/cern.ch/atlas/offline/external/GRID/DA/panda-client/latest'
 parser = OptionParser()
 parser.add_option("-d", "--debug", dest="debug", default=False, action="store_true",
                   help="enable debug output")
-parser.add_option("-a", "--async", dest="async", default=None, action="store_true",
+parser.add_option("-a", "--async", dest="asyncFlag", default=None, action="store_true",
                   help="OLD mode: asks the server to run EventLookup in background to prevent timeouts for large queries")
 parser.add_option("-f", "--runs_events_file", dest="filename",
                   help="read the list of run-event pairs from FILE", metavar="FILE")
@@ -66,15 +69,15 @@ if options.filename: action += 1
 if options.runs_events: action += 1
 if options.remoteFile: action += 1
 if action > 1:
-   print os.path.basename(sys.argv[0]) + " -e, -f and -g options are exclusive (-h for help)"
+   print (os.path.basename(sys.argv[0]) + " -e, -f and -g options are exclusive (-h for help)")
    exit(11)
 if action == 0:
-   print os.path.basename(sys.argv[0]) + " requires  -e, -f or -g option (-h for help)"
+   print (os.path.basename(sys.argv[0]) + " requires  -e, -f or -g option (-h for help)")
    exit(10)
 
 if options.debug:
-	print "Event numbers: "
-	print runs_events
+	print ("Event numbers: ")
+	print (runs_events)
 	
 if options.workerhost:
    eventLookupClient.workerHost = options.workerhost
@@ -92,25 +95,25 @@ try:
    elif options.remoteFile:
       guids = client.waitForFile(options.remoteFile)
    else:
-      guids = client.doLookup(runs_events, async = options.async,
+      guids = client.doLookup(runs_events, asyncFlag = options.asyncFlag,
                               stream = options.stream, tokens = options.tokens,
                               amitag = options.amitag, extract = options.gettags)
-except KeyboardInterrupt, e:
-   print "Keyboard interrupt " + str(e)
+except KeyboardInterrupt as e:
+   print ("Keyboard interrupt " + str(e))
    exit(100)
 
 # ------  Results processing
 if options.debug:
   for line in client.output:
-      print line.rstrip()	
+      print (line.rstrip())
 
 if guids == None:
    code = client.checkError()
    if code:
       exit(code)
-   print "ERROR!  Event lookup probably failed"
+   print ("ERROR!  Event lookup probably failed")
    for line in client.output:
-      print line.rstrip()
+      print (line.rstrip())
    exit(1)
 
 
@@ -121,7 +124,7 @@ def printGUIDsWithDatasets(guids):
     try:
       from pandatools import Client
     except ImportError:
-      if os.environ.has_key('PANDA_SYS'):
+      if 'PANDA_SYS' in os.environ:
          pandapath = os.environ['PANDA_SYS']
       else:
          pandapath = PandaSys
@@ -129,7 +132,7 @@ def printGUIDsWithDatasets(guids):
       try:
          from pandatools import Client
       except ImportError:
-         print "EventLookup failed to import PanDA client, GUID->dataset name resolution disabled"
+         print ("EventLookup failed to import PanDA client, GUID->dataset name resolution disabled")
          return False
 
     # instantiate curl
@@ -141,7 +144,7 @@ def printGUIDsWithDatasets(guids):
     # loop over all GUIDs
     for guid in guids.keys():
         # check existing map to avid redundant lookup
-        if guidLfnMap.has_key(guid):
+        if guid in guidLfnMap:
             continue
         iLookUp += 1
         if iLookUp % 20 == 0:
@@ -176,27 +179,27 @@ def printGUIDsWithDatasets(guids):
             if not (tmpDsName.startswith('panda') or \
                     tmpDsName.startswith('user') or \
                     tmpDsName.startswith('group') or \
-                    re.search('_sub\d+$',tmpDsName) != None or \
-                    re.search('_dis\d+$',tmpDsName) != None or \
+                    re.search('_sub\\d+$',tmpDsName) != None or \
+                    re.search('_dis\\d+$',tmpDsName) != None or \
                     re.search('_shadow$',tmpDsName) != None \
                     or tmpDsName in checkedDSList ):
                tmpMap = Client.queryFilesInDataset(tmpDsName)
-               for tmpLFN,tmpVal in tmpMap.iteritems():
+               for tmpLFN,tmpVal in six.iteritems(tmpMap):
                    guidLfnMap.setdefault(tmpVal['guid'],[]).append([tmpLFN,tmpDsName])
                checkedDSList.append(tmpDsName)
 
     for guid in guids.keys():
-       print guid, guids[guid], guidLfnMap.setdefault(guid,"")
+       print (guid, guids[guid], guidLfnMap.setdefault(guid,""))
     return True
 
 
       
 # -----  Print out GUIDs
 if len(guids) == 0:
-   print "No GUIDs found"
+   print ("No GUIDs found")
    for line in client.output:
       if re.search("Warning: no TAG collections were found", line):
-         print "Warning: no TAG Collections matched the criteria (run,stream,amitag)"   
+         print ("Warning: no TAG Collections matched the criteria (run,stream,amitag)")
 else:
    if type(guids)==type({}):
       # default GUID printout
@@ -204,13 +207,13 @@ else:
          if printGUIDsWithDatasets(guids):
             exit(0)
       for guid in guids.keys():
-         print guid, guids[guid]
+         print (guid, guids[guid])
    else:
       # this is the per-event listing from --gettags option
       (attrNames, attrVals) = guids
-      print "TAG attributes are: " + str(attrNames)
+      print ("TAG attributes are: " + str(attrNames))
       for tag in attrVals:
-         print tag
+         print (tag)
 
 
 
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/runGuidsCount.py b/Database/AthenaPOOL/AtlasCollectionTools/python/runGuidsCount.py
index 7c2c2f06cd10e8fc550de11b260dc383867e622c..bf9ba66388794deb6c90cc27ac40f56f41796ade 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/runGuidsCount.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/runGuidsCount.py
@@ -1,9 +1,11 @@
 #!/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
 
 from optparse import OptionParser
-from countGuidsClient import countGuidsClient
+from .countGuidsClient import countGuidsClient
 import sys, re, os
 from sys import exit
 
@@ -29,7 +31,7 @@ parser.add_option("--old", dest="athenaeum", default=False, action="store_true",
 (options, args) = parser.parse_args()
 
 if not options.dataset and not options.remoteFile:
-   print os.path.basename(sys.argv[0]) + " requires dataset name or remote file name (-g option) (-h for help)"
+   print (os.path.basename(sys.argv[0]) + " requires dataset name or remote file name (-g option) (-h for help)")
    exit(10)
 
 client = countGuidsClient()
@@ -46,31 +48,31 @@ try:
    else:
       results = client.countGuids(options.dataset, options.query, options.tokens)
 
-except KeyboardInterrupt, e:
-   print "Keyboard interrupt " + str(e)
+except KeyboardInterrupt as e:
+   print ("Keyboard interrupt " + str(e))
    exit(100)
 
 # ------  Results processing
 if options.debug:
   for line in client.output:
-      print line.rstrip()	
+      print (line.rstrip())
 
 if results == None:
    code = client.checkError()
    if code:
       exit(code)
-   print "ERROR! GUID count probably failed"
+   print ("ERROR! GUID count probably failed")
    for line in client.output:
-      print line.rstrip()
+      print (line.rstrip())
    exit(1)
 
 # -----  Print out GUIDs
 if not results[0]:
-   print "No GUIDs found"
+   print ("No GUIDs found")
 else:
-   print "#Events, GUIDs: " + str(results[0])
+   print ("#Events, GUIDs: " + str(results[0]))
    for line in results[1]:
-      print line[0] + " " + str( line[1])
+      print (line[0] + " " + str(line[1]))
 
 
 
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/tagDSList.py b/Database/AthenaPOOL/AtlasCollectionTools/python/tagDSList.py
index 5a29066b2161926f73558d8a5f4a0a761bded888..34493406712758031548d4ac00c80a7509e28c46 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/tagDSList.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/tagDSList.py
@@ -1,16 +1,22 @@
 #!/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 #
 # @author Marcin Nowak
 # @date 09.2012
 # @brief utility to list datasets for a given Run that are in the TAG DB
 #
 
-import commands, time, tempfile, os, sys, re
+from __future__ import print_function
+
+import time, tempfile, os, sys, re
 from optparse import OptionParser
 from xml.dom import minidom
-from eventLookupClient import eventLookupClient
+from .eventLookupClient import eventLookupClient
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
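+# getstatusoutput moved from the commands module to subprocess in py3;
+# install_aliases() above makes the same spelling work on py2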
 
 
 parser = OptionParser()
@@ -36,13 +42,13 @@ formatparams = dict( server = options.server,
                      )
 cmd = EL.getCurlCmd() + " 'https://{server}/{url}?runnr={run}&tagtype={type}&amitag={amitag}' ".format( **formatparams )
 
-(rc,out) = commands.getstatusoutput(cmd)
+(rc,out) = subprocess.getstatusoutput(cmd)
 if rc != 0:
-    print "ERROR!" 
+    print ("ERROR!" )
     code = EL.checkError(out)
     if code:
         sys.exit(code)
-    print out
+    print (out)
     sys.exit(-1)
 
 try:
@@ -53,8 +59,8 @@ try:
         row = table.childNodes[x]
         coll = row.childNodes[1].firstChild.data.encode().replace('_READ','')
         db   = row.childNodes[0].firstChild.data.encode().split('/')[2]
-        print coll, '  ', db
-except Exception, e:
+        print (coll, '  ', db)
+except Exception as e:
     raise RuntimeError( 'The result of the TAG catalog query could not be parsed: '
                         +str(e) + "\n" + out )
 
diff --git a/Database/AthenaPOOL/AtlasCollectionTools/python/tagExtract.py b/Database/AthenaPOOL/AtlasCollectionTools/python/tagExtract.py
index 56e8cbc6d023361f47cd9d9978eb1859f6c7c6a2..1027508b439db6ec80092ebc0e2fd9319822b273 100755
--- a/Database/AthenaPOOL/AtlasCollectionTools/python/tagExtract.py
+++ b/Database/AthenaPOOL/AtlasCollectionTools/python/tagExtract.py
@@ -1,15 +1,21 @@
 #!/bin/env python
 
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 #
 # @author Marcin Nowak
 # @date 09.2012
 # @brief utility to extract TAG DB Collection to a local ROOT file
 #
 
-import commands, time, tempfile, os, sys, re
+from __future__ import print_function
+
+import time, tempfile, os, sys, re
 from optparse import OptionParser
-from eventLookupClient import eventLookupClient
+from .eventLookupClient import eventLookupClient
+
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
 
 parser = OptionParser()
 parser.add_option("--server", dest="server", default='atlas-tagservices.cern.ch',
@@ -34,7 +40,7 @@ output = open(options.output, "w")
 starttime = time.time()
 #extract collection from the database
 EL = eventLookupClient()
-print "# Extracting TAG collection into %s" % options.output
+print ("# Extracting TAG collection into %s" % options.output)
 formatparams = dict( server = options.server,
                      cert = EL.certProxyFileName,
                      url = 'tagservices/EventLookup/www/extract',
@@ -50,19 +56,19 @@ if options.taskID:
 didRetry = 0
 while True:
     restarttime = time.time()
-    (rc,out) = commands.getstatusoutput(cmd)
+    (rc,out) = subprocess.getstatusoutput(cmd)
     if rc != 0:
-        print "ERROR!" 
+        print ("ERROR!" )
         code = EL.checkError(out)
         if code:
             sys.exit(code)
-        print out
+        print (out)
         sys.exit(-1)
 
     cmd2 = 'file ' + options.output
-    (rc,out) = commands.getstatusoutput(cmd2)
+    (rc,out) = subprocess.getstatusoutput(cmd2)
     if re.search('empty', out):
-        print "ERROR: Extraction process did not return any data"
+        print ("ERROR: Extraction process did not return any data")
         sys.exit(1)
     if re.search('ASCII', out) or re.search('text', out):
         CF = open(options.output, "r")
@@ -73,20 +79,20 @@ while True:
                 break
         CF.close()
         if found_retry:
-            print line
+            print (line)
             didRetry += 1
             time.sleep(5)
             continue
-        print "Extraction process probably failed with message:"
+        print ("Extraction process probably failed with message:")
         with open(options.output, 'r') as CF:
-            print CF.read()
+            print (CF.read())
         sys.exit(2)
     # finished OK!
     break
 
-print "# %s extracted in %.0ds (may include server queue time)" %(options.output, time.time()-restarttime)
+print ("# %s extracted in %.0ds (may include server queue time)" %(options.output, time.time()-restarttime))
 if didRetry > 0:
-    print "# reqest retried %d time (server busy). Total time %d" % (didRetry, time.time()-starttime)
+    print ("# reqest retried %d time (server busy). Total time %d" % (didRetry, time.time()-starttime))
 
 
 
diff --git a/Database/CrestApi/src/CrestApi.cxx b/Database/CrestApi/src/CrestApi.cxx
index bf93976d3bef70c220cb4a9f51303b4701ae6f22..732de9be1a83ee03fcaf31b8e20666a5a67c6db8 100644
--- a/Database/CrestApi/src/CrestApi.cxx
+++ b/Database/CrestApi/src/CrestApi.cxx
@@ -869,6 +869,8 @@ namespace Crest {
     return respond;
   }
 
+// REQUEST METHODS
+
   void CrestClient::storePayload(const std::string& tag, uint64_t since, const std::string& js) {
     if (m_mode == FILESYSTEM_MODE) {
       storePayloadDump(tag, since, js);
@@ -877,10 +879,6 @@ namespace Crest {
     storePayloadRequest(tag, since, js);
   }
 
-// REQUEST METHODS
-
-
-
 
   struct data {
     char trace_ascii; /* 1 or 0 */
@@ -959,6 +957,11 @@ namespace Crest {
 
       /* always cleanup */
       curl_easy_cleanup(curl);
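+      /* curl_formfree(formpost) frees the whole form chain, including the node lastptr points to */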
+      curl_formfree(formpost);
+      curl_slist_free_all (headers);
+
+      curl_global_cleanup();
 
       return s;
     }
@@ -1027,6 +1030,10 @@ namespace Crest {
 
       /* always cleanup */
       curl_easy_cleanup(curl);
+      curl_slist_free_all (headers);
+      
+      curl_global_cleanup();
+
       return s;
     }
 
@@ -1240,6 +1247,12 @@ namespace Crest {
 
       // always cleanup
       curl_easy_cleanup(curl);
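+      // curl_formfree(formpost) frees the whole form chain, including the node lastptr points to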
+      curl_formfree(formpost);
+      curl_slist_free_all (headers);
+
+      curl_global_cleanup();
+
       return s;
     }
     std::string mes = "CrestClient::storeBatchPayload";
diff --git a/Database/FileStager/python/FileStagerTool.py b/Database/FileStager/python/FileStagerTool.py
index 6cf3df6808dd8adb0333957c77f215e983da8221..36f168a3781f36a25d9f59cc0b99f82cf133c510 100644
--- a/Database/FileStager/python/FileStagerTool.py
+++ b/Database/FileStager/python/FileStagerTool.py
@@ -1,9 +1,15 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from __future__ import print_function
 
 import os
-import sys, string, commands
+import sys, string
 from datetime import datetime
 
+from future import standard_library
+standard_library.install_aliases()
+import subprocess
+
 ## Needed to correct ROOT behavior; see below
 CWD = os.getcwd()
 import ROOT
@@ -88,8 +94,8 @@ class FileStagerTool:
       outFile = open(outputFile,'w')
       outFile.write(printlist)
       outFile.close()
-      print "Sample list has been written to <%s>. Goodbye.\n" % outputFile
-    else: print printlist + "\n"
+      print ("Sample list has been written to <%s>. Goodbye.\n" % outputFile)
+    else: print (printlist + "\n")
 
   def GetSampleList(self):
     return self.sampleList
@@ -119,22 +125,22 @@ class FileStagerTool:
     # last fallback
     try:
       defaultTmpdir = os.environ['TMPDIR'] 
-    except Exception,inst:
+    except Exception as inst:
       pass
     # cern lxbatch
     try:
       defaultTmpdir = os.environ['WORKDIR']
-    except Exception,inst:
+    except Exception as inst:
       pass
     # osg
     try:
       defaultTmpdir = os.environ['OSG_WN_TMP']
-    except Exception,inst:
+    except Exception as inst:
       pass
     # lcg
     try:
       defaultTmpdir = os.environ['EDG_WL_SCRATCH']
-    except Exception,inst:
+    except Exception as inst:
       pass
 
     # use given tmpdir
@@ -181,7 +187,7 @@ class FileStagerTool:
         stderr = stderr.replace(baseTmpdir,self.LogfileDir)
         stdout = stdout.replace(baseTmpdir,self.LogfileDir)
       
-      #print "TStageManager::getFile()   : Waiting till <%s> is staged." % (self.sampleList[0])
+      #print ("TStageManager::getFile()   : Waiting till <%s> is staged." % (self.sampleList[0]))
 
       if (True):
         stageman = TStageManager.instance()
@@ -218,15 +224,15 @@ class FileStagerTool:
     return doStaging
 
   def CheckGrid(self):
-    (retcode,output) = commands.getstatusoutput("grid-proxy-info -exists")
+    (retcode,output) = subprocess.getstatusoutput("grid-proxy-info -exists")
     if (retcode!=0):
-      print "\nFileStager.FileStagerTool() : ERROR : grid proxy certificate not found."
+      print ("\nFileStager.FileStagerTool() : ERROR : grid proxy certificate not found.")
     return retcode
 
   def TryGridRestart(self):
     #os.environ['X509_USER_PROXY'] = self.gridCertificate
-    (retcode,output) = commands.getstatusoutput("voms-proxy-init -voms atlas -noregen")
+    (retcode,output) = subprocess.getstatusoutput("voms-proxy-init -voms atlas -noregen")
     if (retcode!=0):
-      print "\nFileStager.FileStagerTool() : ERROR : grid proxy restart failed. Exiting."
+      print ("\nFileStager.FileStagerTool() : ERROR : grid proxy restart failed. Exiting.")
     return retcode
 
diff --git a/Database/FileStager/share/input_FileStager.py b/Database/FileStager/share/input_FileStager.py
index 1c6ab82f574bfbdd4e4aef7c5cb437e88ac0df6e..6be156469fd5eb2bd6cd466fad277ff22d6e5ed4 100644
--- a/Database/FileStager/share/input_FileStager.py
+++ b/Database/FileStager/share/input_FileStager.py
@@ -9,7 +9,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   if ('sampleList' in dir()):
     stagetool = FileStagerTool(sampleList=sampleList)
   elif ('sampleFile' in dir()):
-    print "FileStager() : Now processing sample file : %s" % sampleFile
+    printfunc ("FileStager() : Now processing sample file : %s" % sampleFile)
     stagetool = FileStagerTool(sampleFile=sampleFile)
   
   ## Configure copy command used by the stager; default is 'lcg-cp -v --vo altas -t 1200'.
@@ -28,7 +28,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   thejob = AlgSequence()
   
   ## check if collection names begin with "gridcopy"
-  print "FileStager() : doStaging ?", stagetool.DoStaging()
+  printfunc ("FileStager() : doStaging ?", stagetool.DoStaging())
   
   ## Import file stager algorithm
   from FileStager.FileStagerConf import FileStagerAlg
@@ -69,7 +69,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   try:
     svcMgr = theApp.serviceMgr()
     svcMgr.EventSelector.InputCollections = ic
-  except Exception,inst:
+  except Exception as inst:
     pass
   
   ## else: athenaCommonFlags
@@ -78,14 +78,14 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
       ## the Input AOD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.FilesInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
   else:
     try:
       ## the Input ESD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.FilesInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
   
   
diff --git a/Database/FileStager/share/input_FileStagerRFCP.py b/Database/FileStager/share/input_FileStagerRFCP.py
index 8225b83dad17c1a3af09f0ca4a8db4d088bb8011..f085f5a69def440a4d8de4097b7fe8e77904883d 100644
--- a/Database/FileStager/share/input_FileStagerRFCP.py
+++ b/Database/FileStager/share/input_FileStagerRFCP.py
@@ -9,7 +9,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   if ('sampleList' in dir()):
     stagetool = FileStagerTool(sampleList=sampleList)
   elif ('sampleFile' in dir()):
-    print "FileStager() : Processing sample file : %s" % sampleFile
+    printfunc ("FileStager() : Processing sample file : %s" % sampleFile)
     stagetool = FileStagerTool(sampleFile=sampleFile)
   
   ## Configure rf copy command used by the stager; default is 'lcg-cp -v --vo altas -t 1200'
@@ -28,7 +28,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   thejob = AlgSequence()
   
   ## check if collection names begin with "gridcopy"
-  print "FileStager() : doStaging ?", stagetool.DoStaging()
+  printfunc ("FileStager() : doStaging ?", stagetool.DoStaging())
   
   ## Import file stager algorithm
   from FileStager.FileStagerConf import FileStagerAlg
@@ -69,7 +69,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   try:
     svcMgr = theApp.serviceMgr()
     svcMgr.EventSelector.InputCollections = ic
-  except Exception,inst:
+  except Exception as inst:
     pass
   
   ## else athenaCommonFlags
@@ -78,14 +78,14 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
       ## the Input AOD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.PoolAODInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
   else:
     try:
       ## the Input ESD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.PoolESDInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
   
   
diff --git a/Database/FileStager/share/input_FileStagerRecoTrf.py b/Database/FileStager/share/input_FileStagerRecoTrf.py
index 3657a9a165d6813d36fe2cb32d056bc837af442a..c45598f5e39bf720680c4042229849707960acd3 100644
--- a/Database/FileStager/share/input_FileStagerRecoTrf.py
+++ b/Database/FileStager/share/input_FileStagerRecoTrf.py
@@ -9,7 +9,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   if ('sampleList' in dir()):
     stagetool = FileStagerTool(sampleList=sampleList)
   elif ('sampleFile' in dir()):
-    print "FileStager() : Now processing sample file : %s" % sampleFile
+    printfunc ("FileStager() : Now processing sample file : %s" % sampleFile)
     stagetool = FileStagerTool(sampleFile=sampleFile)
   
   ## Configure copy command used by the stager; default is 'lcg-cp -v --vo altas -t 1200'.
@@ -28,7 +28,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   thejob = AlgSequence()
   
   ## check if collection names begin with "gridcopy"
-  print "FileStager() : doStaging ?", stagetool.DoStaging()
+  printfunc ("FileStager() : doStaging ?", stagetool.DoStaging())
   
   ## Import file stager algorithm
   from FileStager.FileStagerConf import FileStagerAlg
diff --git a/Database/FileStager/share/input_FileStagerXRFCP.py b/Database/FileStager/share/input_FileStagerXRFCP.py
index 694faad59daaa562aa481e94e86eb213f3375f98..418d373634826541e5fcc99393844704fff0a260 100644
--- a/Database/FileStager/share/input_FileStagerXRFCP.py
+++ b/Database/FileStager/share/input_FileStagerXRFCP.py
@@ -10,7 +10,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   if ('sampleList' in dir()):
     stagetool = FileStagerTool(sampleList=sampleList)
   elif ('sampleFile' in dir()):
-    print "FileStager() : Processing sample file : %s" % sampleFile
+    printfunc ("FileStager() : Processing sample file : %s" % sampleFile)
     stagetool = FileStagerTool(sampleFile=sampleFile)
   
   ## Configure rf copy command used by the stager; default is 'lcg-cp -v --vo altas -t 1200'
@@ -29,7 +29,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   thejob = AlgSequence()
 
   ## check if collection names begin with "gridcopy"
-  print "FileStager() : doStaging ?", stagetool.DoStaging()
+  printfunc ("FileStager() : doStaging ?", stagetool.DoStaging())
 
   ## Import file stager algorithm
   from FileStager.FileStagerConf import FileStagerAlg
@@ -70,7 +70,7 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
   try:
     svcMgr = theApp.serviceMgr()
     svcMgr.EventSelector.InputCollections = ic
-  except Exception,inst:
+  except Exception as inst:
     pass
 
   ## else: athenaCommonFlags
@@ -79,13 +79,13 @@ if ('sampleList' in dir()) or ('sampleFile' in dir()):
       ## the Input AOD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.PoolAODInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
   else:
     try:
       ## the Input ESD File(s)
       from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
       athenaCommonFlags.PoolESDInput = ic
-    except Exception,inst:
+    except Exception as inst:
       pass
 
diff --git a/DetectorDescription/Identifier/Identifier/HWIdentifier.h b/DetectorDescription/Identifier/Identifier/HWIdentifier.h
index 548e58b23477de3e9b1c97e90ee816ec1d18cecc..78f79305f01041f4278fd78bab6cac872c459b43 100644
--- a/DetectorDescription/Identifier/Identifier/HWIdentifier.h
+++ b/DetectorDescription/Identifier/Identifier/HWIdentifier.h
@@ -1,5 +1,5 @@
 /*
-  Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 */
 
 //<doc><file>	$Id: HWIdentifier.h,v 1.3 2004-02-24 13:52:15 schaffer Exp $
@@ -29,6 +29,21 @@ public:
 };
 
 
+// Define a std::hash specialization
+
+namespace std {
+template<>
+struct hash<HWIdentifier>
+{
+  size_t operator()(const HWIdentifier& id) const
+  {
+    return static_cast<size_t>(id.get_compact());
+  }
+};
+}
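+// This lets HWIdentifier be used directly as a key in std::unordered_map
+// and std::unordered_set.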
+
+
+
 inline HWIdentifier::HWIdentifier()
     : Identifier::Identifier()
 {}
diff --git a/DetectorDescription/Identifier/Identifier/Identifier.h b/DetectorDescription/Identifier/Identifier/Identifier.h
index d6e670c5f3c954a430633ff6fbd3afabe9d27db4..f2dbfcbdfc0d1968db32c09510db98a9e0366c4e 100644
--- a/DetectorDescription/Identifier/Identifier/Identifier.h
+++ b/DetectorDescription/Identifier/Identifier/Identifier.h
@@ -1,5 +1,5 @@
 /*
-  Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 */
 
 #ifndef IDENTIFIER_IDENTIFIER_H
@@ -200,17 +200,22 @@ private:
 //-----------------------------------------------
 
 
+// Define a std::hash specialization
 
+namespace std {
+template<>
+struct hash<Identifier>
+{
+  size_t operator()(const Identifier& id) const
+  {
+    return static_cast<size_t>(id.get_compact());
+  }
+};
+}
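+// This lets Identifier be used directly as a key in std::unordered_map
+// and std::unordered_set.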
 
 
 
 
-
-
-
-//<<<<<< INLINE MEMBER FUNCTIONS                                        >>>>>>
-
-
 // Constructors
 //-----------------------------------------------
 inline Identifier::Identifier ()
diff --git a/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfig.py b/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfig.py
index aa939261cad38cf703092d44325850abc6dae32a..9b7c1e86907cb4f87a6f1bc8690bdc57ddb9e598 100644
--- a/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfig.py
+++ b/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfig.py
@@ -196,30 +196,53 @@ def BasicPixelDigitizationTool(name="PixelDigitizationTool", **kwargs):
         PixelConfigCondAlg.FEI4EndcapHitDiscConfig=[2,2,2]
 
         IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_2016.dat"
-        # ITk:
-        if geoFlags.isSLHC():
-            IdMappingDat = "ITk_Atlas_IdMapping.dat"
-            if "BrlIncl4.0_ref" == commonGeoFlags.GeoType():
-                IdMappingDat = "ITk_Atlas_IdMapping_InclBrl4.dat"
-            elif "IBrlExt4.0ref" == commonGeoFlags.GeoType():
-                IdMappingDat = "ITk_Atlas_IdMapping_IExtBrl4.dat"
-            elif "BrlExt4.0_ref" == commonGeoFlags.GeoType():
-                IdMappingDat = "ITk_Atlas_IdMapping_ExtBrl4.dat"
-            elif "BrlExt3.2_ref" == commonGeoFlags.GeoType():
-                IdMappingDat = "ITk_Atlas_IdMapping_ExtBrl32.dat"
-        elif (geoFlags.isIBL() == False):
-            IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping.dat"
-        else:
-            # Planar IBL
-            if (geoFlags.IBLLayout() == "planar"):
-                if (geoFlags.isDBM() == True):
-                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_inclIBL_DBM.dat"
+        rodIDForSingleLink40=0
+        from AthenaCommon.GlobalFlags import globalflags
+        if globalflags.DataSource() == 'geant4':
+            # ITk:
+            if geoFlags.isSLHC():
+                IdMappingDat = "ITk_Atlas_IdMapping.dat"
+                if "BrlIncl4.0_ref" == commonGeoFlags.GeoType():
+                    IdMappingDat = "ITk_Atlas_IdMapping_InclBrl4.dat"
+                elif "IBrlExt4.0ref" == commonGeoFlags.GeoType():
+                    IdMappingDat = "ITk_Atlas_IdMapping_IExtBrl4.dat"
+                elif "BrlExt4.0_ref" == commonGeoFlags.GeoType():
+                    IdMappingDat = "ITk_Atlas_IdMapping_ExtBrl4.dat"
+                elif "BrlExt3.2_ref" == commonGeoFlags.GeoType():
+                    IdMappingDat = "ITk_Atlas_IdMapping_ExtBrl32.dat"
+            elif (geoFlags.isIBL() == False):
+                IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping.dat"
+            else:
+                # Planar IBL
+                if (geoFlags.IBLLayout() == "planar"):
+                    if (geoFlags.isDBM() == True):
+                        IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_inclIBL_DBM.dat"
+                    else:
+                        IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_inclIBL.dat"
+                # Hybrid IBL plus DBM
+                elif (geoFlags.IBLLayout() == "3D"):
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_Run2.dat"
+        elif globalflags.DataSource() == 'data':  # for data overlay
+            from RecExConfig.AutoConfiguration import GetRunNumber
+            runNum = GetRunNumber()
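+            # select the fallback cabling map by run number (222222 marks the Run 1 / Run 2 boundary here)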
+            if (runNum<222222):
+                IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_May08.dat"
+                rodIDForSingleLink40=1300000
+            else:
+                PixelConfigCondAlg.UseCablingConditions = True
+                rodIDForSingleLink40=1300000
+                # Even though we are reading from COOL, set the correct fallback map.
+                if (runNum >= 344494):
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_344494.dat"
+                elif (runNum >= 314940 and runNum < 344494):
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_314940.dat"
+                elif (runNum >= 289350 and runNum < 314940): # 2016
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_2016.dat"
+                elif (runNum >= 222222 and runNum < 289350): # 2015
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_Run2.dat"
                 else:
-                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_inclIBL.dat"
-            # Hybrid IBL plus DBM
-            elif (geoFlags.IBLLayout() == "3D"):
-                IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_Run2.dat"
-        
+                    IdMappingDat="PixelCabling/Pixels_Atlas_IdMapping_May08.dat"
+
         PixelConfigCondAlg.CablingMapFileName=IdMappingDat
 
         condSeq += PixelConfigCondAlg(name="PixelConfigCondAlg")
@@ -274,11 +297,15 @@ def BasicPixelDigitizationTool(name="PixelDigitizationTool", **kwargs):
         from PixelConditionsAlgorithms.PixelConditionsAlgorithmsConf import PixelReadoutSpeedAlg
         condSeq += PixelReadoutSpeedAlg(name="PixelReadoutSpeedAlg")
 
+    if (globalflags.DataSource() == 'data' and conddb.dbdata == 'CONDBR2'):  # for data overlay
+        if not conddb.folderRequested("/PIXEL/CablingMap"):
+            conddb.addFolderSplitOnline("PIXEL", "/PIXEL/Onl/CablingMap","/PIXEL/CablingMap", className="AthenaAttributeList")
+
     if not hasattr(condSeq, 'PixelCablingCondAlg'):
         from PixelConditionsAlgorithms.PixelConditionsAlgorithmsConf import PixelCablingCondAlg
         condSeq += PixelCablingCondAlg(name="PixelCablingCondAlg",
                                        MappingFile=IdMappingDat,
-                                       RodIDForSingleLink40=0)
+                                       RodIDForSingleLink40=rodIDForSingleLink40)
 
     if not conddb.folderRequested("/PIXEL/PixReco"):
         conddb.addFolder("PIXEL_OFL", "/PIXEL/PixReco", className="DetCondCFloat")
diff --git a/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfigNew.py b/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfigNew.py
index 173f5c8fc61e9540afd08940b7acb2cca4edec73..7825bfdcd8ab6292d90b49c25a61538bbd00b568 100644
--- a/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfigNew.py
+++ b/InnerDetector/InDetDigitization/PixelDigitization/python/PixelDigitizationConfigNew.py
@@ -103,24 +103,28 @@ def SensorSim3DToolCfg(flags, name="SensorSim3DTool", **kwargs):
 def BarrelRD53SimToolCfg(flags, name="BarrelRD53SimTool", **kwargs):
     """Return a RD53SimTool configured for Barrel"""
     kwargs.setdefault("BarrelEC", 0)
+    kwargs.setdefault("DoNoise", flags.Digitization.DoInnerDetectorNoise)
     return RD53SimTool(name, **kwargs)
 
 
 def EndcapRD53SimToolCfg(flags, name="EndcapRD53SimTool", **kwargs):
     """Return a RD53SimTool configured for Endcap"""
     kwargs.setdefault("BarrelEC", 2)
+    kwargs.setdefault("DoNoise", flags.Digitization.DoInnerDetectorNoise)
     return RD53SimTool(name, **kwargs)
 
 
 def BarrelFEI4SimToolCfg(flags, name="BarrelFEI4SimTool", **kwargs):
     """Return a FEI4SimTool configured for Barrel"""
     kwargs.setdefault("BarrelEC", 0)
+    kwargs.setdefault("DoNoise", flags.Digitization.DoInnerDetectorNoise)
     return FEI4SimTool(name, **kwargs)
 
 
 def DBMFEI4SimToolCfg(flags, name="DBMFEI4SimTool", **kwargs):
     """Return a FEI4SimTool configured for Endcap"""
     kwargs.setdefault("BarrelEC", 4)
+    kwargs.setdefault("DoNoise", flags.Digitization.DoInnerDetectorNoise)
     return FEI4SimTool(name, **kwargs)
 
 
@@ -234,8 +238,8 @@ def PixelDigitizationSplitNoMergePUToolCfg(flags, name="PixelDigitizationSplitNo
 def PixelOverlayDigitizationToolCfg(flags, name="PixelOverlayDigitizationTool", **kwargs):
     """Return ComponentAccumulator with PixelDigitizationTool configured for overlay"""
     kwargs.setdefault("OnlyUseContainerName", False)
-    kwargs.setdefault("RDOCollName", "StoreGateSvc+" + flags.Overlay.SigPrefix + "PixelRDOs")
-    kwargs.setdefault("SDOCollName", "StoreGateSvc+" + flags.Overlay.SigPrefix + "PixelSDO_Map")
+    kwargs.setdefault("RDOCollName", flags.Overlay.SigPrefix + "PixelRDOs")
+    kwargs.setdefault("SDOCollName", flags.Overlay.SigPrefix + "PixelSDO_Map")
     kwargs.setdefault("HardScatterSplittingMode", 0)
     return PixelDigitizationBasicToolCfg(flags, name, **kwargs)
 
@@ -276,7 +280,11 @@ def PixelOverlayDigitizationBasicCfg(flags, **kwargs):
     if "DigitizationTool" not in kwargs:
         tool = acc.popToolsAndMerge(PixelOverlayDigitizationToolCfg(flags))
         kwargs["DigitizationTool"] = tool
-    acc.addEventAlgo(PixelDigitization(**kwargs))
+
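+    # in MT jobs, run one clone of the algorithm per thread (Gaudi Cardinality)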
+    if flags.Concurrency.NumThreads > 0:
+        kwargs.setdefault("Cardinality", flags.Concurrency.NumThreads)
+
+    acc.addEventAlgo(PixelDigitization(name="PixelOverlayDigitization", **kwargs))
     return acc
 
 
diff --git a/InnerDetector/InDetDigitization/TRT_Digitization/python/TRT_DigitizationConfigNew.py b/InnerDetector/InDetDigitization/TRT_Digitization/python/TRT_DigitizationConfigNew.py
index ec503c3d4d815fc2aaa7d6a3ec73bc31a01f9bef..4da92cd52d4769483ac706684b7b63f2800ed318 100644
--- a/InnerDetector/InDetDigitization/TRT_Digitization/python/TRT_DigitizationConfigNew.py
+++ b/InnerDetector/InDetDigitization/TRT_Digitization/python/TRT_DigitizationConfigNew.py
@@ -43,7 +43,7 @@ def TRT_DigitizationBasicToolCfg(flags, name="TRT_DigitizationBasicTool", **kwar
     acc.merge(MagneticFieldSvcCfg(flags))
     acc.addService(PartPropSvc(InputFile="PDGTABLE.MeV"))
     if flags.Detector.Overlay and not flags.Input.isMC:
-        acc.merge(addFolders(flags, "/TRT/Cond/DigVers", "TRT_OFL", className="CondAttrListCollection"))
+        acc.merge(addFolders(flags, "/TRT/Cond/DigVers", "TRT_OFL", tag="TRTCondDigVers-Collisions-01", db="OFLP200"))
     # default arguments
     kwargs.setdefault("PAI_Tool_Ar", TRT_PAI_Process_ArToolCfg(flags))
     kwargs.setdefault("PAI_Tool_Kr", TRT_PAI_Process_KrToolCfg(flags))
@@ -114,8 +114,8 @@ def TRT_OverlayDigitizationToolCfg(flags, name="TRT_OverlayDigitizationTool", **
     """Return ComponentAccumulator with configured Overlay TRT digitization tool"""
     acc = ComponentAccumulator()
     kwargs.setdefault("OnlyUseContainerName", False)
-    kwargs.setdefault("OutputObjectName", "StoreGateSvc+" + flags.Overlay.SigPrefix + "TRT_RDOs")
-    kwargs.setdefault("OutputSDOName", "StoreGateSvc+" + flags.Overlay.SigPrefix + "TRT_SDO_Map")
+    kwargs.setdefault("OutputObjectName", flags.Overlay.SigPrefix + "TRT_RDOs")
+    kwargs.setdefault("OutputSDOName", flags.Overlay.SigPrefix + "TRT_SDO_Map")
     kwargs.setdefault("HardScatterSplittingMode", 0)
     kwargs.setdefault("Override_getT0FromData", 0)
     kwargs.setdefault("Override_noiseInSimhits", 0)
@@ -153,7 +153,11 @@ def TRT_OverlayDigitizationBasicCfg(flags, **kwargs):
     if "DigitizationTool" not in kwargs:
         tool = acc.popToolsAndMerge(TRT_OverlayDigitizationToolCfg(flags))
         kwargs["DigitizationTool"] = tool
-    acc.addEventAlgo(TRTDigitization(**kwargs))
+
+    if flags.Concurrency.NumThreads > 0:
+        kwargs.setdefault("Cardinality", flags.Concurrency.NumThreads)
+
+    acc.addEventAlgo(TRTDigitization(name="TRT_OverlayDigitization", **kwargs))
     return acc
 
 
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/python/PixelOverlayConfig.py b/InnerDetector/InDetRawAlgs/InDetOverlay/python/PixelOverlayConfig.py
index 4bb8034a4587f375fa1542726f0ce3fff55e75b1..25e314a5391bc38937ac82b9779740d1424034bf 100644
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/python/PixelOverlayConfig.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/python/PixelOverlayConfig.py
@@ -7,7 +7,31 @@ from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
 from AthenaConfiguration.ComponentFactory import CompFactory
 
 
-def PixelOverlayAlgCfg(flags, name = "PixelOverlay", **kwargs):
+def PixelRawDataProviderAlgCfg(flags, name="PixelRawDataProvider", **kwargs):
+    """Return a ComponentAccumulator for pixel raw data provider"""
+    # Temporary until available in the central location
+    acc = ComponentAccumulator()
+
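+    # decode the background-event bytestream into RDOs stored under the background prefix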
+    kwargs.setdefault("RDOKey", flags.Overlay.BkgPrefix + "PixelRDOs")
+
+    PixelRawDataProvider = CompFactory.PixelRawDataProvider
+    alg = PixelRawDataProvider(name, **kwargs)
+    acc.addEventAlgo(alg)
+
+    return acc
+
+
+def PixelDataOverlayExtraCfg(flags, **kwargs):
+    """Return a ComponentAccumulator with pixel data overlay specifics"""
+    acc = ComponentAccumulator()
+
+    # We need to convert BS to RDO for data overlay
+    acc.merge(PixelRawDataProviderAlgCfg(flags))
+
+    return acc
+
+
+def PixelOverlayAlgCfg(flags, name="PixelOverlay", **kwargs):
     """Return a ComponentAccumulator for PixelOverlay algorithm"""
     acc = ComponentAccumulator()
 
@@ -16,37 +40,38 @@ def PixelOverlayAlgCfg(flags, name = "PixelOverlay", **kwargs):
     kwargs.setdefault("OutputKey", "PixelRDOs")
 
     # Do Pixel overlay
-    PixelOverlay=CompFactory.PixelOverlay
+    PixelOverlay = CompFactory.PixelOverlay
     alg = PixelOverlay(name, **kwargs)
     acc.addEventAlgo(alg)
 
     # Setup output
     from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
-    acc.merge(OutputStreamCfg(flags, "RDO", ItemList = [
+    acc.merge(OutputStreamCfg(flags, "RDO", ItemList=[
         "PixelRDO_Container#PixelRDOs"
     ]))
 
     return acc
 
 
-def PixelTruthOverlayCfg(flags, name = "PixelSDOOverlay", **kwargs):
+def PixelTruthOverlayCfg(flags, name="PixelSDOOverlay", **kwargs):
     """Return a ComponentAccumulator for the Pixel SDO overlay algorithm"""
     acc = ComponentAccumulator()
 
     # We do not need background Pixel SDOs
     kwargs.setdefault("BkgInputKey", "")
 
-    kwargs.setdefault("SignalInputKey", flags.Overlay.BkgPrefix + "PixelSDO_Map")
+    kwargs.setdefault("SignalInputKey",
+                      flags.Overlay.SigPrefix + "PixelSDO_Map")
     kwargs.setdefault("OutputKey", "PixelSDO_Map")
 
     # Do Pixel truth overlay
-    InDetSDOOverlay=CompFactory.InDetSDOOverlay
+    InDetSDOOverlay = CompFactory.InDetSDOOverlay
     alg = InDetSDOOverlay(name, **kwargs)
     acc.addEventAlgo(alg)
 
     # Setup output
     from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
-    acc.merge(OutputStreamCfg(flags, "RDO", ItemList = [
+    acc.merge(OutputStreamCfg(flags, "RDO", ItemList=[
         "InDetSimDataCollection#PixelSDO_Map"
     ]))
 
@@ -57,9 +82,13 @@ def PixelOverlayCfg(flags):
     """Configure and return a ComponentAccumulator for Pixel overlay"""
     acc = ComponentAccumulator()
 
+    # Add data overlay specifics
+    if flags.Overlay.DataOverlay:
+        acc.merge(PixelDataOverlayExtraCfg(flags))
+
     # Add Pixel overlay digitization algorithm
-    from PixelDigitization.PixelDigitizationConfigNew import PixelOverlayDigitizationCfg
-    acc.merge(PixelOverlayDigitizationCfg(flags))
+    from PixelDigitization.PixelDigitizationConfigNew import PixelOverlayDigitizationBasicCfg
+    acc.merge(PixelOverlayDigitizationBasicCfg(flags))
     # Add Pixel overlay algorithm
     acc.merge(PixelOverlayAlgCfg(flags))
     # Add Pixel truth overlay
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/python/SCTOverlayConfig.py b/InnerDetector/InDetRawAlgs/InDetOverlay/python/SCTOverlayConfig.py
index abcafb562a09605b77aeb2c218c77fef00f81e31..01f497e7a36a8f19df6161d312ae093d114c9705 100644
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/python/SCTOverlayConfig.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/python/SCTOverlayConfig.py
@@ -7,6 +7,28 @@ from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
 from AthenaConfiguration.ComponentFactory import CompFactory
 
 
+def SCT_ConfigurationConditionsCfg(flags, **kwargs):
+    """Return a ComponentAccumulator for SCT configuration conditions"""
+    # Temporary until available in the central location
+    from SCT_GeoModel.SCT_GeoModelConfig import SCT_GeometryCfg
+    acc = SCT_GeometryCfg(flags)
+
+    SCT_ConfigurationConditionsTool = CompFactory.SCT_ConfigurationConditionsTool
+    acc.addPublicTool(SCT_ConfigurationConditionsTool())
+
+    channelFolder = "/SCT/DAQ/Config/Chip"
+    moduleFolder = "/SCT/DAQ/Config/Module"
+    murFolder = "/SCT/DAQ/Config/MUR"
+    from IOVDbSvc.IOVDbSvcConfig import addFolders
+    acc.merge(addFolders(flags, [channelFolder, moduleFolder, murFolder],
+                         "SCT", className="CondAttrListVec"))
+    SCT_ConfigurationCondAlg = CompFactory.SCT_ConfigurationCondAlg
+    acc.addCondAlgo(SCT_ConfigurationCondAlg(ReadKeyChannel=channelFolder,
+                                             ReadKeyModule=moduleFolder,
+                                             ReadKeyMur=murFolder))
+    return acc
+
+
 def SCTRawDataProviderAlgCfg(flags, name="SCTRawDataProvider", **kwargs):
     """Return a ComponentAccumulator for SCT raw data provider"""
     # Temporary until available in the central location
@@ -20,24 +42,16 @@ def SCTRawDataProviderAlgCfg(flags, name="SCTRawDataProvider", **kwargs):
     alg = SCTRawDataProvider(name, **kwargs)
     acc.addEventAlgo(alg)
 
-    # load the SCTEventFlagWriter
-    SCT_ConfigurationConditionsTool = CompFactory.SCT_ConfigurationConditionsTool
-    acc.addPublicTool(SCT_ConfigurationConditionsTool())
+    return acc
 
-    channelFolder = "/SCT/DAQ/Config/Chip"
-    moduleFolder = "/SCT/DAQ/Config/Module"
-    murFolder = "/SCT/DAQ/Config/MUR"
-    SCT_ConfigurationCondAlg = CompFactory.SCT_ConfigurationCondAlg
-    acc.addCondAlgo(SCT_ConfigurationCondAlg(ReadKeyChannel = channelFolder,
-                                             ReadKeyModule = moduleFolder,
-                                             ReadKeyMur = murFolder))
-    from IOVDbSvc.IOVDbSvcConfig import addFolders
-    acc.merge(addFolders(flags, [channelFolder, moduleFolder, murFolder], "SCT", className="CondAttrListVec"))
 
+def SCTEventFlagWriterCfg(flags, **kwargs):
+    """Return a ComponentAccumulator for SCT event flag writer"""
+    # Temporary until available in the central location
+    acc = ComponentAccumulator()
     SCTEventFlagWriter = CompFactory.SCTEventFlagWriter
     alg = SCTEventFlagWriter()
     acc.addEventAlgo(alg)
-
     return acc
 
 
@@ -45,13 +59,19 @@ def SCTDataOverlayExtraCfg(flags, **kwargs):
     """Return a ComponentAccumulator with SCT data overlay specifics"""
     acc = ComponentAccumulator()
 
-    # We need to convert BS to RDO for data overlay
-    acc.merge(SCTRawDataProviderAlgCfg(flags))
-
     # Add SCT cabling conditions
     from SCT_Cabling.SCT_CablingConfig import SCT_CablingCondAlgCfg
     acc.merge(SCT_CablingCondAlgCfg(flags))
 
+    # Add SCT configuration conditions
+    acc.merge(SCT_ConfigurationConditionsCfg(flags))
+
+    # We need to convert BS to RDO for data overlay
+    acc.merge(SCTRawDataProviderAlgCfg(flags))
+
+    # Add SCT event flag writer
+    acc.merge(SCTEventFlagWriterCfg(flags))
+
     return acc
 
 
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRTOverlayConfig.py b/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRTOverlayConfig.py
index 2440b7c0e8049127eb35021b0ab8ba2be4b138a6..79ac6acb2d3d8f5a40b7da970504aa3c08a29012 100644
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRTOverlayConfig.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRTOverlayConfig.py
@@ -6,16 +6,53 @@ Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
 from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
 from AthenaConfiguration.ComponentFactory import CompFactory
 
-def TRTOverlayAlgCfg(flags, name = "TRTOverlay", **kwargs):
+
+def TRTRawDataProviderAlgCfg(flags, name="TRTRawDataProvider", **kwargs):
+    """Return a ComponentAccumulator for TRT raw data provider"""
+    # Temporary until available in the central location
+    acc = ComponentAccumulator()
+
+    kwargs.setdefault("RDOKey", flags.Overlay.BkgPrefix + "TRT_RDOs")
+
+    TRTRawDataProvider = CompFactory.TRTRawDataProvider
+    alg = TRTRawDataProvider(name, **kwargs)
+    acc.addEventAlgo(alg)
+
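+    # register the drift-circle container with the bytestream address provider so it can be created on demand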
+    ByteStreamAddressProviderSvc = CompFactory.ByteStreamAddressProviderSvc
+    bsAddressProviderSvc = ByteStreamAddressProviderSvc(TypeNames=[
+        "InDet::TRT_DriftCircleContainer/TRT_DriftCircle",
+    ])
+    acc.addService(bsAddressProviderSvc)
+
+    return acc
+
+
+def TRTDataOverlayExtraCfg(flags, **kwargs):
+    """Return a ComponentAccumulator with TRT data overlay specifics"""
+    acc = ComponentAccumulator()
+
+    # We need to convert BS to RDO for data overlay
+    acc.merge(TRTRawDataProviderAlgCfg(flags))
+
+    # Add additional conditions infrastructure
+    from InDetOverlay.TRT_ConditionsConfig import TRT_CablingSvcCfg, TRT_OnlineFoldersCfg
+    acc.merge(TRT_CablingSvcCfg(flags))
+    acc.merge(TRT_OnlineFoldersCfg(flags))
+
+    return acc
+
+
+def TRTOverlayAlgCfg(flags, name="TRTOverlay", **kwargs):
     """Return a ComponentAccumulator for TRTOverlay algorithm"""
     acc = ComponentAccumulator()
     from TRT_GeoModel.TRT_GeoModelConfig import TRT_GeometryCfg
-    acc.merge(TRT_GeometryCfg( flags ))
+    acc.merge(TRT_GeometryCfg(flags))
     from InDetOverlay.TRT_ConditionsConfig import TRTStrawCondAlgCfg
     acc.merge(TRTStrawCondAlgCfg(flags))
 
     kwargs.setdefault("BkgInputKey", flags.Overlay.BkgPrefix + "TRT_RDOs")
     kwargs.setdefault("SignalInputKey", flags.Overlay.SigPrefix + "TRT_RDOs")
+    kwargs.setdefault("SignalInputSDOKey", flags.Overlay.SigPrefix + "TRT_SDO_Map")
     kwargs.setdefault("OutputKey", "TRT_RDOs")
 
     # HT hit correction fraction
@@ -25,41 +62,47 @@ def TRTOverlayAlgCfg(flags, name = "TRTOverlay", **kwargs):
     kwargs.setdefault("TRT_HT_OccupancyCorrectionEndcapNoE", 0.050)
 
     # Do TRT overlay
-    TRTOverlay=CompFactory.TRTOverlay
+    TRTOverlay = CompFactory.TRTOverlay
     alg = TRTOverlay(name, **kwargs)
 
     from InDetOverlay.TRT_ConditionsConfig import TRT_LocalOccupancyCfg, TRT_StrawStatusSummaryToolCfg
-    alg.TRT_LocalOccupancyTool = acc.popToolsAndMerge(TRT_LocalOccupancyCfg(flags))
-    alg.TRTStrawSummaryTool = acc.popToolsAndMerge(TRT_StrawStatusSummaryToolCfg(flags))
+    alg.TRT_LocalOccupancyTool = acc.popToolsAndMerge(
+        TRT_LocalOccupancyCfg(flags))
+    alg.TRTStrawSummaryTool = acc.popToolsAndMerge(
+        TRT_StrawStatusSummaryToolCfg(flags))
     acc.addEventAlgo(alg)
 
     # Setup output
     from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
-    acc.merge(OutputStreamCfg(flags, "RDO", ItemList = [
+    acc.merge(OutputStreamCfg(flags, "RDO", ItemList=[
         "TRT_RDO_Container#TRT_RDOs"
     ]))
 
     return acc
 
 
-def TRTTruthOverlayCfg(flags, name = "TRTSDOOverlay", **kwargs):
+def TRTTruthOverlayCfg(flags, name="TRTSDOOverlay", **kwargs):
     """Return a ComponentAccumulator for the TRT SDO overlay algorithm"""
     acc = ComponentAccumulator()
 
-    # We do not need background TRT SDOs
-    kwargs.setdefault("BkgInputKey", "")
+    # We do not need background TRT SDOs for data overlay
+    if flags.Overlay.DataOverlay:
+        kwargs.setdefault("BkgInputKey", "")
+    else:
+        kwargs.setdefault("BkgInputKey", flags.Overlay.BkgPrefix + "TRT_SDO_Map")
 
-    kwargs.setdefault("SignalInputKey", flags.Overlay.BkgPrefix + "TRT_SDO_Map")
+    kwargs.setdefault("SignalInputKey",
+                      flags.Overlay.SigPrefix + "TRT_SDO_Map")
     kwargs.setdefault("OutputKey", "TRT_SDO_Map")
 
     # Do TRT truth overlay
-    InDetSDOOverlay=CompFactory.InDetSDOOverlay
+    InDetSDOOverlay = CompFactory.InDetSDOOverlay
     alg = InDetSDOOverlay(name, **kwargs)
     acc.addEventAlgo(alg)
 
     # Setup output
     from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
-    acc.merge(OutputStreamCfg(flags, "RDO", ItemList = [
+    acc.merge(OutputStreamCfg(flags, "RDO", ItemList=[
         "InDetSimDataCollection#TRT_SDO_Map"
     ]))
 
@@ -70,9 +113,13 @@ def TRTOverlayCfg(flags):
     """Configure and return a ComponentAccumulator for TRT overlay"""
     acc = ComponentAccumulator()
 
+    # Add data overlay specifics
+    if flags.Overlay.DataOverlay:
+        acc.merge(TRTDataOverlayExtraCfg(flags))
+
     # Add TRT overlay digitization algorithm
-    from TRT_Digitization.TRT_DigitizationConfigNew import TRT_OverlayDigitizationCfg
-    acc.merge(TRT_OverlayDigitizationCfg(flags))
+    from TRT_Digitization.TRT_DigitizationConfigNew import TRT_OverlayDigitizationBasicCfg
+    acc.merge(TRT_OverlayDigitizationBasicCfg(flags))
     # Add TRT overlay algorithm
     acc.merge(TRTOverlayAlgCfg(flags))
     # Add TRT truth overlay
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRT_ConditionsConfig.py b/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRT_ConditionsConfig.py
index ed124dfb894bfa883b16c5b7d2a2dce1665dc8aa..b0e6d7bde59d45a01a9a1af055f896af7f8d5c07 100644
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRT_ConditionsConfig.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/python/TRT_ConditionsConfig.py
@@ -1,52 +1,89 @@
 """Define methods to construct configured TRT conditions tools and
 algorithms needed by TRT Overlay - FIXME should move somewhere else
 
-Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 """
 
 from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
 from AthenaConfiguration.ComponentFactory import CompFactory
 
-def TRT_CalDbToolCfg(flags, name = "TRT_CalDbTool"):
+
+def TRT_OnlineFoldersCfg(flags):
+    """Setup online conditions folders for TRT data overlay"""
+    from IOVDbSvc.IOVDbSvcConfig import addFolders
+    acc = ComponentAccumulator()
+    acc.merge(addFolders(flags, "/TRT/Onl/ROD/Compress",
+                         "TRT_ONL", className='CondAttrListCollection'))
+    return acc
+
+
+def TRT_CalDbToolCfg(flags, name="TRT_CalDbTool"):
     """Return a ComponentAccumulator for TRT_CalDbTool"""
     from IOVDbSvc.IOVDbSvcConfig import addFoldersSplitOnline
-    acc = addFoldersSplitOnline(flags,"TRT","/TRT/Onl/Calib/T0","/TRT/Calib/T0",className='TRTCond::StrawT0MultChanContainer')
-    TRT_CalDbTool=CompFactory.TRT_CalDbTool
-    acc.setPrivateTools(TRT_CalDbTool(name = "TRT_CalDbTool"))
+    acc = ComponentAccumulator()
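+    # the tool reads four calibration folders: RT relation, T0, errors2d and slopes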
+    acc.merge(addFoldersSplitOnline(flags, "TRT", "/TRT/Onl/Calib/RT", "/TRT/Calib/RT",
+                                    className="TRTCond::RtRelationMultChanContainer"))
+    acc.merge(addFoldersSplitOnline(flags, "TRT", "/TRT/Onl/Calib/T0", "/TRT/Calib/T0",
+                                    className='TRTCond::StrawT0MultChanContainer'))
+    acc.merge(addFoldersSplitOnline(flags, "TRT", "/TRT/Onl/Calib/errors2d", "/TRT/Calib/errors2d",
+                                    className="TRTCond::RtRelationMultChanContainer"))
+    acc.merge(addFoldersSplitOnline(flags, "TRT", "/TRT/Onl/Calib/slopes", "/TRT/Calib/slopes",
+                                    className='TRTCond::RtRelationMultChanContainer'))
+
+    TRT_CalDbTool = CompFactory.TRT_CalDbTool
+    acc.setPrivateTools(TRT_CalDbTool(name="TRT_CalDbTool"))
     return acc
 
 
-def TRT_StrawStatusSummaryToolCfg(flags, name = "TRT_StrawStatusSummaryTool"):
+def TRT_StrawStatusSummaryToolCfg(flags, name="TRT_StrawStatusSummaryTool"):
     """Return a ComponentAccumulator for TRT_StrawStatusSummaryTool"""
     acc = ComponentAccumulator()
-    TRT_StrawStatusSummaryTool=CompFactory.TRT_StrawStatusSummaryTool
-    acc.setPrivateTools(TRT_StrawStatusSummaryTool(name = "TRT_StrawStatusSummaryTool",
-                                                   isGEANT4 = flags.Detector.Simulate))
+    TRT_StrawStatusSummaryTool = CompFactory.TRT_StrawStatusSummaryTool
+    acc.setPrivateTools(TRT_StrawStatusSummaryTool(name="TRT_StrawStatusSummaryTool",
+                                                   isGEANT4=flags.Detector.Simulate))
     return acc
 
 
-def TRT_LocalOccupancyCfg(flags, name = "TRT_LocalOccupancy"):
+def TRT_LocalOccupancyCfg(flags, name="TRT_LocalOccupancy"):
     """Return a ComponentAccumulator for TRT_LocalOccupancy Tool"""
     acc = ComponentAccumulator()
     trtCalDbTool = acc.popToolsAndMerge(TRT_CalDbToolCfg(flags))
-    trtStrawStatusSummaryTool = acc.popToolsAndMerge(TRT_StrawStatusSummaryToolCfg(flags))
+    trtStrawStatusSummaryTool = acc.popToolsAndMerge(
+        TRT_StrawStatusSummaryToolCfg(flags))
 
-    InDet__TRT_LocalOccupancy=CompFactory.InDet__TRT_LocalOccupancy
-    acc.setPrivateTools(InDet__TRT_LocalOccupancy(name = "TRT_LocalOccupancy",
-                                                  isTrigger = False,
-                                                  TRTCalDbTool = trtCalDbTool,
-                                                  TRTStrawStatusSummaryTool =  trtStrawStatusSummaryTool))
+    InDet__TRT_LocalOccupancy = CompFactory.InDet__TRT_LocalOccupancy
+    acc.setPrivateTools(InDet__TRT_LocalOccupancy(name="TRT_LocalOccupancy",
+                                                  isTrigger=False,
+                                                  TRTCalDbTool=trtCalDbTool,
+                                                  TRTStrawStatusSummaryTool=trtStrawStatusSummaryTool,
+                                                  TRT_RDOContainerName="",
+                                                  TRT_DriftCircleCollection="",
+                                                  ))
     return acc
 
 
-def TRTStrawCondAlgCfg(flags, name = "TRTStrawCondAlg"):
+def TRTStrawCondAlgCfg(flags, name="TRTStrawCondAlg"):
     """Return a ComponentAccumulator for TRTStrawCondAlg algorithm"""
     acc = ComponentAccumulator()
-    trtStrawStatusSummaryTool = acc.popToolsAndMerge(TRT_StrawStatusSummaryToolCfg(flags))
+    trtStrawStatusSummaryTool = acc.popToolsAndMerge(
+        TRT_StrawStatusSummaryToolCfg(flags))
     # Alive straws algorithm
-    TRTStrawCondAlg=CompFactory.TRTStrawCondAlg
-    acc.addCondAlgo(TRTStrawCondAlg(name = "TRTStrawCondAlg",
-                                    TRTStrawStatusSummaryTool = trtStrawStatusSummaryTool,
-                                    isGEANT4 =flags.Detector.Simulate))
+    TRTStrawCondAlg = CompFactory.TRTStrawCondAlg
+    acc.addCondAlgo(TRTStrawCondAlg(name="TRTStrawCondAlg",
+                                    TRTStrawStatusSummaryTool=trtStrawStatusSummaryTool,
+                                    isGEANT4=flags.Detector.Simulate))
+    return acc
+
+
+def TRT_CablingSvcCfg(flags):
+    """Return a ComponentAccumulator for TRT_CablingSvc service"""
+    acc = ComponentAccumulator()
+    # Properly configure MC/data for TRT cabling
+    TRT_FillCablingData_DC3 = CompFactory.TRT_FillCablingData_DC3
+    tool = TRT_FillCablingData_DC3(RealData=not flags.Input.isMC)
+    acc.addPublicTool(tool)
+    # Setup TRT cabling service
+    TRT_CablingSvc = CompFactory.TRT_CablingSvc
+    acc.addService(TRT_CablingSvc())
     return acc
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/test/PixelOverlayConfig_test.py b/InnerDetector/InDetRawAlgs/InDetOverlay/test/PixelOverlayConfig_test.py
index 467864c7e429cad777a694e540adc5b8e68f4f4d..18d0938923d0440e5f55b0cacea71bfe8749d4bc 100755
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/test/PixelOverlayConfig_test.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/test/PixelOverlayConfig_test.py
@@ -8,12 +8,12 @@ import sys
 from AthenaCommon.Configurable import Configurable
 from AthenaConfiguration.AllConfigFlags import ConfigFlags
 from AthenaConfiguration.MainServicesConfig import MainServicesThreadedCfg
-from AthenaConfiguration.TestDefaults import defaultTestFiles
 from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
 from InDetOverlay.PixelOverlayConfig import PixelOverlayCfg
 from OverlayConfiguration.OverlayTestHelpers import \
-    CommonTestArgumentParser, postprocessAndLockFlags, printAndRun
+    CommonTestArgumentParser, defaultTestFlags, postprocessAndLockFlags, printAndRun
 from OverlayCopyAlgs.OverlayCopyAlgsConfig import CopyMcEventCollectionCfg
+from xAODEventInfoCnv.xAODEventInfoCnvConfig import EventInfoOverlayCfg
 
 # Configure
 Configurable.configurableRun3Behavior = True
@@ -23,10 +23,8 @@ parser = CommonTestArgumentParser("PixelOverlayConfig_test.py")
 args = parser.parse_args()
 
 # Configure
-ConfigFlags.Input.Files = defaultTestFiles.RDO_BKG
-ConfigFlags.Input.SecondaryFiles = defaultTestFiles.HITS
-ConfigFlags.IOVDb.GlobalTag = "OFLCOND-MC16-SDR-16"
-ConfigFlags.Overlay.DataOverlay = False
+defaultTestFlags(ConfigFlags, args)
+
 ConfigFlags.Output.RDOFileName = "myRDO.pool.root"
 ConfigFlags.Output.RDO_SGNLFileName = "myRDO_SGNL.pool.root"
 
@@ -36,7 +34,8 @@ postprocessAndLockFlags(ConfigFlags, args)
 acc = MainServicesThreadedCfg(ConfigFlags)
 acc.merge(PoolReadCfg(ConfigFlags))
 
-# Add truth overlay (needed downstream)
+# Add event and truth overlay (needed downstream)
+acc.merge(EventInfoOverlayCfg(ConfigFlags))
 acc.merge(CopyMcEventCollectionCfg(ConfigFlags))
 
 # Add Pixel overlay
diff --git a/InnerDetector/InDetRawAlgs/InDetOverlay/test/TRTOverlayConfig_test.py b/InnerDetector/InDetRawAlgs/InDetOverlay/test/TRTOverlayConfig_test.py
index 1fcf8b39e6ee369fef8ce06fab8718c3743b0416..c328ced7acbabdee576ab8a5de191f84abc9c422 100755
--- a/InnerDetector/InDetRawAlgs/InDetOverlay/test/TRTOverlayConfig_test.py
+++ b/InnerDetector/InDetRawAlgs/InDetOverlay/test/TRTOverlayConfig_test.py
@@ -8,12 +8,12 @@ import sys
 from AthenaCommon.Configurable import Configurable
 from AthenaConfiguration.AllConfigFlags import ConfigFlags
 from AthenaConfiguration.MainServicesConfig import MainServicesThreadedCfg
-from AthenaConfiguration.TestDefaults import defaultTestFiles
 from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
 from InDetOverlay.TRTOverlayConfig import TRTOverlayCfg
 from OverlayConfiguration.OverlayTestHelpers import \
-    CommonTestArgumentParser, postprocessAndLockFlags, printAndRun
+    CommonTestArgumentParser, defaultTestFlags, postprocessAndLockFlags, printAndRun
 from OverlayCopyAlgs.OverlayCopyAlgsConfig import CopyMcEventCollectionCfg
+from xAODEventInfoCnv.xAODEventInfoCnvConfig import EventInfoOverlayCfg
 
 # Configure
 Configurable.configurableRun3Behavior = True
@@ -23,10 +23,8 @@ parser = CommonTestArgumentParser("TRTOverlayConfig_test.py")
 args = parser.parse_args()
 
 # Configure
-ConfigFlags.Input.Files = defaultTestFiles.RDO_BKG
-ConfigFlags.Input.SecondaryFiles = defaultTestFiles.HITS
-ConfigFlags.IOVDb.GlobalTag = "OFLCOND-MC16-SDR-16"
-ConfigFlags.Overlay.DataOverlay = False
+defaultTestFlags(ConfigFlags, args)
+
 ConfigFlags.Output.RDOFileName = "myRDO.pool.root"
 ConfigFlags.Output.RDO_SGNLFileName = "myRDO_SGNL.pool.root"
 
@@ -36,7 +34,8 @@ postprocessAndLockFlags(ConfigFlags, args)
 acc = MainServicesThreadedCfg(ConfigFlags)
 acc.merge(PoolReadCfg(ConfigFlags))
 
-# Add truth overlay (needed downstream)
+# Add event and truth overlay (needed downstream)
+acc.merge(EventInfoOverlayCfg(ConfigFlags))
 acc.merge(CopyMcEventCollectionCfg(ConfigFlags))
 
 # Add TRT overlay
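
Both test updates above share the same skeleton: defaultTestFlags replaces the per-test input/tag boilerplate, and EventInfoOverlayCfg is scheduled ahead of the truth copy. A minimal sketch of that skeleton for a further detector test, using only the module paths imported above; the SCT variant and the final printAndRun call are illustrative assumptions:

    # Minimal sketch of the shared overlay-test skeleton; module paths are
    # taken from the imports above, everything else is illustrative.
    import sys

    from AthenaCommon.Configurable import Configurable
    from AthenaConfiguration.AllConfigFlags import ConfigFlags
    from AthenaConfiguration.MainServicesConfig import MainServicesThreadedCfg
    from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
    from InDetOverlay.SCTOverlayConfig import SCTOverlayCfg
    from OverlayConfiguration.OverlayTestHelpers import \
        CommonTestArgumentParser, defaultTestFlags, postprocessAndLockFlags, printAndRun
    from OverlayCopyAlgs.OverlayCopyAlgsConfig import CopyMcEventCollectionCfg
    from xAODEventInfoCnv.xAODEventInfoCnvConfig import EventInfoOverlayCfg

    Configurable.configurableRun3Behavior = True
    args = CommonTestArgumentParser("SCTOverlayConfig_test.py").parse_args()

    defaultTestFlags(ConfigFlags, args)  # inputs, GlobalTag and overlay mode in one call
    ConfigFlags.Output.RDOFileName = "myRDO.pool.root"
    ConfigFlags.Output.RDO_SGNLFileName = "myRDO_SGNL.pool.root"
    postprocessAndLockFlags(ConfigFlags, args)

    acc = MainServicesThreadedCfg(ConfigFlags)
    acc.merge(PoolReadCfg(ConfigFlags))
    acc.merge(EventInfoOverlayCfg(ConfigFlags))       # event overlay first
    acc.merge(CopyMcEventCollectionCfg(ConfigFlags))  # then the truth copy
    acc.merge(SCTOverlayCfg(ConfigFlags))             # detector overlay last

    sys.exit(printAndRun(acc, ConfigFlags, args))     # signature assumed from the imports
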
diff --git a/InnerDetector/InDetRecTools/TRT_ElectronPidTools/src/TRT_LocalOccupancy.cxx b/InnerDetector/InDetRecTools/TRT_ElectronPidTools/src/TRT_LocalOccupancy.cxx
index 52411db49680158aaca2517cbf72544afa0d2259..e9497959cefc3d33f43ecd48cbca6e76122e39c6 100644
--- a/InnerDetector/InDetRecTools/TRT_ElectronPidTools/src/TRT_LocalOccupancy.cxx
+++ b/InnerDetector/InDetRecTools/TRT_ElectronPidTools/src/TRT_LocalOccupancy.cxx
@@ -83,8 +83,8 @@ StatusCode TRT_LocalOccupancy::initialize()
   ATH_MSG_INFO ("initialize() successful in " << name());
 
   //Initlalize ReadHandleKey
-  ATH_CHECK( m_trt_rdo_location.initialize() );
-  ATH_CHECK( m_trt_driftcircles.initialize() );
+  ATH_CHECK( m_trt_rdo_location.initialize(!m_trt_rdo_location.empty()) );
+  ATH_CHECK( m_trt_driftcircles.initialize(!m_trt_driftcircles.empty()) );
   ATH_CHECK( m_strawReadKey.initialize() );
 
   return StatusCode::SUCCESS;
diff --git a/LArCalorimeter/LArCnv/LArByteStream/python/LArRawCalibDataReadingConfig.py b/LArCalorimeter/LArCnv/LArByteStream/python/LArRawCalibDataReadingConfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..f90fc3aafcaa6049789f29d44decdade54507e7f
--- /dev/null
+++ b/LArCalorimeter/LArCnv/LArByteStream/python/LArRawCalibDataReadingConfig.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+
+from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
+from AthenaConfiguration.ComponentFactory import CompFactory
+from ByteStreamCnvSvc.ByteStreamConfig import ByteStreamReadCfg
+LArRawCalibDataReadingAlg=CompFactory.LArRawCalibDataReadingAlg
+
+def LArRawCalibDataReadingCfg(configFlags,gain="HIGH",doAccDigit=False,doAccCalibDigit=False,doCalibDigit=False):
+    acc=ComponentAccumulator()
+    from DetDescrCnvSvc.DetDescrCnvSvcConfig import DetDescrCnvSvcCfg
+    acc.merge(DetDescrCnvSvcCfg(configFlags))
+    acc.merge(ByteStreamReadCfg(configFlags))    
+    accKey=""
+    accCalibKey=""
+    calibKey=""
+    if doAccDigit:
+       accKey=gain
+    if doAccCalibDigit:
+       accCalibKey=gain
+    if doCalibDigit:
+       calibKey=gain
+
+    cread = LArRawCalibDataReadingAlg(LArAccDigitKey=accKey, LArAccCalibDigitKey=accCalibKey,
+                                      LArCalibDigitKey=calibKey, LArFebHeaderKey="LArFebHeader")
+    acc.addEventAlgo(cread)
+    return acc
+
+
+if __name__=="__main__":
+
+    from AthenaConfiguration.AllConfigFlags import ConfigFlags
+    from AthenaCommon.Logging import log
+    from AthenaCommon.Constants import DEBUG
+    from AthenaCommon.Configurable import Configurable
+    Configurable.configurableRun3Behavior=1
+    log.setLevel(DEBUG)
+
+    ConfigFlags.LAr.doAlign=False
+    ConfigFlags.Input.Files = ["/eos/atlas/atlastier0/rucio/data20_calib/calibration_LArElec-Pedestal-32s-High-All/00374735/data20_calib.00374735.calibration_LArElec-Pedestal-32s-High-All.daq.RAW/data20_calib.00374735.calibration_LArElec-Pedestal-32s-High-All.daq.RAW._lb0000._SFO-3._0001.data"]
+    ConfigFlags.lock()
+
+    acc=LArRawCalibDataReadingCfg(ConfigFlags)
+    
+    from LArCabling.LArCablingConfig import LArOnOffIdMappingCfg 
+    acc.merge(LArOnOffIdMappingCfg(ConfigFlags))
+
+    f=open("LArRawCalibDataReading.pkl","wb")
+    acc.store(f)
+    f.close()
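
In the config above the gain string doubles as the StoreGate key, and exactly one of the three digit switches is expected to be on (the algorithm enforces this in initialize(), see below). A usage sketch mirroring the pedestal-monitoring caller further down; acc and ConfigFlags are assumed to exist and be locked:

    # Sketch: schedule the reader for accumulated calib digits in MEDIUM gain.
    # 'acc' is an existing ComponentAccumulator; ConfigFlags is assumed locked.
    from LArByteStream.LArRawCalibDataReadingConfig import LArRawCalibDataReadingCfg

    acc.merge(LArRawCalibDataReadingCfg(ConfigFlags, gain="MEDIUM", doAccCalibDigit=True))
    # The LArAccumulatedCalibDigitContainer is then recorded under the key
    # "MEDIUM"; LArFebHeaderKey keeps its default "LArFebHeader".
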
diff --git a/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.cxx b/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..b46998e765336d7fa16900f01fc61680e5578a5a
--- /dev/null
+++ b/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.cxx
@@ -0,0 +1,313 @@
+/*
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+*/
+
+#include "LArRawCalibDataReadingAlg.h"
+#include "LArIdentifier/LArOnlineID.h"
+#include "ByteStreamCnvSvcBase/IROBDataProviderSvc.h" 
+#include "LArRawEvent/LArCalibDigitContainer.h"
+#include "LArRawEvent/LArAccumulatedDigitContainer.h"
+#include "LArRawEvent/LArAccumulatedCalibDigitContainer.h"
+#include "LArRawEvent/LArFebHeaderContainer.h"
+#include "eformat/Version.h"
+#include "eformat/index.h"
+
+#include "LArByteStream/LArRodBlockStructure.h"
+#include "LArByteStream/LArRodBlockAccumulatedV3.h"
+#include "LArByteStream/LArRodBlockCalibrationV3.h"
+#include "LArByteStream/LArRodBlockTransparentV0.h"
+
+LArRawCalibDataReadingAlg::LArRawCalibDataReadingAlg(const std::string& name, ISvcLocator* pSvcLocator) :  
+  AthReentrantAlgorithm(name, pSvcLocator) {}
+
+StatusCode LArRawCalibDataReadingAlg::initialize() {
+
+  if (m_calibDigitKey.key().size()>0) {
+    ATH_CHECK(m_calibDigitKey.initialize());
+    m_doCalibDigits=true;
+  }
+  else {
+    m_doCalibDigits=false;
+  }
+
+  if (m_accDigitKey.key().size()>0) {
+    ATH_CHECK(m_accDigitKey.initialize());
+    m_doAccDigits=true;
+  }
+  else {
+    m_doAccDigits=false;
+  }
+
+  if (m_accCalibDigitKey.key().size()>0) {
+    ATH_CHECK(m_accCalibDigitKey.initialize());
+    m_doAccCalibDigits=true;
+  }
+  else {
+    m_doAccCalibDigits=false;
+  }
+
+  if (m_febHeaderKey.key().size()>0) {
+    ATH_CHECK(m_febHeaderKey.initialize());
+    m_doFebHeaders=true;
+  }
+  else {
+    m_doFebHeaders=false;
+  }
+  
+  if(!(m_doCalibDigits || m_doAccDigits || m_doAccCalibDigits)) {
+     ATH_MSG_FATAL("Needs ether CalibDigits or AccDigits  or AccCalibDigit Key");
+     return StatusCode::FAILURE;
+  }
+
+  if(m_doCalibDigits && m_doAccCalibDigits) {
+     ATH_MSG_FATAL("Could not have both CalibDigits, AccCalibDigits Key");
+     return StatusCode::FAILURE;
+  }
+
+  if(m_doAccDigits && (m_doCalibDigits || m_doAccCalibDigits)) {
+     ATH_MSG_FATAL("Could not have AccDigits with Calib Key");
+     return StatusCode::FAILURE;
+  }
+
+  ATH_CHECK(m_robDataProviderSvc.retrieve());
+  ATH_CHECK(detStore()->retrieve(m_onlineId,"LArOnlineID"));  
+  return StatusCode::SUCCESS;
+}     
+  
+StatusCode LArRawCalibDataReadingAlg::execute(const EventContext& ctx) const {
+  LArCalibDigitContainer* cdigits=nullptr;
+  LArAccumulatedDigitContainer* accdigits=nullptr;
+  LArAccumulatedCalibDigitContainer* caccdigits=nullptr;
+  LArFebHeaderContainer* febHeaders=nullptr;
+
+  if (m_doCalibDigits) {
+    SG::WriteHandle<LArCalibDigitContainer> cdigitsHdl(m_calibDigitKey,ctx);
+    ATH_CHECK(cdigitsHdl.record(std::make_unique<LArCalibDigitContainer>()));
+    cdigits=cdigitsHdl.ptr();
+    cdigits->reserve(200000); //Enough space for the full calo
+  }
+
+  if (m_doAccDigits) {
+    SG::WriteHandle<LArAccumulatedDigitContainer> accdigitsHdl(m_accDigitKey,ctx);
+    ATH_CHECK(accdigitsHdl.record(std::make_unique<LArAccumulatedDigitContainer>()));
+    accdigits=accdigitsHdl.ptr();
+    accdigits->reserve(200000); //Enough space for the full calo
+  }
+
+  if (m_doAccCalibDigits) {
+    SG::WriteHandle<LArAccumulatedCalibDigitContainer> caccdigitsHdl(m_accCalibDigitKey,ctx);
+    ATH_CHECK(caccdigitsHdl.record(std::make_unique<LArAccumulatedCalibDigitContainer>()));
+    caccdigits=caccdigitsHdl.ptr();
+    caccdigits->reserve(200000); //Enough space for the full calo
+  }
+
+  if (m_doFebHeaders) {
+    SG::WriteHandle<LArFebHeaderContainer> febHeadersHdl(m_febHeaderKey,ctx);
+    ATH_CHECK(febHeadersHdl.record(std::make_unique<LArFebHeaderContainer>()));
+    febHeaders=febHeadersHdl.ptr();
+    febHeaders->reserve(1524); //Total number of LAr Front End Boards
+  }
+
+  //Get full events and filter out LAr ROBs
+  const RawEvent* fullEvent=m_robDataProviderSvc->getEvent(ctx);
+  std::map<eformat::SubDetectorGroup, std::vector<const uint32_t*> > rawEventTOC;
+  eformat::helper::build_toc(*fullEvent, rawEventTOC);
+  auto larRobs=rawEventTOC.find(eformat::LAR);
+  if (larRobs==rawEventTOC.end()) {
+     ATH_MSG_DEBUG("No LAr data found in this event.");
+     return StatusCode::SUCCESS;
+  } 
+  
+  
+  std::unique_ptr<LArRodBlockStructure> rodBlock;
+  uint16_t rodMinorVersion=0x0;
+  uint32_t rodBlockType=0x0;
+
+
+  for (const uint32_t* robPtr : larRobs->second) {
+    OFFLINE_FRAGMENTS_NAMESPACE::ROBFragment rob(robPtr);
+    ATH_MSG_VERBOSE("Decoding ROB fragment 0x" << std::hex << rob.rob_source_id () << " with " << std::dec << rob.rod_fragment_size_word() << "ROB words");
+
+    if (rob.rod_fragment_size_word() <3) {
+      ATH_MSG_ERROR("Encountered corrupt ROD fragment, less than 3 words!");
+      if (m_failOnCorruption) {
+	return StatusCode::FAILURE;
+      }else 
+	continue;
+    }
+    
+     eformat::helper::Version ver(rob.rod_version());
+    //(re-)init rodBlock only once per event, or if (very unlikely, if not impossible) some FEBs have a different firmware
+    if (rodBlock==nullptr || rodMinorVersion !=ver.minor_version() || rodBlockType!=(rob.rod_detev_type()&0xff)) {
+      rodMinorVersion=ver.minor_version();
+      rodBlockType=rob.rod_detev_type()&0xff;
+      ATH_MSG_VERBOSE("Found version " << rodMinorVersion <<  " of Rod Block Type  " <<  rodBlockType);
+      if (rodBlockType==10) { // Accumulated calib. digits
+	  rodBlock.reset(new LArRodBlockAccumulatedV3);
+      }//end of rodBlockType ==10
+      else if (rodBlockType==7 || rodBlockType==2) { // Calib. digits
+         if(rodMinorVersion>=6) {
+            rodBlock.reset(new LArRodBlockCalibrationV3);
+         } else {
+            ATH_MSG_ERROR("Found unsupported ROD Block version " << rodMinorVersion
+                        << " of ROD block type " << rodBlockType);
+            return m_failOnCorruption ? StatusCode::FAILURE : StatusCode::SUCCESS;
+         }
+      } else {
+	ATH_MSG_ERROR("Found unsupported Rod block type " << rodBlockType);
+	return m_failOnCorruption ? StatusCode::FAILURE : StatusCode::SUCCESS;
+      }
+    }//End if need to re-init RodBlock
+
+    const uint32_t* pData=rob.rod_data();
+    const uint32_t  nData=rob.rod_ndata();
+    if (!rodBlock->setFragment(pData,nData)) {
+      ATH_MSG_ERROR("Failed to assign fragment pointer to LArRodBlockStructure");
+      return StatusCode::FAILURE;
+    }
+
+    if(m_verifyChecksum) {
+      const uint32_t onsum  = rodBlock->onlineCheckSum();
+      const uint32_t offsum = rodBlock->offlineCheckSum();
+      if(onsum!=offsum) {
+	ATH_MSG_ERROR("Checksum error:");
+	ATH_MSG_ERROR("online checksum  = 0x" << MSG::hex << onsum);
+	ATH_MSG_ERROR("offline checksum = 0x" << MSG::hex << offsum << MSG::dec);
+	if (m_failOnCorruption) 
+	  return StatusCode::FAILURE;
+	else
+	  continue; //Jump to the next ROD-block
+      }
+    }
+
+    //Loop over FEBs in ROD:
+    do {
+      HWIdentifier fId(Identifier32(rodBlock->getFEBID()));
+      if (!m_onlineId->isValidId(fId)) {
+	ATH_MSG_ERROR("Invalid FEB identifier 0x" << std::hex << fId.get_identifier32().get_compact());
+	if (m_failOnCorruption) 
+	  return StatusCode::FAILURE;
+	else
+	  continue;
+      }
+      const int NthisFebChannel=m_onlineId->channelInSlotMax(fId);
+
+      //Decode LArCalibDigits (if requested)
+      if (m_doCalibDigits) {
+	uint32_t gain;
+        uint16_t dac;
+        uint16_t delay;
+        bool ispulsed;
+	int fcNb;
+	std::vector<short> samples;
+	while (rodBlock->getNextRawData(fcNb,samples,gain)) {
+	  if (fcNb>=NthisFebChannel)
+	    continue;
+	  if (samples.size()==0) continue; // Ignore missing cells
+          dac = rodBlock->getDAC();
+          delay = rodBlock->getDelay();
+          ispulsed = rodBlock->getPulsed(fcNb);
+	  HWIdentifier cId = m_onlineId->channel_Id(fId,fcNb);
+	  cdigits->emplace_back(new LArCalibDigit(cId, (CaloGain::CaloGain)gain, std::move(samples), dac, delay, ispulsed));
+	  samples.clear();
+	}//end getNextRawData loop
+      }//end if m_doCalibDigits
+
+      //Decode LArAccumulatedDigits (if requested)
+      if (m_doAccDigits && rodBlockType==10) {
+	uint32_t gain;
+	int fcNb;
+	std::vector<uint32_t> samplesSum;
+	std::vector<uint32_t> samples2Sum;
+        uint32_t nTrigger;
+	while (rodBlock->getNextAccumulatedDigit(fcNb,samplesSum,samples2Sum,gain)) {
+	  if (fcNb>=NthisFebChannel)
+	    continue;
+	  if (samplesSum.size()==0 || samples2Sum.size()==0) continue; // Ignore missing cells
+          nTrigger = rodBlock->getNTrigger();
+	  HWIdentifier cId = m_onlineId->channel_Id(fId,fcNb);
+	  accdigits->emplace_back(new LArAccumulatedDigit(cId, (CaloGain::CaloGain)gain, std::move(samplesSum), std::move(samples2Sum), nTrigger));
+	  samplesSum.clear();
+	  samples2Sum.clear();
+	}//end getNext loop
+      }//end if m_doAccDigits
+
+      //Decode LArAccumulatedCalibDigits (if requested)
+      if (m_doAccCalibDigits) {
+	uint32_t gain;
+	uint32_t itmp;
+        uint16_t dac;
+        uint16_t delay;
+        uint16_t nstep;
+        uint16_t istep;
+        bool ispulsed;
+	int fcNb;
+	std::vector<uint32_t> samplesSum;
+	std::vector<uint32_t> samples2Sum;
+        uint32_t nTrigger;
+	while (rodBlock->getNextAccumulatedCalibDigit(fcNb,samplesSum,samples2Sum,itmp,gain)) {
+	  if (fcNb>=NthisFebChannel)
+	    continue;
+	  if (samplesSum.size()==0 || samples2Sum.size()==0) continue; // Ignore missing cells
+          dac = rodBlock->getDAC();
+          delay = rodBlock->getDelay();
+          ispulsed = rodBlock->getPulsed(fcNb);
+          nTrigger = rodBlock->getNTrigger();
+          nstep = rodBlock->getNStep();
+          istep = rodBlock->getStepIndex();
+	  HWIdentifier cId = m_onlineId->channel_Id(fId,fcNb);
+	  caccdigits->emplace_back(new LArAccumulatedCalibDigit(cId, (CaloGain::CaloGain)gain, std::move(samplesSum), std::move(samples2Sum), nTrigger, dac, delay, ispulsed, nstep, istep));
+	  samplesSum.clear();
+	  samples2Sum.clear();
+	}//end getNext loop
+      }//end if m_doAccCalibDigits
+
+      //Decode FebHeaders (if requested)
+      if (m_doFebHeaders) {
+	std::unique_ptr<LArFebHeader> larFebHeader(new LArFebHeader(fId));
+	larFebHeader->SetFormatVersion(rob.rod_version());
+	larFebHeader->SetSourceId(rob.rod_source_id());
+	larFebHeader->SetRunNumber(rob.rod_run_no());
+	larFebHeader->SetELVL1Id(rob.rod_lvl1_id());
+	larFebHeader->SetBCId(rob.rod_bc_id());
+	larFebHeader->SetLVL1TigType(rob.rod_lvl1_trigger_type());
+	larFebHeader->SetDetEventType(rob.rod_detev_type());
+  
+	//set DSP data
+	const unsigned nsample=rodBlock->getNumberOfSamples();
+	larFebHeader->SetRodStatus(rodBlock->getStatus());
+	larFebHeader->SetDspCodeVersion(rodBlock->getDspCodeVersion()); 
+	larFebHeader->SetDspEventCounter(rodBlock->getDspEventCounter()); 
+	larFebHeader->SetRodResults1Size(rodBlock->getResults1Size()); 
+	larFebHeader->SetRodResults2Size(rodBlock->getResults2Size()); 
+	larFebHeader->SetRodRawDataSize(rodBlock->getRawDataSize()); 
+	larFebHeader->SetNbSweetCells1(rodBlock->getNbSweetCells1()); 
+	larFebHeader->SetNbSweetCells2(rodBlock->getNbSweetCells2()); 
+	larFebHeader->SetNbSamples(nsample); 
+	larFebHeader->SetOnlineChecksum(rodBlock->onlineCheckSum());
+	larFebHeader->SetOfflineChecksum(rodBlock->offlineCheckSum());
+
+	if(!rodBlock->hasControlWords()) {
+	  larFebHeader->SetFebELVL1Id(rob.rod_lvl1_id());
+	  larFebHeader->SetFebBCId(rob.rod_bc_id());
+	} else {
+	  const uint16_t evtid = rodBlock->getCtrl1(0) & 0x1f;
+	  const uint16_t bcid  = rodBlock->getCtrl2(0) & 0x1fff;
+	  larFebHeader->SetFebELVL1Id(evtid);
+	  larFebHeader->SetFebBCId(bcid);
+	  for(int iadc=0;iadc<16;iadc++) {
+	    larFebHeader->SetFebCtrl1(rodBlock->getCtrl1(iadc));
+	    larFebHeader->SetFebCtrl2(rodBlock->getCtrl2(iadc));
+	    larFebHeader->SetFebCtrl3(rodBlock->getCtrl3(iadc));
+	  }
+	  for(unsigned int i = 0; i<nsample; i++ ) {
+	    larFebHeader->SetFebSCA(rodBlock->getRadd(0,i) & 0xff);
+	  }
+	}//end else no control words
+	febHeaders->push_back(std::move(larFebHeader));
+      }//end if m_doFebHeaders
+
+    }while (rodBlock->nextFEB()); //Get NextFeb
+  } //end loop over ROBs
+  return StatusCode::SUCCESS;
+}
diff --git a/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.h b/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c5b1121fb787b18632208eb58c791b34c187719
--- /dev/null
+++ b/LArCalorimeter/LArCnv/LArByteStream/src/LArRawCalibDataReadingAlg.h
@@ -0,0 +1,55 @@
+/*
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+*/
+
+#ifndef LARBYTESTREAM_LARRAWCALIBDATAREADINGALG_H
+#define LARBYTESTREAM_LARRAWCALIBDATAREADINGALG_H
+
+#include "AthenaBaseComps/AthReentrantAlgorithm.h"
+#include "StoreGate/WriteHandle.h"
+#include "StoreGate/ReadCondHandle.h"
+#include "LArCabling/LArOnOffIdMapping.h"
+#include "GaudiKernel/ServiceHandle.h"
+
+
+//Event classes
+class LArCalibDigitContainer;
+class LArAccumulatedDigitContainer;
+class LArAccumulatedCalibDigitContainer;
+class LArFebHeaderContainer;
+class LArOnlineID;
+class IROBDataProviderSvc;
+
+class LArRawCalibDataReadingAlg : public  AthReentrantAlgorithm {
+ public:
+  LArRawCalibDataReadingAlg(const std::string& name, ISvcLocator* pSvcLocator);
+
+  StatusCode initialize() override;
+  StatusCode execute(const EventContext& ctx) const override;
+
+ private:
+  //Event output:
+  SG::WriteHandleKey<LArCalibDigitContainer> m_calibDigitKey{this,"LArCalibDigitKey",""};
+  SG::WriteHandleKey<LArAccumulatedDigitContainer> m_accDigitKey{this,"LArAccDigitKey",""};
+  SG::WriteHandleKey<LArAccumulatedCalibDigitContainer> m_accCalibDigitKey{this,"LArAccCalibDigitKey",""};
+  SG::WriteHandleKey<LArFebHeaderContainer> m_febHeaderKey{this,"LArFebHeaderKey",""};
+    
+  //Service providing the input data
+  ServiceHandle<IROBDataProviderSvc> m_robDataProviderSvc{this,"ROBDataProviderSvc","ROBDataProviderSvc"};
+  
+  //Other properties:
+  BooleanProperty m_verifyChecksum{this,"VerifyChecksum",true,"Calculate and compare checksums to detect data transmission errors"}; 
+  BooleanProperty m_failOnCorruption{this,"FailOnCorruption",true,"Return FAILURE if data corruption is found"};
+
+  //Identifier helper
+  const LArOnlineID* m_onlineId=nullptr;
+
+  //Switches set in initialize() based on the SG keys of the output objects
+  bool m_doCalibDigits=false;
+  bool m_doAccDigits=false;
+  bool m_doAccCalibDigits=false;
+  bool m_doFebHeaders=false;
+ 
+};
+
+#endif
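
Since initialize() rejects every combination of the digit keys, a job must leave exactly two of the three empty. A sketch configuring the component directly through CompFactory, using only the property names declared in the header above; the chosen values are illustrative:

    # Sketch: direct configuration, bypassing LArRawCalibDataReadingCfg.
    # Exactly one of the three digit keys may be non-empty (see initialize()).
    from AthenaConfiguration.ComponentFactory import CompFactory

    reader = CompFactory.LArRawCalibDataReadingAlg(
        "LArRawCalibDataReadingAlg",
        LArAccDigitKey="HIGH",           # accumulated digits, HIGH gain
        LArCalibDigitKey="",             # other modes stay disabled
        LArAccCalibDigitKey="",
        LArFebHeaderKey="LArFebHeader",
        FailOnCorruption=False,          # skip corrupt ROD fragments instead of aborting
    )
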
diff --git a/LArCalorimeter/LArCnv/LArByteStream/src/components/LArByteStream_entries.cxx b/LArCalorimeter/LArCnv/LArByteStream/src/components/LArByteStream_entries.cxx
index a60b8cb93d279e55322e9ea24f799b6fa5ce4cc2..29f499259298831dc081b3e056b4c94b27c7a46c 100644
--- a/LArCalorimeter/LArCnv/LArByteStream/src/components/LArByteStream_entries.cxx
+++ b/LArCalorimeter/LArCnv/LArByteStream/src/components/LArByteStream_entries.cxx
@@ -9,6 +9,7 @@
 #include "LArByteStream/LArABBADecoder.h"
 #include "ByteStreamCnvSvcBase/CollectionByteStreamCnv.h"
 #include "../LArRawDataReadingAlg.h"
+#include "../LArRawCalibDataReadingAlg.h"
 //#include "LArByteStream/LArRawChannelCollByteStreamTool.h"
 
 // Containers 
@@ -16,6 +17,7 @@ DECLARE_COMPONENT( LArRawDataContByteStreamTool )
 DECLARE_COMPONENT( LArRodDecoder )
 DECLARE_COMPONENT( LArABBADecoder )
 DECLARE_COMPONENT( LArRawDataReadingAlg )
+DECLARE_COMPONENT( LArRawCalibDataReadingAlg )
 
 DECLARE_CONVERTER( LArRawChannelContByteStreamCnv )
 DECLARE_CONVERTER( LArDigitContByteStreamCnv )
diff --git a/LArCalorimeter/LArIdentifier/LArIdentifier/LArOnlineID_Base.h b/LArCalorimeter/LArIdentifier/LArIdentifier/LArOnlineID_Base.h
index bdaa6fef12e0781ce2450012dae2c580199854bc..ec14f1c073b987039f66b8c3cabfee2ba8da84bb 100755
--- a/LArCalorimeter/LArIdentifier/LArIdentifier/LArOnlineID_Base.h
+++ b/LArCalorimeter/LArIdentifier/LArIdentifier/LArOnlineID_Base.h
@@ -1,5 +1,5 @@
 /*
-  Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 */
 
 #ifndef LARONLINEID_BASE_H
@@ -13,6 +13,7 @@
 #include "IdDict/IdDictFieldImplementation.h"
 #include "IdDict/IdDictDefs.h"
 #include "LArIdentifier/LArOnlID_Exception.h"
+#include "boost/range/iterator_range.hpp"
 #include "string.h"
 #include <vector>
 #include <algorithm>
@@ -106,6 +107,11 @@ class LArOnlineID_Base : public AtlasDetectorID
 
   typedef Identifier::size_type  size_type;
   
+  /** Type for iterators over identifiers. */
+  typedef std::vector<HWIdentifier>::const_iterator id_iterator;
+  /** Type for range over identifiers. */
+  typedef boost::iterator_range<id_iterator> id_range;
+
   /** 
    * @brief Default constructor
    */
@@ -135,8 +141,10 @@ class LArOnlineID_Base : public AtlasDetectorID
   /**
    * @brief Return an iterator pointing to Feedthrough identifiers collection
    */
-  std::vector<HWIdentifier>::const_iterator feedthrough_begin(void) const;
-  std::vector<HWIdentifier>::const_iterator feedthrough_end  (void) const;
+  id_iterator feedthrough_begin() const;
+  id_iterator feedthrough_end  () const;
+  id_range    feedthrough_range() const;
+
 
 
   /**
@@ -164,8 +172,9 @@ class LArOnlineID_Base : public AtlasDetectorID
   /**
    * @brief Returns an iterator pointing to a feb identifier collection
    */
-  std::vector<HWIdentifier>::const_iterator feb_begin(void) const;
-  std::vector<HWIdentifier>::const_iterator feb_end  (void) const;
+  id_iterator feb_begin() const;
+  id_iterator feb_end  () const;
+  id_range    feb_range() const;
 
 
 
@@ -196,8 +205,9 @@ class LArOnlineID_Base : public AtlasDetectorID
   /**
    * @brief Returns an iterator pointing to a channel identifier collection
    */
-  std::vector<HWIdentifier>::const_iterator channel_begin(void) const;
-  std::vector<HWIdentifier>::const_iterator channel_end  (void) const;
+  id_iterator channel_begin() const;
+  id_iterator channel_end  () const;
+  id_range    channel_range() const;
   
 
   /**
@@ -258,8 +268,9 @@ class LArOnlineID_Base : public AtlasDetectorID
   /**
    * @brief Return an iterator pointing to a calibFeb identifier 's collection
    */
-  std::vector<HWIdentifier>::const_iterator calib_module_begin(void) const;
-  std::vector<HWIdentifier>::const_iterator calib_module_end  (void) const;
+  id_iterator calib_module_begin() const;
+  id_iterator calib_module_end  () const;
+  id_range    calib_module_range() const;
 
 
   /**
@@ -277,8 +288,9 @@ class LArOnlineID_Base : public AtlasDetectorID
   /**
    * @brief Return an iterator pointing to a collection of calibration channel identifiers
    */
-  std::vector<HWIdentifier>::const_iterator calib_channel_begin(void) const;
-  std::vector<HWIdentifier>::const_iterator calib_channel_end  (void) const;
+  id_iterator calib_channel_begin() const;
+  id_iterator calib_channel_end  () const;
+  id_range    calib_channel_range() const;
 
 
   /**
@@ -680,27 +692,37 @@ inline LArOnlineID_Base::size_type LArOnlineID_Base::calibChannelHashMax (void)
 
 
 /* Calib */
-inline std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::calib_module_begin(void) const
+inline LArOnlineID_Base::id_iterator LArOnlineID_Base::calib_module_begin() const
 /*====================================================================*/
 {
   return(m_calib_module_vec.begin());
 }
-inline std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::calib_module_end(void) const
+inline LArOnlineID_Base::id_iterator LArOnlineID_Base::calib_module_end() const
 /*==================================================================*/
 {
   return(m_calib_module_vec.end());
 }
+inline LArOnlineID_Base::id_range LArOnlineID_Base::calib_module_range() const
+/*==================================================================*/
+{
+  return id_range (calib_module_begin(), calib_module_end());
+}
 
-inline std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::calib_channel_begin(void) const
+inline LArOnlineID_Base::id_iterator LArOnlineID_Base::calib_channel_begin() const
 /*======================================================================*/
 {
   return(m_calib_channel_vec.begin());
 }
-inline std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::calib_channel_end(void) const
+inline LArOnlineID_Base::id_iterator LArOnlineID_Base::calib_channel_end() const
 /*======================================================================*/
 {
   return(m_calib_channel_vec.end());
 }
+inline LArOnlineID_Base::id_range LArOnlineID_Base::calib_channel_range() const
+/*======================================================================*/
+{
+  return id_range (calib_channel_begin(), calib_channel_end());
+}
 
 
 
diff --git a/LArCalorimeter/LArIdentifier/src/LArOnlineID_Base.cxx b/LArCalorimeter/LArIdentifier/src/LArOnlineID_Base.cxx
index 04c167806e48def774a105280745efea6de82371..ed2c18417146390e8a45026d7b77eea154d36a09 100755
--- a/LArCalorimeter/LArIdentifier/src/LArOnlineID_Base.cxx
+++ b/LArCalorimeter/LArIdentifier/src/LArOnlineID_Base.cxx
@@ -1,5 +1,5 @@
 /*
-  Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 */
 
 #include "LArIdentifier/LArOnlineID_Base.h"
@@ -1466,18 +1466,24 @@ HWIdentifier LArOnlineID_Base::feedthrough_Id(const HWIdentifier Id) const
   return(result);
 }
 
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::feedthrough_begin(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::feedthrough_begin() const
 /*====================================================================*/
 {
   return(m_feedthrough_vec.begin());
 }
 
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::feedthrough_end(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::feedthrough_end() const
 /*==================================================================*/
 {
   return(m_feedthrough_vec.end());
 }
 
+LArOnlineID_Base::id_range LArOnlineID_Base::feedthrough_range() const
+/*==================================================================*/
+{
+  return id_range (feedthrough_begin(), feedthrough_end());
+}
+
 
 /* FEB id */
 /*========*/
@@ -1977,27 +1983,37 @@ LArOnlineID_Base::size_type LArOnlineID_Base::channelHashMax (void) const
 
 
 
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::feb_begin(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::feb_begin() const
 /*====================================================================*/
 {
   return(m_feb_vec.begin());
 }
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::feb_end(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::feb_end() const
 /*==================================================================*/
 {
   return(m_feb_vec.end());
 }
+LArOnlineID_Base::id_range LArOnlineID_Base::feb_range() const
+/*==================================================================*/
+{
+  return id_range (feb_begin(), feb_end());
+}
 
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::channel_begin(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::channel_begin() const
 /*======================================================================*/
 {
   return(m_channel_vec.begin());
 }
-std::vector<HWIdentifier>::const_iterator LArOnlineID_Base::channel_end(void) const
+LArOnlineID_Base::id_iterator LArOnlineID_Base::channel_end() const
 /*======================================================================*/
 {
   return(m_channel_vec.end());
 }
+LArOnlineID_Base::id_range LArOnlineID_Base::channel_range() const
+/*======================================================================*/
+{
+  return id_range (channel_begin(), channel_end());
+}
 
 int LArOnlineID_Base::barrel_ec(const HWIdentifier id)const
 /*=========================================================*/
diff --git a/LArCalorimeter/LArMonitoring/python/LArCalibMonAlg.py b/LArCalorimeter/LArMonitoring/python/LArCalibMonAlg.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb8176002ba54d329f440ad98e38ae50780d0dba
--- /dev/null
+++ b/LArCalorimeter/LArMonitoring/python/LArCalibMonAlg.py
@@ -0,0 +1,88 @@
+#
+#  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+#
+
+def LArCalibMonConfig(inputFlags,gain="",doAccDigit=False,doCalibDigit=False,doAccCalibDigit=False):
+
+    from AthenaMonitoring import AthMonitorCfgHelper
+    helper = AthMonitorCfgHelper(inputFlags,'LArCalibMonCfg')
+
+    from LArMonitoring.GlobalVariables import lArDQGlobals
+
+    from AthenaConfiguration.ComponentFactory import CompFactory
+    larCalibMonAlg = helper.addAlgorithm(CompFactory.LArCalibPedMonAlg,'larCalibMonAlg')
+    if gain != "":
+       if doAccDigit:
+          larCalibMonAlg.LArAccumulatedDigitContainerKey=gain
+       elif doAccCalibDigit:
+          larCalibMonAlg.LArAccumulatedCalibDigitContainerKey=gain
+       elif doCalibDigit:
+          larCalibMonAlg.LArCalibDigitContainerKey=gain
+
+    GroupName="CalibMonGroup"
+
+    larCalibMonAlg.LArPedGroupName=GroupName
+
+    Group = helper.addGroup(
+        larCalibMonAlg,
+        GroupName,
+        '/LAr/'+GroupName+'/'
+    )
+
+
+    #Summary histos
+    summary_hist_path='Summary/'
+    
+
+    Group.defineHistogram('nbChan;NbOfReadoutChannelsGlobal', 
+                                  title='# of readout channels',
+                                  type='TH1I',
+                                  path=summary_hist_path,
+                                  xbins=lArDQGlobals.N_FEB*lArDQGlobals.FEB_N_channels+5, 
+                                  xmin=-0.5, xmax=lArDQGlobals.N_FEB*lArDQGlobals.FEB_N_channels+4.5)
+
+    return helper.result()
+
+    
+
+if __name__=='__main__':
+
+   from AthenaConfiguration.AllConfigFlags import ConfigFlags
+   from AthenaCommon.Logging import log
+   from AthenaCommon.Constants import DEBUG
+   from AthenaCommon.Configurable import Configurable
+   Configurable.configurableRun3Behavior=1
+   log.setLevel(DEBUG)
+
+
+   from LArMonitoring.LArMonConfigFlags import createLArMonConfigFlags
+   createLArMonConfigFlags()
+
+   ConfigFlags.Input.Files = ["/eos/atlas/atlastier0/rucio/data20_calib/calibration_LArElec-Delay-32s-Medium-Em/00374740/data20_calib.00374740.calibration_LArElec-Delay-32s-Medium-Em.daq.RAW/data20_calib.00374740.calibration_LArElec-Delay-32s-Medium-Em.daq.RAW._lb0000._SFO-2._0001.data"]
+   ConfigFlags.Output.HISTFileName = 'LArCalibMonOutput.root'
+   ConfigFlags.DQ.enableLumiAccess = False
+   ConfigFlags.DQ.useTrigger = False
+   ConfigFlags.Beam.Type = 'collisions'
+   ConfigFlags.DQ.DataType = 'collisions'
+   ConfigFlags.AtlasVersion = 'ATLAS-R2-2016-01-00-01'
+   ConfigFlags.Detector.GeometryCSC=False
+   ConfigFlags.Detector.GeometrysTGC=False
+   ConfigFlags.Detector.GeometryMM=False
+   ConfigFlags.lock()
+
+   from AthenaConfiguration.MainServicesConfig import MainServicesSerialCfg
+   cfg = MainServicesSerialCfg()
+
+   from LArByteStream.LArRawCalibDataReadingConfig import LArRawCalibDataReadingCfg
+   cfg.merge(LArRawCalibDataReadingCfg(ConfigFlags,gain="MEDIUM",doAccCalibDigit=True))
+
+   cfg.merge(LArCalibMonConfig(ConfigFlags, gain="MEDIUM",doAccCalibDigit=True))
+
+   cfg.printConfig()
+
+   ConfigFlags.dump()
+   f=open("LArCalibPedMon.pkl","w")
+   cfg.store(f)
+   f.close()
+
+   cfg.run(500,OutputLevel=DEBUG)
diff --git a/LArCalorimeter/LArMonitoring/python/LArCalibPedMonAlg.py b/LArCalorimeter/LArMonitoring/python/LArCalibPedMonAlg.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed2b1451f3f1a2dded1941fc169fccd358cb62e6
--- /dev/null
+++ b/LArCalorimeter/LArMonitoring/python/LArCalibPedMonAlg.py
@@ -0,0 +1,88 @@
+#
+#  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+#
+
+def LArCalibPedMonConfig(inputFlags,gain="",doAccDigit=False,doCalibDigit=False,doAccCalibDigit=False):
+
+    from AthenaMonitoring import AthMonitorCfgHelper
+    helper = AthMonitorCfgHelper(inputFlags,'LArCalibPedMonCfg')
+
+    from LArMonitoring.GlobalVariables import lArDQGlobals
+
+    from AthenaConfiguration.ComponentFactory import CompFactory
+    larPedMonAlg = helper.addAlgorithm(CompFactory.LArCalibPedMonAlg,'larCalibPedMonAlg')
+    if gain != "":
+       if doAccDigit:
+          larPedMonAlg.LArAccumulatedDigitContainerKey=gain
+       elif doAccCalibDigit:
+          larPedMonAlg.LArAccumulatedCalibDigitContainerKey=gain
+       elif doCalibDigit:
+          larPedMonAlg.LArCalibDigitContainerKey=gain
+
+    GroupName="PedMonGroup"
+
+    larPedMonAlg.LArPedGroupName=GroupName
+
+    Group = helper.addGroup(
+        larPedMonAlg,
+        GroupName,
+        '/LAr/'+GroupName+'/'
+    )
+
+
+    #Summary histos
+    summary_hist_path='Summary/'
+    
+
+    Group.defineHistogram('nbChan;NbOfReadoutChannelsGlobal', 
+                                  title='# of readout channels',
+                                  type='TH1I',
+                                  path=summary_hist_path,
+                                  xbins=lArDQGlobals.N_FEB*lArDQGlobals.FEB_N_channels+5, 
+                                  xmin=-0.5, xmax=lArDQGlobals.N_FEB*lArDQGlobals.FEB_N_channels+4.5)
+
+    return helper.result()
+
+    
+
+if __name__=='__main__':
+
+   from AthenaConfiguration.AllConfigFlags import ConfigFlags
+   from AthenaCommon.Logging import log
+   from AthenaCommon.Constants import DEBUG
+   from AthenaCommon.Configurable import Configurable
+   Configurable.configurableRun3Behavior=1
+   log.setLevel(DEBUG)
+
+
+   from LArMonitoring.LArMonConfigFlags import createLArMonConfigFlags
+   createLArMonConfigFlags()
+
+   ConfigFlags.Input.Files = ["/eos/atlas/atlastier0/rucio/data20_calib/calibration_LArElec-Pedestal-32s-High-All/00374735/data20_calib.00374735.calibration_LArElec-Pedestal-32s-High-All.daq.RAW/data20_calib.00374735.calibration_LArElec-Pedestal-32s-High-All.daq.RAW._lb0000._SFO-3._0001.data"]
+   ConfigFlags.Output.HISTFileName = 'LArCalibPedMonOutput.root'
+   ConfigFlags.DQ.enableLumiAccess = False
+   ConfigFlags.DQ.useTrigger = False
+   ConfigFlags.Beam.Type = 'collisions'
+   ConfigFlags.DQ.DataType = 'collisions'
+   ConfigFlags.AtlasVersion = 'ATLAS-R2-2016-01-00-01'
+   ConfigFlags.Detector.GeometryCSC=False
+   ConfigFlags.Detector.GeometrysTGC=False
+   ConfigFlags.Detector.GeometryMM=False
+   ConfigFlags.lock()
+
+   from AthenaConfiguration.MainServicesConfig import MainServicesSerialCfg
+   cfg = MainServicesSerialCfg()
+
+   from LArByteStream.LArRawCalibDataReadingConfig import LArRawCalibDataReadingCfg
+   cfg.merge(LArRawCalibDataReadingCfg(ConfigFlags,gain="HIGH",doAccDigit=True))
+
+   cfg.merge(LArCalibPedMonConfig(ConfigFlags, gain="HIGH",doAccDigit=True))
+
+   cfg.printConfig()
+
+   ConfigFlags.dump()
+   f=open("LArCalibPedMon.pkl","w")
+   cfg.store(f)
+   f.close()
+
+   cfg.run(500,OutputLevel=DEBUG)
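
The single summary histogram counts distinct readout channels, with its binning derived from the FEB constants so that each possible channel count gets a unit-width bin. A sketch of that arithmetic with placeholder values: the real constants live in lArDQGlobals, 1524 FEBs matches the reserve() comment in the reader above, and the per-FEB channel count is an assumption:

    # Sketch of the defineHistogram binning arithmetic; values are placeholders.
    N_FEB = 1524          # total LAr front-end boards (cf. reserve(1524) above)
    FEB_N_channels = 128  # channels per FEB -- assumption, see lArDQGlobals

    xbins = N_FEB * FEB_N_channels + 5
    xmin, xmax = -0.5, N_FEB * FEB_N_channels + 4.5
    assert xmax - xmin == xbins  # one unit-width bin per possible channel count
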
diff --git a/LArCalorimeter/LArMonitoring/python/LArFEBMonAlg.py b/LArCalorimeter/LArMonitoring/python/LArFEBMonAlg.py
index 882010a57f85ce4c0b9e9f36390908a94ad51d74..233d820cc0f67de29ea8f914a25f132cab94e666 100644
--- a/LArCalorimeter/LArMonitoring/python/LArFEBMonAlg.py
+++ b/LArCalorimeter/LArMonitoring/python/LArFEBMonAlg.py
@@ -22,6 +22,11 @@ def LArFEBMonConfig(inputFlags, cellDebug=False, dspDebug=False):
     larFEBMonAlg.SubDetNames=lArDQGlobals.SubDet
     larFEBMonAlg.Streams=lArDQGlobals.defaultStreamNames
 
+    # adding LArFebErrorSummary algo
+    from LArROD.LArFebErrorSummaryMakerConfig import LArFebErrorSummaryMakerCfg
+    acc = LArFebErrorSummaryMakerCfg(inputFlags)
+    helper.resobj.merge(acc)
+
     if "COMP200" not in inputFlags.IOVDb.DatabaseInstance:
        iovDbSvc=helper.resobj.getService("IOVDbSvc")
        condLoader=helper.resobj.getCondAlgo("CondInputLoader")
@@ -38,14 +43,6 @@ def LArFEBMonConfig(inputFlags, cellDebug=False, dspDebug=False):
        helper.resobj.addFolderList(inputFlags,[(fld,db,obj)])
        larFEBMonAlg.keyDSPThresholds="LArDSPThresholds"
 
-    #from AthenaCommon.Constants import VERBOSE
-    #larFEBMonAlg.OutputLevel=VERBOSE
-
-    # adding LArFebErrorSummary algo
-    from LArROD.LArFebErrorSummaryMakerConfig import LArFebErrorSummaryMakerCfg
-    acc = LArFebErrorSummaryMakerCfg(inputFlags)
-    helper.resobj.merge(acc)
-
     Group = helper.addGroup(
         larFEBMonAlg,
         GroupName,
@@ -361,7 +358,7 @@ if __name__=='__main__':
 
    from AthenaConfiguration.AllConfigFlags import ConfigFlags
    from AthenaCommon.Logging import log
-   from AthenaCommon.Constants import DEBUG
+   from AthenaCommon.Constants import DEBUG,WARNING
    from AthenaCommon.Configurable import Configurable
    Configurable.configurableRun3Behavior=1
    log.setLevel(DEBUG)
@@ -370,18 +367,20 @@ if __name__=='__main__':
    from LArMonitoring.LArMonConfigFlags import createLArMonConfigFlags
    createLArMonConfigFlags()
 
-   from AthenaConfiguration.TestDefaults import defaultTestFiles
-   ConfigFlags.Input.Files = defaultTestFiles.RAW
+   ConfigFlags.Input.Files = ["/cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/Tier0ChainTests/data17_13TeV.00330470.physics_Main.daq.RAW._lb0310._SFO-1._0001.data",]
 
    ConfigFlags.Output.HISTFileName = 'LArFEBMonOutput.root'
-   ConfigFlags.DQ.enableLumiAccess = True
-   ConfigFlags.DQ.useTrigger = True
+   ConfigFlags.DQ.enableLumiAccess = False
+   ConfigFlags.DQ.useTrigger = False
    ConfigFlags.Beam.Type = 'collisions'
    ConfigFlags.lock()
 
+   from AthenaConfiguration.MainServicesConfig import MainServicesSerialCfg
+   cfg = MainServicesSerialCfg()
+
 
    from CaloRec.CaloRecoConfig import CaloRecoCfg
-   cfg=CaloRecoCfg(ConfigFlags)
+   cfg.merge(CaloRecoCfg(ConfigFlags))
 
    #from CaloD3PDMaker.CaloD3PDConfig import CaloD3PDCfg,CaloD3PDAlg
    #cfg.merge(CaloD3PDCfg(ConfigFlags, filename=ConfigFlags.Output.HISTFileName, streamname='CombinedMonitoring'))
@@ -396,4 +395,4 @@ if __name__=='__main__':
    cfg.store(f)
    f.close()
 
-   #cfg.run(100,OutputLevel=WARNING)
+   cfg.run(10,OutputLevel=WARNING)
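
The standalone driver now starts from MainServicesSerialCfg and merges CaloRecoCfg into it, rather than using the reco accumulator itself as the top-level job; merging keeps the service setup authoritative. A reduced sketch of that assembly order, with ConfigFlags assumed already locked:

    # Sketch: services first, then merge domain configurations into them.
    from AthenaConfiguration.MainServicesConfig import MainServicesSerialCfg
    from CaloRec.CaloRecoConfig import CaloRecoCfg

    cfg = MainServicesSerialCfg()
    cfg.merge(CaloRecoCfg(ConfigFlags))      # reco joins the existing job
    cfg.merge(LArFEBMonConfig(ConfigFlags))  # monitoring on top
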
diff --git a/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.cxx b/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..757c2648d577ad4aae998f85c7f6c5ceea3b77dc
--- /dev/null
+++ b/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.cxx
@@ -0,0 +1,117 @@
+/*
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+*/
+
+#include "LArCalibPedMonAlg.h"
+
+
+/*---------------------------------------------------------*/
+LArCalibPedMonAlg::LArCalibPedMonAlg(const std::string& name,ISvcLocator* pSvcLocator )
+  : AthMonitorAlgorithm(name,pSvcLocator)
+{}
+
+/*---------------------------------------------------------*/
+LArCalibPedMonAlg::~LArCalibPedMonAlg()
+{}
+
+/*---------------------------------------------------------*/
+StatusCode 
+LArCalibPedMonAlg::initialize()
+{
+  ATH_MSG_INFO( "Initialize LArCalibPedMonAlg"  );
+
+  ATH_MSG_INFO( "m_accDigitContainerKey.empty() " << m_accDigitContainerKey.empty()
+        );
+  if(!m_calibDigitContainerKey.empty()) {
+    ATH_CHECK( m_calibDigitContainerKey.initialize() );
+  } else if(!m_accDigitContainerKey.empty()) {
+    ATH_CHECK( m_accDigitContainerKey.initialize() );
+  } else if(!m_accCalibDigitContainerKey.empty()) {
+    ATH_CHECK( m_accCalibDigitContainerKey.initialize() );
+  } else {
+     ATH_MSG_FATAL("Either LArCalibDigitContainerKey or LArAccumulatedDigitContainerKey or LArAccumulatedCalibDigitContainerKey must be set");
+     return StatusCode::FAILURE;
+  }
+
+  return AthMonitorAlgorithm::initialize();
+}
+
+
+/*---------------------------------------------------------*/
+StatusCode 
+LArCalibPedMonAlg::fillHistograms( const EventContext& ctx ) const
+{
+
+  ATH_MSG_DEBUG( "in fillHists()"  );
+  
+  SG::ReadHandle<LArCalibDigitContainer> pLArCalibDigitContainer;
+  SG::ReadHandle<LArAccumulatedDigitContainer> pLArAccDigitContainer;
+  SG::ReadHandle<LArAccumulatedCalibDigitContainer> pLArAccCalibDigitContainer;
+
+  std::unordered_set<unsigned int> chanids;
+
+  if(!m_calibDigitContainerKey.empty()) {
+    pLArCalibDigitContainer= SG::ReadHandle<LArCalibDigitContainer>{m_calibDigitContainerKey,ctx};
+    if(pLArCalibDigitContainer.isValid()){
+       ATH_MSG_DEBUG("Got LArCalibDigitContainer with key "<< m_calibDigitContainerKey.key());
+    } else {
+       ATH_MSG_WARNING("Do not have LArCalibDigitContainer with key "<< m_calibDigitContainerKey.key());
+    }  
+  }
+
+  if(!m_accDigitContainerKey.empty()) {
+    pLArAccDigitContainer= SG::ReadHandle<LArAccumulatedDigitContainer>{m_accDigitContainerKey,ctx};
+    if(pLArAccDigitContainer.isValid()){
+       ATH_MSG_DEBUG("Got LArAccumulatedDigitContainer with key "<< m_accDigitContainerKey.key());
+    } else {
+       ATH_MSG_WARNING("Do not have LArAccumulatedDigitContainer with key "<< m_accDigitContainerKey.key());
+    }  
+
+    if(pLArAccDigitContainer->empty()) return StatusCode::SUCCESS; // Nothing to fill
+
+    LArAccumulatedDigitContainer::const_iterator itDig = pLArAccDigitContainer->begin();
+    LArAccumulatedDigitContainer::const_iterator itDig_e = pLArAccDigitContainer->end();
+    const LArAccumulatedDigit* pLArDigit;
+    for ( ; itDig!=itDig_e;++itDig) {
+        pLArDigit = *itDig;
+        unsigned int id = (pLArDigit->hardwareID()).get_identifier32().get_compact();
+        chanids.insert(id); // set insert is a no-op for duplicates
+    }
+
+    ATH_MSG_DEBUG("Filling nbChan: "<<chanids.size());
+
+    auto nbchan = Monitored::Scalar<unsigned int>("nbChan",chanids.size());
+    fill(m_MonGroupName,nbchan);
+    
+  }
+
+  if(!m_accCalibDigitContainerKey.empty()) {
+    pLArAccCalibDigitContainer= SG::ReadHandle<LArAccumulatedCalibDigitContainer>{m_accCalibDigitContainerKey,ctx};
+    if(pLArAccCalibDigitContainer.isValid()){
+       ATH_MSG_DEBUG("Got LArAccumulatedCalibDigitContainer with key "<< m_accCalibDigitContainerKey.key());
+    } else {
+       ATH_MSG_WARNING("Do not have LArAcumulatedCalibDigitContainer with key "<< m_accCalibDigitContainerKey.key());
+    }  
+
+    if(pLArAccCalibDigitContainer->empty()) return StatusCode::SUCCESS; // Nothing to fill
+
+    LArAccumulatedCalibDigitContainer::const_iterator itDig = pLArAccCalibDigitContainer->begin();
+    LArAccumulatedCalibDigitContainer::const_iterator itDig_e = pLArAccCalibDigitContainer->end();
+    const LArAccumulatedCalibDigit* pLArDigit;
+    for ( ; itDig!=itDig_e;++itDig) {
+        pLArDigit = *itDig;
+        unsigned int id = (pLArDigit->hardwareID()).get_identifier32().get_compact();
+        chanids.insert(id); // set insert is a no-op for duplicates
+    }
+
+    ATH_MSG_DEBUG("Filling nbChan: "<<chanids.size());
+
+    auto nbchan = Monitored::Scalar<unsigned int>("nbChan",chanids.size());
+    fill(m_MonGroupName,nbchan);
+    
+  }
+  return StatusCode::SUCCESS;
+}
+
+
+
diff --git a/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.h b/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.h
new file mode 100644
index 0000000000000000000000000000000000000000..e97cb61443f46a8552a2cd5fb70779c16e13f7bc
--- /dev/null
+++ b/LArCalorimeter/LArMonitoring/src/LArCalibPedMonAlg.h
@@ -0,0 +1,50 @@
+/*
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
+*/
+
+#ifndef LARCALIBPEDMONALG_H
+#define LARCALIBPEDMONALG_H
+
+#include "AthenaMonitoring/AthMonitorAlgorithm.h"
+#include "AthenaMonitoringKernel/Monitored.h"
+
+#include "StoreGate/ReadHandleKey.h"
+#include "LArRawEvent/LArCalibDigitContainer.h"
+#include "LArRawEvent/LArAccumulatedDigitContainer.h"
+#include "LArRawEvent/LArAccumulatedCalibDigitContainer.h"
+
+#include <string>
+
+
+
+
+class LArCalibPedMonAlg: public AthMonitorAlgorithm
+{
+ public:
+  LArCalibPedMonAlg(const std::string& name,ISvcLocator* pSvcLocator );		      
+
+  /** @brief Default destructor */
+  virtual ~LArCalibPedMonAlg();
+
+  /** @brief Overwrite dummy method from AlgTool */
+  virtual StatusCode initialize() override;
+
+
+  /** Called each event */
+  virtual StatusCode fillHistograms( const EventContext& ctx ) const override;
+
+ private:
+
+  // keys to access info
+  SG::ReadHandleKey<LArCalibDigitContainer> m_calibDigitContainerKey{this,"LArCalibDigitContainerKey","","SG key of LArCalibDigitContainer read from Bytestream"};
+  SG::ReadHandleKey<LArAccumulatedDigitContainer> m_accDigitContainerKey{this,"LArAccumulatedDigitContainerKey","","SG key of LArAccumulatedDigitContainer read from Bytestream"};
+  SG::ReadHandleKey<LArAccumulatedCalibDigitContainer> m_accCalibDigitContainerKey{this,"LArAccumulatedCalibDigitContainerKey","","SG key of LArAccumulatedCalibDigitContainer read from Bytestream"};
+
+  // Properties
+  //MonGroup(s) name
+  Gaudi::Property<std::string> m_MonGroupName {this,"LArPedGroupName","LArPedMonGroup"};
+
+};
+
+#endif
+
diff --git a/LArCalorimeter/LArMonitoring/src/components/LArMonitoring_entries.cxx b/LArCalorimeter/LArMonitoring/src/components/LArMonitoring_entries.cxx
index 8992b9573054bdebe19535c40d5be0418309e134..720db6dc172c33ede5b0b4b044af87cdc0f3bdb0 100755
--- a/LArCalorimeter/LArMonitoring/src/components/LArMonitoring_entries.cxx
+++ b/LArCalorimeter/LArMonitoring/src/components/LArMonitoring_entries.cxx
@@ -4,6 +4,7 @@
 #include "../LArFEBMonAlg.h"
 #include "../LArRODMonAlg.h"
 #include "../LArHVCorrectionMonAlg.h"
+#include "../LArCalibPedMonAlg.h"
 
 
 DECLARE_COMPONENT(LArCollisionTimeMonAlg)
@@ -12,4 +13,5 @@ DECLARE_COMPONENT(LArDigitMonAlg)
 DECLARE_COMPONENT(LArFEBMonAlg)
 DECLARE_COMPONENT(LArRODMonAlg)
 DECLARE_COMPONENT(LArHVCorrectionMonAlg)
+DECLARE_COMPONENT(LArCalibPedMonAlg)
 
diff --git a/MuonSpectrometer/MuonConfig/python/MuonReconstructionConfig.py b/MuonSpectrometer/MuonConfig/python/MuonReconstructionConfig.py
index 592c624ea33a90946706bd28818e4236c887e15d..ab194938790bd6dea312322f289bb57080b8d9c4 100644
--- a/MuonSpectrometer/MuonConfig/python/MuonReconstructionConfig.py
+++ b/MuonSpectrometer/MuonConfig/python/MuonReconstructionConfig.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # Core configuration
 from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
@@ -60,9 +60,9 @@ if __name__=="__main__":
     
     cfg.printConfig(withDetails = True, summariseProps = True)
               
-    f=open("MuonReconstruction.pkl","w")
+    f=open("MuonReconstruction.pkl","wb")
     cfg.store(f)
     f.close()
     
     if args.run:
-        cfg.run(20)
\ No newline at end of file
+        cfg.run(20)
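
The "w" to "wb" change above matters because ComponentAccumulator.store pickles the configuration, and pickle writes bytes: under Python 3 a text-mode handle is rejected outright. A self-contained demonstration with plain pickle, independent of Athena:

    # Why the pkl file must be opened in binary mode under Python 3.
    import pickle

    with open("demo.pkl", "w") as f:      # text mode
        try:
            pickle.dump({"run": 20}, f)
        except TypeError as exc:          # "write() argument must be str, not bytes"
            print("text-mode handle rejected:", exc)

    with open("demo.pkl", "wb") as f:     # binary mode works
        pickle.dump({"run": 20}, f)
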
diff --git a/MuonSpectrometer/MuonValidation/MuonRecValidation/MuonTrackPerformance/src/MuonTrackPerformanceAlg.cxx b/MuonSpectrometer/MuonValidation/MuonRecValidation/MuonTrackPerformance/src/MuonTrackPerformanceAlg.cxx
index 104c98cd8a92b7e7190e44cbfc6f5d874f27a0e8..3b1a4caa510ea1756d910e1e278857829d4a7632 100644
--- a/MuonSpectrometer/MuonValidation/MuonRecValidation/MuonTrackPerformance/src/MuonTrackPerformanceAlg.cxx
+++ b/MuonSpectrometer/MuonValidation/MuonRecValidation/MuonTrackPerformance/src/MuonTrackPerformanceAlg.cxx
@@ -303,7 +303,7 @@ bool MuonTrackPerformanceAlg::goodTruthTrack( const Muon::IMuonTrackTruthTool::T
     if( trackRecord->GetMomentum().mag() < m_momentumCutSim ) return false; 
   }
   if( !selectPdg(trackRecord->GetPDGCode()) ) return false;
-  if( m_isCombined && fabs(trackRecord->GetMomentum().eta()) > 2.5 ) return false;
+  if( m_isCombined && fabs(trackRecord->GetMomentum().eta()) > 2.8 ) return false;
   int hits = entry.mdtHits.size();
   if(m_idHelperSvc->hasCSC()) hits += entry.cscHits.size();
   if(m_idHelperSvc->hasMM()) hits += entry.mmHits.size();
diff --git a/Reconstruction/Jet/JetRecCalo/JetRecCalo/MissingCellListTool.h b/Reconstruction/Jet/JetRecCalo/JetRecCalo/MissingCellListTool.h
index d0a8aa56dc7d76d7f8c2c16722bcce760f4ae4b7..7eb602783503f064a318bded63724ec72eb5cf34 100644
--- a/Reconstruction/Jet/JetRecCalo/JetRecCalo/MissingCellListTool.h
+++ b/Reconstruction/Jet/JetRecCalo/JetRecCalo/MissingCellListTool.h
@@ -1,7 +1,7 @@
 ///////////////////////// -*- C++ -*- /////////////////////////////
 
 /*
-  Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 */
 
 #ifndef JETMOMENTTOOLS_MISSINGCALOCELLLISTTOOL_H
@@ -50,14 +50,6 @@ class CaloCell;
 class CaloDetDescrManager;
 class ITileBadChanTool ;
 
-namespace std {
-  template<>
-  struct hash<Identifier> {    
-    size_t operator()(Identifier id) const {return static_cast<size_t>(id.get_identifier32().get_compact());}
-  };
-}
-
-
 namespace jet {
   ///////////////////////////////////////////////////  
   /// \class CellPosition representation of a cell position for the geometric map
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_data17_13tev.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_data17_13tev.sh
index 7198eda7ceab3a182b1b7def113140d7b9e5dfef..146ff49584fd11e69e352af49cc7d08d15348785 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_data17_13tev.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_data17_13tev.sh
@@ -11,7 +11,11 @@
 
 Reco_tf.py --inputBSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/RecJobTransformTests/data17_13TeV.00324910.physics_Main.daq.RAW._lb0713._SFO-6._0001.data --maxEvents 300 --AMI=f908 --outputESDFile myESD.pool.root --outputAODFile myAOD.pool.root --outputHISTFile myHist.root
 
+# Remember the transform's return value as the art result
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 return $RES
 
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_7teV_data.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_7teV_data.sh
index c5afe48e505c8845a772c2ee297e0ba7eed0a514..d88a41bf0e100046a8f0b960e43224f1eb0ccc36 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_7teV_data.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_7teV_data.sh
@@ -13,5 +13,8 @@
 export TRF_ECHO=True; Reco_tf.py --inputBSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/RecJobTransformTests/high_mu-data11_7TeV.00179725.physics_JetTauEtmiss.merge.RAW._lb0021.data --autoConfiguration everything --conditionsTag="COMCOND-BLKPA-RUN1-07" --maxEvents 5 --outputESDFile myESD.pool.root --outputAODFile myAOD.pool.root --outputHISTFile myHist.root --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'rec.doTrigger=False'
 
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_8teV_data.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_8teV_data.sh
index 8c15b66367513e5de2afeac45f35dc41b15f1fc5..66dc2e0ea6e817c601badb59e86091708fe4f470 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_8teV_data.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_8teV_data.sh
@@ -12,6 +12,9 @@
 export TRF_ECHO=True; Reco_tf.py --inputBSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/RecJobTransformTests/data12_8TeV.00209109.physics_JetTauEtmiss.merge.RAW._lb0186._SFO-1._0001.1 --autoConfiguration everything --conditionsTag="COMCOND-BLKPA-RUN1-07" --maxEvents 5 --outputESDFile myESD.pool.root --outputAODFile myAOD.pool.root --outputHISTFile myHist.root --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'rec.doTrigger=False'
 
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 
 
+
+
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q221.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q221.sh
index 00a94e1b47f01f1c012a1184a5540044e0c76f2a..3f2d5a933ad8d1492c47820a912566fae232d140 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q221.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q221.sh
@@ -11,5 +11,8 @@
 export TRF_ECHO=True; Reco_tf.py --AMIConfig=q221 --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True'
 
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q222.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q222.sh
index ffa65b861bc51be5cbe648583de43d715bb39a9d..4cdbf1cc638f6ff54ff3718a4176b622682ead48 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q222.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q222.sh
@@ -12,5 +12,8 @@
 export TRF_ECHO=True; Reco_tf.py --AMIConfig=q222 --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'DQMonFlags.doCTPMon=False' 'DQMonFlags.doHLTMon=False' --inputBSFile='/cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/RecJobTransformTests/data12_8TeV.00209109.physics_JetTauEtmiss.merge.RAW._lb0186._SFO-1._0001.1'
 
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 
diff --git a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q223.sh b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q223.sh
index 868c996bff5e3d0ab417cbcf961b74d55a1ee2bf..4a195840ae1b041e37351aece52f94fa4db4806e 100755
--- a/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q223.sh
+++ b/Reconstruction/RecExample/RecJobTransformTests/test/test_recoshift_recotf_q223.sh
@@ -11,5 +11,8 @@
 export TRF_ECHO=True; Reco_tf.py --AMIConfig=q223 --preExec 'rec.doDetailedAuditor=True' 'rec.doNameAuditor=True' 'DQMonFlags.doCTPMon=False' --inputBSFile='/cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/RecJobTransformTests/data15_comm.00264034.physics_MinBias.daq.RAW._lb0644._SFO-6._0001.data'
 
 RES=$?
+
+xAODDigest.py myAOD.pool.root digest.txt
+
 echo "art-result: $RES Reco"
 
diff --git a/Simulation/Digitization/test/DigitizationComparisonNew_test.py b/Simulation/Digitization/test/DigitizationComparisonNew_test.py
index 9c912b891556452b78c82adbc944c2649cc05997..2af4a1494f95921b356a642668505512f02384da 100755
--- a/Simulation/Digitization/test/DigitizationComparisonNew_test.py
+++ b/Simulation/Digitization/test/DigitizationComparisonNew_test.py
@@ -59,8 +59,8 @@ acc.merge(writeDigitizationMetadata(ConfigFlags))
 # Inner Detector
 # acc.merge(BCM_DigitizationCfg(ConfigFlags))
 acc.merge(PixelDigitizationCfg(ConfigFlags))
-# acc.merge(SCT_DigitizationCfg(ConfigFlags))
-# acc.merge(TRT_DigitizationCfg(ConfigFlags))
+acc.merge(SCT_DigitizationCfg(ConfigFlags))
+acc.merge(TRT_DigitizationCfg(ConfigFlags))
 
 # Calorimeter
 acc.merge(LArTriggerDigitizationCfg(ConfigFlags))
@@ -83,6 +83,8 @@ acc.getSequence("AthOutSeq").OutputStreamRDO.ItemList.remove("xAOD::EventAuxInfo
 # Calorimeter truth output from DigiOutput.py#0082
 acc.getSequence("AthOutSeq").OutputStreamRDO.ItemList += ["CaloCalibrationHitContainer#*"]
 acc.getSequence("AthOutSeq").OutputStreamRDO.ItemList += ["TileHitVector#MBTSHits"]
+# FIXME: hack to match the random seeds of the reference job
+acc.getSequence("AthAlgSeq").StandardPileUpToolsAlg.PileUpTools["TRTDigitizationTool"].RandomSeedOffset = 170
 
 # Dump config
 acc.getService("StoreGateSvc").Dump = True
diff --git a/Simulation/Overlay/OverlayConfiguration/python/OverlaySteering.py b/Simulation/Overlay/OverlayConfiguration/python/OverlaySteering.py
index 2fc9e28852cca5f612248543211e753a1292105c..c48554e826d3229f424ce25188244962e2fc0740 100644
--- a/Simulation/Overlay/OverlayConfiguration/python/OverlaySteering.py
+++ b/Simulation/Overlay/OverlayConfiguration/python/OverlaySteering.py
@@ -9,7 +9,9 @@ from AthenaConfiguration.MainServicesConfig import MainServicesThreadedCfg
 from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
 from AthenaPoolCnvSvc.PoolWriteConfig import PoolWriteCfg
 
+from InDetOverlay.PixelOverlayConfig import PixelOverlayCfg
 from InDetOverlay.SCTOverlayConfig import SCTOverlayCfg
+from InDetOverlay.TRTOverlayConfig import TRTOverlayCfg
 from OverlayCopyAlgs.OverlayCopyAlgsConfig import \
     CopyCaloCalibrationHitContainersCfg, CopyJetTruthInfoCfg, CopyMcEventCollectionCfg, \
     CopyTimingsCfg, CopyTrackRecordCollectionsCfg
@@ -50,7 +52,11 @@ def OverlayMainCfg(configFlags):
     acc.merge(CopyTrackRecordCollectionsCfg(configFlags))
 
     # Inner detector
+    if configFlags.Detector.OverlayPixel:
+        acc.merge(PixelOverlayCfg(configFlags))
     if configFlags.Detector.OverlaySCT:
         acc.merge(SCTOverlayCfg(configFlags))
+    if configFlags.Detector.OverlayTRT:
+        acc.merge(TRTOverlayCfg(configFlags))
 
     return acc
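Each inner-detector subsystem is wired in with the same two-line guard: test the corresponding `Detector.Overlay*` flag, then merge that subsystem's configuration fragment. A sketch of how one more detector would follow the convention (both the flag and TileOverlayCfg are assumed names, shown only to illustrate the pattern):

    # Inside OverlayMainCfg, following the guard pattern above:
    if configFlags.Detector.OverlayTile:          # assumed flag name
        acc.merge(TileOverlayCfg(configFlags))    # assumed Cfg function
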
diff --git a/Simulation/Overlay/OverlayConfiguration/python/OverlayTestHelpers.py b/Simulation/Overlay/OverlayConfiguration/python/OverlayTestHelpers.py
index b06c127dd4806c1248d5f755af00fe5797ecefb9..1387bbfb99b6ff1011ff6fcb4109df4b9a666b1f 100644
--- a/Simulation/Overlay/OverlayConfiguration/python/OverlayTestHelpers.py
+++ b/Simulation/Overlay/OverlayConfiguration/python/OverlayTestHelpers.py
@@ -60,6 +60,7 @@ def setupOverlayTestDetectorFlags(configFlags, detectors):
 def defaultTestFlags(configFlags, args):
     """Fill default overlay flags for testing"""
     configFlags.GeoModel.Align.Dynamic = False
+    configFlags.Digitization.DoInnerDetectorNoise = False
     if args.data:
         configFlags.Input.isMC = False  # TODO: this one should be autodetected
         configFlags.Input.Files = defaultTestFiles.HITS_DATA_OVERLAY
diff --git a/Simulation/Tests/DigitizationTests/test/DigitizationComparison_test.sh b/Simulation/Tests/DigitizationTests/test/DigitizationComparison_test.sh
index 8ecd8e361a2717a1a63c655dfa530176187aefbd..75f624c9beac6b157ca0a2bf37b53d56dc7eebae 100755
--- a/Simulation/Tests/DigitizationTests/test/DigitizationComparison_test.sh
+++ b/Simulation/Tests/DigitizationTests/test/DigitizationComparison_test.sh
@@ -36,6 +36,8 @@ _preInclude="\
 HITtoRDO:Digitization/ForceUseOfAlgorithms.py,\
 SimulationJobOptions/preInclude.LArOnlyConfig.py,\
 SimulationJobOptions/preInclude.PixelOnlyConfig.py,\
+SimulationJobOptions/preInclude.SCTOnlyConfig.py,\
+SimulationJobOptions/preInclude.TRTOnlyConfig.py,\
 SimulationJobOptions/preInclude.TruthOnlyConfig.py\
 "
 
diff --git a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Pixel.sh b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Pixel.sh
new file mode 100755
index 0000000000000000000000000000000000000000..db6d3defc4bce6f0b3f917b8a79f1a35f7675c08
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Pixel.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+# art-description: MC+data Overlay with MT support, config test
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: legacyDataOverlayRDO.pool.root
+# art-output: dataOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+events=2
+
+OverlayBS_tf.py \
+--inputBS_SKIMFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc15_valid.00200010.overlay_streamsAll_2016_pp_1.skim.DRAW.r8381/DRAW.09331084._000146.pool.root.1 \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc16_13TeV.361107.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zmumu.OverlaySim/HITS.pool.root \
+--outputRDOFile legacyDataOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag CONDBR2-BLKPA-2016-12 \
+--fSampltag LARElecCalibMCfSampl-G496-19213- \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
+--postExec 'outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--preInclude 'SimulationJobOptions/preInclude.PixelOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
+--ignorePatterns "L1TopoMenuLoader.+ERROR." \
+--imf False \
+--athenaopts '"--config-only=ConfigLegacy.pkl"'
+
+OverlayBS_tf.py \
+--inputBS_SKIMFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc15_valid.00200010.overlay_streamsAll_2016_pp_1.skim.DRAW.r8381/DRAW.09331084._000146.pool.root.1 \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc16_13TeV.361107.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zmumu.OverlaySim/HITS.pool.root \
+--outputRDOFile legacyDataOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag CONDBR2-BLKPA-2016-12 \
+--fSampltag LARElecCalibMCfSampl-G496-19213- \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt"); outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--preInclude 'SimulationJobOptions/preInclude.PixelOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
+--ignorePatterns "L1TopoMenuLoader.+ERROR." \
+--imf False
+
+rc=$?
+echo "art-result: $rc configLegacy"
+mv log.OverlayBS log.OverlayLegacy
+
+rc2=-9999
+if [ $rc -eq 0 ]
+then
+    OverlayTest.py Pixel -d -t 1 -n $events 2>&1 | tee log.OverlayTest
+    rc2=$?
+fi
+echo  "art-result: $rc2 configNew"
+
+rc3=-9999
+if [ $rc2 -eq 0 ]
+then
+    acmd.py diff-root legacyDataOverlayRDO.pool.root dataOverlayRDO.pool.root \
+        --error-mode resilient --mode=semi-detailed \
+        --ignore-leaves RecoTimingObj_p1_EVNTtoHITS_timings RecoTimingObj_p1_HITStoRDO_timings index_ref \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventIndex \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventTime \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventType \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask0 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask1 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask2 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask3 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.actualInteractionsPerCrossing \
+            xAOD::EventAuxInfo_v1_EventInfoAux.averageInteractionsPerCrossing
+    rc3=$?
+fi
+echo  "art-result: $rc3 comparison"
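`set -o pipefail` is needed in these scripts because every command under test is piped through `tee`: without it, `$?` reports the status of `tee` (almost always 0) instead of the transform or OverlayTest.py. The same distinction, made explicit in Python with the two pipeline statuses collected separately (`false` and `tee` are standard POSIX commands):

    import subprocess

    # Equivalent of: false | tee log.OverlayTest
    cmd = subprocess.Popen(["false"], stdout=subprocess.PIPE)
    tee = subprocess.Popen(["tee", "log.OverlayTest"], stdin=cmd.stdout)
    cmd.stdout.close()
    tee.communicate()
    cmd.wait()
    print(cmd.returncode, tee.returncode)  # 1 0: only cmd carries the failure
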
diff --git a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_SCT.sh b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_SCT.sh
index 3145403eb42d384014935f1813a3d1abdda3124f..a60548aa40a6e00ec467e7ab272d03dba1b30d83 100755
--- a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_SCT.sh
+++ b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_SCT.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 events=2
 
@@ -36,7 +40,7 @@ OverlayBS_tf.py \
 --conditionsTag CONDBR2-BLKPA-2016-12 \
 --fSampltag LARElecCalibMCfSampl-G496-19213- \
 --preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
---postExec 'outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt"); outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
 --preInclude 'SimulationJobOptions/preInclude.SCTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
 --postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
 --ignorePatterns "L1TopoMenuLoader.+ERROR." \
@@ -49,7 +53,7 @@ mv log.OverlayBS log.OverlayLegacy
 rc2=-9999
 if [ $rc -eq 0 ]
 then
-    OverlayTest.py SCT -d -t 0 -n $events 2>&1 | tee log.OverlayTest
+    OverlayTest.py SCT -d -t 1 -n $events 2>&1 | tee log.OverlayTest
     rc2=$?
 fi
 echo  "art-result: $rc2 configNew"
diff --git a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_TRT.sh b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_TRT.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9863dc9078744edf10c33d27f33da0153eec5041
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_TRT.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+# art-description: MC+data Overlay with MT support, config test
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: legacyDataOverlayRDO.pool.root
+# art-output: dataOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+events=2
+
+OverlayBS_tf.py \
+--inputBS_SKIMFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc15_valid.00200010.overlay_streamsAll_2016_pp_1.skim.DRAW.r8381/DRAW.09331084._000146.pool.root.1 \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc16_13TeV.361107.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zmumu.OverlaySim/HITS.pool.root \
+--outputRDOFile legacyDataOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag CONDBR2-BLKPA-2016-12 \
+--fSampltag LARElecCalibMCfSampl-G496-19213- \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
+--postExec 'outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--preInclude 'SimulationJobOptions/preInclude.TRTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
+--ignorePatterns "L1TopoMenuLoader.+ERROR." \
+--imf False \
+--athenaopts '"--config-only=ConfigLegacy.pkl"'
+
+OverlayBS_tf.py \
+--inputBS_SKIMFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc15_valid.00200010.overlay_streamsAll_2016_pp_1.skim.DRAW.r8381/DRAW.09331084._000146.pool.root.1 \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/mc16_13TeV.361107.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zmumu.OverlaySim/HITS.pool.root \
+--outputRDOFile legacyDataOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag CONDBR2-BLKPA-2016-12 \
+--fSampltag LARElecCalibMCfSampl-G496-19213- \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt"); outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--preInclude 'SimulationJobOptions/preInclude.TRTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
+--ignorePatterns "L1TopoMenuLoader.+ERROR." \
+--imf False
+
+rc=$?
+echo "art-result: $rc configLegacy"
+mv log.OverlayBS log.OverlayLegacy
+
+rc2=-9999
+if [ $rc -eq 0 ]
+then
+    OverlayTest.py TRT -d -t 1 -n $events 2>&1 | tee log.OverlayTest
+    rc2=$?
+fi
+echo  "art-result: $rc2 configNew"
+
+rc3=-9999
+if [ $rc2 -eq 0 ]
+then
+    acmd.py diff-root legacyDataOverlayRDO.pool.root dataOverlayRDO.pool.root \
+        --error-mode resilient --mode=semi-detailed \
+        --ignore-leaves RecoTimingObj_p1_EVNTtoHITS_timings RecoTimingObj_p1_HITStoRDO_timings index_ref \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventIndex \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventTime \
+            xAOD::EventAuxInfo_v1_EventInfoAuxDyn.subEventType \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask0 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask1 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask2 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.detectorMask3 \
+            xAOD::EventAuxInfo_v1_EventInfoAux.actualInteractionsPerCrossing \
+            xAOD::EventAuxInfo_v1_EventInfoAux.averageInteractionsPerCrossing
+    rc3=$?
+fi
+echo  "art-result: $rc3 comparison"
diff --git a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Truth.sh b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Truth.sh
index 2d8a02d008f7fad94d8f6c235f534d65ff0763bd..a7619077d3dbd807afcfb9e5d61c05823702f64b 100755
--- a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Truth.sh
+++ b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_Truth.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 events=2
 
@@ -36,7 +40,7 @@ OverlayBS_tf.py \
 --conditionsTag CONDBR2-BLKPA-2016-12 \
 --fSampltag LARElecCalibMCfSampl-G496-19213- \
 --preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.nSamples.set_Value_and_Lock(4);from LArConditionsCommon.LArCondFlags import larCondFlags; larCondFlags.OFCShapeFolder.set_Value_and_Lock("4samples1phase")' \
---postExec 'outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt"); outStream.ItemList.remove("xAOD::EventInfoContainer#*"); outStream.ItemList.remove("xAOD::EventInfoAuxContainer#*");' \
 --preInclude 'SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
 --postInclude 'EventOverlayJobTransforms/Rt_override_CONDBR2-BLKPA-2015-12.py' \
 --ignorePatterns "L1TopoMenuLoader.+ERROR." \
@@ -49,7 +53,7 @@ mv log.OverlayBS log.OverlayLegacy
 rc2=-9999
 if [ $rc -eq 0 ]
 then
-    OverlayTest.py Truth -d -t 0 -n $events 2>&1 | tee log.OverlayTest
+    OverlayTest.py Truth -d -t 1 -n $events 2>&1 | tee log.OverlayTest
     rc2=$?
 fi
 echo  "art-result: $rc2 configNew"
diff --git a/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_sequential.sh b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_sequential.sh
new file mode 100755
index 0000000000000000000000000000000000000000..169f057dc40016a4fcc80ea191b7c2fd4aff4206
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_DataOverlay_ConfigTest_Zmumu_sequential.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# art-description: MC+data Overlay with MT support, running sequentially, new config
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: dataOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+OverlayTest.py -d -n 10 -t 0 2>&1 | tee log.OverlayTest
+
+rc=$?
+echo "art-result: $rc overlay"
+
+# Regression disabled as many changes are planned
+# rc2=-9999
+# if [ $rc -eq 0 ]
+# then
+#     ArtPackage=$1
+#     ArtJobName=$2
+#     art.py compare grid --entries 10 ${ArtPackage} ${ArtJobName} --error-mode resilient --mode=semi-detailed --order-trees
+#     rc2=$?
+# fi
+# echo  "art-result: $rc2 regression"
diff --git a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Pixel.sh b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Pixel.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1a02f0bb8e5393c2e33a72c1ec406284d52f663d
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Pixel.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+# art-description: MC+MC Overlay with MT support, config test
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: legacyMcOverlayRDO.pool.root
+# art-output: mcOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+events=2
+
+Overlay_tf.py \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/Tier0ChainTests/valid1.410000.PowhegPythiaEvtGen_P2012_ttbar_hdamp172p5_nonallhad.simul.HITS.e4993_s3091/HITS.10504490._000425.pool.root.1 \
+--inputRDO_BKGFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/PileupPremixing/22.0/v4/RDO.merged-pileup-MT.100events.pool.root \
+--outputRDOFile legacyMcOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag OFLCOND-MC16-SDR-20 \
+--geometryVersion ATLAS-R2-2016-01-00-01 \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--preInclude 'Overlay:SimulationJobOptions/preInclude.PixelOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--imf False \
+--athenaopts '"--config-only=ConfigLegacy.pkl"'
+
+Overlay_tf.py \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/valid1.410000.PowhegPythiaEvtGen_P2012_ttbar_hdamp172p5_nonallhad.simul.HITS.e4993_s3091/HITS.10504490._000425.pool.root.1 \
+--inputRDO_BKGFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/PileupPremixing/22.0/v4/RDO.merged-pileup-MT.100events.pool.root \
+--outputRDOFile legacyMcOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag OFLCOND-MC16-SDR-20 \
+--geometryVersion ATLAS-R2-2016-01-00-01 \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt");' \
+--preInclude 'Overlay:SimulationJobOptions/preInclude.PixelOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--imf False
+
+rc=$?
+echo "art-result: $rc configLegacy"
+mv log.Overlay log.OverlayLegacy
+
+rc2=-9999
+if [ $rc -eq 0 ]
+then
+    OverlayTest.py Pixel -t 1 -n $events 2>&1 | tee log.OverlayTest
+    rc2=$?
+fi
+echo  "art-result: $rc2 configNew"
+
+rc3=-9999
+if [ $rc2 -eq 0 ]
+then
+    acmd.py diff-root legacyMcOverlayRDO.pool.root mcOverlayRDO.pool.root --error-mode resilient --mode=semi-detailed --ignore-leaves RecoTimingObj_p1_EVNTtoHITS_timings RecoTimingObj_p1_HITStoRDO_timings index_ref
+    rc3=$?
+fi
+echo  "art-result: $rc3 comparison"
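The `--ignore-leaves` list tells diff-root which branches may legitimately differ between the legacy and new-config outputs (timing objects, index_ref). The idea behind it, reduced to a self-contained Python sketch over plain dictionaries (not the actual diff-root implementation):

    def diff_ignoring(ref, chk, ignore):
        """Return keys whose values differ, skipping the ignored leaves."""
        keys = (set(ref) | set(chk)) - set(ignore)
        return sorted(k for k in keys if ref.get(k) != chk.get(k))

    ref = {"pixelRDO": 42, "index_ref": 7}
    chk = {"pixelRDO": 42, "index_ref": 9}
    print(diff_ignoring(ref, chk, ["index_ref"]))  # [] -> considered equal
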
diff --git a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_SCT.sh b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_SCT.sh
index 3df2bda92321dc7d157e333cd8f49ad2c808cf02..2bd7e73d2fee296bbde3677a6c1576538aef5f20 100755
--- a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_SCT.sh
+++ b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_SCT.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 events=2
 
@@ -33,6 +37,7 @@ Overlay_tf.py \
 --conditionsTag OFLCOND-MC16-SDR-20 \
 --geometryVersion ATLAS-R2-2016-01-00-01 \
 --preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt");' \
 --preInclude 'Overlay:SimulationJobOptions/preInclude.SCTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
 --imf False
 
@@ -43,7 +48,7 @@ mv log.Overlay log.OverlayLegacy
 rc2=-9999
 if [ $rc -eq 0 ]
 then
-    OverlayTest.py SCT -t 0 -n $events 2>&1 | tee log.OverlayTest
+    OverlayTest.py SCT -t 1 -n $events 2>&1 | tee log.OverlayTest
     rc2=$?
 fi
 echo  "art-result: $rc2 configNew"
diff --git a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_TRT.sh b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_TRT.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1d06f36adcde9a386e4f29477927628adb74a58c
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_TRT.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+# art-description: MC+MC Overlay with MT support, config test
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: legacyMcOverlayRDO.pool.root
+# art-output: mcOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+events=2
+
+Overlay_tf.py \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/Tier0ChainTests/valid1.410000.PowhegPythiaEvtGen_P2012_ttbar_hdamp172p5_nonallhad.simul.HITS.e4993_s3091/HITS.10504490._000425.pool.root.1 \
+--inputRDO_BKGFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/PileupPremixing/22.0/v4/RDO.merged-pileup-MT.100events.pool.root \
+--outputRDOFile legacyMcOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag OFLCOND-MC16-SDR-20 \
+--geometryVersion ATLAS-R2-2016-01-00-01 \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--preInclude 'Overlay:SimulationJobOptions/preInclude.TRTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--imf False \
+--athenaopts '"--config-only=ConfigLegacy.pkl"'
+
+Overlay_tf.py \
+--inputHITSFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/valid1.410000.PowhegPythiaEvtGen_P2012_ttbar_hdamp172p5_nonallhad.simul.HITS.e4993_s3091/HITS.10504490._000425.pool.root.1 \
+--inputRDO_BKGFile /cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/OverlayMonitoringRTT/PileupPremixing/22.0/v4/RDO.merged-pileup-MT.100events.pool.root \
+--outputRDOFile legacyMcOverlayRDO.pool.root \
+--maxEvents $events \
+--conditionsTag OFLCOND-MC16-SDR-20 \
+--geometryVersion ATLAS-R2-2016-01-00-01 \
+--preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt");' \
+--preInclude 'Overlay:SimulationJobOptions/preInclude.TRTOnlyConfig.py,SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
+--imf False
+
+rc=$?
+echo "art-result: $rc configLegacy"
+mv log.Overlay log.OverlayLegacy
+
+rc2=-9999
+if [ $rc -eq 0 ]
+then
+    OverlayTest.py TRT -t 1 -n $events 2>&1 | tee log.OverlayTest
+    rc2=$?
+fi
+echo  "art-result: $rc2 configNew"
+
+rc3=-9999
+if [ $rc2 -eq 0 ]
+then
+    acmd.py diff-root legacyMcOverlayRDO.pool.root mcOverlayRDO.pool.root --error-mode resilient --mode=semi-detailed --ignore-leaves RecoTimingObj_p1_EVNTtoHITS_timings RecoTimingObj_p1_HITStoRDO_timings index_ref
+    rc3=$?
+fi
+echo  "art-result: $rc3 comparison"
diff --git a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Truth.sh b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Truth.sh
index 9d989cb716ad2aaa44006eee33d9b039a6979700..220f7eccf2490a7bc539570a493daf7439ab32c0 100755
--- a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Truth.sh
+++ b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_Truth.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 events=2
 
@@ -33,6 +37,7 @@ Overlay_tf.py \
 --conditionsTag OFLCOND-MC16-SDR-20 \
 --geometryVersion ATLAS-R2-2016-01-00-01 \
 --preExec 'from LArROD.LArRODFlags import larRODFlags;larRODFlags.NumberOfCollisions.set_Value_and_Lock(20);larRODFlags.nSamples.set_Value_and_Lock(4);larRODFlags.doOFCPileupOptimization.set_Value_and_Lock(True);larRODFlags.firstSample.set_Value_and_Lock(0);larRODFlags.useHighestGainAutoCorr.set_Value_and_Lock(True); from LArDigitization.LArDigitizationFlags import jobproperties;jobproperties.LArDigitizationFlags.useEmecIwHighGain.set_Value_and_Lock(False);' \
+--postExec 'job+=CfgMgr.JobOptsDumperAlg(FileName="OverlayLegacyConfig.txt");' \
 --preInclude 'Overlay:SimulationJobOptions/preInclude.TruthOnlyConfig.py' \
 --imf False
 
@@ -43,7 +48,7 @@ mv log.Overlay log.OverlayLegacy
 rc2=-9999
 if [ $rc -eq 0 ]
 then
-    OverlayTest.py Truth -t 0 -n $events 2>&1 | tee log.OverlayTest
+    OverlayTest.py Truth -t 1 -n $events 2>&1 | tee log.OverlayTest
     rc2=$?
 fi
 echo  "art-result: $rc2 configNew"
diff --git a/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_sequential.sh b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_sequential.sh
new file mode 100755
index 0000000000000000000000000000000000000000..87697bf644d760e7cc2ade4c7016df587b13f946
--- /dev/null
+++ b/Simulation/Tests/OverlayTests/test/test_MCOverlay_ConfigTest_ttbar_sequential.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# art-description: MC+MC Overlay with MT support, running sequentially, new config
+# art-type: grid
+# art-include: master/Athena
+
+# art-output: mcOverlayRDO.pool.root
+# art-output: log.*
+# art-output: mem.summary.*
+# art-output: mem.full.*
+# art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
+
+OverlayTest.py -n 10 -t 0 2>&1 | tee log.OverlayTest
+
+rc=$?
+echo "art-result: $rc overlay"
+
+# Regression disabled as many changes are planned
+# rc2=-9999
+# if [ $rc -eq 0 ]
+# then
+#     ArtPackage=$1
+#     ArtJobName=$2
+#     art.py compare grid --entries 10 ${ArtPackage} ${ArtJobName} --error-mode resilient --mode=semi-detailed --order-trees
+#     rc2=$?
+# fi
+# echo  "art-result: $rc2 regression"
diff --git a/Simulation/Tests/OverlayTestsMT/test/test_DataOverlay_MT_Zmumu_8threads_NewConfig.sh b/Simulation/Tests/OverlayTestsMT/test/test_DataOverlay_MT_Zmumu_8threads_NewConfig.sh
index 0ad572c04249c1c5301e2c4ee317440084fa0c44..59e0a87d7c0dffdadcba260cd8a163544b19d30a 100755
--- a/Simulation/Tests/OverlayTestsMT/test/test_DataOverlay_MT_Zmumu_8threads_NewConfig.sh
+++ b/Simulation/Tests/OverlayTestsMT/test/test_DataOverlay_MT_Zmumu_8threads_NewConfig.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 OverlayTest.py -d -n 100 -t 8 2>&1 | tee log.OverlayTest
 
diff --git a/Simulation/Tests/OverlayTestsMT/test/test_MCOverlay_MT_ttbar_8threads_NewConfig.sh b/Simulation/Tests/OverlayTestsMT/test/test_MCOverlay_MT_ttbar_8threads_NewConfig.sh
index 3a2edb032abffc3ec1f1a1b959fc2f0d54522a95..c624ff1ad0e7fe77a86227ce1f1ee45b2be99e9b 100755
--- a/Simulation/Tests/OverlayTestsMT/test/test_MCOverlay_MT_ttbar_8threads_NewConfig.sh
+++ b/Simulation/Tests/OverlayTestsMT/test/test_MCOverlay_MT_ttbar_8threads_NewConfig.sh
@@ -10,6 +10,10 @@
 # art-output: mem.summary.*
 # art-output: mem.full.*
 # art-output: runargs.*
+# art-output: *.pkl
+# art-output: *Config.txt
+
+set -o pipefail
 
 OverlayTest.py -n 100 -t 8 2>&1 | tee log.OverlayTest
 
diff --git a/TileCalorimeter/TileRecAlgs/src/TileCellSelector.cxx b/TileCalorimeter/TileRecAlgs/src/TileCellSelector.cxx
index 170bce5460015652372a4b5d376af23b56d0ea30..5d23980a9334b6c38959d6384ba5db29c7c910ca 100644
--- a/TileCalorimeter/TileRecAlgs/src/TileCellSelector.cxx
+++ b/TileCalorimeter/TileRecAlgs/src/TileCellSelector.cxx
@@ -88,6 +88,10 @@ TileCellSelector::TileCellSelector(const std::string& name, ISvcLocator* pSvcLoc
   declareProperty( "MaxTimeMBTS", m_maxTimeChan[2] =  100.);  // cut on channel time
   declareProperty( "PtnTimeMBTS", m_ptnTimeChan[2] =  10);  // channel time pattern
 
+  declareProperty( "SelectGain",  m_selectGain = 2); // 0 - select LG only,  1 - HG only, 2 - both gains
+  m_skipGain[TileID::LOWGAIN] = false;
+  m_skipGain[TileID::HIGHGAIN] = false;
+
   // pattern - decimal number with up to 5 digits
   // only values 1(=true) and 0(=false) for every digit are used
   // digit 0 set to 1  - accept event if value < min
@@ -218,6 +222,22 @@ StatusCode TileCellSelector::initialize() {
 
   }
 
+  switch (m_selectGain) {
+    case 0:
+      ATH_MSG_INFO( "Select Low gain channels only");
+      m_skipGain[TileID::LOWGAIN] = false;
+      m_skipGain[TileID::HIGHGAIN] = true;
+      break;
+    case 1:
+      ATH_MSG_INFO( "Select High gain channels only");
+      m_skipGain[TileID::LOWGAIN] = true;
+      m_skipGain[TileID::HIGHGAIN] = false;
+      break;
+    default:
+      ATH_MSG_INFO( "Select both gains");
+      break;
+  }
+
   if (!m_digitsContainerKey.key().empty()) {
     if (m_checkJumps) {
       ATH_MSG_INFO( "JumpDeltaHG " << m_jumpDeltaHG);
@@ -652,7 +672,7 @@ StatusCode TileCellSelector::execute() {
             bool ene1Ok = false;
             bool time1Ok = false;
 
-            if (!bad1) {
+            if ( !(bad1 || m_skipGain[tile_cell->gain1()]) ) {
               if (time1 < m_minTimeChan[ch_type] ) {
                 time1Ok = m_bitTimeChan[ch_type][0];
               } else if (time1 > m_maxTimeChan[ch_type] ) {
@@ -685,7 +705,7 @@ StatusCode TileCellSelector::execute() {
             bool ene2Ok = false;
             bool time2Ok = false;
 
-            if (!bad2) {
+            if ( !(bad2 || m_skipGain[tile_cell->gain2()]) ) {
               if (ene2 < m_minEneChan[ch_type] ) {
                 ene2Ok = m_bitEneChan[ch_type][0];
               } else if (ene2 > m_maxEneChan[ch_type] ) {
@@ -719,7 +739,7 @@ StatusCode TileCellSelector::execute() {
             bool over2=false;
             if (checkOver) {
               over1 = ( (!bad1) && (tile_cell->qbit1() & TileCell::MASK_OVER) && tile_cell->gain1()==TileID::LOWGAIN);
-              over2 = ( (!bad2) && (tile_cell->qbit2() & TileCell::MASK_OVER) && tile_cell->gain1()==TileID::LOWGAIN);
+              over2 = ( (!bad2) && (tile_cell->qbit2() & TileCell::MASK_OVER) && tile_cell->gain2()==TileID::LOWGAIN);
             }
             
             if ((ene1Ok && time1Ok) || over1) {
@@ -1191,6 +1211,7 @@ StatusCode TileCellSelector::execute() {
             HWIdentifier chId = m_tileHWID->channel_id(adcId);
             m_tileHWID->get_hash(chId, hash, &chan_context);
             if ( m_chanToSkip[hash] ) continue;
+            int adc = m_tileHWID->adc(adcId);
             int channel = m_tileHWID->channel(adcId);
             int ch_type = 0;
             if (channel == chMBTS) {
@@ -1200,7 +1221,6 @@ StatusCode TileCellSelector::execute() {
               ch_type = 1;
             }
             if (emptyBad  && !m_chanBad[hash] ) {
-              int adc = m_tileHWID->adc(adcId);
               m_chanBad[hash] = m_tileBadChanTool->getAdcStatus(drawerIdx,channel,adc).isBad() ||
                 (DQstatus && !DQstatus->isAdcDQgood(ros,drawer,channel,adc)) ||
                 (m_checkDCS && m_tileDCS->getDCSStatus(ros, drawer, channel) > TileDCSState::WARNING);
@@ -1221,7 +1241,8 @@ StatusCode TileCellSelector::execute() {
 
               if ( (m_skipMasked && m_chanBad[hash]) ||
                    (m_skipMBTS && channel == chMBTS) ||
-                   (m_skipEmpty && TileDQstatus::isChEmpty(ros, drawer, channel) > 0) )
+                   (m_skipEmpty && TileDQstatus::isChEmpty(ros, drawer, channel) > 0) ||
+                   m_skipGain[adc] )
                 continue;
 
               bool ampOk = false;
diff --git a/TileCalorimeter/TileRecAlgs/src/TileCellSelector.h b/TileCalorimeter/TileRecAlgs/src/TileCellSelector.h
index a65f16bbfcb6de174f149df9e13f62a824a24319..606998f652f857868a8e6406eb22aeccc5d06793 100644
--- a/TileCalorimeter/TileRecAlgs/src/TileCellSelector.h
+++ b/TileCalorimeter/TileRecAlgs/src/TileCellSelector.h
@@ -131,6 +131,8 @@ class TileCellSelector: public AthAlgorithm {
     int m_ptnEneChan[3];
     int m_ptnTimeCell;
     int m_ptnTimeChan[3];
+    int m_selectGain;
+    bool m_skipGain[2];
 #define ptnlength 5
     bool m_bitEneCell[ptnlength];
     bool m_bitTimeCell[ptnlength];
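The new SelectGain property is folded into the two boolean m_skipGain flags once, in initialize(), so the hot per-channel loops only test an array element. The mapping restated in Python (assuming the usual TileID convention LOWGAIN == 0, HIGHGAIN == 1):

    LOWGAIN, HIGHGAIN = 0, 1  # assumed TileID values

    def skip_flags(select_gain):
        """SelectGain: 0 = low gain only, 1 = high gain only, 2 = both."""
        skip = {LOWGAIN: False, HIGHGAIN: False}
        if select_gain == 0:
            skip[HIGHGAIN] = True   # keep low gain, drop high gain
        elif select_gain == 1:
            skip[LOWGAIN] = True    # keep high gain, drop low gain
        return skip

    assert skip_flags(0) == {LOWGAIN: False, HIGHGAIN: True}
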
diff --git a/Tools/PyUtils/python/Decorators.py b/Tools/PyUtils/python/Decorators.py
index 6934ff7209cb84cc0476409df8d528ee2e7ee679..8fb44a4ef2d6c50c3c2c0bed1623de6e27634d0c 100644
--- a/Tools/PyUtils/python/Decorators.py
+++ b/Tools/PyUtils/python/Decorators.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @author: Sebastien Binet <binet@cern.ch>
 # @date:   March 2008
@@ -126,6 +126,7 @@ def forking(func, *args, **kwargs):
             exc_string = traceback.format_exc(limit=10)
             for l in exc_string.splitlines():
                 print ("[%d]"%os.getpid(),l.rstrip())
+            sys.stdout.flush()
             result = exc, exc_string
             status = 1
         with os.fdopen(pwrite, 'wb') as f:
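The added `sys.stdout.flush()` makes sure the traceback printed in the forked child reaches the terminal before the child writes its result to the pipe and exits; buffered output can otherwise be lost, or duplicated across the fork. A minimal self-contained illustration of the same fork-and-pipe pattern (POSIX only; a sketch, not the Decorators code itself):

    import os, sys

    pread, pwrite = os.pipe()
    if os.fork() == 0:                       # child
        os.close(pread)
        print("[%d] child diagnostics" % os.getpid())
        sys.stdout.flush()                   # flush before leaving the child
        with os.fdopen(pwrite, "wb") as f:
            f.write(b"done")
        os._exit(0)                          # skip interpreter cleanup
    os.close(pwrite)                         # parent
    with os.fdopen(pread, "rb") as f:
        print("parent received:", f.read())
    os.wait()
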
diff --git a/Tools/PyUtils/python/FilePeekerTool.py b/Tools/PyUtils/python/FilePeekerTool.py
index cf016252312800943004239d2f4f6c6cbafad32e..947381bb6f44192ea519663abfc7619061de6168 100644
--- a/Tools/PyUtils/python/FilePeekerTool.py
+++ b/Tools/PyUtils/python/FilePeekerTool.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
 
 # @file PyUtils.FilePeekerTool
 # @purpose peek into APR files to read in-file metadata without Athena (based on PyAthena.FilePeekerLib code by Sebastian Binet) 
@@ -58,6 +58,9 @@ class FilePeekerTool():
                 guid = d['value']
 
         meta = self.f.Get( 'MetaData' )
+        if not meta:
+            print ('No metadata', file=stdout)
+            return {}
 
         from AthenaPython.FilePeekerLib import toiter
 
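The early return handles files that carry no in-file `MetaData` tree at all: in PyROOT, `TFile.Get` on a missing key returns a null object that evaluates as false, so the plain truthiness test is sufficient. The guard in isolation (requires ROOT; the file name is illustrative):

    import ROOT  # PyROOT

    f = ROOT.TFile.Open("somefile.pool.root")  # illustrative name
    meta = f.Get("MetaData")
    if not meta:        # a null TObject is falsy in PyROOT
        peeked = {}     # mirror the early return above
    else:
        peeked = {"entries": meta.GetEntries()}
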
diff --git a/Trigger/TrigMonitoring/TrigSteerMonitor/src/TrigSignatureMoniMT.cxx b/Trigger/TrigMonitoring/TrigSteerMonitor/src/TrigSignatureMoniMT.cxx
index d6dfe792f7da9c890efac61823e335f241d6547c..01e97d166c0e46196e4541eb858750f518332062 100644
--- a/Trigger/TrigMonitoring/TrigSteerMonitor/src/TrigSignatureMoniMT.cxx
+++ b/Trigger/TrigMonitoring/TrigSteerMonitor/src/TrigSignatureMoniMT.cxx
@@ -51,11 +51,15 @@ StatusCode TrigSignatureMoniMT::start() {
     }
 
     if( gotL1Menu && !chain.l1item().empty() ) {
-      TrigConf::L1Item item = l1MenuHandle->item(chain.l1item());
-      for ( const std::string & group : item.bunchgroups() ) {
-        if ( group != "BGRP0" ) {
-          m_chainIDToBunchMap[HLT::Identifier(chain.name())].insert(group);
-        }
+      try {
+        TrigConf::L1Item item = l1MenuHandle->item(chain.l1item());
+        for ( const std::string & group : item.bunchgroups() ) {
+          if ( group != "BGRP0" ) {
+            m_chainIDToBunchMap[HLT::Identifier(chain.name())].insert(group);
+          }
+        }
+      } catch(...) {
+        ATH_MSG_WARNING("The item " << chain.l1item() << " is not part of the L1 menu");
       }
     }
   }
diff --git a/Trigger/TrigValidation/TrigUpgradeTest/CMakeLists.txt b/Trigger/TrigValidation/TrigUpgradeTest/CMakeLists.txt
index a37d2b35589f574d1692fa026f95d6ca52fc8ba4..1bbda8cb6b1175f650cea1482174f51ca64167ee 100644
--- a/Trigger/TrigValidation/TrigUpgradeTest/CMakeLists.txt
+++ b/Trigger/TrigValidation/TrigUpgradeTest/CMakeLists.txt
@@ -50,7 +50,6 @@ function( _add_test name )
 endfunction( _add_test )
 
 
-_add_test( emu_step_processing LOG_SELECT_PATTERN "TrigSignatureMoniMT.*INFO HLT_.*|TriggerSummaryStep.* chains passed:|TriggerSummaryStep.*+++ HLT_.*|TriggerSummaryStep.*+++ leg.*") # should be moved to TriggerMenuMT
 
 # Unit tests of the test scripts
 atlas_add_test( flake8_test_dir
diff --git a/Trigger/TrigValidation/TrigUpgradeTest/test/test_emu_step_processing.sh b/Trigger/TrigValidation/TrigUpgradeTest/test/test_emu_step_processing.sh
deleted file mode 100755
index 935cd8a961df4b59c0d96f106d6e3a3ed9715a83..0000000000000000000000000000000000000000
--- a/Trigger/TrigValidation/TrigUpgradeTest/test/test_emu_step_processing.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# This is not an ART test, but a unit test
-
-# This is a unit test of HLT Control Flow and should be moved to TriggerMenuMT
-
-athena.py -l DEBUG --imf --threads=1 TrigUpgradeTest/EmuStepProcessingTest.py
diff --git a/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_emu_step_processing_build.sh b/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_emu_step_processing_build.sh
new file mode 100755
index 0000000000000000000000000000000000000000..95bbda52a4c6c001b1a4b5ec3f17f33d0b767f6f
--- /dev/null
+++ b/Trigger/TrigValidation/TrigUpgradeTest/test/test_trigUpgr_emu_step_processing_build.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+# art-description: athenaMT HLT emulation test
+# art-type: build
+# art-include: master/Athena
+
+# This is a unit test of HLT Control Flow and should be moved to TriggerMenuMT
+
+export THREADS=1
+export EVENTS=4
+export SLOTS=1
+export JOBOPTION="TrigUpgradeTest/EmuStepProcessingTest.py"
+export REGTESTEXP="TrigSignatureMoniMT.*INFO HLT_.*|TriggerSummaryStep.* chains passed:|TriggerSummaryStep.*+++ HLT_.*|TriggerSummaryStep.*+++ leg.*"
+export DOPERFMON=0
+export ATHENAOPTS=" -l DEBUG"
+export REGTESTREF=`find_data.py TrigUpgradeTest/emu_step_processing.ref`
+
+
+source exec_TrigUpgradeTest_art_athenaMT.sh
+source exec_TrigUpgradeTest_art_post.sh
diff --git a/Trigger/TriggerCommon/TrigEDMConfig/python/TriggerEDMRun3.py b/Trigger/TriggerCommon/TrigEDMConfig/python/TriggerEDMRun3.py
index f3cffa15028cea4203249f3f62c8fbc4daad4a6d..67891a9ecbded0a4734f434085d6de909d305657 100644
--- a/Trigger/TriggerCommon/TrigEDMConfig/python/TriggerEDMRun3.py
+++ b/Trigger/TriggerCommon/TrigEDMConfig/python/TriggerEDMRun3.py
@@ -270,7 +270,7 @@ TriggerHLTListRun3 = [
     ('xAOD::TrigMissingETAuxContainer#HLT_MET_trkmhtAux.',                 'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
 
     ('xAOD::TrigMissingETContainer#HLT_MET_pfsum',                         'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
-    ('xAOD::TrigMissingETContainer#HLT_MET_pfsumAux.',                     'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
+    ('xAOD::TrigMissingETAuxContainer#HLT_MET_pfsumAux.',                  'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
 
     ('xAOD::CaloClusterContainer#HLT_TopoCaloClustersFS',                  'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
     ('xAOD::CaloClusterTrigAuxContainer#HLT_TopoCaloClustersFSAux.nCells', 'BS ESD AODFULL AODSLIM AODVERYSLIM', 'MET'),
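The one-character fix above matters: a key ending in `Aux.` is an auxiliary store and must be declared with the matching Aux container type, otherwise the store cannot be persistified correctly. A small consistency check in that spirit (a sketch, not the trigger's actual EDM validation):

    def find_aux_mismatches(edm_list):
        """Flag 'Type#Key' entries whose key looks like an Aux store
        while the declared type is not an Aux container."""
        bad = []
        for entry in edm_list:
            typename, _, key = entry[0].partition("#")
            if key.endswith("Aux.") and "Aux" not in typename:
                bad.append(entry[0])
        return bad

    sample = [("xAOD::TrigMissingETContainer#HLT_MET_pfsumAux.", "BS", "MET")]
    print(find_aux_mismatches(sample))  # flags the pre-fix entry
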