From 80234f3342fcc922e23bc2d2de9f5e333967a940 Mon Sep 17 00:00:00 2001
From: Walter Lampl <Walter.Lampl@cern.ch>
Date: Thu, 4 Sep 2014 12:44:47 +0200
Subject: [PATCH] gen_klass of AlgTool: Remove obsolete usage of StoreGateSvc
 (done by AthAlgTool base now) (PyUtils-00-13-22)

---
 Tools/PyUtils/bin/abootstrap-wkarea.py        |  300 ++
 Tools/PyUtils/bin/acmd.py                     |   72 +
 Tools/PyUtils/bin/atl-gen-athena-d3pd-reader  |  800 +++++
 Tools/PyUtils/bin/avn.py                      |  291 ++
 Tools/PyUtils/bin/build_cmt_pkg_db.py         |  160 +
 Tools/PyUtils/bin/checkFile.py                |  114 +
 Tools/PyUtils/bin/checkPlugins.py             |  235 ++
 Tools/PyUtils/bin/checkSG.py                  |  107 +
 Tools/PyUtils/bin/checkTP.py                  |  214 ++
 Tools/PyUtils/bin/checkTag.py                 |  637 ++++
 Tools/PyUtils/bin/checkxAOD.py                |  167 +
 Tools/PyUtils/bin/cmtClients.py               |   80 +
 Tools/PyUtils/bin/diff-athfile                |  135 +
 Tools/PyUtils/bin/diff-jobo-cfg.py            |  212 ++
 Tools/PyUtils/bin/diffConfigs.py              |   73 +
 Tools/PyUtils/bin/diffPoolFiles.py            |   65 +
 Tools/PyUtils/bin/diffTAGTree.py              |  208 ++
 Tools/PyUtils/bin/dlldep.py                   |  281 ++
 Tools/PyUtils/bin/dso-stats.py                |  239 ++
 Tools/PyUtils/bin/dump-athfile.py             |  142 +
 Tools/PyUtils/bin/filter-and-merge-d3pd.py    |  982 ++++++
 Tools/PyUtils/bin/gen-typereg-dso.py          |   34 +
 Tools/PyUtils/bin/gen_klass.py                | 1177 +++++++
 Tools/PyUtils/bin/get-tag-diff.py             |   51 +
 Tools/PyUtils/bin/getTagDiff.py               |  707 ++++
 Tools/PyUtils/bin/gprof2dot                   | 2896 +++++++++++++++++
 Tools/PyUtils/bin/icython.py                  |  180 +
 Tools/PyUtils/bin/isDSinFAX.py                |  140 +
 Tools/PyUtils/bin/lstags                      |   98 +
 Tools/PyUtils/bin/magnifyPoolFile.py          |  150 +
 Tools/PyUtils/bin/merge-poolfiles.py          |   76 +
 Tools/PyUtils/bin/pep8.py                     | 1360 ++++++++
 Tools/PyUtils/bin/pkgco.py                    |  262 ++
 .../PyUtils/bin/pool_extractFileIdentifier.py |   77 +
 Tools/PyUtils/bin/pool_insertFileToCatalog.py |   85 +
 Tools/PyUtils/bin/print_auditor_callgraph.py  |   84 +
 Tools/PyUtils/bin/pyroot.py                   |  185 ++
 Tools/PyUtils/bin/setupWorkArea.py            |  279 ++
 Tools/PyUtils/bin/tabnanny-checker.py         |  120 +
 Tools/PyUtils/bin/tcSubmitTag.py              |    4 +
 Tools/PyUtils/bin/vmem-sz.py                  |  174 +
 Tools/PyUtils/cmt/requirements                |   90 +
 Tools/PyUtils/doc/mainpage.h                  |  141 +
 Tools/PyUtils/python/AmiLib.py                |  524 +++
 Tools/PyUtils/python/AthFile/__init__.py      |  199 ++
 Tools/PyUtils/python/AthFile/impl.py          | 1441 ++++++++
 Tools/PyUtils/python/AthFile/tests.py         |  479 +++
 .../PyUtils/python/AthFile/timerdecorator.py  |   61 +
 Tools/PyUtils/python/Cmt.py                   |   13 +
 Tools/PyUtils/python/Decorators.py            |   20 +
 Tools/PyUtils/python/Dso.py                   |  650 ++++
 Tools/PyUtils/python/Helpers.py               |  172 +
 Tools/PyUtils/python/Logging.py               |   13 +
 Tools/PyUtils/python/MpUtils.py               |   77 +
 Tools/PyUtils/python/PoolFile.py              | 1219 +++++++
 Tools/PyUtils/python/RootUtils.py             |  312 ++
 Tools/PyUtils/python/WorkAreaLib.py           |  416 +++
 Tools/PyUtils/python/__init__.py              |    3 +
 Tools/PyUtils/python/_rfio.py                 |  468 +++
 Tools/PyUtils/python/acmdlib.py               |  232 ++
 Tools/PyUtils/python/bwdcompat.py             |    7 +
 Tools/PyUtils/python/castor.py                |  414 +++
 Tools/PyUtils/python/coverage.py              |  338 ++
 Tools/PyUtils/python/dbsqlite.py              |  240 ++
 Tools/PyUtils/python/decorator.py             |    4 +
 Tools/PyUtils/python/fileutils.py             |  297 ++
 Tools/PyUtils/python/merge_join.py            |  103 +
 Tools/PyUtils/python/path.py                  | 1007 ++++++
 Tools/PyUtils/python/pshell.py                |  214 ++
 Tools/PyUtils/python/reimport.py              |  559 ++++
 Tools/PyUtils/python/rfio.py                  |  233 ++
 Tools/PyUtils/python/scripts/__init__.py      |   31 +
 Tools/PyUtils/python/scripts/ath_dump.py      |   98 +
 Tools/PyUtils/python/scripts/check_file.py    |   90 +
 Tools/PyUtils/python/scripts/check_reflex.py  |  282 ++
 Tools/PyUtils/python/scripts/check_sg.py      |   97 +
 Tools/PyUtils/python/scripts/cmt_newpkg.py    |  118 +
 .../PyUtils/python/scripts/diff_pool_files.py |   38 +
 .../PyUtils/python/scripts/diff_root_files.py |  236 ++
 .../PyUtils/python/scripts/dump_root_file.py  |   79 +
 Tools/PyUtils/python/scripts/filter_files.py  |  197 ++
 Tools/PyUtils/python/scripts/gen_klass.py     | 1163 +++++++
 Tools/PyUtils/python/scripts/get_tag_diff.py  |   38 +
 Tools/PyUtils/python/scripts/merge_files.py   |  117 +
 Tools/PyUtils/python/scripts/tc_find_pkg.py   |   37 +
 Tools/PyUtils/python/scripts/tc_find_tag.py   |   55 +
 .../PyUtils/python/scripts/tc_show_clients.py |   90 +
 Tools/PyUtils/python/scripts/tc_submit_tag.py |  314 ++
 Tools/PyUtils/python/smem.py                  |  641 ++++
 Tools/PyUtils/python/xmldict.py               |  172 +
 Tools/PyUtils/test/PyUtils.xml                |   28 +
 91 files changed, 27520 insertions(+)
 create mode 100755 Tools/PyUtils/bin/abootstrap-wkarea.py
 create mode 100755 Tools/PyUtils/bin/acmd.py
 create mode 100755 Tools/PyUtils/bin/atl-gen-athena-d3pd-reader
 create mode 100755 Tools/PyUtils/bin/avn.py
 create mode 100755 Tools/PyUtils/bin/build_cmt_pkg_db.py
 create mode 100755 Tools/PyUtils/bin/checkFile.py
 create mode 100755 Tools/PyUtils/bin/checkPlugins.py
 create mode 100755 Tools/PyUtils/bin/checkSG.py
 create mode 100755 Tools/PyUtils/bin/checkTP.py
 create mode 100755 Tools/PyUtils/bin/checkTag.py
 create mode 100755 Tools/PyUtils/bin/checkxAOD.py
 create mode 100755 Tools/PyUtils/bin/cmtClients.py
 create mode 100755 Tools/PyUtils/bin/diff-athfile
 create mode 100755 Tools/PyUtils/bin/diff-jobo-cfg.py
 create mode 100755 Tools/PyUtils/bin/diffConfigs.py
 create mode 100755 Tools/PyUtils/bin/diffPoolFiles.py
 create mode 100755 Tools/PyUtils/bin/diffTAGTree.py
 create mode 100755 Tools/PyUtils/bin/dlldep.py
 create mode 100755 Tools/PyUtils/bin/dso-stats.py
 create mode 100755 Tools/PyUtils/bin/dump-athfile.py
 create mode 100755 Tools/PyUtils/bin/filter-and-merge-d3pd.py
 create mode 100755 Tools/PyUtils/bin/gen-typereg-dso.py
 create mode 100755 Tools/PyUtils/bin/gen_klass.py
 create mode 100755 Tools/PyUtils/bin/get-tag-diff.py
 create mode 100755 Tools/PyUtils/bin/getTagDiff.py
 create mode 100755 Tools/PyUtils/bin/gprof2dot
 create mode 100755 Tools/PyUtils/bin/icython.py
 create mode 100755 Tools/PyUtils/bin/isDSinFAX.py
 create mode 100755 Tools/PyUtils/bin/lstags
 create mode 100755 Tools/PyUtils/bin/magnifyPoolFile.py
 create mode 100755 Tools/PyUtils/bin/merge-poolfiles.py
 create mode 100755 Tools/PyUtils/bin/pep8.py
 create mode 100755 Tools/PyUtils/bin/pkgco.py
 create mode 100755 Tools/PyUtils/bin/pool_extractFileIdentifier.py
 create mode 100755 Tools/PyUtils/bin/pool_insertFileToCatalog.py
 create mode 100755 Tools/PyUtils/bin/print_auditor_callgraph.py
 create mode 100755 Tools/PyUtils/bin/pyroot.py
 create mode 100755 Tools/PyUtils/bin/setupWorkArea.py
 create mode 100755 Tools/PyUtils/bin/tabnanny-checker.py
 create mode 100755 Tools/PyUtils/bin/tcSubmitTag.py
 create mode 100755 Tools/PyUtils/bin/vmem-sz.py
 create mode 100755 Tools/PyUtils/cmt/requirements
 create mode 100755 Tools/PyUtils/doc/mainpage.h
 create mode 100644 Tools/PyUtils/python/AmiLib.py
 create mode 100644 Tools/PyUtils/python/AthFile/__init__.py
 create mode 100644 Tools/PyUtils/python/AthFile/impl.py
 create mode 100644 Tools/PyUtils/python/AthFile/tests.py
 create mode 100644 Tools/PyUtils/python/AthFile/timerdecorator.py
 create mode 100755 Tools/PyUtils/python/Cmt.py
 create mode 100644 Tools/PyUtils/python/Decorators.py
 create mode 100755 Tools/PyUtils/python/Dso.py
 create mode 100755 Tools/PyUtils/python/Helpers.py
 create mode 100644 Tools/PyUtils/python/Logging.py
 create mode 100644 Tools/PyUtils/python/MpUtils.py
 create mode 100755 Tools/PyUtils/python/PoolFile.py
 create mode 100644 Tools/PyUtils/python/RootUtils.py
 create mode 100644 Tools/PyUtils/python/WorkAreaLib.py
 create mode 100755 Tools/PyUtils/python/__init__.py
 create mode 100644 Tools/PyUtils/python/_rfio.py
 create mode 100644 Tools/PyUtils/python/acmdlib.py
 create mode 100644 Tools/PyUtils/python/bwdcompat.py
 create mode 100644 Tools/PyUtils/python/castor.py
 create mode 100644 Tools/PyUtils/python/coverage.py
 create mode 100644 Tools/PyUtils/python/dbsqlite.py
 create mode 100644 Tools/PyUtils/python/decorator.py
 create mode 100644 Tools/PyUtils/python/fileutils.py
 create mode 100644 Tools/PyUtils/python/merge_join.py
 create mode 100644 Tools/PyUtils/python/path.py
 create mode 100644 Tools/PyUtils/python/pshell.py
 create mode 100644 Tools/PyUtils/python/reimport.py
 create mode 100644 Tools/PyUtils/python/rfio.py
 create mode 100644 Tools/PyUtils/python/scripts/__init__.py
 create mode 100644 Tools/PyUtils/python/scripts/ath_dump.py
 create mode 100644 Tools/PyUtils/python/scripts/check_file.py
 create mode 100644 Tools/PyUtils/python/scripts/check_reflex.py
 create mode 100644 Tools/PyUtils/python/scripts/check_sg.py
 create mode 100644 Tools/PyUtils/python/scripts/cmt_newpkg.py
 create mode 100644 Tools/PyUtils/python/scripts/diff_pool_files.py
 create mode 100644 Tools/PyUtils/python/scripts/diff_root_files.py
 create mode 100644 Tools/PyUtils/python/scripts/dump_root_file.py
 create mode 100644 Tools/PyUtils/python/scripts/filter_files.py
 create mode 100644 Tools/PyUtils/python/scripts/gen_klass.py
 create mode 100644 Tools/PyUtils/python/scripts/get_tag_diff.py
 create mode 100644 Tools/PyUtils/python/scripts/merge_files.py
 create mode 100644 Tools/PyUtils/python/scripts/tc_find_pkg.py
 create mode 100644 Tools/PyUtils/python/scripts/tc_find_tag.py
 create mode 100644 Tools/PyUtils/python/scripts/tc_show_clients.py
 create mode 100644 Tools/PyUtils/python/scripts/tc_submit_tag.py
 create mode 100644 Tools/PyUtils/python/smem.py
 create mode 100644 Tools/PyUtils/python/xmldict.py
 create mode 100755 Tools/PyUtils/test/PyUtils.xml

diff --git a/Tools/PyUtils/bin/abootstrap-wkarea.py b/Tools/PyUtils/bin/abootstrap-wkarea.py
new file mode 100755
index 00000000000..2535b4d3965
--- /dev/null
+++ b/Tools/PyUtils/bin/abootstrap-wkarea.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+#
+# @file: abootstrap-wkarea.py
+# @purpose: install all needed $(CMTCONFIG) directories into a temporary area
+#           so as not to eat our precious AFS disk space.
+#           also creates a WorkArea CMT package to ease administration
+# @author: Sebastien Binet
+# @date: February 2007
+
+from __future__ import with_statement
+
+__version__ = "$Revision: 323486 $"
+
+import os
+import os.path as osp
+import glob
+import sys
+
+### basic logging and messages -----------------------------------------------
+from PyCmt.Logging import logging
+
+msg = logging.getLogger("AthBoot")
+
+##########################
+# recognized user options
+##########################
+import getopt
+
+_useropts = 'i:o:hl:v'
+_userlongopts = [ 'input-dir=',  'output-dir=',
+                  'help',     'loglevel=',
+                  'version' ]
+
+def _usage():
+   print """Accepted command line options (CLI):
+   -i, --input-dir <dir>      ...  directory where the original WorkArea is
+                                   sitting.
+   -o, --output-dir <dir>     ...  directory where to install the bin dirs
+   -h, --help                 ...  print this help message
+   -l, --loglevel <level>     ...  logging level (DEBUG, INFO, WARNING, ERROR, FATAL)
+   -v, --version              ...  print version number
+   """
+   return
+
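+# An illustrative invocation (paths below are hypothetical):
+#   abootstrap-wkarea.py -i ~/testarea/MyWorkArea -o /tmp/$USER/aboot-tmp -l DEBUG
+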
+## Helper class to recursively find files
+import fnmatch
+class _GlobDirectoryWalker:
+   """a forward iterator that traverses a directory tree"""
+
+   def __init__(self, directory, pattern="*"):
+      self.stack = [directory]
+      if type(pattern) != type([]):
+         pattern = [ pattern ]
+      self.pattern = pattern
+      self.files = []
+      self.index = 0
+      return
+    
+   def __getitem__(self, index):
+      while 1:
+         try:
+            file = self.files[self.index]
+            self.index = self.index + 1
+         except IndexError:
+            # pop next directory from stack
+            self.directory = self.stack.pop()
+            self.files = os.listdir(self.directory)
+            self.index = 0
+         else:
+            # got a filename
+            fullname = osp.join(self.directory, file)
+            if osp.isdir(fullname) and not osp.islink(fullname):
+               self.stack.append(fullname)
+            for pattern in self.pattern:
+               if fnmatch.fnmatch(file, pattern):
+                  msg.debug(" --> %s",fullname)
+                  return fullname
+
+         pass
+      return
+   
+   pass # class _GlobDirectoryWalker
+
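+# A minimal usage sketch (directory and patterns are illustrative): the class
+# relies on the old __getitem__ iteration protocol, so a plain for-loop walks
+# the whole tree:
+#   for f in _GlobDirectoryWalker('/some/top/dir', ['*.py', '*.so']):
+#      print f
+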
+## Helper function to find all the bin dirs to be installed
+def register_bin_dirs(top_dir = os.getcwd(),
+                      pattern = os.environ['CMTCONFIG']):
+   """helper function to find all 'bin' dirs to be installed in the temp space
+   """
+   top_dir = osp.abspath(osp.expanduser(osp.expandvars(top_dir)))
+   msg.info("registering 'bin dirs' [%s]..." % str(pattern))
+   msg.info("parsing [%s]..." % top_dir)
+   if not osp.exists(top_dir):
+      return []
+   bin_dirs = [ d for d in _GlobDirectoryWalker(top_dir, pattern) 
+                if osp.isdir(d) and not osp.islink(d)
+                and d.count("InstallArea") <= 0 ]
+
+   msg.info(" ==> found [%i] 'bin dirs' to process", len(bin_dirs))
+   return bin_dirs
+
+## Main entry point
+import shutil, glob
+def symlink_bin_dirs(input_dir, output_dir, bin_dirs):
+   input_dir = osp.abspath(input_dir)
+   output_dir= osp.abspath(output_dir)
+
+   msg.info("."*50)
+   msg.info("symlinking ...")
+   if not osp.exists(output_dir):
+      msg.warning("[%s] does NOT exist: creating it...", output_dir)
+      os.mkdir(output_dir)
+      pass
+   
+   for bin in bin_dirs:
+      bin = osp.abspath(bin)
+      root = osp.commonprefix([input_dir, bin]) + os.sep
+      out_bin = output_dir + os.sep + bin.split(root)[1]
+      msg.info(" -- %s", bin.split(root)[1])
+
+      ## remove the linked dir if it exists
+      ## (this assumes the registering of bin_dirs does not register already
+      ## symlinked bin_dirs !! Worst-case scenario is to rebuild...)
+      if osp.exists(out_bin):
+         msg.debug("... removing [%s] ...", out_bin)
+         shutil.rmtree(out_bin)
+         pass
+
+      ## create all parent directories if they do not exist yet
+      if not osp.exists(osp.dirname(out_bin)):
+         os.makedirs(osp.dirname(out_bin))
+         pass
+      
+      # move the bin dir to the output area and symlink it back
+      shutil.move(bin, out_bin)
+      os.symlink(out_bin, bin)
+
+      # symlink the other directories so that relative paths keep working
+      # (e.g. g++ -o bla ../src/Bla.cxx)
+      pkg_dir = osp.dirname(bin)
+      other_dirs = [d for d in glob.glob(pkg_dir + os.sep + "*") 
+                    if d != bin and osp.isdir(d)]
+      #msg.debug(" symlinking [%r]", other_dirs)
+      for d in other_dirs:
+         symlink_dest = osp.join(output_dir, d.split(root)[1])
+         if osp.exists(symlink_dest):
+            os.remove(symlink_dest)
+         os.symlink(d, symlink_dest)
+
+   msg.info("symlinking [DONE]")
+   msg.info("."*50)
+   return
+
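+# Illustrative effect of symlink_bin_dirs (paths and CMTCONFIG value are
+# hypothetical): for input_dir=/afs/cern.ch/user/j/jdoe/MyWorkArea containing
+# Tools/PyUtils/x86_64-slc6-gcc48-opt, that bin dir is moved to
+# <output_dir>/Tools/PyUtils/x86_64-slc6-gcc48-opt and replaced by a symlink,
+# while symlinks to the sibling dirs (src, cmt, ...) are created in the output
+# area so relative paths keep working there too.
+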
+class Options(object):
+   """a dummy class to collect options"""
+   pass
+
+def _processOptions(useropts, userlongopts):
+
+   # defaults
+   input_dir  = None
+   output_dir = None
+   lvl = logging.INFO
+   opts = Options()
+   
+   try:
+      optlist,args = getopt.getopt(sys.argv[1:],
+                                   useropts,
+                                   userlongopts)
+   except getopt.error:
+      msg.error(str(sys.exc_value))
+      _usage()
+      sys.exit(2)
+      pass
+   
+   for opt, arg in optlist:
+      if opt in ('-h', '--help'):
+         _usage()
+         sys.exit()
+      elif opt in ('-v', '--version'):
+         print "version:",__version__
+         print "By Sebastien Binet"
+         sys.exit()
+      elif opt in ('-i', '--input-dir'):
+         input_dir = osp.expanduser(osp.expandvars(arg))
+      elif opt in ('-o', '--output-dir'):
+         output_dir = osp.expanduser(osp.expandvars(arg))
+      elif opt in ('-l', '--loglevel'):
+         lvl = arg.upper()
+         logLevel = getattr(logging, lvl)
+         msg.setLevel(logLevel)
+         del lvl,logLevel
+         pass
+      else:
+         pass
+      pass
+
+   if input_dir  is None: input_dir = os.getcwd()
+
+   if output_dir is None:
+      fname = osp.join(os.getcwd(), ".abootstrap.cfg")
+      if osp.exists(fname):
+         # (try) to get them from a previous run of abootstrap-wkarea
+         with open(fname, 'r') as sticky_file:
+            from ConfigParser import SafeConfigParser
+            cfg = SafeConfigParser()
+            cfg.readfp(sticky_file)
+            ath_cfg = dict(cfg.items('abootstrap'))
+            hostname   = ath_cfg['hostname']
+            input_dir  = ath_cfg['input-dir']
+            output_dir = ath_cfg['output-dir']
+            del cfg
+            pass
+
+      if output_dir is None:
+         # fall back to a /tmp area named after the current directory...
+         output_dir = osp.join('/tmp',
+                               '$USER',
+                               'aboot-tmp-'+osp.basename(os.getcwd()))
+   opts.input_dir = osp.abspath(osp.expanduser(osp.expandvars(input_dir)))
+   opts.output_dir= osp.abspath(osp.expanduser(osp.expandvars(output_dir)))
+
+   return opts
+    
+if __name__ == "__main__":
+
+   msg = logging.getLogger('AthBoot')
+   msg.setLevel(logging.INFO)
+
+   ## process user options
+   opts = _processOptions(_useropts, _userlongopts)
+
+   msg.info("#"*50)
+   msg.info(" input-dir:  [%s]", opts.input_dir)
+   msg.info(" output-dir: [%s]", opts.output_dir)
+   msg.info("#"*50)
+   msg.info("")
+
+   import shutil
+   
+   # removing output dir
+   if osp.exists(opts.output_dir):
+      shutil.rmtree(opts.output_dir)
+
+   # remove InstallArea if any
+   if osp.exists('InstallArea'):
+      shutil.rmtree('InstallArea')
+      
+   # create $CMTCONFIG directories...
+   msg.info("creating WorkArea...")
+   import commands as com
+   sc,out = com.getstatusoutput('setupWorkArea.py')
+   if sc:
+      print out
+      sys.exit(1)
+   
+   orig_dir = os.getcwd()
+   os.chdir('WorkArea/cmt')
+
+   ## msg.info("cmt bro cmt config...")
+   ## sc,out = com.getstatusoutput('cmt bro cmt config')
+   ## if sc:
+   ##    print out
+   ##    sys.exit(1)
+
+   msg.info("creating $CMTCONFIG directories...")
+   sc,out = com.getstatusoutput('cmt bro \"/bin/rm -rf ../$CMTCONFIG; /bin/mkdir -p ../$CMTCONFIG; echo 1\"')
+   if sc:
+      print out
+      sys.exit(1)
+
+   os.chdir(orig_dir)
+
+   msg.info('registering bin dirs...')
+   bin_dirs = register_bin_dirs(opts.input_dir)
+   msg.info('installing symlinks...')
+   symlink_bin_dirs(opts.input_dir, opts.output_dir, bin_dirs)
+
+   for d in ('python', os.environ['CMTCONFIG']):
+      d = osp.join('InstallArea', d)
+      if not osp.exists(d):
+         msg.info('creating [%s] (to prevent CMT bug)...', d)
+         os.makedirs(d)
+   
+
+   ## remember where we put those binary files, in case we later log in on a
+   ## different lxplus node...
+   with open(osp.join(opts.input_dir, ".abootstrap.cfg"), 'w') as f:
+      import socket
+      f.writelines([
+         "[abootstrap]\n",
+         "hostname   = %s\n" % socket.gethostname(),
+         "input-dir  = %s\n" % opts.input_dir,
+         "output-dir = %s\n" % opts.output_dir,
+         ])
+      pass
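+   # The sticky file is a plain ConfigParser file; its content ends up looking
+   # like this (values are illustrative):
+   #   [abootstrap]
+   #   hostname   = lxplus001.cern.ch
+   #   input-dir  = /afs/cern.ch/user/j/jdoe/testarea
+   #   output-dir = /tmp/jdoe/aboot-tmp-testarea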
+   msg.info("## Bye.")
+   
+   sys.exit(0)
diff --git a/Tools/PyUtils/bin/acmd.py b/Tools/PyUtils/bin/acmd.py
new file mode 100755
index 00000000000..7c3f0e78694
--- /dev/null
+++ b/Tools/PyUtils/bin/acmd.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# @file PyUtils.acmd
+# @purpose main command line script for the general purpose athena scripts
+# @author Sebastien Binet
+# @date January 2010
+
+from __future__ import with_statement
+
+__version__ = "$Revision: 276499 $"
+__author__ = "Sebastien Binet"
+__doc__ = "main command line script for the general purpose athena scripts"
+
+import PyUtils.acmdlib as acmdlib
+
+def main():
+    import PyUtils.scripts
+    import PyUtils.Logging as L
+    msg = L.logging.getLogger('Acmd')
+    msg.setLevel(L.logging.INFO)
+    
+    ## if 0:
+    ##     acmdlib.register_file('acmd_plugins.cfg')
+    ## else:
+    ##     import os
+    ##     if os.path.exists('acmd_plugins.py'):
+    ##         execfile('acmd_plugins.py')
+            
+    commands = {}
+    plugins = list(acmdlib.ext_plugins.get(group=acmdlib.ACMD_GROUPNAME))
+    #print plugins
+    for i, plugin in enumerate(plugins):
+        #print i, plugin.name
+        commands[plugin.name] = plugin
+
+    if 1:
+        acmdlib._load_commands()
+
+    parser = acmdlib.ACMD_PARSER
+    args = parser.parse_args()
+
+    msg.info('running sub-command [%s]...', args.command)
+    cmd_name = args.command
+
+    import sys
+    sys_args = sys.argv[1:]
+    if sys_args[0] != cmd_name:
+        # special case of a sub(sub,...) command:
+        # acmd a b c cmd arg1 arg2 ...
+        # -> a.b.c.cmd
+        idx = sys_args.index(cmd_name)
+        cmd_name = '.'.join(sys_args[:idx+1])
+
+    cmd = commands[cmd_name].load()
+    exitcode = 1
+    try:
+        exitcode = cmd(args)
+    except Exception:
+        exitcode = 1
+        import sys
+        print sys.exc_info()[0]
+        print sys.exc_info()[1]
+        raise
+    
+    return exitcode
+
+
+if __name__ == "__main__":
+    import sys
+    sys.exit(main())
+    
diff --git a/Tools/PyUtils/bin/atl-gen-athena-d3pd-reader b/Tools/PyUtils/bin/atl-gen-athena-d3pd-reader
new file mode 100755
index 00000000000..2dc2f4cd15f
--- /dev/null
+++ b/Tools/PyUtils/bin/atl-gen-athena-d3pd-reader
@@ -0,0 +1,800 @@
+#!/usr/bin/env python
+
+#------------
+# std imports
+import os
+import sys
+import textwrap
+
+import argparse
+
+# 3rd-party imports
+# delay ROOT import as it is quite slow...
+ROOT = None
+
+def _is_object_metadata(n):
+    sz = ROOT.D3PD.ObjectMetadata.RANDOM_NAME_POSTFIX_LENGTH+1
+    return len(n) > sz and \
+           n[-sz] == "_"
+
+def collect_objects(d, objs):
+    for k in d.GetListOfKeys():
+        if k.GetClassName() != "TObjString":
+            continue
+        if not _is_object_metadata(k.GetName()):
+            continue
+        obj_name = ROOT.D3PD.ObjectMetadata.objectName(k.GetName())
+        print " >",obj_name
+        # access the variable description
+        kk = "%s;%d"%(k.GetName(), k.GetCycle())
+        ostr = d.Get(kk)
+        if not ostr:
+            print "** could not access key [%s]" % kk
+            return 1
+        #print ostr,type(ostr)
+        md = ROOT.D3PD.RootObjectMetadata()
+        md.setName(obj_name)
+        if not md.read(ostr.GetString().Data()).isSuccess():
+            print "** problem reading metadata for [%s]" % kk
+            return 1
+
+        if not md.checkPrefixes().isSuccess():
+            print "** could not fix prefixes for metadata with name [%s]" % md.name()
+            return 1
+        
+        if md.name() in objs:
+            objs[md.name()].merge(md)
+        else:
+            objs[md.name()] = md
+        pass # keys
+    return 0
+
+
+def merge_objects(objs):
+    res = {}
+    for o in objs:
+        n = o.name()
+        if n in res:
+            res[n].setName(n)
+            res[n].setPrefix(o.prefix())
+            res[n].setContainer(o.container())
+            res[n].merge(o)
+        else:
+            res[n] = o
+            pass
+    return res.values()
+
+def normalize_type(n):
+    tcle = ROOT.TClassEdit
+    return tcle.ShortType(n, tcle.kDropDefaultAlloc)
+
+def normalize_fct_ret_type(n):
+    tcle = ROOT.TClassEdit
+    n = normalize_type(n)
+    ret= tcle.ShortType(n, tcle.kDropDefaultAlloc|tcle.kInnerClass)
+    if tcle.IsSTLCont(ret):
+        return 'const %s&' % ret
+    return ret
+
+def normalize_fct_ref_type(n):
+    tcle = ROOT.TClassEdit
+    n = normalize_type(n)
+    ret= tcle.ShortType(n, tcle.kDropDefaultAlloc|tcle.kInnerClass)
+    return ret
+
+def _is_vect_of_vect(n):
+    tcle = ROOT.TClassEdit
+    n = normalize_type(n)
+    ret= tcle.ShortType(n, tcle.kDropDefaultAlloc|tcle.kInnerClass)
+    return tcle.IsSTLCont(ret)
+    
+def gen_varname(n):
+    n = n.replace(" ", "_").replace(":","_")
+    return "m_"+n
+
+def gen_fctname(n):
+    n = n.replace(" ", "_").replace(":","_")
+    if n[0].isdigit():
+        n = "x"+n
+    return n
+
+def pbuf_is_cont(v):
+    tcle = ROOT.TClassEdit
+    if isinstance(v, basestring):
+        n = v
+    else:
+        n = v.type()
+        pass
+    return _is_vect_of_vect(n)
+
+def pbuf_normalize_type(v):
+    tcle = ROOT.TClassEdit
+    if isinstance(v, basestring):
+        n = v
+    else:
+        n = v.type()
+    is_cont = pbuf_is_cont(n)
+    inner = tcle.ShortType(n, tcle.kDropDefaultAlloc|tcle.kInnerClass)
+    if is_cont:
+        return pbuf_normalize_type(inner)
+    n = normalize_type(inner)
+    return cxx_to_pbuf(n)
+
+def cxx_to_pbuf(n):
+    return {
+        'double': 'double',
+        'float': 'float',
+        'int': 'int32',
+        'long': 'int64',
+        'long int': 'int64',
+        'unsigned int': 'uint32',
+        'unsigned long': 'uint64',
+        'unsigned long int': 'uint64',
+        'long long': 'int64',
+        'unsigned long long': 'uint64',
+        'bool': 'bool',
+        'std::string': 'string',
+        'const char*': 'string',
+        'char*': 'string',
+        'short': 'int32',           # FIXME ?!
+        'unsigned short': 'uint32', # FIXME ?!
+        }.get(n, n)
+
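+# e.g. (illustrative): cxx_to_pbuf('unsigned int') -> 'uint32', while types
+# without an entry (user classes, already-protobuf names) fall through
+# unchanged via the .get(n, n) default.
+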
+def pbuf_pluralize(n, is_cont):
+    if is_cont:
+        # pluralize
+        if n.endswith('y'):
+            n = n[:-1] + "ies"
+        elif n.endswith("s"):
+            n = n + "es"
+        else:
+            n = n + "s"
+    return n
+    
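+# e.g. (illustrative): pbuf_pluralize('electron', True) -> 'electrons',
+# pbuf_pluralize('energy', True) -> 'energies', pbuf_pluralize('mass', True)
+# -> 'masses'; with is_cont=False the name is returned unchanged.
+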
+def pbuf_gen_varname(v):
+    n = gen_varname(v.name())
+    if n.startswith("m_"):
+        n = n[len("m_"):]
+    is_cont = _is_vect_of_vect(v.type())
+    n = pbuf_pluralize(n,is_cont)
+    return n
+
+def gen_header(clsname, outdir, obj):
+    fname = os.path.join(outdir, clsname+".h")
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+        pass
+
+    # file already exists...
+    if os.path.exists(fname):
+        return
+    
+    f = open(fname, "w")
+    vardict = {
+        'classname' : clsname,
+        'CLASSNAME': clsname.upper(),
+        'decl_element_class': '',
+        }
+
+    cls_vars = []
+    cls_fcts = []
+    ovars = ROOT.D3PDMakerReaderDict.to_vector(obj.variables())
+    vardict['add-obj-arg'] = ', std::size_t idx' if obj.container() else ''
+    
+    for v in ovars:
+        if v.doc():
+            fmt = "%(indent)s /// %(cls_var_doc)s"
+            cls_vars.append(fmt % {
+                'indent': " "*2,
+                'cls_var_doc': v.doc(),
+                })
+            cls_fcts.append(fmt % {
+                'indent': " "*2,
+                'cls_var_doc': v.doc(),
+                })
+            pass
+        
+        fmt = "%(indent)s %(cls_var_type)s* %(cls_var_name)s;"
+        cls_vars.append(fmt % {
+            'indent': " "*2,
+            'cls_var_type': normalize_type(v.type()),
+            'cls_var_name': gen_varname(v.name()),
+            })
+
+        tcle = ROOT.TClassEdit
+        is_cont = tcle.IsSTLCont(v.type())
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s %(cls_fct_type)s  %(cls_fct_name)s(%(cls_fct_args)s) const;
+            %(indent)s %(cls_fct_ref_type)s& %(cls_fct_name)s(%(cls_fct_args)s);
+            ''')
+        cls_fcts.append(snippet % {
+            'indent': " "*2,
+            'cls_fct_type': normalize_fct_ret_type(v.type()),
+            'cls_fct_ref_type': normalize_fct_ref_type(v.type()),
+            'cls_fct_name': gen_fctname(v.name()),
+            'cls_fct_args': '' if not is_cont else 'std::size_t idx',
+            })
+        pass
+    vardict['class_vars'] = '\n'.join(cls_vars)
+    vardict['class_fcts'] = '\n'.join(cls_fcts)
+    vardict['class-prefix'] = obj.prefix()
+    print >> f, textwrap.dedent(
+'''\
+// dear emacs, this is -*- c++ -*-
+#ifndef ATH_D3PDREADER_%(CLASSNAME)s_H
+#define ATH_D3PDREADER_%(CLASSNAME)s_H 1
+
+// stl includes
+#include <map>
+#include <vector>
+#include <string>
+
+// fwk includes
+#include "GaudiKernel/ServiceHandle.h"
+#include "StoreGate/StoreGateSvc.h"
+
+// fwd declarations
+class %(classname)s;
+
+class %(classname)s
+{
+   typedef ServiceHandle<StoreGateSvc> StoreGateSvc_t;
+public:
+   %(classname)s(const std::string& prefix = "%(class-prefix)s",
+                 const StoreGateSvc_t& svc = StoreGateSvc_t("StoreGateSvc", "%(classname)s"));
+
+   /// retrieve data from the store
+   StatusCode retrieve() const;
+
+   /// retrieve data from the store
+   StatusCode retrieve();
+
+   /// record data into the store
+   StatusCode record();
+
+   /// add an entry from another object
+   void add_object(const %(classname)s& o, std::size_t idx);
+
+   /// change prefix
+   void setPrefix(const std::string& prefix)
+   { m_prefix = prefix; }
+
+   /// return prefix
+   const std::string& prefix() const
+   { return m_prefix; }
+
+   /// allocate a brand new instance
+   static
+   %(classname)s create(const std::string& prefix,
+                        const StoreGateSvc_t& svc = StoreGateSvc_t("StoreGateSvc", "%(classname)s"));
+                        
+public:
+%(class_fcts)s
+
+private:
+   std::string m_prefix;
+   mutable StoreGateSvc_t m_store;
+   
+%(class_vars)s
+}; // %(classname)s
+
+#endif /* not ATH_D3PDREADER_%(CLASSNAME)s_H */
+'''
+        % vardict)
+
+    f.flush()
+    return
+
+def gen_source(clsname, outdir, obj):
+    fname = os.path.join(outdir, clsname+".cxx")
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+        pass
+
+    # file already exists...
+    if os.path.exists(fname):
+        return
+    
+    f = open(fname, "w")
+    vardict = {
+        'classname' : clsname,
+        'CLASSNAME': clsname.upper(),        
+        'ctor-impl': '',
+        'factory-impl': '',
+        'record-impl': '',
+        'retrieve-const': '',
+        'retrieve-non-const': '',
+        'add-obj-impl': '',
+        'cls-fcts-impl': '',
+        }
+
+    ctor_impl = ['%(classname)s::%(classname)s(const std::string& prefix, const ServiceHandle<StoreGateSvc>& store)' % vardict,
+                 '   : m_prefix(prefix)',
+                 '   , m_store(store)']
+    factory_impl = [
+        '/// allocate a brand new instance',
+        '%(classname)s' % vardict,
+        '%(classname)s::create(const std::string& prefix, const StoreGateSvc_t& svc)' % vardict,
+        '{',
+        '   %(classname)s o(prefix, svc);' % vardict,
+        ]
+    
+    retr_const = [
+        'StatusCode',
+        '%(classname)s::retrieve() const' % vardict,
+        '{',
+        ]
+    
+    retr_non_const = [
+        'StatusCode',
+        '%(classname)s::retrieve()' % vardict,
+        '{',
+        ]
+
+    record_impl = [
+        'StatusCode',
+        '%(classname)s::record()' % vardict,
+        '{',
+        ]
+
+    add_obj_impl = []
+    cls_fcts_impl = []
+    
+    cls_vars = []
+    cls_fcts = []
+    ovars = ROOT.D3PDMakerReaderDict.to_vector(obj.variables())
+
+    if len([v for v in ovars if v.type().startswith('std::vector')]):
+        add_obj_impl = [
+            'void',
+            '%(classname)s::add_object(const %(classname)s& o, std::size_t idx)' % vardict,
+            '{',
+            ]
+    else:
+        add_obj_impl = [
+            'void',
+            '%(classname)s::add_object(const %(classname)s& o, std::size_t /*idx*/)' % vardict,
+            '{',
+            ]
+
+    add_obj_impl_nidx = []
+    add_obj_impl_vidx = None
+    for ii,v in enumerate(ovars):
+        ctor_impl.append('   , %s(NULL)' % gen_varname(v.name()))
+
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s o.%(cls_var_name)s = new %(cls_var_type)s(0);
+            '''
+            )
+        factory_impl.append(snippet % {
+            'indent': " "*2,
+            'cls_var_type': normalize_type(v.type()),
+            'cls_var_name': gen_varname(v.name()),
+            })
+
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s {
+            %(indent)s   std::string key = m_prefix + "%(cls_var_name_str)s";
+            %(indent)s   if (m_store->contains< %(cls_var_type)s >(key)) {
+            %(indent)s      if (!m_store->retrieve((const %(cls_var_type)s*&)%(cls_var_name)s, key).isSuccess()) {
+            %(indent)s         return StatusCode::FAILURE;
+            %(indent)s      }
+            %(indent)s   } else {
+            %(indent)s      ((%(cls_var_type)s*&)%(cls_var_name)s) = NULL;
+            %(indent)s   }
+            %(indent)s }
+            ''' )
+                
+        retr_const.append(snippet % {
+            'indent': " "*2,
+            'cls_var_type': normalize_type(v.type()),
+            'cls_var_name': gen_varname(v.name()),
+            'cls_var_name_str': v.name(),
+            })
+
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s {
+            %(indent)s   std::string key = m_prefix + "%(cls_var_name_str)s";
+            %(indent)s   if (m_store->contains< %(cls_var_type)s >(key)) {
+            %(indent)s      if (!m_store->retrieve((%(cls_var_type)s*&)%(cls_var_name)s, key).isSuccess()) {
+            %(indent)s         return StatusCode::FAILURE;
+            %(indent)s      }
+            %(indent)s   } else {
+            %(indent)s      %(cls_var_name)s = NULL;
+            %(indent)s   }
+            %(indent)s }
+            ''' )
+        retr_non_const.append(snippet % {
+            'indent': " "*2,
+            'cls_var_type': normalize_type(v.type()),
+            'cls_var_name': gen_varname(v.name()),
+            'cls_var_name_str': v.name(),
+            })
+
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s {
+            %(indent)s   std::string key = m_prefix + "%(cls_var_name_str)s";
+            %(indent)s   if (!m_store->record((%(cls_var_type)s*&)%(cls_var_name)s, key).isSuccess()) {
+            %(indent)s      return StatusCode::FAILURE;
+            %(indent)s   }
+            %(indent)s }
+            ''' )
+        record_impl.append(snippet % {
+            'indent': " "*2,
+            'cls_var_type': normalize_type(v.type()),
+            'cls_var_name': gen_varname(v.name()),
+            'cls_var_name_str': v.name(),
+            })
+
+        tcle = ROOT.TClassEdit
+        is_cont = tcle.IsSTLCont(v.type())
+        
+        snippet = textwrap.dedent(
+            '''\
+            %(cls_fct_type)s
+            %(classname)s::%(cls_fct_name)s(%(cls_fct_args)s) const
+            {
+              static %(cls_fct_ref_type)s s_default %(cls_var_default)s;
+              return this->%(cls_var_name)s
+                ? %(do_deref)sthis->%(cls_var_name)s%(cls_fct_args_body)s
+                : s_default;
+            }
+
+            %(cls_fct_ref_type)s&
+            %(classname)s::%(cls_fct_name)s(%(cls_fct_args)s)
+            {
+              static %(cls_fct_ref_type)s s_default %(cls_var_default)s;
+              return this->%(cls_var_name)s
+                ? %(do_deref)sthis->%(cls_var_name)s%(cls_fct_args_body)s
+                : s_default;
+            }
+            ''' % {
+            'indent': " "*2,
+            'classname': clsname,
+            'do_deref': '' if is_cont else '*',
+            'cls_fct_type': normalize_fct_ret_type(v.type()),
+            'cls_fct_ref_type': normalize_fct_ref_type(v.type()),
+            'cls_fct_name': gen_fctname(v.name()),
+            'cls_fct_args': '' if not is_cont else 'std::size_t idx',
+            'cls_fct_args_body': '' if not is_cont else '->at(idx)',
+            'cls_var_name': gen_varname(v.name()),
+            'cls_var_default': '' if is_cont else '= 0',    
+            })
+        cls_fcts_impl.append(snippet)
+
+        # ignore the container size variable
+        if not (v.name() in ('N', 'n')):
+            if is_cont:
+                add_obj_impl_vidx = ii
+                snippet = textwrap.dedent(
+                    '''\
+                    %(indent)sthis->%(cls_var_name)s->push_back(o.%(cls_var_name)s->at(idx));
+                    ''' % {
+                        'indent': " "*2,
+                        'classname': clsname,
+                        'cls_var_name': gen_varname(v.name()),
+                        })
+            else:
+                snippet = textwrap.dedent(
+                    '''\
+                    %(indent)s*this->%(cls_var_name)s = *o.%(cls_var_name)s;
+                    ''' % {
+                        'indent': " "*2,
+                        'cls_var_name': gen_varname(v.name()),
+                        })
+            add_obj_impl.append(snippet)
+        else:
+            add_obj_impl_nidx.append(ii)
+            pass
+        pass # loop over ovars
+
+    for ii in add_obj_impl_nidx:
+        v = ovars[ii]
+        if add_obj_impl_vidx is None:
+            # case for which a block has only a <prefix>_n or <prefix>_N
+            # but no other <prefix>_xyz branch
+            continue
+        vv= ovars[add_obj_impl_vidx]
+        snippet = textwrap.dedent(
+            '''\
+            %(indent)s*this->%(cls_var_name)s = this->%(cls_var_name_v)s->size();
+            ''' % {
+                'indent': " "*2,
+                'cls_var_name': gen_varname(v.name()),
+                'cls_var_name_v': gen_varname(vv.name()),
+                })
+        add_obj_impl.append(snippet)
+        pass
+        
+    ctor_impl.append("{}\n")
+    factory_impl.append("  return o;\n}\n")
+    retr_const.append("  return StatusCode::SUCCESS;\n}\n")
+    retr_non_const.append("  return StatusCode::SUCCESS;\n}\n")
+    record_impl.append("  return StatusCode::SUCCESS;\n}\n")
+    add_obj_impl.append("  return;\n}\n")
+
+    vardict['class_vars'] = '\n'.join(cls_vars)
+    vardict['class_fcts'] = '\n'.join(cls_fcts)
+
+    vardict['ctor-impl'] = '\n'.join(ctor_impl)
+    vardict['factory-impl'] = '\n'.join(factory_impl)
+    vardict['retrieve-const'] = '\n'.join(retr_const)
+    vardict['retrieve-non-const'] = '\n'.join(retr_non_const)
+    vardict['record-impl'] = '\n'.join(record_impl)
+    vardict['add-obj-impl'] = '\n'.join(add_obj_impl)
+    vardict['cls-fcts-impl'] = '\n'.join(cls_fcts_impl)
+    
+    print >> f, textwrap.dedent(
+'''\
+// dear emacs, this is -*- c++ -*-
+// stl includes
+#include <map>
+#include <vector>
+#include <string>
+
+// fwk includes
+#include "GaudiKernel/ServiceHandle.h"
+#include "StoreGate/StoreGateSvc.h"
+#include "SGTools/BuiltinsClids.h"  // to put/get builtins into/from storegate
+#include "SGTools/StlVectorClids.h" // similarly for vectors
+
+// pkg includes
+#include "%(classname)s.h"
+
+/// constructor for %(classname)s
+%(ctor-impl)s
+
+/// factory
+%(factory-impl)s
+
+/// retrieve data from store
+%(retrieve-const)s
+
+/// retrieve data from store
+%(retrieve-non-const)s
+
+/// record data into the store
+%(record-impl)s
+
+/// populate object from a slice of another
+%(add-obj-impl)s
+
+//@{ accessors implementation
+%(cls-fcts-impl)s
+//@}
+'''
+        % vardict)
+
+    f.flush()
+    return
+
+def gen_protobuf(clsname, outdir, obj):
+    if clsname.endswith("D3PDObject"):
+        clsname = clsname[:-len("D3PDObject")]
+    fname = os.path.join(outdir, clsname+".proto")
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+        pass
+
+    # file already exists...
+    if os.path.exists(fname):
+        return
+    
+    f = open(fname, "w")
+    vardict = {
+        'classname' : clsname,
+        }
+
+    print >> f, "package atlas.d3pd;\n"
+    print >> f, "message %(classname)s\n{" % vardict
+    ovars = ROOT.D3PDMakerReaderDict.to_vector(obj.variables())
+    id_nbr = 0
+    for v in ovars:
+        id_nbr += 1
+        is_cont = pbuf_is_cont(v.type())
+        print >> f, \
+        "%(indent)s%(repeated)s %(cls_var_type)s %(cls_var_name)s = %(id)s;" \
+        % {
+            'indent': " "*4,
+            'repeated': 'optional' if not is_cont else 'repeated',
+            'cls_var_type': pbuf_normalize_type(v),
+            'cls_var_name': pbuf_gen_varname(v),
+            'id': id_nbr,
+            }
+    
+    print >> f, "} // message %(classname)s\n\n//EOF" % vardict
+    f.flush()
+    f.close()
+    return
+
+def gen_protobuf_evt(evt_block_descr, outdir):
+    clsname = "Event"
+    fname = os.path.join(outdir, clsname+".proto")
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+        pass
+
+    # file already exists...
+    if os.path.exists(fname):
+        print "** error: file [%s] exists !" % fname
+        return 1
+
+    f = open(fname, "w")
+    vardict = {
+        'classname' : clsname,
+        }
+
+    print >> f, "package atlas.d3pd;\n"
+    for v in evt_block_descr.values():
+        print >> f, 'import "%s.proto";' % v['type']
+    else:
+        print >> f, ""
+    print >> f, "message %(classname)s\n{" % vardict
+    id_cnt = 0
+    for k,v in evt_block_descr.items():
+        id_cnt += 1
+        is_cont = v['is_cont']
+        var_name = k[1]
+        if k[0] != '':
+            var_name = k[0]
+            if var_name.endswith('_'):
+                var_name = var_name[:-len("_")]
+            var_name = pbuf_pluralize(var_name, is_cont)
+        print >> f, \
+    "%(indent)s%(repeated)s %(var_type)s %(var_name)s = %(id)s;" \
+    % {
+        'indent': " "*4,
+        'repeated': 'optional' if not is_cont else 'repeated',
+        'var_type': v['type'],
+        'var_name': var_name,
+        'id': id_cnt,
+        }
+        pass
+    print >> f, "} // message %(classname)s\n\n//EOF" % vardict
+    f.flush()
+    f.close()
+    return 0
+
+def run_protobuf(output):
+    orig_dir = os.getcwd()
+    try:
+        os.chdir(output)
+        import commands, glob
+        rc, pbuf_bin = commands.getstatusoutput("which protoc")
+        if rc:
+            print "** pb while fetching protobuf compiler:\n%s" % pbuf_bin
+            return rc
+        print ":: protobuf compiler: [%s]" % pbuf_bin
+        pbuf_files = glob.glob("*.proto")
+        for pbuf_file in pbuf_files:
+            cmd = " ".join([
+                pbuf_bin, "-I.",
+                "--cpp_out=.", #"-o", pbuf_file[:-len(".proto")]+".pb.cxx",
+                pbuf_file,
+                ])
+            #print "-->",cmd
+            rc, out = commands.getstatusoutput(cmd)
+            if rc:
+                print "** pb while running protoc on [%s]" % pbuf_file
+                print out
+                return rc
+    finally:
+        os.chdir(orig_dir)
+    return 0
+
+def process(fnames, output, do_gen_protobuf=False):
+    import PyCintex; PyCintex.Cintex.Enable()
+    global ROOT
+    import PyUtils.RootUtils as ru
+    ROOT = ru.import_root(batch=True)
+
+    objects = {}
+    for fname in fnames:
+        print ":: processing [%s]..." % fname
+        f = ROOT.TFile.Open(fname)
+        if not f or f.IsZombie():
+            print ":: could not open [%s]" % fname
+            return 1
+
+        # collect metadata directories
+        metadirs = []
+        for k in f.GetListOfKeys():
+            if "Meta" in k.GetName() and k.GetClassName() == "TDirectoryFile":
+                metadirs.append(k.GetName())
+
+        print "-- metadirs:",metadirs
+
+        # collect object metadata for each of these directories
+        for dname in metadirs:
+            d = f.GetDirectory(dname)
+            if not d:
+                print "** could not get directory [%s]" % dname
+                return 1
+            sc = collect_objects(d, objects)
+            if sc:
+                print "** could not collect objects for [%s]" % dname
+                return sc
+
+    merged_objs = merge_objects(objects.values())
+
+    evt_block_descr = {}
+    # generate the sources for each object
+    for obj in merged_objs:
+        ovars = ROOT.D3PDMakerReaderDict.to_vector(obj.variables())
+        #print "--",obj.name(), len(obj.variables()), [v.name() for v in ovars]
+
+        # generate the header...
+        if gen_header(obj.name(), output, obj):
+            print "** pb while generating header for [%s]" % obj.name()
+            return 1
+
+        # generate sources
+        if gen_source(obj.name(), output, obj):
+            print "** pb while generating source for [%s]" % obj.name()
+            return 1
+        # generate protobuf files
+        if do_gen_protobuf:
+            if gen_protobuf(obj.name(), output, obj):
+                print "** pb while generating protobuf for [%s]" % obj.name()
+                return 1
+            pbuf_evt_type = obj.name()
+            if pbuf_evt_type.endswith("D3PDObject"):
+                pbuf_evt_type = pbuf_evt_type[:-len("D3PDObject")]
+            pbuf_evt_name = pbuf_pluralize(pbuf_evt_type, obj.container())
+            evt_block_descr[(obj.prefix(), pbuf_evt_name)] = {
+                'type': pbuf_evt_type,
+                'is_cont': obj.container(),
+                }
+            if 0:
+                print obj.name(), "prefix='%s'"%obj.prefix(),
+                print 'container:',obj.container()
+
+    if do_gen_protobuf:
+        if gen_protobuf_evt(evt_block_descr, output):
+            print "** pb while generating protobuf for [Event]"
+            return 1
+        if run_protobuf(output):
+            print "** pb while generating c++ files from .proto"
+            return 1
+    return 0
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='generate d3pd objects for use in Athena, '+ \
+                    'from metadata contained in d3pd n-tuples'
+        )
+    parser.add_argument('fnames',
+                        type=str,
+                        nargs='+',
+                        help='input d3pd files')
+    parser.add_argument('--protobuf',
+                        default=False,
+                        action='store_true',
+                        help='switch to enable protobuf files generation.'
+                        )
+    parser.add_argument('--output-dir', '-o',
+                        type=str,
+                        default='code',
+                        help='output directory containing the generated code'
+                        )
+    args = parser.parse_args()
+    print ":"*80
+    print ":: generating d3pd-reader from:"
+    for f in args.fnames:
+        print ":: - %s" % f
+        pass
+    sc = process(args.fnames, args.output_dir, args.protobuf)
+    print ":: done [sc=%s]" % sc
+    print ":"*80
+    return sc
+
+
+if __name__ == "__main__":
+    sc = main()
+    sys.exit(sc)
+    
diff --git a/Tools/PyUtils/bin/avn.py b/Tools/PyUtils/bin/avn.py
new file mode 100755
index 00000000000..b9cfa04e6fe
--- /dev/null
+++ b/Tools/PyUtils/bin/avn.py
@@ -0,0 +1,291 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file avn.py
+# @purpose an atlas oriented SVN helper script
+# @date October 2009
+
+# credits: Scott Snyder provided the first version.
+
+__doc__ = """
+an atlas oriented SVN helper script
+-----------------------------------
+
+examples of usage:
+$ cd Control/AthenaServices
+$ avn ci -m 'some interesting comment'
+$ avn tag AthenaServices-99-99-99 -m 'some interesting comment'
+$ avn diff @AthenaServices-66-66-66 @AthenaServices-99-99-99
+$ avn diff @AthenaServices-66-66-66/src @AthenaServices-99-99-99/src
+$ avn diff @AthenaServices-66-66-66/src/f.cxx @AthenaServices-99-99-99/src/f.cxx
+$ avn lstags
+$ avn switch @TRUNK
+$ avn switch @AthenaServices-01-33-06-branch
+$ avn up
+$ avn -n some_command # => dryrun (does not execute the command)
+
+"""
+__version__ = "$Revision: 508489 $"
+__author__ = "Sebastien Binet"
+
+import sys
+import os
+import commands
+import re
+import string
+import subprocess
+
+tagchars = string.ascii_letters + string.digits + '-' + '_'
+
+# patterns taken from atlas svn-hooks...
+_is_tag_name_valid_tc = re.compile("(^[A-Za-z_]+-[A-Za-z_]+-[0-9]{2}-[0-9]{2}-[0-9]{2}$)|(^[A-Za-z_]+-[A-Za-z_]+-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}$)").match
+_is_tag_name_valid =    re.compile("(^[A-Za-z0-9_]+-[0-9]{2}-[0-9]{2}-[0-9]{2}$)|(^[A-Za-z0-9_]+-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}$)").match
+        
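+# e.g. (illustrative) both "PyUtils-00-13-22" and "PyUtils-00-13-22-01" satisfy
+# _is_tag_name_valid; the _tc variant additionally expects the two leading
+# alphabetic groups used by TagCollector-style names.
+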
+class AvnObject(object):
+    """dummy object to hold options and variables
+    """
+
+    @property
+    def url(self):
+        url = None
+        try:
+            url = self._url
+        except AttributeError:
+            lines = commands.getoutput('svn info').split('\n')
+            for l in lines:
+                if l.startswith('URL: '):
+                    url = self._url = l[5:]
+                    break
+        if url is None:
+            err = "Can't find svn URL."
+            print >> sys.stderr, err
+            raise RuntimeError(err)
+        return url
+
+    @property
+    def root(self):
+        root = None
+        try:
+            root = self._root
+        except AttributeError:
+            lines = commands.getoutput('svn info').split('\n')
+            for l in lines:
+                if l.startswith('Repository Root: '):
+                    root = self._root = l[17:]
+                    break
+        if root is None:
+            err = "Can't find svn repository root; will assume $SVNROOT."
+            print >> sys.stderr, err
+            root = self._root = os.environ['SVNROOT']
+        return root
+
+    @property
+    def testarea(self):
+        try:
+            return self._testarea
+        except AttributeError:
+            if not 'TestArea' in os.environ:
+                err = 'TestArea is not defined.'
+                print >> sys.stderr, err
+                raise RuntimeError(err)
+            self._testarea = os.path.realpath(os.environ['TestArea'])
+        return self._testarea
+
+    @property
+    def cwd(self):
+        cwd = os.getcwd()
+        if not cwd.startswith(self.testarea):
+            err = 'Current directory is not within TestArea.'
+            print >> sys.stderr, err
+            raise RuntimeError(err)
+        return cwd
+    
+    @property
+    def packpath(self):
+        try:
+            return self._packpath
+        except AttributeError:
+            testarea = self.testarea
+            packpath = self.cwd[len(testarea):]
+            if packpath.startswith('/'):
+                packpath = packpath[1:]
+
+            path = None
+            while not os.path.exists (os.path.join (testarea, packpath, 'cmt')):
+                comp = os.path.basename (packpath)
+                packpath = os.path.dirname (packpath)
+                if path is None:
+                    path = comp
+                else:
+                    path = os.path.join (comp, path)
+                if packpath == '':
+                    err = "Can't find cmt directory."
+                    print >> sys.stderr, err
+                    raise RuntimeError(err)
+            self._path = path
+            self._packpath = packpath
+        return self._packpath
+
+    @property
+    def path(self):
+        packpath = self.packpath # side-effect will compute self._path
+        return self._path
+        
+    pass # class AvnObject
+
+
+def mungtag(s, allow_use_cmt=False):
+    if allow_use_cmt:
+        try:
+            return mungtag(s)
+        except Exception:
+            import PyCmt.Cmt as Cmt
+            cmt = Cmt.CmtWrapper()
+            pkg = cmt.find_pkg(s.split('-')[0])
+            if pkg:
+                avn._packpath = os.path.join(pkg.path, pkg.name)
+                avn._path = None
+            
+    if s in ('HEAD','TRUNK'):
+        thisurl = os.path.join (avn.root, avn.packpath, 'trunk')
+    elif s == 'THIS':
+        thisurl = avn.url
+    elif s.endswith ('-branch'):
+        thisurl = os.path.join (avn.root, avn.packpath, 'branches', s)
+    else:
+        thisurl = os.path.join (avn.root, avn.packpath, 'tags', s)
+    if not (avn.path is None):
+        thisurl = os.path.join (thisurl, avn.path)
+    return thisurl
+
+def mungarg(s,do_mung=True):
+    if do_mung:
+        beg = 0
+        while True:
+            beg = s.find ('@', beg)
+            if beg < 0: break
+            end = beg + 1
+            while end < len(s) and s[end] in tagchars:
+                end += 1
+            if beg == end:
+                beg += 1
+                continue
+            s = s[:beg] + mungtag(s[beg+1:end], allow_use_cmt=True) + s[end:]
+    if s.find(' ') >= 0:
+        s = "'" + s + "'"
+    return s
+
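+# Illustrative expansion (repository URL is hypothetical): inside Tools/PyUtils,
+# "avn diff @PyUtils-00-13-22 @TRUNK" has each @-token rewritten by mungtag()
+# into "<svn-root>/Tools/PyUtils/tags/PyUtils-00-13-22" and
+# "<svn-root>/Tools/PyUtils/trunk" before the svn command is assembled below.
+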
+avn = AvnObject()
+"""the one and only Atlas SVN helper object"""
+del AvnObject
+
+### ---------------------------------------------------------------------------
+### command functions
+
+def avn_help():
+    print __doc__
+    print "normal svn help output follows"
+    print "-"*80
+    print
+    return
+
+def avn_tag(args):
+    cmd_args = args[:]
+    for i,tag in enumerate(cmd_args):
+        if tag[0] == '-':
+            if tag.startswith(('-r', '--revision')):
+                has_r_switch = True
+            continue
+        if tag.find(':') < 0:
+            if tag[0] == '@':
+                tag = tag[1:]
+            
+            if _is_tag_name_valid(tag):
+                tag = mungtag(tag)
+                cmd_args[i] = tag
+                pass
+            pass
+        pass
+    cmd_args = ['cp', avn.url,] + cmd_args[1:]
+    return cmd_args
+
+def avn_lstags(args):
+    cmd_args = ['ls', os.path.join (avn.root, avn.packpath, 'tags'),
+                os.path.join (avn.root, avn.packpath, 'branches')]
+    return cmd_args
+
+def avn_ls(args):
+    cmd_args = args[:]
+    if cmd_args[1].find('//') < 0:
+        cmd_args[1] = os.path.join (avn.root, avn.packpath, cmd_args[1])
+    return cmd_args
+
+def avn_diff(args):
+    cmd_args = args[:]
+    ## if cmd_args[1][0] != '-' and cmd_args[2][0] != '-':
+    ##     rev_ref = mungtag(cmd_args[1], allow_use_cmt=True)
+    ##     rev_chk = mungtag(cmd_args[2], allow_use_cmt=True)
+    ##     if len(cmd_args) == 4:
+    ##         file_or_dir = cmd_args[3]
+    ##         rev_ref = '/'.join([rev_ref, file_or_dir])
+    ##         rev_chk = '/'.join([rev_chk, file_or_dir])
+    ##     cmd_args[:] = ['diff', rev_ref, rev_chk]
+    return cmd_args
+
+def main(args):
+    import os
+    
+    dryrun = False
+    if len(args)>0 and args[0] == '-n':
+        dryrun = True
+        args = args[1:]
+
+    if not os.environ.has_key ('TestArea'):
+        print >> sys.stderr, 'TestArea is not defined.'
+        return 1
+
+    avn.dryrun = dryrun
+    
+    ###
+
+    cmd_args = args[:]
+
+    ## command dispatch...
+    if '-h' in args or '--help' in args:
+        avn_help()
+        
+    if len(args)>=2 and args[0] == 'tag':
+        cmd_args = avn_tag(args)
+        
+    elif len(args)>=1 and args[0] == 'lstags':
+        cmd_args = avn_lstags(args)
+        pass
+
+    if len(args) >= 2 and args[0] == 'ls':
+        cmd_args = avn_ls(args)
+
+    if len(args) >= 3 and args[0] == 'diff':
+        cmd_args = avn_diff(args)
+
+    args = ['svn']
+    do_mung = True # remember if the next arg has to be munged or not
+    for s in cmd_args:
+        args.append(mungarg(s,do_mung=do_mung))
+        do_mung = True
+        if s in ('-m', '--message'):
+            do_mung = False
+
+    cmd = subprocess.list2cmdline(args)
+    print cmd
+    sc = 0
+    if not avn.dryrun:
+        sc = subprocess.call(args)
+
+    return sc
+
+if __name__ == "__main__":
+    import sys
+    args = sys.argv[1:]
+    sys.exit(main(args))
+    
diff --git a/Tools/PyUtils/bin/build_cmt_pkg_db.py b/Tools/PyUtils/bin/build_cmt_pkg_db.py
new file mode 100755
index 00000000000..e207f677100
--- /dev/null
+++ b/Tools/PyUtils/bin/build_cmt_pkg_db.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    PyUtils/bin/build_cmt_pkg_db.py
+# @purpose: build the static list of packages (and their dependencies) of a
+#           given project
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    June 2008
+#
+# @example:
+#  build_cmt_pkg_db AtlasCore
+#  build_cmt_pkg_db -p AtlasCore -o atlas_core_cmt_db.pkl
+#  build_cmt_pkg_db --project AtlasCore -o atlas_core_cmt.db
+
+__version__ = "$Revision: 1.2 $"
+
+from optparse import OptionParser
+
+import os, sys, commands
+
+import PyCmt.Logging as L
+from PyCmt import Cmt
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage='usage: %prog [options] [-p] ProjectName [[-o] OutputDb]')
+    p = parser.add_option
+    p('-p', '--project',
+      dest='project',
+      help='name of the project to inspect and build the package-db')
+
+    p('-o', '--output',
+      dest='output',
+      help='name of the output file in which to store the package-db (shelve)')
+
+    p('-l', '--level',
+      dest='log_lvl',
+      default='INFO',
+      help='logging level (aka verbosity)')
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.project = args[0]
+        pass
+
+    if len(args) > 1 and args[1][0] != "-":
+        options.output = args[1]
+        pass
+
+    if options.project is None:
+        str(parser.print_help() or '')
+        raise SystemExit(1)
+
+    if options.output is None:
+        options.output = 'proj_%s_cmt_pkg_shelve.db' % options.project.lower()
+    options.output = os.path.expanduser (options.output)
+    options.output = os.path.expandvars (options.output)
+    options.output = os.path.realpath   (options.output)
+    
+
+    options.log_lvl = options.log_lvl.upper()
+    if hasattr(L.logging, options.log_lvl):
+        lvl = getattr(L.logging, options.log_lvl)
+    else:
+        print "*"*80
+        print "BootStrap: Unknown logging level [%s] !!" % options.log_lvl
+        print "BootStrap: Defaulting to [INFO]..."
+        print "*"*80
+        lvl = L.logging.INFO
+
+    cmt = Cmt.CmtWrapper(lvl)
+    
+    msg = cmt.msg
+    msg.info ('building dependencies...')
+
+##     _allowed_output_db_formats = ('.pkl',)
+##     _ext = os.path.splitext (options.output)[1]
+##     if not _ext in _allowed_output_db_formats:
+##         msg.error ('unknown output-db format: [%s]', _ext)
+##         msg.error ('allowed formats are %r', _allowed_output_db_formats)
+##         raise SystemExit(1)
+
+##     if _ext == '.pkl':
+##         try:
+##             import cPickle as pickle
+##         except ImportError:
+##             import pickle
+##         ofile = open (options.output, 'w')
+##     else:
+##         pass
+    if os.path.exists (options.output):
+        os.unlink (options.output)
+    import shelve
+    out_db = shelve.open (options.output)
+    msg.info ('project used: %sRelease', options.project)
+    msg.info ('output-db:    %s',        options.output)
+
+    import tempfile,atexit,shutil
+    tmp_root = tempfile.mkdtemp()
+    atexit.register (shutil.rmtree, tmp_root)
+    cwd = os.getcwd()
+    os.chdir (tmp_root)
+
+    cmt_tmp_dir = os.path.join (os.path.realpath (tmp_root),
+                                'Dep%s'%options.project,
+                                'cmt')
+    if not os.path.exists (cmt_tmp_dir):
+        os.makedirs (cmt_tmp_dir)
+
+    os.chdir (cmt_tmp_dir)
+
+    proj_releases  = [cmt.project_release(options.project)]
+    proj_releases += [cmt.project_release(d)
+                      for d in cmt.project_deps(options.project)]
+    msg.debug('==> proj_releases: %s', proj_releases)
+    
+    req = open ('requirements', 'w')
+    req.writelines (os.linesep.join ([
+        "package Dep%s" % options.project,
+        "",
+        "author AtlasCollaboration",
+        ""
+        "".join( [ "use %s *\n" % p for p in proj_releases ] ),
+        ""
+        ]))
+    req.close()
+    sc, out = commands.getstatusoutput ('%s config'%cmt.bin)
+    if sc != 0:
+        err = "*** problem running [cmt config] ! ***\n%s" % out
+        raise RuntimeError(err)
+    else:
+        msg.debug ('[cmt config] cmd was OK')
+        
+    sc, out = commands.getstatusoutput ('%s show uses >| uses.%s.cmt'%\
+                                        (cmt.bin, options.project))
+    if sc != 0:
+        err = "*** problem running [cmt show uses] ! ***\n%s"%out
+        raise RuntimeError(err)
+    else:
+        msg.debug ('[cmt show uses >| uses.%s.cmt] cmd was OK',
+                   options.project)
+
+    msg.info ('building package db...')
+    pkg_db = Cmt.buildPkgDb ('uses.%s.cmt'%options.project, msg)
+
+    msg.info ('building dependency graph...')
+    db = Cmt.buildDepGraph ('uses.%s.cmt'%options.project, pkg_db, msg)
+    
+    os.chdir(cwd)
+    out_db[options.project] = db
+    out_db.close()
+
+    msg.info ('nbr of packages: %i', len(db.pkgs()))
+##     pkgs = db.pkgs().keys()[:10]
+##     for i in pkgs:
+##         msg.info('\t%s',i)
+    raise SystemExit(0)
+
diff --git a/Tools/PyUtils/bin/checkFile.py b/Tools/PyUtils/bin/checkFile.py
new file mode 100755
index 00000000000..d3f96dfbc12
--- /dev/null
+++ b/Tools/PyUtils/bin/checkFile.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    checkFile.py
+# @purpose: read a POOL file and dump its content.
+#           Inspired by the CheckESD.C ROOT macro from Fredrik Akesson.
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    August 2005
+#
+# @example:
+# @code
+# checkFile aod.pool.root
+# checkFile /castor/cern.ch/user/j/johndoe/aod.pool.root
+# checkFile somedir/*/*.pool
+# @endcode
+#
+
+__version__ = "$Revision: 1.5 $"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [-f] my.file.pool")
+    p = parser.add_option
+    p( "-f",
+       "--file",
+       dest = "fileName",
+       help = "The path to the POOL file to analyze" )
+    p( "-d",
+       "--detailed-dump",
+       action  = "store_true",
+       dest    = "doDetailedDump",
+       default = False,
+       help = "Switch to activate or not a detailed dump of each TTree in the POOL file" )
+    p( "--sort-fct",
+       dest    = "sortFctName",
+       default = "diskSize",
+       help = "Sorting function used to list containers (allowed are: diskSize, memSize, name)" )
+    p( "-o",
+       "--output",
+       dest = "outFileName",
+       default = None,
+       help = "Name of the output file which will contain the informations gathered during checkFile processing. These informations will be stored into a python-shelve file." )
+    p( "--fast",
+       dest = "fastMode",
+       default = False,
+       action  = "store_true",
+       help = "Switch to enable the fast mode of checkFile.py (memory size will not be accurate -AT ALL-)"
+       )
+    p( "--detailed-branch-size",
+       dest = "super_detailed_branch_sz",
+       default = False,
+       action  = "store_true",
+       help = "Switch to enable a very detailed computation of the branch sizes (computed from the basket length) [SLOW]"
+       )
+    (options, args) = parser.parse_args()
+
+    fileNames = []
+    
+    if len(args) > 0:
+        fileNames = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.fileName == None and len(fileNames) == 0:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    if options.fileName != None:
+        fileName = os.path.expandvars(os.path.expanduser(options.fileName))
+        fileNames.append(fileName)
+
+    fileNames = set( fileNames )
+    sc = 0
+    for fileName in fileNames:
+        try:
+            import PyUtils.PoolFile as PF
+            PF.PoolOpts.FAST_MODE = options.fastMode
+            PF.PoolOpts.SUPER_DETAILED_BRANCH_SZ = options.super_detailed_branch_sz
+            poolFile = PF.PoolFile( fileName )
+            poolFile.checkFile( sorting = options.sortFctName )
+            if options.doDetailedDump:
+                dumpFile = os.path.basename(fileName)+ ".txt"
+                print "## dumping details into [%s]" % dumpFile
+                poolFile.detailedDump( dumpFile )
+            if options.outFileName:
+                outFileName = options.outFileName
+                print "## saving checkFile report into [%s]..." % outFileName
+                poolFile.saveReport( outFileName )
+        except Exception, e:
+            print "## Caught exception [%s] !!" % str(e.__class__)
+            print "## What:",e
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            sc = 1
+            pass
+
+        except :
+            print "## Caught something !! (don't know what)"
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            sc = 10
+            pass
+        if len(fileNames) > 1:
+            print ""
+        pass # loop over fileNames
+    
+    print "## Bye."
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/checkPlugins.py b/Tools/PyUtils/bin/checkPlugins.py
new file mode 100755
index 00000000000..49ccfa1fd98
--- /dev/null
+++ b/Tools/PyUtils/bin/checkPlugins.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+## @author: Sebastien Binet
+## @file : checkPlugins.py
+## @purpose: a script to check the definitions of plugins across multiple
+##           so-called 'rootmap' files
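+##
+## @example (illustrative invocations; the library name is only a placeholder):
+##   checkPlugins.py --capabilities libAthenaServices.so
+##   checkPlugins.py --check-all-dups --detailed-dump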
+
+__version__ = "$Revision: 1.3 $"
+__author__  = "Sebastien Binet"
+
+import os
+import sys
+from PyUtils import Dso
+
+_suppression_dct = {
+    'TMath' : ('libCore.so', 'libMathCore.so'),
+    'string': ('libGaudiKernelDict.so',
+               'libCore.so',
+               'liblcg_PyCoolDict.so',
+               'libSTLAddRflx.so'),
+    'vector<vector<double> >': ('libMathCore.so',
+                                'libAtlasSTLAddReflexDict.so'),
+    'RooStats': ('libHistFactory.so',
+                 'libRooStats.so'),
+    }
+
+def _currentProject():
+    return os.environ.get('AtlasProject')
+
+
+def printDb( db, detailedDump = False ):
+    if detailedDump : fct = lambda x: x
+    else:             fct = os.path.basename
+    keys = db.keys()
+    keys.sort()
+    for k in keys:
+        print "%s:" % k
+        libs = db[k]
+        libs.sort()
+        for lib in libs:
+            print "  ",fct(lib)
+    return
+
+if __name__ == "__main__":
+
+    from optparse import OptionParser
+    parser = OptionParser(usage="usage: %prog [options]")
+    parser.add_option(
+        "--capabilities",
+        dest = "capabilities",
+        default = None,
+        help = "Dump the capabilities of a given library (ex: libAthenaServices.so)"
+        )
+    parser.add_option(
+        "--dups",
+        dest = "checkDups",
+        default = None,
+        help = "Check if there is any duplicates among dictionaries for a given library"
+        )
+    parser.add_option(
+        "--dump-content",
+        dest = "dumpContent",
+        action = "store_true",
+        default = False,
+        help = "Dump the content of all the known plugins (dicts. and components)"
+        )
+    parser.add_option(
+        "--dso",
+        dest = "dumpDso",
+        action = "store_true",
+        default = False,
+        help = "Dump all the dsomap/rootmap files known to the Dso repository"
+        )
+    parser.add_option(
+        "--libs",
+        dest = "dumpLibs",
+        action = "store_true",
+        default = False,
+        help = "Dump all the libraries known to the Dso repository"
+        )
+    parser.add_option(
+        "--check-dict-dups",
+        action = "store_true",
+        default = False,
+        dest = "checkDictDuplicates",
+        help = "Check if there is any duplicates among dictionaries"
+        )
+    parser.add_option(
+        "--check-pf-dups",
+        action = "store_true",
+        default = False,
+        dest = "checkPfDuplicates",
+        help = "Check if there is any duplicates among components declared to the PluginSvc"
+        )
+    parser.add_option(
+        "--check-all-dups",
+        dest = "checkAllDuplicates",
+        action = "store_true",
+        default = False,
+        help = "Check dictionaries *and* components"
+        )
+    parser.add_option(
+        "--detailed-dump",
+        action = "store_true",
+        dest = "detailedDump",
+        default = False,
+        help = "Performs a detailed dump if duplicates are found"
+        )
+    parser.add_option(
+        "--pedantic",
+        action = "store_true",
+        dest = "isPedantic",
+        default = False,
+        help = "Pedantic mode: if a component is found in 2 libraries which have the same name (usual case of a developer working on a (set of) package(s)), it is still being reported as being duplicated"
+        )
+    parser.add_option(
+        "-l",
+        "--level",
+        dest = "logLvl",
+        default = "INFO",
+        help = "Logging level (aka verbosity)"
+        )
+
+    (options, args) = parser.parse_args()
+
+    print ":"*80
+    print "::: checkPlugins :::"
+    sc = 0
+    dsoDb = Dso.DsoDb()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.capabilities = args[0]
+        pass
+    
+    if options.capabilities:
+        libName = options.capabilities
+        try:
+            capabilities = dsoDb.capabilities(libName)
+            print "::: capabilities of [%s]" % libName
+            print os.linesep.join( [ "  "+str(c) for c in capabilities ] )
+        except ValueError, err:
+            sc = 1
+            pass
+
+    if options.checkDups:
+        libName = options.checkDups
+        try:
+            print "::: checking duplicates for [%s]..." % libName
+            dups = dsoDb.duplicates(libName, pedantic = options.isPedantic)
+            for k in dups:
+                print " -",k
+                print os.linesep.join( [ "  "+str(v) for v in dups[k] ] )
+            if len(dups.keys())>0: sc = 1
+        except ValueError, err:
+            sc = 1
+            pass
+        
+    if options.dumpContent:
+        print "::: dumping content of all known plugins..."
+        entries = dsoDb.content( pedantic = options.isPedantic )
+        printDb(entries, options.detailedDump)
+        print "::: known entries:",len(entries.keys())
+        
+    if options.dumpLibs:
+        print "::: dumping all known libraries..."
+        libs = dsoDb.libs(options.detailedDump)
+        for lib in libs:
+            print " -",lib
+        print "::: known libs:",len(libs)
+        
+    if options.dumpDso:
+        print "::: dumping all known dsomap/rootmap files..."
+        dsoFiles = [ dso for dso in dsoDb.dsoFiles]
+        dsoFiles.sort()
+        for dsoFile in dsoFiles:
+            if not options.detailedDump: dsoFile = os.path.basename(dsoFile)
+            print " -",dsoFile
+        print "::: known dsos:",len(dsoFiles)
+        
+    if options.checkDictDuplicates or options.checkAllDuplicates:
+        print ":: checking dict. duplicates..."
+        dups = dsoDb.dictDuplicates( pedantic = options.isPedantic )
+        # restrict to just this project
+        currProj = _currentProject()
+        restrictedDups = {}
+        for label, libPaths in dups.items():
+            paths = [l for l in libPaths if ('/%s/' % currProj) in l]
+            if paths:
+                restrictedDups[label] = paths
+        dups = restrictedDups
+
+        sc = 0
+        suppression_log = []
+        for k in dups:
+            v = dups[k]
+
+            # mark as error only if it isn't a known dup'
+            if k in _suppression_dct:
+                suppressed = [os.path.basename(ii) in _suppression_dct[k]
+                              for ii in v]
+                if all(suppressed):
+                    msg = "---> ignoring [%s]" % k
+                    suppression_log.append(k[:])
+                    #print msg
+                    pass
+                else:
+                    # that's a new one !
+                    sc = 1
+            else:
+                # that's a new one !
+                sc = 1
+                #print "---> NOT ignoring [%s]" % k
+        printDb(dups, options.detailedDump)
+        if len(suppression_log):
+            print "-"*40
+            print "## ignoring the following dups':"
+            for k in suppression_log:
+                print " -",k
+            print "-"*40
+        print "## all dups:",len(dups.keys())
+        print "##     dups:",len(dups.keys())-len(suppression_log)
+    if options.checkPfDuplicates or options.checkAllDuplicates:
+        print ":: checking (plugin factories) components duplicates..."
+        dups = dsoDb.pfDuplicates( pedantic = options.isPedantic )
+        if len(dups.keys()) > 0: sc = 1
+        printDb(dups, options.detailedDump)
+        print "## dups:",len(dups.keys())
+
+    if sc != 0: print ":: ERROR !!"
+    else:       print ":: All good."
+
+    print ":"*80
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/checkSG.py b/Tools/PyUtils/bin/checkSG.py
new file mode 100755
index 00000000000..5e5a665ee06
--- /dev/null
+++ b/Tools/PyUtils/bin/checkSG.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    checkSG.py
+# @purpose: read a POOL file and dump the DataHeader's content
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    May 2008
+#
+# @example:
+# @code
+# checkSG aod.pool.root
+# checkSG /castor/cern.ch/user/j/johndoe/aod.pool.root
+# checkSG somedir/*/*.pool
+# @endcode
+#
+
+__version__ = "$Revision: 1.1 $"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [-f] my.file.pool")
+    parser.add_option( "-f",
+                       "--file",
+                       dest = "fileName",
+                       help = "The path to the POOL file to analyze" )
+    parser.add_option( "-o",
+                       "--output",
+                       dest = "outFileName",
+                       default = None,
+                       help = "Name of the output file which will contain the informations gathered during checkSG processing. These informations will be stored into a python-shelve or an ASCII/py file (depending on the extension: .pkl,.dat -> shelve; everything else -> ASCII/py)" )
+    
+    (options, args) = parser.parse_args()
+
+    fileNames = []
+    
+    if len(args) > 0:
+        fileNames = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.fileName == None and len(fileNames) == 0:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    if not (options.fileName is None):
+        fileName = os.path.expandvars(os.path.expanduser(options.fileName))
+        fileNames.append(fileName)
+
+    fileNames = set( fileNames )
+    sc = 0
+    for fileName in fileNames:
+        try:
+            from AthenaCommon.KeyStore import loadKeyStoreFromPoolFile
+            print "## checking [%s]..."%fileName
+            ks = loadKeyStoreFromPoolFile(keyStore=os.path.basename(fileName),
+                                          pool_file=fileName,
+                                          label='inputFile')
+            print "="*80
+            print "%40s%s%-40s" % ("Container type", " | ","StoreGate keys")
+            print "%40s%s%-40s" % ("-"*40, "-+-", "-"*(40-3))
+            for name,sgkeys in ks.inputFile.dict().items():
+                print "%40s%s%-40s" % (name, " | ", ', '.join(sgkeys))
+            print "="*80
+            if options.outFileName:
+                osp = os.path
+                outFileName = options.outFileName
+                outFileName = osp.expanduser(outFileName)
+                outFileName = osp.expandvars(outFileName)
+                print "## saving checkSG report into [%s]..." % outFileName
+                if os.path.splitext(outFileName)[1] in ('.pkl', '.dat'):
+                    # we explicitely import 'bsddb' to try to always
+                    # get that particular backend for the shelve...
+                    import bsddb
+                    import shelve
+                    if os.path.exists(outFileName):
+                        os.remove(outFileName)
+                    db = shelve.open(outFileName)
+                    db['eventdata_items'] = ks.inputFile.dict()
+                    db.close()
+                else:
+                    ks.write(outFileName, label='inputFile')
+        except Exception, e:
+            print "## Caught exception [%s] !!" % str(e.__class__)
+            print "## What:",e
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            sc = 1
+            pass
+
+        except :
+            print "## Caught something !! (don't know what)"
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            sc = 10
+            pass
+        if len(fileNames) > 1:
+            print ""
+        pass # loop over fileNames
+    
+    print "## Bye."
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/checkTP.py b/Tools/PyUtils/bin/checkTP.py
new file mode 100755
index 00000000000..368cf721982
--- /dev/null
+++ b/Tools/PyUtils/bin/checkTP.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    checkTP.py
+# @purpose: dump the layout of a class (data members and bases)
+#           Inspired by SealDictTest/DictClassCheck.py from RD.
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    September 2006
+#
+# @example:
+#
+# python checkTP.py TruthParticle
+#
+# if checkTP.py has been made 'chmod +x' one can just do:
+# ./checkTP.py CaloCellContainer
+
+import user
+import sys
+import os
+import PyCintex
+
+__version__ = "$Revision: 1.3 $"
+__author__  = "Sebastien Binet"
+
+S   = 4 # SCOPED
+SF  = 5 # SCOPED|FINAL
+SQ  = 6 # SCOPED|QUALIFIED
+SFQ = 7 # SCOPED|FINAL|QUALIFIED
+DICTSCOPE = SF
+
+_cpp_builtins = (
+    'char',      'unsigned char',      'signed char',
+    'short',     'unsigned short',
+    'int',       'unsigned int',
+    'long',      'unsigned long',
+    'long long', 'unsigned long long', 'ulonglong',
+    'float',
+    'double',
+    'bool',
+    )
+
+class DataMember(object):
+    def __init__(self, offset, name, _type):
+        self.offset = offset
+        self.name   = name
+        self.type   = _type
+        return
+    
+class FctMember(object):
+    def __init__(self, name, _type):
+        self.name   = name
+        self.type   = _type
+        return
+    
+class Columbo(object):
+
+    def __init__(self):
+        object.__init__(self)
+        self.__initialize()
+
+        self.report = []
+        return
+
+    def __initialize(self):
+
+        PyCintex.Cintex.Enable()
+        
+        # global name space
+        self.gbl = gbl = PyCintex.Namespace('')
+        
+        # load reflex
+        _load_dict = PyCintex.loadDict
+        _load_dict ('ReflexRflx')
+        
+        # Create the Reflex::Type class
+        print "...creating Reflex::Type class..."
+        _rflx = PyCintex.makeNamespace ('Reflex')
+        if not _rflx:
+            _rflx = PyCintex.makeNamespace ('ROOT::Reflex')
+        _rflx_type = _rflx.Type.ByName
+        self.rflxType = _rflx.Type
+
+        return
+
+    def loadDicts(self, klassName):
+        klassNames = [klassName]
+        print "## loading dictionary... [%s]" % klassName
+
+        ## protect against STL internals...
+        if klassName.startswith("std::_"):
+            return klassNames
+            #return []
+
+        ## protect against builtins
+        if klassName in _cpp_builtins:
+            return klassNames
+        
+        loaded = False
+        try:
+            loaded = getattr (self.gbl, klassName)
+        except:
+            print "Error loading dict. for [%s]" % klassName
+        if not loaded:
+            print "Failed to load dict for [%s]" % klassName
+            return klassNames
+        klass = self.rflxType.ByName(klassName)
+
+        if not klass.IsStruct() and not klass.IsClass():
+            return klassNames
+        
+        for i in range(klass.BaseSize()):
+            baseKlassName = klass.BaseAt(i).Name(DICTSCOPE)
+            klassNames.extend (self.loadDicts(baseKlassName))
+            pass
+##         for i in xrange(klass.DataMemberSize()):
+##             mbr = klass.DataMemberAt(i).TypeOf().Name(DICTSCOPE)
+##             klassNames.append (self.loadDicts(mbr))
+        return klassNames
+    
+    def dumpDataMembers(self, klass):
+        dataMembers = []
+        for i in range(klass.DataMemberSize()):
+            d = klass.DataMemberAt(i)
+            dataMembers.append( DataMember( d.Offset(),
+                                            d.Name(SFQ),
+                                            d.TypeOf().Name(SFQ) ) )
+            pass
+        return dataMembers
+
+    def dumpFctMembers(self, klass):
+        fctMembers = []
+        for i in range(klass.FunctionMemberSize()):
+            f = klass.FunctionMemberAt(i)
+            fctMembers.append( FctMember( f.Name(SFQ),
+                                          f.TypeOf().Name(SFQ) ) )
+            pass
+        return fctMembers
+
+    def inspect(self, klassName):
+        
+        self.report = []
+        print ""
+        print "#"*80
+        print "## loading all relevant dictionaries..."
+        try:
+            klassNames = self.loadDicts(klassName)
+            print "#"*80
+        except Exception, err:
+            print ""
+            print "#"*80
+            print "## ERROR while trying to load dict for [%s]" % klassName
+            print "##  -Most probably you DIDN'T give a fully qualified name !"
+            print "##   Ex: try 'Analysis::Muon' instead of 'Muon'"
+            print "##"
+            print "##  -Could also mean that you are missing a dictionary "
+            print "##   of one of the base classes..."
+            print "#"*80
+            print err
+            raise
+            return
+
+        print ""
+        print "#"*80
+        print "## infos for class [%s]:" % klassName
+        print "## sizeof(%s) = %i" % \
+              (klassName,
+               self.rflxType.SizeOf(self.rflxType.ByName(klassName)))
+        print "##"
+        print "## (offset, data member name, data member type)"
+        print ""
+        # we want to dump from the base to the most derived class
+        klassNames.reverse()
+        for klass in klassNames:
+            line = "%s %s %s" % (
+                "-" * (40-len(klass)/2-1),
+                "[%s]" % klass, 
+                "-" * (40-len(klass)/2-1) )
+                
+            print line
+            self.report.append(line)
+            dataMembers = self.dumpDataMembers( self.rflxType.ByName(klass) )
+            for i in dataMembers:
+                line = "%3i %s %-10s %-50s %s %s" % ( i.offset,
+                                                      " "*5,
+                                                      i.name.split(" ")[0],
+                                                      i.name.split(" ")[1],
+                                                      " "*5, i.type )
+                print line
+                self.report.append(line)
+        print "#"*80
+        return
+
+    def save(self, fileName = "./columbo.out" ):
+        file = open(os.path.expandvars(os.path.expanduser(fileName)),
+                    "w+")
+        for line in self.report:
+            file.writelines(line + os.linesep)
+            pass
+        file.close()
+        
+    pass # Columbo
+
+
+if __name__ == '__main__':
+    if len(sys.argv) > 1:
+        klassName = sys.argv[1]
+    else:
+        klassName = "TruthParticle"
+        pass
+
+    columbo = Columbo()
+    columbo.inspect(klassName)
+    columbo.save()
diff --git a/Tools/PyUtils/bin/checkTag.py b/Tools/PyUtils/bin/checkTag.py
new file mode 100755
index 00000000000..8dafa2aaba0
--- /dev/null
+++ b/Tools/PyUtils/bin/checkTag.py
@@ -0,0 +1,637 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    checkTag.py
+# @purpose: Check the differences between 2 tags in CVS
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    July 2006
+#
+# @example:
+#
+# python checkTag.py MyTag-00-00-01
+#
+
+__version__ = "$Revision: 1.1 $"
+
+from optparse import OptionParser
+
+import sys
+import os
+import sets
+import commands
+
+import logging
+class CheckTag:
+    LoggerName = "CheckTag"
+    pass
+
+from HTMLParser import HTMLParser
+class LxrParser(HTMLParser):
+    def __init__(self):
+        HTMLParser.__init__(self)
+        self.files = []
+        pass
+    def handle_starttag(self, tag, attrs):
+        #print "===> %s" % tag, " --- ", attrs
+        if len(attrs) == 2:
+            name0, value0 = attrs[0]
+            name1, value1 = attrs[1]
+            if name0 == "class" and value0 == "find-file" and \
+               name1 == "href":
+                self.files.append(value1)
+                pass
+            pass
+        pass
+    def handle_endtag(self, tag):
+        #print "<=== %s" % tag
+        pass
+    def getFiles(self):
+        return self.files
+    pass
+
+import urllib
+class Lxr(object):
+    AtlasServer  = "http://alxr.usatlas.bnl.gov/lxr/search?"
+    Repositories = [ 'current', 'gaudi16' ]
+    
+    def __init__( self, lxrServerUrl = AtlasServer ):
+        object.__init__(self)
+        self.lxrUrl = lxrServerUrl
+        return
+
+    def query( self,
+               filePattern = None,
+               queryPattern = None,
+               repository   = None ):
+        log = logging.getLogger("Lxr");
+        query = {}
+        if repository != None and \
+           repository in Lxr.Repositories :
+            query['v'] = repository
+            pass
+        query['filestring'] = filePattern
+        query['string']     = queryPattern
+
+        query = urllib.urlencode( query )
+        url = urllib.urlopen( "".join( [self.lxrUrl, query] ) )
+        log.debug( "URL: %s" % url.geturl() )
+        query = url.read()
+        return query
+    
+    pass #Lxr
+
+class CvsOpts:
+    ContextDiff = "-c"
+    UniDiff     = "-u"
+    pass #CvsOpts
+
+class Cvs( object ):
+
+    def __init__( self,
+                  CVSROOT=":kserver:atlas-sw.cern.ch:/atlascvs",
+                  diffFormat = CvsOpts.UniDiff ):
+        self.cvsroot = CVSROOT
+        self.cvs = commands.getoutput( "which cvs" )
+        self.diffFormat = diffFormat
+        return
+
+    def __exec( self, cmdLine ):
+        #print "===>",cmdLine
+        sc,out = commands.getstatusoutput( "%s %s" % (self.cvs, cmdLine) )
+        if sc != 0:
+            log = logging.getLogger("PyCvs")
+            if sc == 256 and \
+               out.count( "cvs rdiff: failed to read diff file header" ) > 0:
+                log.warning( "cvs rdiff returned exit-code 256 !!" )
+                log.warning( "Maybe because a binary was put into CVS..." )
+                log.warning( "Command which failed: [cvs %s]" % cmdLine )
+                return out
+            else:
+                log.error( "CVS command failed !!" )
+                log.error( out )
+                raise RuntimeError,"Exit code [%i] !!" % sc
+        return out
+    
+    def rdiff( self,
+               tag1 = None,
+               tag2 = None,
+               modules = None ):
+        if tag1 == None or tag2 == None or modules == None:
+            raise RuntimeError, "Wrong arguments for Cvs.rdiff !!"
+        
+        return self.__exec( "rdiff %s -r %s -r %s offline/%s" % \
+                            ( self.diffFormat,
+                              tag1,
+                              tag2,
+                              modules ) )
+    pass # Cvs class
+
+class TagCollector( object ):
+    """
+    Interface to TagCollector to retrieve tags and the like
+    """
+    class MsgStrings:
+        NoPackage = "This package doesn't exist. "
+        pass
+    
+    def __init__( self ):
+
+        object.__init__(self)
+        sc,bin = commands.getstatusoutput( "which get_tag" )
+        if sc != 0:
+            raise RuntimeError, "Could not fetch 'get_tag': %s" % bin
+        
+        self.bin = bin
+        self.log = logging.getLogger("TagCollector")
+        return
+
+    def getTag( self, pkgFullName, release = "" ):
+        sc, out = commands.getstatusoutput( "%s %s %s" % ( self.bin,
+                                                           pkgFullName,
+                                                           release ) )
+        if sc != 0:
+            raise RuntimeError, "Prbl running 'get_tag': %s" % out
+
+        import re
+        pattern = re.compile( r'(?P<ProjectTag>.*?);(?P<ProjectName>.*?);(?P<PkgName>.*?);(?P<PkgTag>.*?)')
+
+        out = [ l for l in out.splitlines() if re.match(pattern, l.strip()) ]
+        if TagCollector.MsgStrings.NoPackage in out:
+            errMsg = "get_tag could not find [%s]" % pkgFullName
+            if release != "":
+                errMsg = "".join( [errMsg, " in release [%s]" % release] )
+                pass
+            self.log.error( errMsg )
+            raise RuntimeError, errMsg
+                
+        if len(out) > 1:
+            self.log.warning( "[%s] has more than 1 entry in getTag !!" %
+                              pkgFullName )
+            self.log.warning( [ "project: %s-%s" % ( l.split(";")[1],
+                                                     l.split(";")[0] )
+                                for l in out ] )
+            pass
+        out = re.match( pattern, out[0] )
+        projectTag  = out.group( 'ProjectTag' )
+        projectName = out.group( 'ProjectName' )
+        pkgName     = out.group( 'PkgName' )
+        pkgTag      = out.group( 'PkgTag' )
+
+        if pkgName[0] == "/":
+            pkgName = pkgName[1:]
+            pass
+        tag = {
+            'project' : { 'name' : projectName,
+                          'tag'  : projectTag },
+            'pkg'     : { 'name' : pkgName,
+                          'tag'  : pkgTag }
+            }
+        return tag
+
+    def getProject( self, pkgFullName, release = "" ):
+        project = self.getTag( pkgFullName, release )['project']
+        return "%s-%s" % ( project['name'], project['tag'] )
+                           
+    pass # TagCollector
+
+class CmtWrapper(object):
+    """
+    A python wrapper around CMT
+    """
+
+    def __init__(self):
+        object.__init__(self)
+        self.bin = commands.getoutput("which cmt.exe")
+        return
+
+    def checkOut(self, pkgFullName, pkgVersion = None ):
+        args = pkgFullName
+        cmd  = "%s co %s" % ( self.bin, pkgFullName )
+        if pkgVersion != None:
+            cmd = "%s co -r %s %s" % ( self.bin, pkgVersion, pkgFullName )
+            pass
+        
+        sc,out = commands.getstatusoutput( "%s" % cmd )
+        if sc != 0:
+            log = logging.getLogger(CheckTag.LoggerName)
+            log.warning( "Problem doing 'cmt co' !" )
+            log.warning( "Failed to issue [%s]" % cmd )
+            log.warning( out )
+            pass
+        return
+    
+class TagChecker(object):
+    """
+    A class to check that a tag someone is requesting on tag-approve is
+    satisfying some criteria.
+    """
+
+    def __init__( self,
+                  pkgTag,
+                  pkgFullName = None,
+                  refPkgTag   = None,
+                  refRelease  = None,
+                  checkTagsConsistency = True,
+                  doCheckOut = False ):
+        """
+         - pkgTag is the tag one wants to check for sanity : eg MyPkg-00-00-00
+         - pkgFullName is the optional full name of the pkg: Reco/Foo/Bar/MyPkg
+        """
+        object.__init__(self)
+
+        # fetch logger object
+        self.log = logging.getLogger(CheckTag.LoggerName)
+        
+        self.pkgTag      = pkgTag
+        self.pkgFullName = pkgFullName
+
+        self.refPkgTag   = refPkgTag
+        self.refRelease  = refRelease
+
+        # to check consistency of tags when both refPkgTag and refRelease
+        # have been configured by the user
+        self.checkConsistency = checkTagsConsistency
+
+        # to check out a package from CVS (for further tests, such as compilation)
+        self.doCheckOut = doCheckOut
+        
+        # List of extensions we want to flag as a 'header file'
+        self.headersExts = [ '.h', '.hpp', '.hh', '.icc', '.hcc' ]
+        
+        # We need an access point to the Atlas CVS repository to fetch
+        # modifications between a pair of tags
+        self.cvs = Cvs()
+
+        # We need an access point to LXR to get the full name of a package
+        # if it hasn't been given (hence the 'None')
+        self.lxr = None
+
+        # We need an access point to TagCollector to fetch the current
+        # tag of a package within a given Release
+        self.tc  = TagCollector()
+
+        # We need an access point to CMT in case a package has to be checked out
+        # from CVS to be checked for compilation
+        self.cmt = CmtWrapper()
+
+        # The list of files which have been modified between the 2 tags
+        # we are checking
+        self.modifiedFiles = []
+
+        # A dictionary containing our findings wrt the analysis of the
+        # package tag we have been configured to check
+        self.statusReport = {}
+        
+        return
+
+    def setPkgTag( self, pkgTag, refRelease = None ):
+        """
+        reinitialize method in case we want to re-use this TagChecker object
+        with a different package tag and (optionally) with a different
+        reference release.
+        """
+        self.pkgTag      = pkgTag
+        self.pkgFullName = None
+        self.refPkgTag   = None
+        if refRelease != None:
+            self.refRelease = refRelease
+            pass
+        
+        self.modifiedFiles = []
+        self.statusReport = {}
+        return
+    
+    def check(self):
+        """
+        The main method to check the sanity of a package tag.
+        """
+        self.log.info( "#"*80 )
+        self.log.info( "...Checking tag set..." )
+        
+        if self.pkgFullName == None:
+            self.retrievePkgFullName()
+            pass
+
+        self.retrieveRefPkgTag()
+        if self.pkgTag == self.refPkgTag:
+            self.printStatusReport()
+            self.log.info( "==> Same tags: nothing to compare." )
+            return
+        
+        ## Interrogates CVS for modifications between
+        ## a pair of tags and retrieves the modified files
+        self.fetchModsList()
+
+        ## if there are header files which have been modified
+        ## then we fetch the list of clients of this package so
+        ## their status wrt the tag being analysed will also be checked
+        if self.tagTouchsHeaderFile():
+            self.buildClientList()
+            pass
+
+        ## Fetches the package from CVS, using CMT
+        if self.doCheckOut:
+            self.fetchPkgFromCvs()
+        
+        ## Now we test that the package at least compiles
+        ## as well as its clients (if a header file has been modified)
+        self.checkCompilation()
+
+        ## Finally we report our findings
+        self.printStatusReport()
+        
+        return
+
+    def retrievePkgFullName( self ):
+        if self.lxr == None:
+            self.lxr = Lxr( Lxr.AtlasServer )
+            pass
+
+        # pkgTag is like "MyPackage-00-00-00"
+        pkgName = self.pkgTag.split("-")[0]
+
+        query = self.lxr.query( filePattern  = "/%s/CVS/Entries" % pkgName,
+                                queryPattern = "%s" % pkgName,
+                                repository   = "current" )
+
+        
+        lxr = LxrParser()
+        lxr.feed( query )
+        lxrFiles = lxr.getFiles()
+        del lxr
+        if len(lxrFiles) == 0:
+            self.log.warning( "Didn't find any CVS/Entries for [%s]" %
+                              pkgName )
+            self.log.warning( "Check for a typo in the package tag [%s]" %
+                              self.pkgTag )
+            self.log.warning( " or for a LXR limitation..." )
+            raise RuntimeError, "Can't find the package full name !"
+    
+        #lxrFiles are like "/lxr/source/atlas/SomePath/MyPackage/CVS/Entries#001"
+        lxrFiles = [ f.split("/CVS/Entries")[0]\
+                     .split("/lxr/source/atlas/")[1] \
+                     for f in lxrFiles ]
+        lxrFiles = [f for f in sets.Set(lxrFiles) \
+                    if f.count("/%s/%s" % (pkgName,pkgName) ) <= 0]
+
+        if len(lxrFiles) > 0:
+            self.pkgFullName = lxrFiles[0]
+            pass
+    
+        if os.path.basename(self.pkgFullName) != pkgName:
+            self.log.warning( "INCONSISTENCY:" )
+            self.log.warning( "pkgFullName= %s" % self.pkgFullName )
+            self.log.warning( "pkgName=     %s" % pkgName )
+            pass
+        if len(lxrFiles) > 1:
+            self.log.warning( "Found more than one CVS/Entries for [%s]" %
+                              pkgName )
+            for f in lxrFiles:
+                self.log.warning( "\t%s" % f )
+                pass
+            pass
+        self.log.info( "Package (full) name: %s" % self.pkgFullName )
+        return
+
+    def retrieveRefPkgTag( self ):
+
+        if self.refRelease == None and self.refPkgTag != None:
+            # nothing to do
+            return
+
+        if self.refRelease == None and self.refPkgTag == None:
+            self.log.error( "You have to provide a reference tag !!" )
+            self.log.error( " - either through an explicit one," )
+            self.log.error( " - or via a reference release" )
+            raise RuntimeError, "Invalid State"
+
+        # TODO: check validity of self.refRelease eg: "2.0.2" not "FooBooYou"
+        if self.refRelease != None and self.refPkgTag == None:
+            self.refPkgTag = self.tc.getTag( self.pkgFullName,
+                                             self.refRelease )['pkg']['tag']
+            return
+
+        if self.refRelease != None and self.refPkgTag != None:
+            refPkgTag = None
+            if self.checkConsistency:
+                refPkgTag = self.tc.getTag( self.pkgFullName,
+                                            self.refRelease )['pkg']['tag']
+                pass
+            else:
+                self.log.warning( "You have setup both :" )
+                self.log.warning( " - an explicit reference tag [%s]" %
+                                  self.refPkgTag )
+                self.log.warning( " - a release reference tag [project-%s]" %
+                                  self.refRelease )
+                self.log.warning( "...but no consistency is enforced..." )
+                return
+            
+            if refPkgTag != self.refPkgTag and \
+               refPkgTag != None:
+                self.log.warning( "Inconsistency in reference tags:" )
+                self.log.warning( " from release  tag: %s (project-%s)" %
+                                  ( refPkgTag, self.refRelease ) )
+                self.log.warning( " from explicit tag: %s" % self.refPkgTag )
+                self.log.warning( "==> explicit tag WINS !!" )
+                pass
+            pass
+        return
+
+    def fetchModsList( self ):
+        self.log.info( "Checking for modified files [%s VS %s]" %
+                       ( self.pkgTag,
+                         self.refPkgTag ) )
+
+        out = self.cvs.rdiff( tag1 = self.refPkgTag,
+                              tag2 = self.pkgTag,
+                              modules = self.pkgFullName )
+        self.modifiedFiles = []
+        lineHdr = "Index: "
+        for l in out.splitlines():
+            if len(l) > len(lineHdr) and l[:len(lineHdr)] == lineHdr:
+                fileName = l.split("Index: offline/")
+                if len(fileName) != 2 :
+                    self.log.error( "Could not get modified file name:" )
+                    self.log.error( l )
+                    continue
+                fileName = fileName[1]
+                self.log.info( " - %s" % fileName )
+                self.modifiedFiles.append(fileName)
+                pass
+            pass
+        return
+
+    def isHeaderFile( self, f ):
+        """Little helper to decide if a file is considered as a header file"""
+        return os.path.splitext(f)[1] in self.headersExts
+    
+    def tagTouchsHeaderFile( self ):
+        return len( [ f for f in self.modifiedFiles \
+                      if self.isHeaderFile(f) ] ) > 0
+
+    def buildClientList( self ):
+        return
+
+    def fetchPkgFromCvs( self ):
+        self.log.info( "Retrieving [%s/%s] from CVS..." % 
+                       ( self.pkgFullName, self.pkgTag ) )
+        self.cmt.checkOut( pkgFullName = self.pkgFullName,
+                           pkgVersion  = self.pkgTag )
+
+        return
+
+    def checkCompilation( self ):
+        return
+
+    def printStatusReport( self ):
+        self.log.info( "#"*80 )
+        if self.refRelease != None:
+            self.log.info( "## Ref Project     : %s" %
+                           self.tc.getProject( self.pkgFullName,
+                                               self.refRelease ) )
+        self.log.info( "## Ref release     : %s" % self.refRelease )
+        self.log.info( "## Ref package tag : %s" % self.refPkgTag  )
+        self.log.info( "## Package name    : %s" % self.pkgFullName )
+        self.log.info( "## Package tag     : %s" % self.pkgTag )
+        self.log.info( "##" )
+        self.log.info( "## Header file(s) affected: %s" %
+                       self.tagTouchsHeaderFile() or "No" )
+        for f in [ f for f in self.modifiedFiles if self.isHeaderFile(f) ] :
+            self.log.info( "##  - %s" % f )
+            pass
+        self.log.info( "#"*80 )
+        return
+
+    def printHeadersDiff( self, fd = sys.stdout ):
+        for f in [ f for f in self.modifiedFiles if self.isHeaderFile(f) ] :
+            diff = self.cvs.rdiff( tag1    = self.refPkgTag,
+                                   tag2    = self.pkgTag,
+                                   modules = f )
+            print >>fd, diff
+            pass
+        return
+    
+    def printDiff( self, fd = sys.stdout ):
+        for f in self.modifiedFiles:
+            diff = self.cvs.rdiff( tag1    = self.refPkgTag,
+                                   tag2    = self.pkgTag,
+                                   modules = f )
+            print >>fd, diff
+            pass
+        return
+    
+    pass # TagChecker
+
+def _installLogger( lvl        = "INFO",
+                    loggerName = CheckTag.LoggerName ):
+    # define a Handler which writes DEBUG messages or higher to the sys.stderr
+    logger = logging.StreamHandler()
+    logger.setLevel(logging.DEBUG)
+    # set a format which is simpler for console use
+    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+    # tell the handler to use this format
+    logger.setFormatter(formatter)
+    # add the handler to the root logger
+    logging.getLogger('').addHandler(logger)
+
+    log = logging.getLogger(loggerName)
+    exec ( """
+try:
+ log.setLevel( logging.%s )
+except AttributeError,err:
+ log.warning( 'Invalid logging.Level [%s]' )
+ log.warning( 'setting to [INFO]' )
+ log.setLevel( logging.INFO )
+""" % (lvl, lvl) )
+    return
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] MyPkg-00-00-00")
+    parser.add_option( "-p",
+                       "--pkg-tag",
+                       dest = "pkgTag",
+                       help = "The pkg tag one wants to check (eg: MyPkg-00-00-01)" )
+    parser.add_option( "-P",
+                       "--pkg-full-name",
+                       dest = "pkgFullName",
+                       help = "The full name of the package one wants to check (eg: Deconstruction/MyContainer/MyPkg)" )
+    parser.add_option( "-r",
+                       "--ref-tag",
+                       dest = "refPkgTag",
+                       help = "The reference pkg tag (eg: MyPkg-00-00-01)" )
+    parser.add_option( "-R",
+                       "--ref-rel",
+                       dest    = "refRelease",
+                       default = "13.0.0",
+                       help    = "The reference release (eg: 13.0.0)" )
+    parser.add_option( "--show-headers-diff",
+                       action  = "store_true",
+                       dest    = "showHeadersDiff",
+                       default = False,
+                       help    = "Switch to dump or not the diff for header files" )
+    
+    parser.add_option( "--show-diff",
+                       action  = "store_true",
+                       dest    = "showDiff",
+                       default = False,
+                       help    = "Switch to dump or not the diff for all files" )
+    
+    parser.add_option( "--co",
+                       action  = "store_true",
+                       dest    = "checkOut",
+                       default = False,
+                       help    = "Switch to check-out or not a package from CVS" )
+    
+    parser.add_option( "-l",
+                       "--loglevel",
+                       type    = "string",
+                       dest    = "logLevel",
+                       default = "INFO",
+#                       choices = [ "DEBUG", "INFO", "WARNING", "ERROR" ],
+                       help    = "Logging message level [DEBUG, INFO, WARNING, ERROR]"
+                       )
+    
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.pkgTag = args[0]
+        pass
+
+    if options.pkgTag == None:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    ## install the user loglevel
+    _installLogger( loggerName = CheckTag.LoggerName,
+                    lvl        = options.logLevel )
+    log = logging.getLogger( CheckTag.LoggerName )
+    
+    log.info( "...Checking tags..." )
+
+    log.info( "...checking tag [%s]..." % options.pkgTag )
+
+    tagChecker = TagChecker( pkgTag      = options.pkgTag,
+                             pkgFullName = options.pkgFullName,
+                             refPkgTag   = options.refPkgTag,
+                             refRelease  = options.refRelease )
+    tagChecker.check()
+
+    if options.showDiff:
+        tagChecker.printDiff()
+        tagChecker.printStatusReport()
+
+    if options.showHeadersDiff:
+        tagChecker.printHeadersDiff()
+        tagChecker.printStatusReport()
+
+    if options.checkOut:
+        cmt = CmtWrapper()
+        log.info( "Retrieving [%s/%s] from CVS..." %
+                  ( tagChecker.pkgFullName, tagChecker.pkgTag ) )
+        cmt.checkOut( pkgFullName = tagChecker.pkgFullName,
+                      pkgVersion  = tagChecker.pkgTag )
+        pass
+    
+    pass
diff --git a/Tools/PyUtils/bin/checkxAOD.py b/Tools/PyUtils/bin/checkxAOD.py
new file mode 100755
index 00000000000..870d19a1894
--- /dev/null
+++ b/Tools/PyUtils/bin/checkxAOD.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+#
+# $Id: checkxAOD.py 592348 2014-04-10 12:06:41Z krasznaa $
+#
+# This is a modified version of PyUtils/bin/checkFile.py. It has been taught
+# how to sum up the sizes of all the branches belonging to a single xAOD
+# object/container.
+#
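+# Example usage (illustrative; the file name is only a placeholder):
+#
+#   checkxAOD.py my.xAOD.file.pool.root
+#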
+
+__version__ = "$Revision: 592348 $"
+__author__  = "Sebastien Binet <binet@cern.ch>, " \
+    "Attila Krasznahorkay <Attila.Krasznahorkay@cern.ch>"
+
+import sys
+import os
+import re
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser( usage = "usage: %prog [-f] my.xAOD.file.pool.root" )
+    p = parser.add_option
+    p( "-f",
+       "--file",
+       dest = "fileName",
+       help = "The path to the POOL file to analyze" )
+    ( options, args ) = parser.parse_args()
+
+    fileNames = []
+
+    if len( args ) > 0:
+        fileNames = [ arg for arg in args if arg[ 0 ] != "-" ]
+        pass
+
+    if options.fileName == None and len( fileNames ) == 0:
+        str( parser.print_help() or "" )
+        sys.exit( 1 )
+
+    if options.fileName != None:
+        fileName = os.path.expandvars( os.path.expanduser( options.fileName ) )
+        fileNames.append( fileName )
+        pass
+
+    fileNames = set( fileNames )
+
+    # Loop over the specified file(s):
+    for fileName in fileNames:
+
+        # Open the file:
+        import PyUtils.PoolFile as PF
+        poolFile = PF.PoolFile( fileName )
+
+        # Loop over all the branches of the file, and sum up the information
+        # about them in a smart way...
+        summedData = {}
+        for d in poolFile.data:
+            # Skip metadata/TAG/etc. branches:
+            if d.dirType != "B": continue
+            # The name of this branch:
+            brName = d.name
+            # Check if this is a static auxiliary store:
+            m = re.match( "(.*)Aux\..*", d.name )
+            if m:
+                # Yes, it is. And the name of the main object/container is:
+                brName = m.group( 1 )
+                pass
+            # Check if this is a dynamic auxiliary variable:
+            m = re.match( "(.*)AuxDyn\..*", d.name )
+            if m:
+                # Oh yes, it is. Let's construct the name of the main
+                # object/container:
+                brName = m.group( 1 )
+                pass
+            # Check if we already know this container:
+            if brName in summedData.keys():
+                summedData[ brName ].memSize  += d.memSize
+                summedData[ brName ].diskSize += d.diskSize
+            else:
+                summedData[ brName ] = \
+                    PF.PoolRecord( brName,
+                                   d.memSize,
+                                   d.diskSize,
+                                   d.memSizeNoZip,
+                                   d.nEntries,
+                                   d.dirType )
+                pass
+            pass
+
+        # Order the records by size:
+        orderedData = []
+        for br in summedData.keys():
+            orderedData += [ summedData[ br ] ]
+            pass
+        sorter = PF.PoolRecord.Sorter.DiskSize
+        import operator
+        orderedData.sort( key = operator.attrgetter( sorter ) )
+
+        # Access the CollectionTree directly:
+        import ROOT
+        tfile = ROOT.TFile.Open( fileName )
+        ttree = tfile.Get( "CollectionTree" )
+
+        # Print a header:
+        print( "" )
+        print( "=" * 80 )
+        print( "         Event data" )
+        print( "=" * 80 )
+        print( PF.PoolOpts.HDR_FORMAT %
+               ( "Mem Size", "Disk Size", "Size/Evt", "Compression",
+                 "Items", "Container Name (Type)" ) )
+        print( "-" * 80 )
+
+        # Now, let's print the event-wise info that we gathered:
+        memSize = 0.0
+        diskSize = 0.0
+        for d in orderedData:
+            if d.nEntries != poolFile.dataHeader.nEntries: continue
+            nameType = "%s (%s)" % \
+                ( d.name, ttree.GetBranch( d.name ).GetClassName() )
+            print( PF.PoolOpts.ROW_FORMAT %
+                   ( d.memSize,
+                     d.diskSize,
+                     ( d.diskSize / d.nEntries ),
+                     ( d.memSize / d.diskSize ),
+                     d.nEntries,
+                     nameType ) )
+            memSize = memSize + d.memSize
+            diskSize = diskSize + d.diskSize
+            pass
+        print( "-" * 80 )
+        print( PF.PoolOpts.ROW_FORMAT %
+               ( memSize,
+                 diskSize,
+                 ( diskSize / poolFile.dataHeader.nEntries ),
+                 0.0,
+                 poolFile.dataHeader.nEntries,
+                 "Total" ) )
+        print( "=" * 80 )
+        print( "         Meta data" )
+        print( "=" * 80 )
+        print( "     Mem Size       Disk Size         Container Name" )
+        print( "-" * 80 )
+
+        # Now print the info about the metadata:
+        memSize = 0.0
+        diskSize = 0.0
+        for d in orderedData:
+            if d.nEntries == poolFile.dataHeader.nEntries: continue
+            print( "%12.3f kb %12.3f kb       %s" %
+                   ( d.memSize, d.diskSize, d.name ) )
+            memSize = memSize + d.memSize
+            diskSize = diskSize + d.diskSize
+            pass
+        print( "-" * 80 )
+        print( "%12.3f kb %12.3f kb       %s" %
+               ( memSize, diskSize, "Total" ) )
+        print( "=" * 80 )
+
+        if len(fileNames) > 1:
+            print ""
+        pass # loop over fileNames
+
+    print "## Bye."
+    sys.exit( 0 )
diff --git a/Tools/PyUtils/bin/cmtClients.py b/Tools/PyUtils/bin/cmtClients.py
new file mode 100755
index 00000000000..7145e110ab6
--- /dev/null
+++ b/Tools/PyUtils/bin/cmtClients.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+## @author: Sebastien Binet
+## @file : cmtClients.py
+## @purpose: a little wrapper around cmt.exe show clients
+
+__version__ = "$Revision: 1.4 $"
+__author__  = "Sebastien Binet"
+
+import sys
+from PyCmt import Cmt
+
+if __name__ == "__main__":
+
+    from optparse import OptionParser
+    parser = OptionParser(usage="usage: %prog [options] [-p] PkgName")
+    parser.add_option(
+        "-p",
+        "--pkg",
+        dest = "pkgName",
+        help = "The name of the package one wants to see the clients"
+        )
+    parser.add_option(
+        "--co",
+        action = "store_true",
+        dest   = "doCheckOut",
+        default= False,
+        help = "Switch to check out the list of client of the given package"
+        )
+    parser.add_option(
+        "-l",
+        "--level",
+        dest = "logLvl",
+        default = "INFO",
+        help = "Logging level (aka verbosity)"
+        )
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.pkgName = args[0]
+        pass
+    
+    if options.pkgName == None:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    options.logLvl = options.logLvl.upper()
+    import PyCmt.Logging as L
+    if hasattr(L.logging, options.logLvl):
+        lvl = getattr(L.logging, options.logLvl)
+    else:
+        print "*"*80
+        print "BootStrap: Unknown logging level [%s] !!" % options.logLvl
+        print "BootStrap: Defaulting to [INFO]..."
+        print "*"*80
+        lvl = L.logging.INFO
+    
+    cmt = Cmt.CmtWrapper( lvl )
+    cmt.msg.info( "cmt show clients [%s]", options.pkgName )
+    clients = cmt.showClients( options.pkgName )
+
+    if options.doCheckOut:
+        def do_checkout (pkg):
+            cmt.checkOut (pkg.fullName(), pkg.version)
+        try:
+            import multiprocessing as mp
+            nworkers = min (mp.cpu_count(), 4)
+            workers = mp.Pool(processes=nworkers)
+            res = workers.map_async (func=do_checkout, iterable=clients)
+            res = res.get()
+        except ImportError: # no multiprocessing module
+            for client in clients:
+                res = do_checkout(client)
+        
+    sys.exit(0)
+    pass
+
diff --git a/Tools/PyUtils/bin/diff-athfile b/Tools/PyUtils/bin/diff-athfile
new file mode 100755
index 00000000000..195c15db3fe
--- /dev/null
+++ b/Tools/PyUtils/bin/diff-athfile
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+
+# @file:    diff-athfile.py
+# @purpose: simple command-line utility to diff metadata in two files.
+#           Uses PyUtils.AthFile.fopen. Based on dump-athfile.py.
+# @author:  Graeme Stewart <graeme.andrew.stewart@cern.ch>
+# @date:    Jan 2012
+# @version: $Id: diff-athfile 493697 2012-04-02 17:30:56Z binet $
+#
+# @example:
+# @code
+# diff-athfile.py aod.1.pool.root aod.2.pool.root 
+# @endcode
+#
+
+import sys
+import os
+
+try:                import cPickle as pickle
+except ImportError: import pickle
+    
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] my.1.file my.2.file")
+    parser.add_option( "-o",
+                       "--output",
+                       dest = "oname",
+                       default = 'athfile-infos.ascii',
+                       help = "Name of the output file which will contain the informations gathered during AthFile processing. These informations will be stored into a python-shelve file." )
+    parser.add_option( '--evtmax',
+                       dest = 'evtmax',
+                       default = 1,
+                       type = int,
+                       help = 'Maximum number of events to process in the file(s)')
+    parser.add_option( "-q",
+                       "--quiet",
+                       dest = "quiet",
+                       default = False,
+                       action = "store_true",
+                       help = 'Quiet output - only print differences')
+    
+    (options, args) = parser.parse_args()
+
+    fnames = []
+    
+    if len(args) > 0:
+        fnames = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if len(fnames) != 2:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    sc = 0
+    import PyUtils.AthFile as af
+    msg = af.msg
+    if options.quiet:
+        from logging import WARNING
+        msg.setLevel(WARNING)
+    
+    fhandles = {}
+    try:
+        for fname in fnames:
+            fhandles[fname] = af.fopen(fname, evtmax=options.evtmax)
+            msg.info(':'*80)
+            msg.info('Opened file %s.' % fname)
+            
+
+        # Ignore the following keys, which are bound to be different:
+        # 'file_md5sum', 'file_name', 'file_type', 'file_guid',
+        simpleCompKeys = ('nentries','run_number', 'run_type', 'evt_number', 'evt_type', 'lumi_block',
+                        'beam_energy', 'beam_type',
+                        'stream_tags', 'stream_names', 'geometry', 'conditions_tag',)
+        bitByBitKeys = ('metadata',)
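+        # Comparison strategy (descriptive note): entries in simpleCompKeys are
+        # compared directly by value, while entries in bitByBitKeys hold
+        # dictionaries, so their key sets are compared first and then each
+        # element is compared one by one.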
+    
+        for k in simpleCompKeys:
+            if fhandles[fnames[0]].infos[k] == fhandles[fnames[1]].infos[k]:
+                msg.info('%s equal in %s and %s: %s' % (k, fnames[0], fnames[1], fhandles[fnames[0]].infos[k]))
+            else:
+                msg.warning('%s not equal in %s and %s: %s != %s' % 
+                            (k, fnames[0], fnames[1], fhandles[fnames[0]].infos[k], fhandles[fnames[1]].infos[k]))
+                sc = 1
+
+        for k in bitByBitKeys:
+            # First check dictionary keys are the same
+            skeys = fhandles[fnames[0]].infos[k].keys()
+            skeys1 = fhandles[fnames[1]].infos[k].keys()
+            skeys.sort()
+            skeys1.sort()
+            if skeys != skeys1:
+                msg.warning('%s keys not equal for %s and %s: %s != %s' % 
+                            (k, fnames[0], fnames[1], skeys, skeys1))
+                sc = 1
+            else:
+                msg.info('%s keys are equal for %s and %s: %s' % 
+                         (k, fnames[0], fnames[1], skeys))
+                for subk in skeys:
+                    if fhandles[fnames[0]].infos[k][subk] == fhandles[fnames[1]].infos[k][subk]:
+                        # Here suppress the very long value output
+                        msg.info('%s element %s values are equal for %s and %s: (value suppressed)' % 
+                                 (k, subk, fnames[0], fnames[1]))
+                    else:
+                        msg.warning('%s element %s values are not equal for %s and %s: %s != %s' % 
+                                 (k, subk, fnames[0], fnames[1], fhandles[fnames[0]].infos[k][subk], 
+                                  fhandles[fnames[1]].infos[k][subk]))
+                        sc = 1
+                        
+                
+        msg.info(':'*80)
+    except Exception, e:
+        msg.error("Caught exception [%s] !!", str(e.__class__))
+        msg.error("What:\n%s\n%s\n%s",e,
+                  sys.exc_info()[0],
+                  sys.exc_info()[1])
+        sc = 2
+        pass
+
+    except :
+        msg.error("Caught something !! (don't know what)")
+        msg.error("\n%s\n%s",sys.exc_info()[0], sys.exc_info()[1])
+        sc = 2
+        pass
+
+    
+    if options.oname:
+        oname = options.oname
+        msg.info("saving report into [%s]..." % oname)
+        if os.path.exists(oname):
+            os.rename(oname, oname+'.bak')
+        af.server.save_cache(oname)
+
+    msg.info("Bye.")
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/diff-jobo-cfg.py b/Tools/PyUtils/bin/diff-jobo-cfg.py
new file mode 100755
index 00000000000..2eba8774755
--- /dev/null
+++ b/Tools/PyUtils/bin/diff-jobo-cfg.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    diff-jobo-cfg.py
+# @purpose: check that 2 jobosvc.ascii files (produced with find_cfg_dups.py)
+#           have the same content (both in configurables and properties)
+# @author:  Sebastien Binet <binet@cern.ch>
+#           Adrien Renaud <renaud@lal.in2p3.fr>
+# @date:    May 2010
+#
+# @example:
+#
+# diff-jobo-cfg ref.josvc.ascii chk.josvc.ascii
+#
+
+__version__ = "$Revision: 298860 $"
+__author__  = "Sebastien Binet, Adrien Renaud"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+def dump_seq(seq):
+    for i in seq:
+        print i
+    pass
+
+def cxx_sort(dpp):
+    from collections import defaultdict
+    cxx_dpp = defaultdict(list)          
+
+    for k,v in dpp.iteritems():
+        #print v['cxx_type']
+        cxx_dpp[v['cxx_type']].append({k:v})
+
+    for k,v in cxx_dpp.iteritems():
+        print '---',k
+        for vv in v: 
+            print '------',vv.keys()
+            print '---------',vv.values()#['comp_type']
+    return cxx_dpp
+
+def load_cfg_file(fname):
+    """return the dictionary of components and their properties
+    """
+    comps_db = {}
+    try:
+        import shelve
+        comps_db = shelve.open(fname, 'r')
+        return comps_db['all-cfgs']
+    except Exception, err:
+        execfile(fname, comps_db)
+        return comps_db['d']
+
+def dict_diff(ref, chk):
+    """ Return a dict of keys that differ with another config object.  If a value is
+        not found in one fo the configs, it will be represented by KEYNOTFOUND.
+        @param ref:   First dictionary to diff.
+        @param chk:   Second dicationary to diff.
+        @return diff:   Dict of Key => (ref.val, chk.val)
+    """
+    diff = {}
+    # Check all keys in ref dict
+    for k in ref.iterkeys():
+        if not (k in chk):
+            diff[k] = (ref[k], '<KEYNOTFOUND>')
+        elif (ref[k] != chk[k]):
+            diff[k] = (ref[k], chk[k])
+    # Check all keys in chk dict to find missing
+    for k in chk.iterkeys():
+        if not (k in ref):
+            diff[k] = ('<KEYNOTFOUND>', chk[k])
+    return diff
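+
+# Illustrative usage (hypothetical values):
+#   dict_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
+#   -> {'a': (1, '<KEYNOTFOUND>'), 'b': (2, 3), 'c': ('<KEYNOTFOUND>', 4)}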
+
+def cmp_component_db(ref, chk, verbose=True):
+    """ compare 2 dicts of components
+    dicts are of the form:
+     { 'comp_type' : <name of component>,
+       'cxx_type'  : <C++ type of the component>,
+       'props' : { 'property-name' : 'property-value', }
+       }
+    """
+    common_keys = []
+    ref_keys = set(ref.keys())
+    chk_keys = set(chk.keys())
+
+    common_keys = ref_keys & chk_keys
+    ref_only_keys = ref_keys - chk_keys
+    chk_only_keys = chk_keys - ref_keys
+
+    print "::: components in both files: [%5s]" % (len(common_keys),)
+    print "::: components in ref only:   [%5s]" % (len(ref_only_keys),)
+    if len(ref_only_keys)>0:
+        dump_seq(ref_only_keys)
+        print "="*80
+    print "::: components in chk only:   [%5s]" % (len(chk_only_keys),)
+    if len(chk_only_keys)>0:
+        dump_seq(chk_only_keys)
+        print "="*80
+
+    diff = []
+    for comp_name in common_keys:
+        is_diff = False
+        comp_ref = ref[comp_name]
+        comp_chk = chk[comp_name]
+
+        for k in ('comp_type', 'cxx_type',):
+            if comp_ref[k] != comp_chk[k]:
+                is_diff = True
+        ref_props = sorted([(k,v) for k,v in comp_ref['props'].iteritems()])
+        chk_props = sorted([(k,v) for k,v in comp_chk['props'].iteritems()])
+        if ref_props != chk_props:
+            is_diff = True
+            diff.append((comp_name, ref_props, chk_props,
+                         dict_diff(ref=comp_ref['props'],
+                                   chk=comp_chk['props'])))
+        pass
+
+    print "::: components with different properties: [%5s]" % (len(diff),)
+    for name, ref_props, chk_props, diff_props in diff:
+        print ":::  - component: [%s]" % (name,)
+        for prop_name, prop_value in diff_props.iteritems():
+            ref_value = prop_value[0]
+            chk_value = prop_value[1]
+            if isinstance(ref_value, list):
+                ref_value = sorted(ref_value)
+            if isinstance(chk_value, list):
+                chk_value = sorted(chk_value)
+
+            if isinstance(ref_value, list) and isinstance(chk_value, list):
+                dref_value = set(ref_value) - set(chk_value)
+                dchk_value = set(chk_value) - set(ref_value)
+                ref_value = sorted(list(dref_value))
+                chk_value = sorted(list(dchk_value))
+            print "-%s: %r" %(prop_name, ref_value,)
+            print "+%s: %r" %(prop_name, chk_value,)
+    
+        
+    if (len(ref_only_keys) > 0 or
+        len(chk_only_keys) > 0 or
+        len(diff) > 0):
+        return 1
+    return 0
+
+if __name__ == "__main__":
+
+    parser = OptionParser(
+        usage="usage: %prog [options] [-r] ref.josvc.ascii [-f] chk.josvc.ascii"
+        )
+    _add = parser.add_option
+    
+    _add( "-r",
+          "--ref",
+          dest = "ref_fname",
+          help = "The path to the first josvc.ascii file to analyze" )
+    
+    _add( "-f",
+          "--file",
+          dest = "chk_fname",
+          help = "The path to the second josvc.ascii file to analyze" )
+
+#    _add("-o", "--output",
+#         dest = "o_fname",
+#         default = "cfg.diff",
+#         help = "file where to store the output of `which diff` run on the input files given to %PROG")
+    
+    _add( "-v",
+          "--verbose",
+          action  = "store_true",
+          dest = "verbose",
+          default = False,
+          help = "Switch to activate verbose printout" )
+
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.ref_fname = args[0]
+        pass
+    if len(args) > 1 and args[1][0] != "-":
+        options.chk_fname = args[1]
+        pass
+
+    if (options.chk_fname == None or 
+        options.ref_fname == None) :
+        str(parser.print_help() or "")
+        sys.exit(1)
+        pass
+
+    chk_fname = os.path.expandvars(os.path.expanduser(options.chk_fname))
+    ref_fname = os.path.expandvars(os.path.expanduser(options.ref_fname))
+
+    print ":"*80
+    print "::: comparing configurations"
+    print ":::  ref: %s" % ref_fname
+    ref_db = load_cfg_file(ref_fname)
+    print ":::    -> [%d] components" % (len(ref_db.keys()),)
+    print ":::  chk: %s" % chk_fname
+    chk_db = load_cfg_file(chk_fname)
+    print ":::    -> [%d] components" % (len(chk_db.keys()),)
+
+    sc = cmp_component_db(ref_db, chk_db, options.verbose)
+    
+    if sc==0:
+        print "::: all good"
+    else:
+        print "::: configurations differ !"
+    print "::: bye."
+    print ":"*80
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/diffConfigs.py b/Tools/PyUtils/bin/diffConfigs.py
new file mode 100755
index 00000000000..d1a2b7a7059
--- /dev/null
+++ b/Tools/PyUtils/bin/diffConfigs.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    diffConfigs.py
+# @purpose: check that 2 ConfigurationShelves have the same content (both in
+#           configurables and properties)
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    September 2008
+#
+# @example:
+#
+# diffConfigs ref.pkl chk.pkl
+#
+
+__version__ = "$Revision: 1.1 $"
+__author__  = "Sebastien Binet"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [-r] ref.pkl [-f] chk.pkl")
+    parser.add_option( "-r",
+                       "--ref",
+                       dest = "refFileName",
+                       help = "The path to the first ConfigurationShelve file to analyze" )
+    parser.add_option( "-f",
+                       "--file",
+                       dest = "fileName",
+                       help = "The path to the second ConfigurationShelve file to analyze" )
+    parser.add_option( "-v",
+                       "--verbose",
+                       action  = "store_true",
+                       dest = "verbose",
+                       default = False,
+                       help = "Switch to activate verbose printout" )
+
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.refFileName = args[0]
+        pass
+    if len(args) > 1 and args[1][0] != "-":
+        options.fileName = args[1]
+        pass
+
+    if options.fileName    == None or \
+       options.refFileName == None :
+        str(parser.print_help() or "")
+        sys.exit(1)
+        pass
+
+    chkFileName = os.path.expandvars(os.path.expanduser(options.fileName))
+    refFileName = os.path.expandvars(os.path.expanduser(options.refFileName))
+
+    print "::: comparing configurations:"
+    print ":::  ref: %s" % refFileName
+    print ":::  chk: %s" % chkFileName
+    from AthenaCommon.ConfigurationShelve import cmpConfigs
+    ref, chk, report = cmpConfigs (ref=refFileName,
+                                   chk=chkFileName)
+    if len(report)==0:
+        print "::: all good"
+        sys.exit(0)
+
+    for l in report: print l
+    print "::: configurations differ !"
+    sys.exit(1)
diff --git a/Tools/PyUtils/bin/diffPoolFiles.py b/Tools/PyUtils/bin/diffPoolFiles.py
new file mode 100755
index 00000000000..222bdc139d8
--- /dev/null
+++ b/Tools/PyUtils/bin/diffPoolFiles.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    diffPoolFiles.py
+# @purpose: check that 2 POOL files have the same content (containers and sizes)
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    March 2007
+#
+# @example:
+#
+# diffPoolFiles aod.pool ref.aod.pool
+#
+
+__version__ = "$Revision: 1.3 $"
+__author__  = "Sebastien Binet"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [-r] file1.pool [-f] file2.pool")
+    parser.add_option( "-r",
+                       "--ref",
+                       dest = "refFileName",
+                       help = "The path to the first POOL file to analyze" )
+    parser.add_option( "-f",
+                       "--file",
+                       dest = "fileName",
+                       help = "The path to the second POOL file to analyze" )
+    parser.add_option( "-v",
+                       "--verbose",
+                       action  = "store_true",
+                       dest = "verbose",
+                       default = False,
+                       help = "Switch to activate verbose printout" )
+
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.refFileName = args[0]
+        pass
+    if len(args) > 1 and args[1][0] != "-":
+        options.fileName = args[1]
+        pass
+
+    if options.fileName    == None or \
+       options.refFileName == None :
+        str(parser.print_help() or "")
+        sys.exit(1)
+        pass
+
+    chkFileName = os.path.expandvars(os.path.expanduser(options.fileName))
+    refFileName = os.path.expandvars(os.path.expanduser(options.refFileName))
+    
+    import PyUtils.PoolFile as PF
+    diff = PF.DiffFiles( refFileName = refFileName,
+                         chkFileName = chkFileName,
+                         verbose = options.verbose )
+    diff.printSummary()
+    sys.exit(diff.status())
diff --git a/Tools/PyUtils/bin/diffTAGTree.py b/Tools/PyUtils/bin/diffTAGTree.py
new file mode 100755
index 00000000000..6b9a3debd48
--- /dev/null
+++ b/Tools/PyUtils/bin/diffTAGTree.py
@@ -0,0 +1,208 @@
+#!/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    diffTAGTree.py
+# @purpose: Check that 2 TAG files have the same content.
+#           (Based on original script from PROC, modified to deal with 
+#           prodsys generated TAGs by Graeme.)
+# @author:  Graeme Andrew Stewart <graeme.andrew.stewart@cern.ch>
+# @date:    April 2012
+#
+# @example:
+#
+# diffTAGTree.py oldTAG newTAG
+#
+
+__version__ = "$Revision: 1.3 $"
+__author__  = "Graeme Andrew Stewart"
+
+
+import sys
+import os
+sys.argv += [ '-b' ] # tell ROOT to not use graphics
+from ROOT import TFile, TTree
+sys.argv.pop()
+
+setIgnoreLeaves=("Token","StreamESD_ref","StreamRDO_ref","StreamAOD_ref","RecoTimeRAWtoESD","RecoTimeESDtoAOD")
+
+def diffTTree(tOld,tNew,details=None): 
+    nOld = tOld.GetEntriesFast()
+    nNew = tNew.GetEntriesFast()
+    n=min(nOld,nNew)
+
+    if nOld != nNew:
+        msg="Different number of entries: %i vs %i. Comparing first %i" % \
+            (nOld,nNew,n)
+        print msg
+        if details is not None: details.write(msg+"\n")
+
+    leavesOld=tOld.GetListOfLeaves()
+    leavesNew=tNew.GetListOfLeaves()
+    
+    checkLeavesOld=set()
+    
+    for l in leavesOld:
+        name=l.GetName()
+        if not name in setIgnoreLeaves:
+            checkLeavesOld.add(name)
+
+    checkLeavesNew=set()
+    for l in leavesNew:
+        name=l.GetName()
+        if not name in setIgnoreLeaves:
+            checkLeavesNew.add(name)
+    
+    #print checkLeavesOld
+    checkLeaves=checkLeavesOld & checkLeavesNew
+    
+    diffLeaves=checkLeavesOld ^ checkLeavesNew
+    if len(diffLeaves):
+        msg="The following variables exist in only one tree, not compared:\n"
+        for d in diffLeaves:
+            msg+=d+"\n"
+        print msg
+        if details is not None: details.write(msg)
+
+    nGood=0
+    nBad=0
+    diffSummary=dict()
+
+    # To cope with events being out of order in the new TAG file
+    # (which can happen when running through prodsys) build up
+    # a cache for the new TAG file, mapping run/event numbers -> index
+    newRunEventDict=dict()  # Hold information about the run/event numbers vs entry index here
+    cachedIndex=0           # How far we looked through the new file already
+    
+    # Loop over events in the old file
+    for iEntry in range(n):
+        tOld.GetEntry(iEntry)
+
+        try:
+            evOld=tOld.EventNumber
+            runOld=tOld.RunNumber
+            # Simple index combining run and event
+            runEventIndex = "%d-%d" % (runOld, evOld)
+#            print "Trying to match %i-%i" % (runOld, evOld)
+            if runEventIndex in newRunEventDict:
+#                print "Cache hit!"
+                tNew.GetEntry(newRunEventDict[runEventIndex])
+                evNew=tNew.EventNumber
+                runNew=tNew.RunNumber
+            else:
+                for sEntry in range(cachedIndex, n):
+                    tNew.GetEntry(sEntry)
+                    evNew=tNew.EventNumber
+                    runNew=tNew.RunNumber
+                    newRunEventDict["%d-%d" % (runNew, evNew)] = sEntry
+#                    print "Cached %i-%i" % (runNew, evNew)
+                    cachedIndex = sEntry + 1
+                    if evNew == evOld and runNew == runOld:
+                        break
+            if evOld != evNew or runOld != runNew:
+                msg="Run/Event numbers don't match: found no partner for RunNbr: %i, EventNbr: %i\n" % (runOld, evOld)
+                msg+="\nStop comparison now."
+                print msg
+                if details is not None:
+                    details.write(msg+"\n")
+                break
+        except AttributeError:
+            pass
+
+        foundDiff=False
+        for name in checkLeaves:
+            #exec "vOld=tOld."+name
+            vOld=getattr(tOld,name)
+            vNew=getattr(tNew,name)
+        
+            if vOld != vNew:
+                foundDiff=True
+                try: #Get Run/Event number
+                    evt=tNew.EventNumber
+                    rn=tNew.RunNumber
+
+                    evtO=tOld.EventNumber
+                    rnO=tOld.RunNumber
+                    
+                    evId="(Run %i, Evt %i)" % (rn, evt)
+                    evId+="(Run %i, Evt %i)" % (rnO, evtO)
+                except:
+                    evId=""
+                    
+                #print "Event #",iEntry,"Difference:",name,
+                diffmsg="Event #%i %s Difference: %s %s -> %s" %\
+                    (iEntry,evId,name,str(vOld),str(vNew))
+                #print vOld,"vs",vNew,
+                try:
+                    d=100.0*(vNew-vOld)/vOld
+                    diffmsg+=" (%.3f%%)" % d
+                except:
+                    pass
+                if details is not None:
+                    details.write(diffmsg+"\n")
+                else:
+                    print diffmsg
+
+                if diffSummary.has_key(name):
+                    diffSummary[name]+=1
+                else:
+                    diffSummary[name]=1
+        if foundDiff:
+            nBad+=1
+        else:
+            nGood+=1
+
+    msg="Found %i identical events and %i different events" % (nGood,nBad)
+    print msg
+    if details is not None:
+        details.write(msg+"\n")
+        
+    for n,v in diffSummary.iteritems():
+        msg="\tName: %s: %i Events differ" % (n,v)
+        print msg
+        if details is not None:
+            details.write(msg+"\n")
+
+    return (nGood,nBad)
+
+if __name__=='__main__':
+
+    if len(sys.argv)<3 or len(sys.argv)>4 or sys.argv[1]=="-help":
+        print "Usage:",sys.argv[0],"File1 File2 <treename>"
+        sys.exit(-1)
+
+    fnOld=sys.argv[1]
+    fnNew=sys.argv[2]
+    if len(sys.argv)>3:
+        treename=sys.argv[3]
+    else:
+        treename="POOLCollectionTree"
+
+    if not os.access(fnOld,os.R_OK):
+        print "Can't access file",fnOld
+        sys.exit(-1)
+        
+    if not os.access(fnNew,os.R_OK):
+        print "Can't access file",fnNew
+        sys.exit(-1)
+
+
+    fOld = TFile(fnOld)
+    if fOld is None:
+        print "Failed to open file",fnOld
+        
+    tOld = fOld.Get(treename)
+    if tOld is None:
+        print "Tree",treename,"not found in file",fnOld
+        sys.exit(-1)
+
+    fNew = TFile(fnNew)
+    if fNew is None:
+        print "Failed to open file",fnNew
+
+    tNew = fNew.Get(treename)
+    if tNew is None:
+        print "Tree",treename,"not found in file",fnNew
+        sys.exit(-1)
+ 
+    ndiff=diffTTree(tOld,tNew)
+
diff --git a/Tools/PyUtils/bin/dlldep.py b/Tools/PyUtils/bin/dlldep.py
new file mode 100755
index 00000000000..a1e89f9bb7d
--- /dev/null
+++ b/Tools/PyUtils/bin/dlldep.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+#
+# @file:    dlldep.py
+#
+# @purpose: Create a graph of the runtime dependencies of a dynamic library using ldd.
+#           The dependencies are printed in DOT (graphviz) language.
+#
+# @author:  Frank Winklmeier (CERN)
+#           based on Dominik Seichter's 'dependencies.sh':
+#           http://domseichter.blogspot.com/2008/02/visualize-dependencies-of-binaries-and.html
+#
+# $Id: dlldep.py,v 1.1 2009-02-09 17:56:35 fwinkl Exp $
+#
+
+import sys, os
+from os.path import basename
+import subprocess as sp
+import re
+
+import PyUtils.Dso as Dso
+
+class Cache:
+   """Global cache of already processed files"""
+   
+   files = {}   # Global cache of already processed libs
+   stats = []   # Global statistics
+   
+   def __init__(self):
+      self.myfiles = {}    # Dependencies of the currently processed lib
+      self.dotsrc = []     # DOT source code
+      
+   def add(self, shlib):
+      """Add file to local and global cache"""
+      self.myfiles[shlib.lib] = shlib
+      Cache.files[shlib.lib] = shlib
+
+   def writeDOT(self, file):      
+      for d in self.dotsrc: print >> file, d
+
+   def dot(self, code, style={}):
+      """Output a line of dot code"""   
+      if len(style)>0:
+         code += ' ['
+         for k,v in style.iteritems():
+            code += '%s="%s" ' % (k,v)
+         code += ']'
+      
+      self.dotsrc.append(code)
+      return
+   
+      
+class SharedLib:
+   """Represent a shared library with name, dependencies and other stats"""
+   
+   def __init__(self, distance, lib):
+      import os.path as osp
+      lib = osp.expanduser(osp.expandvars(lib))
+      if not osp.exists(lib):
+         l = Dso.find_library(lib)
+         if l:
+            lib = l
+      assert osp.exists(lib), "no such path [%s]" % (lib,)
+      self.lib = lib              # path of library
+      self.distance = distance    # distance from root lib
+      self.deplibs = self._getLibs(lib)   # direct dependencies
+
+   def _getLibs(self, lib):
+      """Get direct dependencies of shared library"""
+
+      # First check if already in global cache
+      cachedlib = Cache.files.get(lib)
+      if cachedlib: return cachedlib.deplibs
+      
+      # Run readelf to find direct dependencies
+      # Note: ldd itself recurses into dependencies, so we cannot use it here
+      p = sp.Popen(["readelf","-d",lib], stdout=sp.PIPE)
+      output = p.communicate()[0]
+      if p.returncode != 0:
+         print "Cannot run 'readelf' on",lib
+         return []
+
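+      # The NEEDED lines of 'readelf -d' look roughly like (illustrative):
+      #   0x0000000000000001 (NEEDED)  Shared library: [libCore.so]
+      # so the last whitespace-separated token, stripped of its brackets,
+      # is the library name.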
+      libs = []
+      for l in output.split("\n"):
+         if l.find("NEEDED")==-1: continue
+         libs += [l.split()[-1].strip("[]")]
+
+      # Run ldd to find full path of libraries
+      p = sp.Popen(["ldd",lib], stdout=sp.PIPE)
+      output = p.communicate()[0]
+      if p.returncode != 0:
+         print "Cannot run 'ldd' on",lib
+         return []
+
+      libpaths = []
+      for l in output.split("\n"):
+         fields = l.strip().split()
+         if len(fields)!=4: continue
+         path = fields[2]
+         if (fields[0] in libs) and len(path)>0:
+            libpaths += [path]
+
+      return libpaths
+
+               
+class Color:
+   """Helper class for colored nodes"""
+   
+   default = "white"
+   scheme = "rdbu8"
+   projects = {"DetCommon" : 2,
+               "AtlasCore" : 1,
+               "AtlasEvent" : 3,
+               "AtlasConditions" : 4,
+               "AtlasReconstruction" : 5,
+               "AtlasSimulation" : 6,
+               "AtlasTrigger" : 7,
+               "AtlasAnalysis" : 8
+               }
+
+   @classmethod
+   def get(cls, lib):
+      for p,c in cls.projects.iteritems():
+         if lib.find(p)!=-1: return "/%s/%s" % (cls.scheme, c)
+      return cls.default
+
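+# For example, a library path containing "AtlasCore" is mapped by Color.get()
+# to "/rdbu8/1", i.e. color 1 of the graphviz "rdbu8" scheme; libraries that
+# match no known project fall back to plain "white".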
+
+class Stats:
+   """Statistics calculated from Cache object"""
+   
+   def __init__(self, lib, cache):
+      self.lib = lib
+      self.depTotal = len(cache.myfiles)-1   # do not count ourselves
+      self.depDirect = 0
+      
+      for lib in cache.myfiles.values():
+         if lib.distance==1: self.depDirect += 1
+      return
+
+      
+def anaLib(lib, opt, cache, select = [], ignore = [], depth = 0):
+   """Get dependencies of shared library recursively"""
+
+   def process(path):
+      """Should this lib be processed?"""
+      for regexp in select:
+         if regexp.match(path): return True
+      if len(select)>0: return False
+      
+      for regexp in ignore:
+         if regexp.match(path): return False
+      return True
+
+   if opt.maxdepth and depth>=opt.maxdepth: return
+   
+   # Check if we analyzed this lib already
+   cachedlib = cache.myfiles.get(lib)
+   if cachedlib:
+      # Always save minimum distance
+      if depth<cachedlib.distance: cachedlib.distance=depth
+      return
+
+   shlib = SharedLib(depth, lib)
+   cache.add(shlib)
+   
+   for l in shlib.deplibs:
+      if process(l):
+         cache.dot('  "%s" -> "%s"' % (basename(lib), basename(l)))
+         anaLib(l, opt, cache, select, ignore, depth+1)
+         
+   return
+
+
+def processLib(lib, opt, dotFileName = None):
+   """Process one library"""
+
+   cache = Cache()
+   dot = cache.dot   # shortcut
+   
+   dot('digraph DependencyTree {')
+   dot('  ratio=0.9 nodesep=0.05')         # some reasonable default values
+   dot('  "%s" [shape=box]' % basename(lib))
+
+   select = []
+   ignore = []   # currently not used
+   if opt.filter:
+      for f in opt.filter: select += [re.compile(f)]
+   else:
+      select = [re.compile(".*atlas/software.*")]
+
+   anaLib(lib, opt, cache, select, ignore)
+
+   # Declare style of all nodes
+   for l,v in cache.myfiles.iteritems():
+      style = {}
+      # Special style for direct dependencies
+      if v.distance==1:
+         style["shape"] = "box"
+         
+      if not opt.nocolor:
+         style["style"] = "filled"
+         style["fillcolor"] = Color.get(l)
+         
+      dot('  "%s"' % (basename(l)), style)
+      
+   dot('}')
+   
+   # Write output to file
+   if dotFileName: outFile = open(dotFileName, "w")
+   else: outFile = open(basename(lib)+".dot", "w")
+   
+   cache.writeDOT(outFile)
+
+   # Calculate statistics
+   if opt.stats:
+      st = Stats(lib, cache)
+      Cache.stats += [st]
+      return st
+
+   return None
+
+
+   
+def printStats():
+   """Print statistics"""
+   import operator
+   
+   print "%-50s %7s %7s" % ("Library dependencies","Direct","Total")
+   print "-"*70
+   for s in sorted(Cache.stats, key=operator.attrgetter("depDirect"), reverse=True):
+      print "%-50s %7d %7d" % (basename(s.lib), s.depDirect, s.depTotal)
+
+   return
+
+      
+def main():
+
+   import optparse
+   parser = optparse.OptionParser(description="Create runtime dependecy graph for shared library. The output is a graph in DOT language. To visualize it use, e.g. 'dot -O -Tps mygraph.dot'. The rectangular nodes represent direct dependencies. Nodes belonging to the same project have the same color.",
+                                  usage="%prog [OPTIONS] LIB [LIB...]")
+
+   parser.add_option("-o", "--output",
+                     help="File for DOT source code (default is LIB.dot)")
+
+   parser.add_option("-d", "--maxdepth", type="int",
+                     help="Maximum depth of dependency tree [1..]")
+
+   parser.add_option("-f", "--filter", action="append",
+                     help="Only analyze libraries matching regular expression (can be specified multiple times) [default: .*atlas/software.*]")
+   
+   parser.add_option("--nocolor", action="store_true",
+                     help="Do not use colors")
+
+   parser.add_option("-s", "--stats", action="store_true",
+                     help="Print statistics")
+
+   (opt, args) = parser.parse_args()
+   if len(args)==0:
+      parser.error("Invalid number of arguments specified")
+      
+
+   if len(args)>1 and opt.output:
+      print "Multiple libraries specified. Ignoring output file name."
+      opt.output = None
+      
+   for lib in args:
+      processLib(lib, opt, opt.output)
+
+   if opt.stats:
+      printStats()
+      
+   return 0
+
+
+if __name__ == "__main__":
+   try:
+      sys.exit(main()) 
+   except KeyboardInterrupt:
+      sys.exit(1)
+      
diff --git a/Tools/PyUtils/bin/dso-stats.py b/Tools/PyUtils/bin/dso-stats.py
new file mode 100755
index 00000000000..ed8801c7b95
--- /dev/null
+++ b/Tools/PyUtils/bin/dso-stats.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file: PyUtils/bin/dso-stats.py
+# @purpose: compile statistics about shared libraries
+# @author:  Scott Snyder
+
+import re
+import sys
+import os
+import subprocess
+
+## monkey patch subprocess to be forward compatible with py-3k
+def getstatusoutput(cmd):
+    if isinstance(cmd, basestring):
+        cmd = cmd.split()
+    if not isinstance(cmd, (list, tuple)):
+        raise TypeError('expects a list, a tuple or a space separated string')
+    process = subprocess.Popen(cmd,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    stdout, _ = process.communicate()
+    return process.returncode, stdout
+subprocess.getstatusoutput = getstatusoutput
+del getstatusoutput
+
+def getstatus(cmd):
+    sc,_ = subprocess.getstatusoutput(cmd)
+    return sc
+subprocess.getstatus = getstatus
+del getstatus
+
+def getoutput(cmd):
+    _,out = subprocess.getstatusoutput(cmd)
+    return out
+subprocess.getoutput = getoutput
+del getoutput
+## --------------------------------------------------------------
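+
+## illustrative usage of the monkey-patched helpers (path is hypothetical):
+##   rc, out = subprocess.getstatusoutput('objdump -h /usr/lib/libfoo.so')
+##   out     = subprocess.getoutput('objdump -h /usr/lib/libfoo.so')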
+
+#lib = 'atlas-work4/InstallArea/i686-slc4-gcc34-dbg/lib/libAthenaROOTAccess.so'
+def _getpagesz():
+    import resource
+    sz = resource.getpagesize()
+    del resource
+    return sz
+PAGESIZE = int(_getpagesz())
+del _getpagesz
+
+
+pat = re.compile (' *[0-9]* ([^ ]+) *([0-9a-f]+)')
+
+format = "%(name)-30s %(dso)5s %(code)5s %(puredata)5s %(cpp)5s %(initdata)5s %(bss)5s %(frag)5s %(total)6s"
+
+def parse_lib (lib):
+    out = subprocess.getoutput ("objdump -h " + lib)
+    secs = []
+    for l in out.split ('\n'):
+        m = pat.match (l)
+        if m:
+            secs.append ((m.group(1), int(m.group(2), 16)))
+    return secs
+
+
+def _frag (sz):
+    return ((sz + PAGESIZE-1) & (~(PAGESIZE-1))) - sz
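+
+# For example, assuming a 4096-byte page, _frag(5000) == 3192: a 5000-byte
+# section padded to two full pages (8192 bytes) wastes 3192 bytes.  The actual
+# page size comes from resource.getpagesize() above.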
+
+
+def _cleanname (name):
+    if name == 'None':
+        return 'None'
+    name = os.path.basename (name)
+    name = os.path.splitext(name)[0]
+    if name.startswith ('lib'):
+        name = name[3:]
+    return name
+
+
+def _form (x):
+    return int ((x+512) / 1024)
+
+class Data:
+    def __init__ (self, secs = None, name = None):
+        self.name = name
+        self.dso = 0
+        self.code = 0
+        self.puredata = 0
+        self.cpp = 0
+        self.java = 0
+        self.initdata = 0
+        self.bss = 0
+        self.frag = 0
+
+        self.ro = 0
+        self.rw = 0
+        
+        if secs:
+            self.add_secs (secs)
+            self.est_frag()
+        return
+
+
+    def __iadd__ (self, other):
+        self.dso += other.dso
+        self.code += other.code
+        self.puredata += other.puredata
+        self.cpp += other.cpp
+        self.java += other.java
+        self.initdata += other.initdata
+        self.bss += other.bss
+        self.ro += other.ro
+        self.rw += other.rw
+        self.frag += other.frag
+        return self
+
+
+    def est_frag (self):
+        self.frag += _frag (self.ro)
+        self.frag += _frag (self.rw)
+        self.frag += _frag (self.bss)
+        return
+
+
+    def total (self):
+        return (self.dso + self.code + self.puredata + self.cpp +
+                self.java + self.initdata + self.frag + self.bss)
+
+
+    def add_secs (self, secs):
+        for s, sz in secs:
+            if s in ['.hash', '.dynsym', '.dynstr', '.gnu.version',
+                     '.gnu.version_r', '.rel.dyn', '.rel.plt',
+                     '.init', '.plt', '.fini']:
+                self.dso += sz
+                self.ro += sz
+
+            elif s in ['.text']:
+                self.code += sz
+                self.ro += sz
+
+            elif s in ['.rodata']:
+                self.puredata += sz
+                self.ro += sz
+
+            elif s in ['.eh_frame_hdr', '.eh_frame', '.gcc_except_table']:
+                self.cpp += sz
+                self.ro += sz
+
+            elif s in ['.ctors', '.dtors']:
+                self.cpp += sz
+                self.rw += sz
+
+                
+            elif s in ['.jcr']:
+                self.java += sz
+                self.rw += sz
+
+            elif s in ['.dynamic', '.got', '.got.plt']:
+                self.dso += sz
+                self.rw += sz
+
+            elif s in ['.data']:
+                self.initdata += sz
+                self.rw += sz
+
+            elif s in ['.bss']:
+                self.bss += sz
+
+            elif s == '.comment' or s.startswith ('.debug'):
+                pass
+
+            else:
+                print >> sys.stderr, '** Unknown section [%s] **' % s
+
+        return
+
+
+
+    def dump (self, f):
+        kw = {}
+        kw['name'] = _cleanname (self.name)
+        kw['dso'] = _form (self.dso)
+        kw['code'] = _form (self.code)
+        kw['puredata'] = _form (self.puredata)
+        kw['cpp'] = _form (self.cpp)
+        kw['java'] = _form (self.java)
+        kw['initdata'] = _form (self.initdata)
+        kw['frag'] = _form (self.frag)
+        kw['bss'] = _form (self.bss)
+        kw['total'] = _form (self.total())
+        print >> f, format % kw
+        
+
+# secs = parse_lib (lib)
+# data = Data(secs, name = lib)
+# print data.dso, data.code, data.puredata, data.cpp, data.java, data.initdata, data.bss
+# print data.ro, data.rw, data.frag
+
+# data2 = Data(secs)
+# data += data2
+# print data.dso, data.code, data.puredata, data.cpp, data.java, data.initdata, data.bss
+# print data.ro, data.rw, data.frag
+
+
+
+# data.dump (sys.stdout)
+
+
+kw = {'name' : 'Name',
+      'dso'  : 'DSO',
+      'code' : 'Code',
+      'puredata': 'Pure',
+      'cpp'  : 'C++',
+      'java' : 'Java',
+      'initdata': 'data',
+      'bss'  : 'BSS',
+      'frag' : 'Frag',
+      'total': 'Total'}
+print >> sys.stdout, format % kw
+
+
+total = Data(name = 'Total')
+import fileinput
+libs = []
+for l in fileinput.input():
+    if l[-1] == '\n':
+        l = l[:-1]
+    secs = parse_lib(l)
+    data = Data (secs, name = l)
+    libs.append (data)
+    total += data
+
+def fn (a, b):
+    return b.total() - a.total()
+libs.sort (fn)
+
+for l in libs:
+    l.dump (sys.stdout)
+total.dump (sys.stdout)
diff --git a/Tools/PyUtils/bin/dump-athfile.py b/Tools/PyUtils/bin/dump-athfile.py
new file mode 100755
index 00000000000..a2823d84f59
--- /dev/null
+++ b/Tools/PyUtils/bin/dump-athfile.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    dump-athfile.py
+# @purpose: simple command-line utility wrapping PyUtils.AthFile.fopen
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    May 2009
+#
+# @example:
+# @code
+# dump-athfile aod.pool.root
+# dump-athfile daq.data
+# dump-athfile /castor/cern.ch/user/j/johndoe/aod.pool.root
+# dump-athfile somedir/*/*.pool
+# @endcode
+#
+
+__version__ = "$Revision: 1.4 $"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+
+try:                import cPickle as pickle
+except ImportError: import pickle
+    
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [-f] my.file")
+    parser.add_option( "-f",
+                       "--file",
+                       dest = "fname",
+                       help = "The path to the POOL/BS file to analyze" )
+    parser.add_option( "-o",
+                       "--output",
+                       dest = "oname",
+                       default = 'athfile-infos.ascii',
+                       help = "Name of the output file which will contain the informations gathered during AthFile processing. These informations will be stored into a python-shelve file." )
+    parser.add_option( '--evtmax',
+                       dest = 'evtmax',
+                       default = 1,
+                       type = int,
+                       help = 'Maximum number of events to process in the file(s)')
+    parser.add_option( '--debug',
+                       dest = 'debug',
+                       default = False,
+                       action='store_true',
+                       help = 'enable debugging information')
+    
+    (options, args) = parser.parse_args()
+
+    if options.debug:
+        os.environ['ATHFILE_DEBUG'] = '1'
+        os.environ['PYUTILS_SHUTUP_DEBUG'] = '1'
+        pass
+        
+    fnames = []
+    
+    if len(args) > 0:
+        fnames = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.fname == None and len(fnames) == 0:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+    if options.fname != None:
+        fname = os.path.expandvars(os.path.expanduser(options.fname))
+        fnames.append(fname)
+
+    fnames = list(set(fnames))
+    sc = 0
+    import PyUtils.AthFile as af
+    msg = af.msg
+    infos = []
+    try:
+        infos = af.pfopen(fnames, evtmax=options.evtmax)
+    except Exception, e:
+        msg.error("Caught exception [%s] !!", str(e.__class__))
+        msg.error("What:\n%s\n%s\n%s",e,
+                  sys.exc_info()[0],
+                  sys.exc_info()[1])
+        sc = 1
+        pass
+
+    except :
+        msg.error("Caught something !! (don't know what)")
+        msg.error("\n%s\n%s",sys.exc_info()[0], sys.exc_info()[1])
+        sc = 10
+        pass
+
+    if sc != 0:
+        msg.info("Bye.")
+        sys.exit(sc)
+        pass
+    
+    for f in infos:
+        file_size = f.infos['file_size']/1024./1024.
+        if file_size < 0: file_size = None
+        else:             file_size = str(file_size)+' MB'
+
+        msg.info(':'*80)
+        msg.info('::::: summary :::::')
+        fmt = ' - %-15s: %s'
+        print fmt % ('file md5',       f.infos['file_md5sum'])
+        print fmt % ('file name',      f.infos['file_name'])
+        print fmt % ('file type',      f.infos['file_type'])
+        print fmt % ('file size',      file_size)
+        print fmt % ('file guid',      f.infos['file_guid'])
+        print fmt % ('nentries',       f.infos['nentries'])
+        print fmt % ('run number',     f.infos['run_number'])
+        print fmt % ('run type',       f.infos['run_type'])
+        print fmt % ('evt number',     f.infos['evt_number'])
+        print fmt % ('evt type',       f.infos['evt_type'])
+        print fmt % ('lumi block',     f.infos['lumi_block'])
+        print fmt % ('beam energy',    f.infos['beam_energy'])
+        print fmt % ('beam type',      f.infos['beam_type'])
+        print fmt % ('stream tags',    f.infos['stream_tags'])
+        print fmt % ('stream names',   f.infos['stream_names'])
+        print fmt % ('geometry',       f.infos['geometry'])
+        print fmt % ('conditions tag', f.infos['conditions_tag'])
+        _metadata = f.infos['metadata']
+        _metadata = _metadata.keys() if isinstance(_metadata,dict) else None
+        print fmt % ('meta data',      _metadata)
+
+        msg.info(':'*80)
+        if len(infos) > 1:
+            print ""
+        pass # loop over infos
+    
+    if options.oname:
+        oname = options.oname
+        msg.info("saving report into [%s]..." % oname)
+        if os.path.exists(oname):
+            os.rename(oname, oname+'.bak')
+        af.server.save_cache(oname)
+
+    msg.info("Bye.")
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/filter-and-merge-d3pd.py b/Tools/PyUtils/bin/filter-and-merge-d3pd.py
new file mode 100755
index 00000000000..03dd855ecf3
--- /dev/null
+++ b/Tools/PyUtils/bin/filter-and-merge-d3pd.py
@@ -0,0 +1,982 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# bwd compat
+from __future__ import with_statement
+
+# stdlib imports
+import os
+import sys
+import getopt
+import atexit
+
+# 3rd party imports
+import ROOT
+import PyCintex; PyCintex.Cintex.Enable()
+
+# root globals to prevent the ROOT garbage collector from sweeping the rug out from under us...
+_root_files = []
+_root_trees = []
+
+# Root has a global dtor ordering problem: the cintex trampolines
+# may be deleted before open files are closed.  Workaround is to explicitly
+# close open files before terminating.
+#
+def _close_root_files():
+    for f in _root_files:
+        if hasattr (f, 'Close'): f.Close()
+    del _root_files[0:-1]
+    return
+atexit.register(_close_root_files)
+
+def _fnmatch(fname, patterns):
+    """helper function wrapping the original `fnmatch:fnmatch` function but providing
+    support for a list of patterns to match against
+    """
+    from fnmatch import fnmatch
+    if isinstance(patterns, basestring):
+        patterns = [patterns]
+    for pattern in patterns:
+        if fnmatch(fname, pattern):
+            return True
+    return False
+
+def _make_fake_output(fname, tree_name, tree_title=None):
+    f = ROOT.TFile.Open(fname, "recreate")
+    if tree_title is None:
+        tree_title = tree_name
+    t = ROOT.TTree(tree_name, tree_title)
+    f.Write()
+    f.Close()
+    del t, f
+    return
+    
+class LBRange(object):
+    def __init__(self, run, lbmin, lbmax):
+        self.run = run
+        self.lbmin = lbmin
+        self.lbmax = lbmax
+
+def _interpret_grl(fname):
+    if not os.path.exists(fname):
+        raise OSError
+
+    lbs = []
+    if fname.endswith('.dat'):
+        for l in open(fname):
+            l = l.strip()
+            run, lbmin, lbmax = map(int, l.split())
+            lbs.append(LBRange(run, lbmin, lbmax))
+    elif fname.endswith('.xml'):
+        data = extract_data_from_xml(fname)
+        for i in data:
+            run, lbmin, lbmax = map(int, i)
+            lbs.append(LBRange(run, lbmin, lbmax))
+    else:
+        raise RuntimeError("unknown file extension (%s)" % (fname,))
+    return lbs
+
+def interpret_grl(fname="GRL.dat"):
+    fnames = []
+    if isinstance(fname, basestring):
+        fnames = [fname]
+    elif isinstance(fname, (list,tuple)):
+        fnames = fname[:]
+    else:
+        raise TypeError('fname must be a string or a sequence (got: %s)' %
+                        type(fname))
+    lbs = []
+    for fname in fnames:
+        lbs.extend(_interpret_grl(fname))
+    return lbs
+
+def pass_grl(run, lb, good_lbs):
+
+    for ilb in good_lbs:
+        if run != ilb.run:
+            continue
+
+        if ilb.lbmin <= lb <= ilb.lbmax:
+            return True
+
+    return False
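+
+# Illustrative sketch (hypothetical GRL content): a plain-text GRL ("GRL.dat")
+# holds one "run lbmin lbmax" triplet per line, e.g.
+#   152166 10 42
+#   152214  1  5
+# so that pass_grl(152166, 17, good_lbs) is True while
+# pass_grl(152166, 50, good_lbs) is False.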
+
+def warm_up(fname):
+    assert os.path.exists(fname)
+    import commands
+    rc,_ = commands.getstatusoutput("/bin/dd if=%s of=/dev/null" % (fname,))
+    return rc
+
+def apply_filters(branches, patterns):
+    """extract the branches which match the patterns.
+    a pattern can add or remove a branch.
+    if a branch matches no pattern, it is discarded.
+    if a branch matches several patterns, the last pattern wins.
+    """
+    from fnmatch import fnmatch
+    from collections import defaultdict
+    filtered = defaultdict(list)
+    matched_patterns = []
+    for br in branches:
+        for p in patterns:
+            if p == '':
+                continue
+            op = '-'
+            if p.startswith('+') or not p.startswith('-'):
+                if p[0] == '+':
+                    p = p[1:]
+                op = '+'
+            if p.startswith('-'):
+                op = '-'
+                p = p[1:]
+            if fnmatch(br, p):
+                filtered[br].append(op)
+                matched_patterns.append(p)
+    for p in patterns:
+        if not (p in matched_patterns):
+            print '::: warning: pattern [%s] could not be matched against any branch' % p
+            pass
+        pass
+    filtered = dict(filtered)
+    return sorted([k for k,v in filtered.iteritems() if v[-1] == '+'])
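+
+# Illustrative example (hypothetical branch names): with branches
+# ['el_pt', 'el_eta', 'jet_pt'] and patterns ['el_*', '-el_eta'],
+# 'el_pt' matches only the (implicitly positive) 'el_*' pattern and is kept,
+# 'el_eta' matches 'el_*' and then '-el_eta' (the last pattern wins) and is
+# dropped, and 'jet_pt' matches nothing and is discarded:
+#   apply_filters(['el_pt', 'el_eta', 'jet_pt'], ['el_*', '-el_eta'])
+#   -> ['el_pt']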
+
+def merge_all_trees(fnames, tree_name, memory, sfo,
+                    vars_fname=None, grl_fname=None,
+                    filter_fct=None,
+                    keep_all_trees=False,
+                    apply_recursive_opt=True):
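+    """Merge the `tree_name` tree from every input file in `fnames` into `sfo`.
+
+    Optionally keep only the branches listed in `vars_fname`, select events
+    with a good-runs list (`grl_fname`) and/or a user-supplied `filter_fct`,
+    also copy the other payload trees when `keep_all_trees` is set, and
+    optimize the output basket sizes to fit within `memory` kilobytes when
+    `apply_recursive_opt` is set.
+    """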
+    
+    oname = sfo[:]
+    if not oname.endswith(".root"):
+        oname = oname + ".root"
+        pass
+
+    root_open = ROOT.TFile.Open
+    fout = root_open(oname, "RECREATE", "", 1)
+    fout.ResetBit(ROOT.kCanDelete)
+    
+    memory *= 1024 # change to bytes
+
+    tree_maxsz = ROOT.TTree.GetMaxTreeSize()
+
+    ## summing up branch sizes over all the files
+    orig_file = root_open(fnames[0], "read")
+    orig_tree = getattr(orig_file, tree_name)
+    br_names = []
+    all_br_names = set(br.GetName() for br in orig_tree.GetListOfBranches())
+    
+    if not (vars_fname is None):
+        # open the file containing the list of branches to keep or discard
+        patterns = []
+        with open(vars_fname, 'r') as br_file:
+            for p in br_file:
+                patterns.append(p.strip())
+        orig_tree.SetBranchStatus("*", 0)
+        # apply_filters returns the list of branches to keep
+        br_names = apply_filters(all_br_names, patterns)
+        print "::: keeping only the following branches: (from file-list %s)" %\
+              vars_fname
+        for b in br_names:
+            print ":::   [%s]" % (b,)
+            orig_tree.SetBranchStatus(b,1)                            
+    else:
+        br_names = [br.GetName() for br in orig_tree.GetListOfBranches()]
+
+    nleaves = len(br_names)
+    print "::: nleaves=[%04i] tree=[%s]" % (nleaves, orig_tree.GetName())
+
+    tot_sz = [0]*nleaves    # zipped sizes collected from all files
+    basket_sz = [0]*nleaves # size to be optimized (starts with `tot_sz`)
+    baskets = [1]*nleaves   # cache
+
+    for idx,fname in enumerate(fnames):
+        f = root_open(fname, "read")
+        tree = getattr(f, tree_name)
+        for ibr,br_name in enumerate(br_names):
+            branch = tree.GetBranch(br_name)
+            if not branch:
+                print "***warning*** - tree [%s] has no branch [%s]" % (tree.GetName(),
+                                                                        br_name)
+                continue
+            branch.SetAddress(0)
+
+            tot_sz[ibr] += branch.GetTotBytes()
+            basket_sz[ibr] = tot_sz[ibr]
+            #baskets[ibr] = 1
+
+            pass # loop over branches
+        del tree
+        f.Close()
+        del f
+        pass # loop over trees
+
+    if apply_recursive_opt:
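+        # Greedy optimization (descriptive note): while the summed basket
+        # buffers exceed the requested memory budget, pick the branch for
+        # which adding one more basket frees the most memory (max_spare) and
+        # shrink its per-basket size accordingly.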
+        while 1: # recursive optimization
+            tot_mem = sum(basket_sz)
+            if tot_mem < memory:
+                break
+
+            max_spare = -1
+            max_spare_idx = None
+            for i in xrange(nleaves):
+                spare = tot_sz[i]/baskets[i] - tot_sz[i]/(baskets[i]+1)
+                if max_spare < spare:
+                    max_spare = spare
+                    max_spare_idx = i
+            if max_spare_idx is not None:
+                idx = max_spare_idx
+                baskets[idx] += 1
+                basket_sz[idx] = tot_sz[idx]/baskets[idx]
+            pass # end-while
+        pass # apply_recursive_opt
+    
+    # create the new (optimized) tree
+    new_tree = orig_tree.CloneTree(0) # no copy of events
+    new_tree.ResetBit(ROOT.kCanDelete)
+    new_tree.SetDirectory(fout)
+    # once cloning is done, separate the trees to avoid as many side-effects
+    # as possible
+    #orig_tree.GetListOfClones().Remove(new_tree)
+    orig_tree.ResetBranchAddresses()
+    new_tree.ResetBranchAddresses()
+
+    if vars_fname is not None:
+        orig_tree.SetBranchStatus("*", 0)
+        new_tree.SetBranchStatus("*", 0)
+        for br_name in br_names:
+            orig_tree.SetBranchStatus(br_name, 1)
+            new_tree.SetBranchStatus(br_name, 1)
+
+    # a list of other tree names to filter-and-merge
+    other_trees = []
+    if keep_all_trees:
+        print "::: capturing other trees to filter and merge..."
+        # also handle all other payload-trees
+        # to decide whether a tree is a payload-tree (and not a metadata tree,
+        # for which we don't know -by default- the correct way to merge)
+        # we just compare the number of events...
+        # FIXME: handle deeply-nested structures (/dir1/dir2/tree0,...)
+        _all_tree_names = list(n.GetName()
+                               for n in orig_file.GetListOfKeys()
+                               if (isinstance(getattr(orig_file, n.GetName()),
+                                              ROOT.TTree)
+                                   and n.GetName() != tree_name))
+        for n in _all_tree_names:
+            _old_tree = orig_file.Get(n)
+            print ":::  ->",n,
+            if _old_tree.GetEntries() != orig_tree.GetEntries():
+                # probably not a payload-tree but a metadata one...
+                del _old_tree
+                print "[reject]"
+                continue
+            print "[keep]"
+            _new_tree = _old_tree.CloneTree(0) # no copy of events
+            _new_tree.ResetBit(ROOT.kCanDelete)
+            _new_tree.SetDirectory(fout)
+            _old_tree.ResetBranchAddresses()
+            _new_tree.ResetBranchAddresses()
+            other_trees.append(_new_tree)
+            del _old_tree
+        print "::: capturing other trees to filter and merge... [done]"
+
+    if apply_recursive_opt:
+        # setting optimized basket sizes
+        tot_mem = 0.
+        tot_bkt = 0
+        max_bkt = 0
+        min_bkt = 1024**3
+
+        for ibr in xrange(nleaves):
+            br = new_tree.GetBranch(br_names[ibr])
+            if basket_sz[ibr] == 0:
+                basket_sz[ibr] = 16
+
+            basket_sz[ibr] = basket_sz[ibr] - (basket_sz[ibr] % 8)
+            br.SetBasketSize(basket_sz[ibr])
+
+            tot_mem += basket_sz[ibr]
+            tot_bkt += baskets[ibr]
+
+            if basket_sz[ibr] < min_bkt:
+                min_bkt = basket_sz[ibr]
+            if basket_sz[ibr] > max_bkt:
+                max_bkt = basket_sz[ibr]
+
+            pass # loop over leaves
+
+        print "::: optimize baskets: "
+        print ":::   total memory buffer: %8.3f kb" % (tot_mem/1024,)
+        print ":::   total baskets:       %d (min= %8.3f kb) (max= %8.3f kb)" % (
+            tot_bkt, min_bkt/1024., max_bkt/1024.)
+
+        del tot_sz, basket_sz, baskets
+        pass # apply_recursive_opt
+
+    # copying data
+    n_pass = 0
+    n_tot = 0
+    do_grl_selection = grl_fname is not None
+    
+    if do_grl_selection:
+        good_lbs = interpret_grl(fname=grl_fname)
+
+    print "::: processing [%i] trees..." % (len(fnames),)
+    for idx, fname in enumerate(fnames):
+        f = root_open(fname, "READ")
+
+        for other_tree in other_trees:
+            tree = getattr(f, other_tree.GetName())
+            other_tree.CopyAddresses(tree)
+                    
+        tree = getattr(f, tree_name)
+        new_tree.CopyAddresses(tree)
+        nentries = tree.GetEntries()
+        print ":::   entries:", nentries
+        for i in xrange(nentries):
+
+            nb = tree.GetEntry(i)
+            if nb <= 0:
+                print "*** error loading entry [%i]. got (%i) bytes" % (i,nb)
+                raise RuntimeError
+            n_tot += 1
+
+            accept_entry = True
+            if do_grl_selection:
+                if not pass_grl(tree.RunNumber, tree.lbn, good_lbs):
+                    accept_entry = False
+                pass
+            
+            if filter_fct and accept_entry:
+                try:
+                    if not filter_fct(tree):
+                        accept_entry = False
+                except Exception, err:
+                    print "*** problem running user filter fct:"
+                    print err
+                    print "*** (filter fct is now disabled)"
+                    filter_fct = None
+
+            if accept_entry:
+                n_pass += 1
+                if n_pass > 10:
+                    _nentries_cur = new_tree.GetEntries()
+                    fout = new_tree.GetCurrentFile()
+                    fout.Flush()
+                    out_fsize = fout.GetSize()
+                    avg_entry_sz = out_fsize / float(_nentries_cur or 1.)
+                    do_change_file = out_fsize + avg_entry_sz > 0.9 * tree_maxsz
+                    if do_change_file:
+                        #print "--- manually triggering TTree::ChangeFile..."
+                        # manually trigger the file split...
+                        # this is to ensure the split doesn't happen in between
+                        # the new_tree.Fill() and the other_tree.Fill() which
+                        # would de-synchronize the entries between the trees...
+                        fout = new_tree.ChangeFile(fout)
+                new_tree.Fill()
+                for other_tree in other_trees:
+                    _tree = f.Get(other_tree.GetName())
+                    nb = _tree.GetEntry(i)
+                    if nb <= 0:
+                        print "*** error loading entry [%i] for tree [%s]. got (%i) bytes" % (
+                            i, other_tree.GetName(), nb)
+                        continue
+                    other_tree.Fill()
+                    del _tree
+                    pass # loop over other trees
+                pass # entry accepted
+            pass # loop over entries
+        del tree
+        f.Close()
+        del f
+        pass # loop over input trees
+    print "::: processing [%i] trees... [done]" % (len(fnames),)
+
+    eff = 0.
+    if n_tot != 0:
+        eff = float(n_pass)/float(n_tot)
+    print "::: filter efficiency: %d/%d -> %s" % (n_pass, n_tot, eff)
+
+    fout = new_tree.GetCurrentFile()
+    fout.Write()
+    fout.Close()
+    del fout
+
+    return
+
+def order(m, chain_name, fnames, workdir):
+
+    # disabling the file-split as it may interfere badly with the re-ordering...
+    # set it to 2Tb
+    ROOT.TTree.SetMaxTreeSize(2 * 1024 * 1024 * 1024 * 1024)
+
+    print "::: nbr of files:", len(fnames)
+    for i,fn in enumerate(fnames):
+
+        timer = ROOT.TStopwatch()
+        timer.Start()
+        print "::: optimizing   [%s]..." % (fn,)
+        #warm_up(fn)
+
+        timer.Start()
+        fin = ROOT.TFile.Open(fn, "read")
+        tmp_fname = "%s_temporary_%03i.root" % (
+            chain_name.replace("/","_").replace(" ","_"),
+            i)
+        fout = ROOT.TFile.Open(tmp_fname, "recreate", "", 6)
+
+        # perform the (re)ordering for all trees
+        _all_tree_names = list(
+            n.GetName()
+            for n in fin.GetListOfKeys()
+            if isinstance(getattr(fin, n.GetName()),
+                          ROOT.TTree))
+        for tree_name in _all_tree_names:
+            tc2 = fin.Get(tree_name)
+            opt = {
+                0: "SortBasketsByOffset",
+                1: "SortBasketsByBranch",
+                2: "SortBasketsByEntry",
+                }.get(m, "SortBasketsByBranch")
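+            # note: main() below invokes order(m=2, ...), which selects
+            # "SortBasketsByEntry fast" here.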
+            opt_tree = tc2.CloneTree(-1, opt + " fast")
+            opt_tree.Write("", ROOT.TObject.kOverwrite)
+        # -
+
+        timer.Stop()
+
+        print ":::   wallclock time:", timer.RealTime()
+        print ":::   CPU time:      ", timer.CpuTime()
+
+        try:
+            # fout may have been invalidated if the file-size limit was hit
+            # and _1.root, _2.root,... files were created...
+            if fout:
+                fout.Close()
+        except Exception,err:
+            print "**error**:",err
+        fin.Close()
+
+        dst = os.path.join(workdir, os.path.basename(fn))
+        print "::: optimized as [%s]... [done]" % (dst,)
+        
+        # rename the temporary into the original
+        import shutil
+        shutil.move(src=tmp_fname,
+                    dst=dst)
+                                                    
+        #os.rename(tmp_fname, fn)
+    return
+
+def _load_filter_fct(selection):
+    """
+    helper function to locate a filter function or compile one from a
+    source code snippet.
+    if `selection` begins with 'file:', `selection` is interpreted as the
+    path to a file where a 'filter_fct' function is defined and
+    importable.
+    otherwise, `selection` is compiled into a lambda function.
+    """
+    import imp
+    import inspect
+    import os.path as osp
+
+    filter_fct = None
+
+    if selection is None:
+        return filter_fct
+    
+    if not isinstance(selection, basestring):
+        print "** invalid filter-fct type (%r)" % (type(selection),)
+        return filter_fct
+    
+    if selection == "":
+        return filter_fct
+
+    def plugin_filter(obj):
+        if inspect.isfunction(obj):
+            return obj.__name__ == 'filter_fct'
+        
+    if selection.startswith('file:'):
+        fname = selection[len('file:'):]
+        fname = osp.expanduser(osp.expandvars(fname))
+        plugin = open(fname, 'r')
+        mod = imp.load_source(plugin.name[:-3], plugin.name, plugin)
+        plugin.close()
+        filter_fct = inspect.getmembers(mod, plugin_filter)[0][1]
+    else:
+        fct_code = "filter_fct = lambda t: %s" % selection
+        my_locals = dict(locals())
+        exec fct_code in {}, my_locals
+        filter_fct = my_locals['filter_fct']
+    return filter_fct
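+# usage sketch for _load_filter_fct (hypothetical selections):
+#   _load_filter_fct("t.eg_px[0] > 10000")  -> lambda t: t.eg_px[0] > 10000
+#   _load_filter_fct("file:foo.py")         -> 'filter_fct' imported from foo.py
+#   _load_filter_fct(None)                  -> None (filtering disabled)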
+
+class Options(object):
+    """place holder for command line options values"""
+    pass
+
+def main():
+
+    global _root_files, _root_trees
+    
+    _opts = []
+    _useropts = "i:o:t:m:s:h"
+    _userlongopts = [
+        "in=", "out=", "tree=", "var=", "maxsize=", "grl=", "fakeout",
+        "selection=",
+        "keep-all-trees",
+        "disable-recursive-opt",
+        "help"
+        ]
+    _error_msg = """\
+Accepted command line options:
+ -i, --in=<INFNAME>                   ...  file containing the list of input files
+ -o, --out=<OUTFNAME>                 ...  output file name
+ -t, --tree=<TREENAME>                ...  name of the tree to be filtered.
+                                           other trees won't be copied by default
+                                           (except if you pass --keep-all-trees)
+     --var=<VARSFNAME>                ...  path to file listing the branch names
+                                           to be kept in the output file.
+     --grl=<GRLFNAME>                 ...  path to a GRL XML file or a list of
+                                           comma-separated GRL XML files
+ -m, --maxsize=<sz>                   ...  maximum zip size of the main tree (in Mb.)
+     --fakeout                        ...  create a fake output file if an empty
+                                           or invalid input tree is found (eases
+                                           the pain on the GRID)
+ -s, --selection=<PYTHON_CODE>        ...  a python snippet to select events
+                                           or the path to python file holding
+                                           the definition of a 'filter_fct'.
+                                           ex:
+                                             t.eg_px[0] > 10000 and t.eg_py[0] > 10000
+                                           NOTE: the tree must be named 't' in your code.
+                                           or:
+                                            file:foo.py
+                                            where foo.py contains:
+                                            def filter_fct(t):
+                                                return t.eg_px[0] > 10000
+                                           NOTE: the function must be named 'filter_fct' and take the tree as a parameter
+     --keep-all-trees                 ...  keep, filter and merge all other trees.
+     --disable-recursive-opt          ...  switch to disable a recursive (size)
+                                           optimization. (The recursive optimization
+                                           might be excessively SLOW on large n-tuples.)
+ """
+
+    for arg in sys.argv[1:]:
+        _opts.append(arg)
+    
+    opts = Options()
+    opts.maxsize = 1800
+    opts.output_file = None
+    opts.vars_fname = None
+    opts.grl_fname = None
+    opts.fake_output = False
+    opts.selection = None
+    opts.keep_all_trees = False
+    opts.apply_recursive_opt = True
+    
+    try:
+        optlist, args = getopt.getopt(_opts, _useropts, _userlongopts)
+    except getopt.error:
+        print sys.exc_value
+        print _error_msg
+        sys.exit(1)
+
+    for opt,arg in optlist:
+        if opt in ("-i", "--in"):
+            opts.input_files = arg
+
+        elif opt in ("-o", "--out"):
+            opts.output_file = arg
+
+        elif opt in ("-t", "--tree"):
+            opts.tree_name = str(arg).strip()
+
+        elif opt in ("--var",):
+            opts.vars_fname = arg
+
+        elif opt in ("-m", "--maxsize"):
+            opts.maxsize = int(arg)
+
+        elif opt in ('--grl',):
+            opts.grl_fname = arg
+
+        elif opt in ('--fakeout',):
+            opts.fake_output = True
+
+        elif opt in ('-s', '--selection',):
+            opts.selection = str(arg).strip()
+
+        elif opt in ('--keep-all-trees',):
+            opts.keep_all_trees = True
+
+        elif opt in ('--disable-recursive-opt',):
+            opts.apply_recursive_opt = False
+            
+        elif opt in ("-h", "--help"):
+            print _error_msg
+            sys.exit(0)
+
+    print ":"*80
+    print "::: filter'n'merge d3pds"
+    print ":::"
+    # for AttributeListLayout which uses CINT for its dict...
+    #ROOT.gSystem.Load('liblcg_RootCollection')
+    
+    workdir = os.path.dirname(opts.output_file)
+    if workdir == '':
+        workdir = '.'
+    if not os.path.exists(workdir):
+        os.makedirs(workdir)
+
+    if isinstance(opts.grl_fname, basestring):
+        opts.grl_fname = opts.grl_fname.split(',')
+        from glob import glob
+        grl_fnames = []
+        for grl_fname in opts.grl_fname:
+            grl_fnames.extend(glob(grl_fname))
+        opts.grl_fname = grl_fnames
+        
+    print "::: input files:   ",opts.input_files
+    print "::: output file:   ",opts.output_file
+    print "::: vars fname:    ",opts.vars_fname
+    print "::: tree name:     ",opts.tree_name
+    print "::: GRL file:      ",opts.grl_fname
+    print "::: max tree sz:   ",opts.maxsize, "Mb"
+    if opts.fake_output:
+        print "::: creation of fake-output (if needed) [ON]"
+    print "::: user filter:   ",opts.selection
+    print "::: keep all trees:", opts.keep_all_trees
+    print "::: recursive opt: ", opts.apply_recursive_opt
+    
+    # slightly increase the max size (so that the manual ChangeFile at 0.9 of
+    # the current MaxTreeSize will fall within the user-provided one...)
+    ROOT.TTree.SetMaxTreeSize(long(opts.maxsize * 1024 * 1024 / 0.9))
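+    # e.g. with the default --maxsize=1800, the ROOT limit becomes roughly
+    # 1800 Mb / 0.9 = 2000 Mb, so the manual ChangeFile triggered at 90% of
+    # that limit kicks in around the 1800 Mb requested by the user.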
+    
+    ## try to compile the user filtering function
+    filter_fct = None
+    try:
+        filter_fct = _load_filter_fct(opts.selection)
+    except Exception,err:
+        print "*** problem loading filter-fct:"
+        print err
+        print "*** filter-fct is now disabled"
+        filter_fct = None
+        
+    iflist = [l.strip() for l in open(opts.input_files, "r") if l.strip()]
+    for l in iflist:
+        fname = l.strip()
+        if not fname:
+            continue
+        f = ROOT.TFile.Open(fname,"read")
+        if not f:
+            raise RuntimeError("no such file [%s]" % fname)
+
+        tree = f.Get(opts.tree_name)
+        if not tree:
+            print "***warning*** no such tree [%s] in file [%s] (IGNORING!)" % (
+                opts.tree_name, fname,
+                )
+            continue
+        if tree.GetEntries()==0:
+            print "**warning** no entries in tree [%s] in file [%s] (IGNORING!)" % (
+                opts.tree_name, fname,
+                )
+            continue
+        if tree.GetListOfBranches().GetEntriesFast() == 0:
+            print "**warning** tree [%s] in file [%s] has no branches (IGNORING!)" % (
+                opts.tree_name, fname,
+                )
+            continue
+                
+        #f.ResetBit(ROOT.kCanDelete)
+        _root_files.append(fname)
+        print " - loaded [%s]" % (fname,)
+
+        #tree.ResetBit(ROOT.kCanDelete)
+        _root_trees.append(opts.tree_name) # whatever...
+        del tree
+        f.Close()
+        del f
+
+    if len(_root_trees) == 0:
+        print "::: no valid tree left"
+        if opts.fake_output:
+            print "::: crafting an empty output file"
+            _make_fake_output(opts.output_file, opts.tree_name)
+            return 0
+        return 0 # FIXME: should this become an error of some sort ?
+    
+    ## chain = ROOT.TChain(opts.tree_name)
+    ## _root_chains.append(chain)
+    
+    nfiles = len(_root_files)
+    if nfiles <= 0:
+        print "::: no input files found"
+        return 2
+
+    timer = ROOT.TStopwatch()
+    timer.Start()
+    merge_all_trees(fnames=_root_files,
+                    tree_name =opts.tree_name,
+                    memory=1024*30,
+                    sfo=opts.output_file,
+                    vars_fname=opts.vars_fname,
+                    grl_fname=opts.grl_fname,
+                    filter_fct=filter_fct,
+                    keep_all_trees=opts.keep_all_trees,
+                    apply_recursive_opt=opts.apply_recursive_opt)
+
+    timer.Stop()
+
+    print "::: merging done in:"
+    print ":::   wallclock:",timer.RealTime()
+    print ":::   CPU time: ",timer.CpuTime()
+
+    # del _root_chains[:]
+    
+    print "::: performing re-ordering..."
+    import glob
+    import os.path as osp
+    fname_pattern = osp.splitext(opts.output_file)[0]
+    # re-order all output files (in case they were split off)
+    fnames= sorted(glob.glob(fname_pattern + "*.root"))
+    order(m=2,
+          chain_name=opts.tree_name,
+          fnames=fnames,
+          workdir=workdir)
+    print "::: performing re-ordering... [done]"
+
+    print "::: bye."
+    print ":"*80
+    return 0
+
+###################### xmldict #########################
+# @file PyUtils/python/xmldict.py
+# @purpose converts an XML file into a python dict, back and forth
+# @author http://code.activestate.com/recipes/573463
+#         slightly adapted to follow PEP8 conventions
+
+__version__ = "$Revision: 547442 $"
+__doc__ = """\
+functions to convert an XML file into a python dict, back and forth
+"""
+__author__ = "Sebastien Binet <binet@cern.ch>"
+
+
+# hack: LCGCMT had the py-2.5 xml.etree module hidden by mistake.
+#       this is to import it, by hook or by crook
+def import_etree():
+    import xml
+    # first try the usual way
+    try:
+        import xml.etree
+        return xml.etree
+    except ImportError:
+        pass
+    # do it by hook or by crook...
+    import sys, os, imp
+    xml_site_package = os.path.join(os.path.dirname(os.__file__), 'xml')
+    m = imp.find_module('etree', [xml_site_package])
+
+    etree = imp.load_module('xml.etree', *m)
+    setattr(xml, 'etree', etree)
+    return etree
+try:
+    etree = import_etree()
+    from xml.etree import ElementTree
+
+    ## module implementation ---------------------------------------------------
+    class XmlDictObject(dict):
+        def __init__(self, initdict=None):
+            if initdict is None:
+                initdict = {}
+            dict.__init__(self, initdict)
+
+        def __getattr__(self, item):
+            return self.__getitem__(item)
+
+        def __setattr__(self, item, value):
+            self.__setitem__(item, value)
+
+        def __str__(self):
+            if '_text' in self:
+                return self['_text']
+            else:
+                return dict.__str__(self)
+
+        @staticmethod
+        def wrap(x):
+            if isinstance(x, dict):
+                return XmlDictObject ((k, XmlDictObject.wrap(v))
+                                      for (k, v) in x.iteritems())
+            elif isinstance(x, list):
+                return [XmlDictObject.wrap(v) for v in x]
+            else:
+                return x
+
+        @staticmethod
+        def _unwrap(x):
+            if isinstance(x, dict):
+                return dict ((k, XmlDictObject._unwrap(v))
+                             for (k, v) in x.iteritems())
+            elif isinstance(x, list):
+                return [XmlDictObject._unwrap(v) for v in x]
+            else:
+                return x
+
+        def unwrap(self):
+            return XmlDictObject._unwrap(self)
+
+        pass # Class XmlDictObject
+    
+    def _dict2xml_recurse(parent, dictitem):
+        assert not isinstance(dictitem, list)
+
+        if isinstance(dictitem, dict):
+            for (tag, child) in dictitem.iteritems():
+                if str(tag) == '_text':
+                    parent.text = str(child)
+                elif isinstance(child, list):
+                    for listchild in child:
+                        elem = ElementTree.Element(tag)
+                        parent.append(elem)
+                        _dict2xml_recurse (elem, listchild)
+                else:                
+                    elem = ElementTree.Element(tag)
+                    parent.append(elem)
+                    _dict2xml_recurse (elem, child)
+        else:
+            parent.text = str(dictitem)
+    
+    def dict2xml(xmldict):
+        """convert a python dictionary into an XML tree"""
+        roottag = xmldict.keys()[0]
+        root = ElementTree.Element(roottag)
+        _dict2xml_recurse (root, xmldict[roottag])
+        return root
+
+    def _xml2dict_recurse (node, dictclass):
+        nodedict = dictclass()
+
+        if len(node.items()) > 0:
+            # if we have attributes, set them
+            nodedict.update(dict(node.items()))
+
+        for child in node:
+            # recursively add the element's children
+            newitem = _xml2dict_recurse (child, dictclass)
+            if nodedict.has_key(child.tag):
+                # found duplicate tag, force a list
+                if type(nodedict[child.tag]) is type([]):
+                    # append to existing list
+                    nodedict[child.tag].append(newitem)
+                else:
+                    # convert to list
+                    nodedict[child.tag] = [nodedict[child.tag], newitem]
+            else:
+                # only one, directly set the dictionary
+                nodedict[child.tag] = newitem
+
+        if node.text is None: 
+            text = ''
+        else: 
+            text = node.text.strip()
+
+        if len(nodedict) > 0:            
+            # if we have a dictionary add the text as a dictionary value
+            # (if there is any)
+            if len(text) > 0:
+                nodedict['_text'] = text
+        else:
+            # if we don't have child nodes or attributes, just set the text
+            if node.text: nodedict = node.text.strip()
+            else:         nodedict = ""
+
+        return nodedict
+        
+    def xml2dict (root, dictclass=XmlDictObject):
+        """convert an xml tree into a python dictionary
+        """
+        return dictclass({root.tag: _xml2dict_recurse (root, dictclass)})
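+    # usage sketch (hypothetical data):
+    #   root = dict2xml({'run': {'lbn': '42'}})  # Element serializing to <run><lbn>42</lbn></run>
+    #   d    = xml2dict(root)                    # -> {'run': {'lbn': '42'}}
+    # for simple trees like this one the round-trip preserves the content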
+    #####################################################################
+
+except ImportError:
+    print "**WARNING: could not import 'xml.etree' (check your python version)"
+    print "           you won't be able to correctly read GRL XML files !"
+    
+def extract_data_from_xml(fname="GRL.xml"):
+    """simple helper function to convert a GRL xml file into a list
+    of tuples (run-nbr, lumi-block-start, lumi-block-stop)
+    """
+    import sys
+    assert "xml.etree" in sys.modules, \
+           "no 'xml.etree' module was imported/available"
+    data =[]
+    dd=xml2dict(etree.ElementTree.parse(str(fname)).getroot())
+
+    lbks = dd['LumiRangeCollection']['NamedLumiRange']['LumiBlockCollection']
+    if not isinstance(lbks, (list, tuple)):
+        lbks = [lbks]
+    for lbk in lbks:
+        assert isinstance(lbk,dict), \
+               "expect a dict-like object (got type=%s - value=%r)" % (type(lbk), repr(lbk))
+        runnumber=lbk['Run']
+        run_ranges=lbk['LBRange']
+
+        #xml2dict returns a dict when there is only one lbn range per run
+        #and a list when there are several lbn ranges per run
+        #==> need a different piece of code
+        #The following lines 'convert' a dict into a list of 1 dict
+        if isinstance(run_ranges,dict):
+            run_ranges=[run_ranges]
+            pass
+
+        #loop over run ranges
+        for lbrange in run_ranges: 
+            lbn_min=lbrange['Start']
+            lbn_max=lbrange['End']
+            # GRL schema changed from:
+            #  <LumiBlockCollection>
+            #     <Run>178044</Run>
+            #     <LBRange Start="42" End="666"/>
+            #    ...
+            # to:
+            #  <LumiBlockCollection>
+            #     <Run PrescaleRD0="8" PrescaleRD1="8">178044</Run>
+            #     <LBRange Start="42" End="666"/>
+            #    ...
+            if isinstance(runnumber, XmlDictObject):
+                runnumber = runnumber['_text']
+            #print runnumber,"  ", lbn_min,"  ", lbn_max
+            data.append((runnumber, lbn_min, lbn_max))
+            pass
+    return data
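+# example (hypothetical GRL content): a GRL declaring run 178044 with a single
+# <LBRange Start="42" End="666"/> yields [('178044', '42', '666')] -- the
+# values are the strings parsed from the XML attributes/text.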
+
+### script entry point ###
+if __name__ == "__main__":
+    sys.exit(main())
+
+"""
+tests:
+
+xrdcp root://eosatlas//eos/atlas/user/b/binet/utests/utests/filter-d3pd/ntuple.0.root .
+xrdcp root://eosatlas//eos/atlas/user/b/binet/utests/utests/filter-d3pd/ntuple.1.root .
+cat > input.txt << EOF
+ntuple.0.root
+ntuple.1.root
+EOF
+cat > vars.txt << EOF
+-*
++el_vertx
++el_verty
++el_L2_errpt
+EOF
+filter-and-merge-d3pd -i input.txt -o merged.root -t egamma --var=vars.txt -s ''
+filter-and-merge-d3pd -i input.txt -o merged.root -t egamma --var=vars.txt -s 't.el_verty.size() > 0 and t.el_verty[0]>=0.'
+cat > foo.py << EOF
+def filter_fct(t):
+    return t.el_verty.size() > 0 and t.el_verty[0]>=0.
+EOF
+filter-and-merge-d3pd -i input.txt -o merged.root -t egamma --var=vars.txt -s 'file:foo.py'
+"""
diff --git a/Tools/PyUtils/bin/gen-typereg-dso.py b/Tools/PyUtils/bin/gen-typereg-dso.py
new file mode 100755
index 00000000000..0e1b74e5972
--- /dev/null
+++ b/Tools/PyUtils/bin/gen-typereg-dso.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/bin/gen-typereg-dso.py
+# @purpose a python script to workaround various limitations of rootmap files
+#          and reflex/cint typename impedance mismatches
+# @author Sebastien Binet <binet@cern.ch>
+# @date   February 2009
+
+__doc__ = '''a python script to workaround various limitations of rootmap
+files and reflex/cint typename impedance mismatches.
+'''
+__version__ = '$Revision: 1.1 $'
+__author__ = 'Sebastien Binet <binet@cern.ch>'
+
+
+if __name__ == "__main__":
+    import sys
+    import os
+    import PyUtils.Dso as Dso
+    oname = 'typereg_dso_db.csv'
+    if len(sys.argv) > 1:
+        oname = sys.argv[1]
+    else:
+        from PyCmt.Cmt import CmtWrapper
+        project_root = CmtWrapper().projects()[0]
+        from PyUtils.path import path
+        oname = path(project_root) / "InstallArea" / "share" / oname
+        if not os.path.exists(oname.dirname()):
+            os.makedirs(oname.dirname())
+            pass
+    rflx_names = Dso.gen_typeregistry_dso(oname)
+    sys.exit(0)
diff --git a/Tools/PyUtils/bin/gen_klass.py b/Tools/PyUtils/bin/gen_klass.py
new file mode 100755
index 00000000000..daea617c1cc
--- /dev/null
+++ b/Tools/PyUtils/bin/gen_klass.py
@@ -0,0 +1,1177 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# @purpose: helper script to generate header and cxx files of various
+#           athena components (svc/tool/alg/isvc/itool/object)
+# @author Sebastien Binet
+# @date   April 2008
+
+import sys,os
+
+class GenTypes:
+    values = ('object',
+              'isvc', 'svc',
+              'itool', 'tool',
+              'alg',
+              ## the python ones
+              'pyalg', 'pysvc', 'pytool', 'pyaud'
+              )
+    needing_iface = ('svc', 'tool')
+    pass
+
+class Templates:
+    isvc_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s 
+#define %(guard)s 1 
+
+/** @class %(klass)s
+ */
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "GaudiKernel/IService.h"
+
+// %(pkg)s includes
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::IService
+{ 
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /** Destructor: 
+   */
+  virtual ~%(klass)s();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  static const InterfaceID& interfaceID();
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+  static const InterfaceID IID_%(klass)s("%(klass)s", 1, 0);
+  return IID_%(klass)s; 
+}
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    isvc_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    itool_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+
+// HepMC / CLHEP includes
+
+// FrameWork includes
+#include "GaudiKernel/IAlgTool.h"
+
+// Forward declaration
+
+%(namespace_begin)s
+
+static const InterfaceID IID_%(klass)s("%(klass)s", 1, 0);
+
+class %(klass)s
+  : virtual public ::IAlgTool
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /** Destructor: 
+   */
+  virtual ~%(klass)s();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+  static const InterfaceID& interfaceID();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Protected data: 
+  /////////////////////////////////////////////////////////////////// 
+ protected: 
+
+}; 
+
+/// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+/// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+   return IID_%(klass)s; 
+}
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    itool_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// Framework includes
+//#include "GaudiKernel/MsgStream.h"
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+///////////////////////////////////////////////////////////////////
+
+%(namespace_end)s
+"""
+    object_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <iosfwd>
+
+// Gaudi includes
+
+// Forward declaration
+
+%(namespace_begin)s
+
+class %(klass)s
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Copy constructor: 
+  %(klass)s( const %(klass)s& rhs );
+
+  /// Assignment operator: 
+  %(klass)s& operator=( const %(klass)s& rhs ); 
+
+  /// Constructor with parameters: 
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+}; 
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+//std::ostream& operator<<( std::ostream& out, const %(klass)s& o );
+
+%(namespace_end)s
+
+#endif //> !%(guard)s
+"""
+
+    object_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+// STL includes
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    svc_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthService.h"
+
+// %(ipkg)s
+#include "%(ipkg)s/%(iklass)s.h"
+
+// Forward declaration
+class ISvcLocator;
+template <class TYPE> class SvcFactory;
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::%(iklass)s,
+            public ::AthService
+{ 
+  friend class SvcFactory<%(klass)s>;
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& name, ISvcLocator* pSvcLocator );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Assignment operator: 
+  //%(klass)s &operator=(const %(klass)s &alg); 
+
+  /// Gaudi Service Implementation
+  //@{
+  virtual StatusCode initialize();
+  virtual StatusCode finalize();
+  virtual StatusCode queryInterface( const InterfaceID& riid, 
+                                     void** ppvInterface );
+  //@}
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  static const InterfaceID& interfaceID();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+  return %(iklass)s::interfaceID(); 
+}
+
+%(namespace_end)s
+
+#endif //> !%(guard)s
+"""
+
+    svc_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/Property.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& name, 
+		      ISvcLocator* pSvcLocator ) : 
+  ::AthService( name, pSvcLocator )
+{
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena Service's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+// Query the interfaces.
+//   Input: riid, Requested interface ID
+//          ppvInterface, Pointer to requested interface
+//   Return: StatusCode indicating SUCCESS or FAILURE.
+// N.B. Don't forget to release the interface after use!!!
+StatusCode 
+%(klass)s::queryInterface(const InterfaceID& riid, void** ppvInterface) 
+{
+  if ( %(iklass)s::interfaceID().versionMatch(riid) ) {
+    *ppvInterface = dynamic_cast<%(iklass)s*>(this);
+  } else {
+    // Interface is not directly available : try out a base class
+    return ::AthService::queryInterface(riid, ppvInterface);
+  }
+  addRef();
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    alg_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthAlgorithm.h"
+
+%(namespace_begin)s
+
+class %(klass)s
+  : public ::AthAlgorithm
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& name, ISvcLocator* pSvcLocator );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Assignment operator: 
+  //%(klass)s &operator=(const %(klass)s &alg); 
+
+  // Athena algorithm's Hooks
+  virtual StatusCode  initialize();
+  virtual StatusCode  execute();
+  virtual StatusCode  finalize();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    alg_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/Property.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& name, 
+			  ISvcLocator* pSvcLocator ) : 
+  ::AthAlgorithm( name, pSvcLocator )
+{
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena Algorithm's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::execute()
+{  
+  ATH_MSG_DEBUG ("Executing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    tool_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthAlgTool.h"
+#include "GaudiKernel/ServiceHandle.h"
+
+// %(ipkg)s includes
+#include "%(ipkg)s/%(iklass)s.h"
+
+// Forward declaration
+class StoreGateSvc;
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::%(iklass)s,
+            public ::AthAlgTool
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& type,
+	     const std::string& name, 
+	     const IInterface* parent );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Athena algtool's Hooks
+  virtual StatusCode  initialize();
+  virtual StatusCode  finalize();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  typedef ServiceHandle<StoreGateSvc> StoreGateSvc_t;
+  /// Pointer to the StoreGate service
+  StoreGateSvc_t m_storeGate;
+
+  // Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    tool_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/IToolSvc.h"
+
+// StoreGate
+#include "StoreGate/StoreGateSvc.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& type, 
+		      const std::string& name, 
+		      const IInterface* parent ) : 
+  ::AthAlgTool  ( type, name, parent   ),
+  m_storeGate( "StoreGateSvc", name )
+{
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena algtool's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  // Get pointer to StoreGateSvc and cache it :
+  if ( !m_storeGate.retrieve().isSuccess() ) {
+    ATH_MSG_ERROR ("Unable to retrieve pointer to StoreGateSvc");
+    return StatusCode::FAILURE;
+  }
+  
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    pyalg_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 1.5 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Alg):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def execute(self):
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pysvc_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 1.5 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Svc):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pytool_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 1.5 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.AlgTool):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pyaud_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 1.5 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Aud):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+def gen_files(pkg="", klass="", klass_type='object', fname='foo',
+              ipkg="", iklass=""):
+    """Simple helper function to generate files based on some information
+     @param pkg the name of the package holding the class we want to generate
+     @param klass the (fully qualified) name of the C++ class to generate
+     @param klass_type the type of class to generate (svc/tool/alg/object)
+     @param fname the filename to generate
+     @param ipkg the name of the package holding the interface of the class
+     @param iklass the name of the interface of the class we generate
+    """
+    try:
+        hdr = getattr(Templates, '%s_hdr_template'%klass_type)
+        cxx = getattr(Templates, '%s_cxx_template'%klass_type)
+    except AttributeError,err:
+        print "::: UNKNOWN klass_type [%s] !" % klass_type
+        raise err
+
+    namespace_begin,namespace_end = "",""
+    if klass.count("::")>0:
+        nm    = klass.split("::")[0]
+        klass = klass.split("::")[1]
+        namespace_begin = "namespace %s {" % nm
+        namespace_end   = "} //> end namespace %s" % nm
+        pass
+
+    guard = "%s_%s_H" % (pkg.upper(), klass.upper())
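+    # e.g. (hypothetical inputs) pkg='MyAnalysis' and klass='Analysis::MyTool'
+    # give namespace_begin='namespace Analysis {', klass='MyTool' and
+    # guard='MYANALYSIS_MYTOOL_H'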
+
+    d = dict( pkg=pkg,
+              klass=klass,
+              ipkg=ipkg,
+              iklass=iklass,
+              guard=guard,
+              namespace_begin=namespace_begin,
+              namespace_end=namespace_end
+              )
+    fname = os.path.splitext(fname)[0]
+    
+    o_hdr = open(fname+'.h', 'w')
+    o_hdr.writelines(hdr%d)
+    o_hdr.flush()
+    o_hdr.close()
+
+    o_cxx = open(fname+'.cxx', 'w')
+    o_cxx.writelines(cxx%d)
+    o_cxx.flush()
+    o_cxx.close()
+
+    return
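+# usage sketch (hypothetical arguments):
+#   gen_files(pkg='MyAnalysis', klass='MyAnalysis::MyAlg', klass_type='alg',
+#             fname='MyAlg')
+#   -> writes MyAlg.h and MyAlg.cxx from the 'alg' templates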
+
+   
+def gen_pyfiles(pkg="", klass="", klass_type='pyalg', fname='foo'):
+    """Simple helper function to generate (python) files based on some
+       user information.
+     @param pkg the name of the package holding the class we want to generate
+     @param klass the name of the python class to generate
+     @param klass_type the type of class to generate (pysvc/pytool/pyalg/pyaud)
+     @param fname the filename to generate
+    """
+    try:
+        py_template = getattr(Templates, '%s_template'%klass_type)
+    except AttributeError,err:
+        print "::: UNKNOWN klass_type [%s] !" % klass_type
+        raise err
+
+    invalid_py_chars = ( ':', '.', '>', '<', ' ' )
+    
+    if any(c in klass for c in invalid_py_chars):
+        err = "::: INVALID class name ! (%s) !\n"%klass
+        err += "::: python class names can *NOT* contain any character of %s"%\
+               repr(invalid_py_chars)
+        print err
+        raise RuntimeError(err)
+
+    fname=''.join([fname,'.py'])
+    d = dict( pkg=pkg,
+              klass=klass,
+              fname=fname
+              )
+    o = open(fname, 'w')
+    o.writelines(py_template%d)
+    o.flush()
+    o.close()
+    return
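+
+# Illustrative call (names are hypothetical):
+#   gen_pyfiles(pkg='MyAnalysis', klass='MyPyAlg', klass_type='pyalg', fname='MyPyAlg')
+# expands Templates.pyalg_template with the pkg/klass/fname values and
+# writes the result to 'MyPyAlg.py'.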
+
+if __name__ == "__main__":
+    from optparse import OptionParser
+    parser = OptionParser( usage = "usage: %prog [options] filename")
+    p = parser.add_option
+    p("--klass",
+      dest = "klass",
+      default = None,
+      help = "The (fully qualified) name of the python or C++ class to create (ex: ElectronContainer, Analysis::Electron, MyAlgTool, PyTestAlg)")
+
+    p("--pkg",
+      dest = "pkg",
+      default = None,
+      help = "The name of the package holding the C++ class to create (ex: MyAnalysis, JetEvent)")
+
+    p("--type",
+      dest = "klass_type",
+      default = None,
+      help = "The type of class to create (%s)"%("|".join(GenTypes.values)))
+
+    p("--ipkg",
+      dest = "ipkg",
+      default = None,
+      help = "The name of the package holding the interface of the C++ class (mandatory for 'svc' and 'tool' types)")
+
+    p("--iklass",
+      dest = "iklass",
+      default = None,
+      help = "The name of the interface the C++ class is implementing (mandatory for 'svc' and 'tool' types)")
+
+    p("-o",
+      "--output-file",
+      dest = "fname",
+      default = None,
+      help = "The name of the file(s) which will hold header and implementation of the class (ex: 'Foo' --> ('Foo.h','Foo.cxx'))")
+    
+    (options, args) = parser.parse_args()
+
+    if options.klass is None or \
+       options.klass_type is None or \
+       options.pkg is None or \
+       options.fname is None :
+        parser.print_help()
+        print ":: arguments 'klass', 'type', 'pkg' and 'output-file' are ",
+        print "MANDATORY"
+        sys.exit(1)
+
+    if not (options.klass_type in GenTypes.values):
+        print ":: UNKNOWN klass_type [%s] !" % options.klass_type
+        print ":: allowed values are (%s)" % ("|".join(GenTypes.values))
+        sys.exit(2)
+        
+    if options.klass_type in GenTypes.needing_iface and \
+       ( options.ipkg is None or options.iklass is None ) :
+        parser.print_help()
+        print ":: You have to give 'ipkg' and 'iklass' options to properly ",
+        print "generate an implementation for '%s'"%options.klass_type
+        sys.exit(3)
+        
+
+    if options.ipkg is None:
+        options.ipkg = ""
+
+    if options.iklass is None:
+        options.iklass = ""
+
+    if options.klass_type.startswith('py'):
+        gen_pyfiles(klass=options.klass,
+                    klass_type=options.klass_type,
+                    pkg=options.pkg,
+                    fname=options.fname)
+    else:
+        gen_files(klass=options.klass,
+                  klass_type=options.klass_type,
+                  pkg=options.pkg,
+                  iklass=options.iklass,
+                  ipkg=options.ipkg,
+                  fname=options.fname)
diff --git a/Tools/PyUtils/bin/get-tag-diff.py b/Tools/PyUtils/bin/get-tag-diff.py
new file mode 100755
index 00000000000..1e99413ad34
--- /dev/null
+++ b/Tools/PyUtils/bin/get-tag-diff.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    get-tag-diff.py
+# @purpose: Get the list of tag differences between 2 releases (CERN centric)
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    October 2009
+#
+# @example:
+#
+# get-tag-diff --ref=rel_3,12.0.X --chk=12.0.3
+#
+
+__version__ = "$Revision: 273188 $"
+
+from optparse import OptionParser
+
+import sys
+import os
+
+def main():
+    
+    parser = OptionParser( usage = "usage: %prog --ref some_rel --chk other_rel")
+    parser.add_option("--ref",
+                      dest = "ref",
+                      help = "The description string of the reference release (eg: 12.0.X,rel_3,AtlasOffline)" )
+
+    parser.add_option("--chk",
+                      dest = "chk",
+                      help = "The description string of the to-be-compared release (eg: 12.0.X,rel_3 or 12.0.3)" )
+
+    options,args = parser.parse_args()
+
+    if options.ref is None or options.chk is None:
+        parser.print_help()
+        return 1
+    
+    ref = options.ref
+    chk = options.chk
+
+    import PyCmt.Cmt as Cmt
+    diffs = Cmt.get_tag_diff(ref=ref, chk=chk, verbose=True)
+    if len(diffs) > 0:
+        return 1
+    return 0
+
+if __name__ == "__main__":
+    import sys
+    sys.exit(main())
+    
diff --git a/Tools/PyUtils/bin/getTagDiff.py b/Tools/PyUtils/bin/getTagDiff.py
new file mode 100755
index 00000000000..10ecf9e4fb6
--- /dev/null
+++ b/Tools/PyUtils/bin/getTagDiff.py
@@ -0,0 +1,707 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    getTagDiff.py
+# @purpose: Get the list of tag differences between 2 releases (CERN centric)
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    September 2006
+#
+# @example:
+#
+# getTagDiff rel_3,12.0.X 12.0.3
+#
+
+__version__ = "$Revision: 1.4 $"
+
+from optparse import OptionParser
+
+import sys
+import os
+import commands
+
+import logging
+class GetTagDiff:
+    LoggerName = "GetTagDiff"
+    ReleaseRoot = "/afs/cern.ch/atlas/software/builds"
+    Projects = [
+#        "AtlasPoint1",
+        "AtlasProduction",
+        "AtlasOffline",
+        "AtlasAnalysis",
+        "AtlasTrigger",
+        "AtlasReconstruction",
+        "AtlasSimulation",
+        "AtlasEvent",
+        "AtlasConditions",
+        "DetCommon",
+        "AtlasCore"
+        ]
+    pass
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+    pass
+
+class CmtStrings:
+    CMTPATH        = 'CMTPATH'
+    CMTDIR         = 'cmt'
+    CMTVERSIONFILE = 'version.cmt'
+    CMTREQFILE     = 'requirements'
+    CMTPROJFILE    = 'project.cmt'
+    pass
+
+class CmtPkg:
+
+    def __init__( self,
+                  pkgName    = 'Example',
+                  pkgVersion = 'Example-00-00-00',
+                  pkgPath    = '' ):
+        self.name    = pkgName
+        self.version = pkgVersion
+        self.path    = pkgPath
+
+        return
+    def __repr__( self ):
+       s = [ "Package: %s" % self.name,
+             "Version: %s" % self.version,
+             "Path:    %s" % self.path ]
+       return os.linesep.join(s)
+    
+    pass
+
+def listCmtDirs( path ):
+
+    log = logging.getLogger( GetTagDiff.LoggerName )
+    
+    cmtDirs = []
+    
+    # fill list of CMT directories
+    try:
+        ls = os.listdir(path)
+    except OSError, what:
+        log.error( "OSError: %s" % what )
+        return cmtDirs
+
+    subDirs = []
+    for n in ls:
+        fullName = os.path.join( path, n )
+
+        # hack to optimize searching 
+        if fullName.count( "i686-" )        <= 0 and \
+           fullName.count( "o..pacman..o" ) <= 0 and \
+           fullName.count( "InstallArea"  ) <= 0 and \
+           os.path.splitext( fullName )[1] != '.so' :
+            isDir = os.path.isdir(fullName)
+        else:
+            isDir = False
+            pass
+        
+        isCmtDir =  os.path.split(fullName)[1] == CmtStrings.CMTDIR
+
+        if isDir :
+            if isCmtDir:
+                log.debug( "\t==> found %s" % fullName )
+                cmtDirs.append(fullName)
+                isDir = True
+                pass
+            else:
+                subDirs.append( fullName )
+
+        pass
+    
+    # now take care of the subdirs:
+    for subDir in subDirs:
+        subDirName = os.path.split(subDir)[1]
+        if subDirName.count( "i686-") <= 0 and \
+           subDirName.count( "o..pacman..o" ) <= 0 and \
+           subDirName != 'CVS' and \
+           os.path.splitext( subDirName )[1] != '.so' :
+            cmtDirs.extend( listCmtDirs( subDir ) )
+        pass
+    
+    return cmtDirs
+
+def createCmtPkg( cmtDir, printWrongPkgNames = True ):
+    """
+    The cmtDir is assumed to be of the form Xyz/cmt.
+    One also has to handle the cases with and without a version directory.
+    """
+    log = logging.getLogger(GetTagDiff.LoggerName)
+    
+    pkgName = None
+    try:
+        # the CMTVERSIONFILE should provide the name of the package
+        # so we extract it from this file
+        fileName = os.path.join( cmtDir, CmtStrings.CMTVERSIONFILE )
+        if os.path.exists(fileName):
+            versionFile = open( fileName, "r" )
+            pkgName = versionFile.readlines()[0].strip()
+            # a valid pkgName is of the form PkgName-??-??-??[-??]
+            pkgName = pkgName.split("-")[0]
+            versionFile.close()
+            del versionFile
+        else:
+            # the CMTREQFILE should (also) provide the name of the package
+            # so we extract it from this file
+            fileName = os.path.join( cmtDir, CmtStrings.CMTREQFILE )
+            reqFile = open( fileName, "r" )
+            for line in reqFile.readlines():
+                line = line.strip()
+                if len(line) > 0  and \
+                       line[0] != "#" and \
+                       line.count("package ") > 0:
+                    pkgName = line.splitlines()[0]\
+                              .split("package ")[1]\
+                              .replace("\r","")\
+                              .split("#")[0]\
+                              .strip()
+                    break
+                pass
+            reqFile.close()
+            del reqFile
+    except IOError:
+        ## No CMTREQFILE in this directory
+        ## ==> not a CMT package then ?
+        ## check if there is any CMT project file instead
+        if not os.path.exists( os.path.join(cmtDir, CmtStrings.CMTPROJFILE) ):
+            log.warning( "[%s] does NOT contain any '%s' nor '%s' file !!" % \
+                         ( cmtDir,
+                           CmtStrings.CMTREQFILE,
+                           CmtStrings.CMTPROJFILE ) )
+        return None
+
+    if pkgName == None:
+        log.warning( "No 'package Foo' line in %s of %s" % \
+                     ( CmtStrings.CMTREQFILE, cmtDir ) )
+        return None
+    
+    log.debug( "\t\t==> Analysing [%s]" % cmtDir )
+    
+    # first we try the no-version-directory case as it is the ATLAS
+    # default now.
+    if CmtStrings.CMTVERSIONFILE in os.listdir(cmtDir):
+        version = open( os.path.join( cmtDir, CmtStrings.CMTVERSIONFILE ),
+                        'r' )\
+                        .readline()
+        version = version.splitlines()[0].strip()
+        pkgDir = os.path.split(cmtDir)[0].strip()
+        pkgPath = os.path.split(pkgDir)[0].strip()
+        pass
+
+    # Now we are in the case where:
+    # /somePath/MyPkg/MyPkg-00-00-00/cmt
+    # or
+    # /somePath/MyPkg/v1r2p3/cmt
+    else:
+        baseDir = os.path.split(cmtDir)[0].strip()
+        pkgDir, version = os.path.split(baseDir)
+        pkgPath = os.path.split(pkgDir)[0].strip()
+
+        pass
+
+    log.debug( "\t\t\t- name    = %s" % pkgName )
+    log.debug( "\t\t\t- version = %s" % version )
+    log.debug( "\t\t\t- path    = %s" % pkgPath )
+
+    if pkgName.count(os.sep) > 0 and printWrongPkgNames :
+       log.warning( "About to create a funny CMT package !" )
+       log.warning( "'PkgName' contains '%s'. Please fix it!" % os.sep )
+       log.warning( "\t- name    = %s" % pkgName )
+       log.warning( "\t- version = %s" % version )
+       log.warning( "\t- path    = %s" % pkgPath )
+       # Ok, so, I fix it - but user is warned...
+       pkgName = os.path.basename(pkgName)
+       pass
+    
+    #version = '*'
+    return CmtPkg( pkgName, version, pkgPath )
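+
+# Illustration (hypothetical paths):
+#  - no version directory: /rel/Tools/PyUtils/cmt with a version.cmt holding
+#    'PyUtils-00-13-22' yields CmtPkg('PyUtils', 'PyUtils-00-13-22', '/rel/Tools')
+#  - with version directory: /rel/MyPkg/MyPkg-00-00-01/cmt (no version.cmt, a
+#    'package MyPkg' line in requirements) yields
+#    CmtPkg('MyPkg', 'MyPkg-00-00-01', '/rel')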
+
+def scan( scanDir = os.curdir,
+          printWrongPkgNames = True,
+          suppressList = ["WorkArea"] ):
+    """Search for CMT packages in the given directory and walk down the
+    directory tree.
+    Return the list of found CMT packages.
+    """
+    log = logging.getLogger( GetTagDiff.LoggerName )
+    log.debug( "Scanning [%s]" % scanDir )
+    
+    # return value
+    cmtPackages = []
+    
+    # retrieve all cmt-ised directories in the scan directory
+    scanDir = os.path.abspath( scanDir )
+
+    cmtDirs = []
+    try:
+        cmtDirs = listCmtDirs(scanDir)
+    except KeyboardInterrupt:
+        log.warning( "Scanning has been STOPPED ! (by you)" )
+        pass
+    
+    for cmtDir in cmtDirs:
+        cmtPkg = createCmtPkg(cmtDir, printWrongPkgNames)
+        if cmtPkg != None and \
+           cmtPkg.name not in suppressList:
+            cmtPackages.append( cmtPkg )
+        pass
+    
+    return cmtPackages
+
+
+def getRelease( releaseId = "12.0.3", releaseRoot = GetTagDiff.ReleaseRoot ):
+    # fetch logger object
+    log = logging.getLogger( GetTagDiff.LoggerName)
+
+    id = releaseId.split(",")
+    #print "## id [%s] length: %i" % (str(id), len(id))
+    if releaseId.count("rel_") > 0 :
+        if releaseId.lower().count("bugfix")  > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "bugfix" )
+        elif releaseId.lower().count("cos")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "cos" )
+        elif releaseId.lower().count("devmig0")>0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "devmig0" )
+        elif releaseId.lower().count("devval")> 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "devval" )
+        elif releaseId.lower().count("dev")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "dev" )
+        elif releaseId.lower().count("lcg")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "lcg" )
+        elif releaseId.lower().count("lst")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "lst" )
+        elif releaseId.lower().count("mig0")  > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "mig0" )
+        elif releaseId.lower().count("mig1")  > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "mig1" )
+        elif releaseId.lower().count("trials")> 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "trials" )
+        elif releaseId.lower().count("val")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "val" )
+        elif releaseId.lower().count("pcache")   > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "pcache" )
+        elif releaseId.count(".0.X")  > 0 :
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "bugfix" )
+        elif releaseId.count(".X.0") > 0:
+            releaseRoot = os.path.join( releaseRoot, "nightlies", "dev" )
+        else:
+            log.warning( "Unknown release id [%s]" % releaseId )
+        for i in id:
+            if i.count("rel_") > 0:
+                releaseId = i
+                break
+    else:
+        if len(id) == 1:
+            releaseId = id[0]
+        else:
+            log.warning( "Wrong release id [%s]" % releaseId )
+        pass
+    log.debug( "## releaseRoot: %s" % releaseRoot )
+    log.debug( "## releaseId  : %s" % releaseId   )
+    
+    return releaseRoot,releaseId
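+
+# Illustration, with the default ReleaseRoot (/afs/cern.ch/atlas/software/builds):
+#   getRelease("rel_3,12.0.X") -> ('.../builds/nightlies/bugfix', 'rel_3')
+#   getRelease("12.0.3")       -> ('.../builds', '12.0.3')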
+
+class Release(object):
+
+    def __init__(self, id, projs = GetTagDiff.Projects ):
+        object.__init__(self)
+        self.rawId  = self.__getRawId(id)
+        self.topDir, self.id = getRelease(id)
+        self.projs  = projs
+
+        self.printWrongPkgNames = False
+        
+        # fetch logger object
+        self.log = logging.getLogger(GetTagDiff.LoggerName)
+
+        self.cmtPkgs = None
+        self.cachedCmtPkgs = False
+        return
+
+    def __getRawId(self, id):
+        # 'normalize' the rawId : always 12.0.X-rel_? (not rel_?-12.0.X)
+        ids = id.split(",")
+        ids.sort()
+        id = ",".join( ids )
+        return id
+    
+    def setProjects(self, projs):
+        self.projs = projs
+        return
+
+    def getProjectDirs(self):
+        def getProjectVersion(projName, projVersion):
+            version = projVersion
+            if projVersion.count("rel_") <= 0:
+                if projName in GetTagDiff.Projects[2:]:
+                    version = projVersion[1:]
+                    pass
+                pass
+            return version
+        
+        return [
+            os.path.join( self.topDir,
+                          p,
+                          getProjectVersion(p, self.id)) for p in self.projs
+            ]
+
+    def getCmtDirs(self):
+        cmtDirs = []
+        for d in self.getProjectDirs():
+            self.log.info( "## scanning [%s]..." % \
+                           os.path.basename( os.path.dirname(d) ) )
+            cmtDirs += listCmtDirs(d)
+            pass
+        return cmtDirs
+
+    def buildCmtPkgsList(self):
+        self.log.info( "Building list of CMT pkgs [%s]..." % \
+                       self.rawId )
+        self.cachedCmtPkgs = False
+        self.cmtPkgs = {}
+        cmtPkgs = []
+        cmtDirs = self.getProjectDirs()
+        for cmtDir in cmtDirs:
+            self.log.info( "...Scanning [%s]..." % cmtDir )
+            cmtPkgs += scan(cmtDir, self.printWrongPkgNames)
+            pass
+        for cmtPkg in cmtPkgs:
+            # don't bother with container pkg
+            if self.isContainerPkg(cmtPkg):
+                continue
+            
+            if self.cmtPkgs.has_key(cmtPkg.name):
+                old = self.cmtPkgs[cmtPkg.name]
+                self.log.warning( "About to replace this pkg:" )
+                self.log.warning( " - name:    %s" % old.name )
+                self.log.warning( " - version: %s" % old.version )
+                self.log.warning( " - path:    %s" % old.path )
+                new = cmtPkg
+                self.log.warning( "with this pkg:" )
+                self.log.warning( " - name:    %s" % new.name )
+                self.log.warning( " - version: %s" % new.version )
+                self.log.warning( " - path:    %s" % new.path )
+                pass
+            self.cmtPkgs[cmtPkg.name] = cmtPkg
+        self.log.info( "Building list of CMT pkgs [%s]... [OK]" % \
+                       self.rawId )
+        self.saveCmtPkgs()
+        return
+
+    def getCmtPkgs(self):
+        if not self.cachedCmtPkgs:
+            self.buildCmtPkgsList()
+            pass
+        return self.cmtPkgs
+
+    def getPkgFullName(self, cmtPkg):
+        projId  = self.id
+        if projId.count("rel_") <= 0 :
+            projId = projId[1:]
+            pass
+        pkgName = cmtPkg.name
+        if cmtPkg.path.endswith(projId):
+            return pkgName
+        pkgFullName = cmtPkg.path.split(projId+os.sep)[1]
+        pkgFullName = os.path.join( pkgFullName, pkgName )
+        return pkgFullName
+    
+    def isContainerPkg(self, cmtPkg):
+        # this method assumes that a tag-name for a container pkg is of
+        # the form:
+        # ContainerName-AtlasProject-00-00-00[-00]
+        isContainer = False
+        version = cmtPkg.version
+        for p in self.projs:
+            if version.count( "-%s-" % p ) > 0:
+                isContainer = True
+                self.log.debug("Flagged Container pkg [%s] (%s)" %
+                               ( self.getPkgFullName(cmtPkg),
+                                 cmtPkg.path ) )
+                break
+        return isContainer
+    
+    def saveCmtPkgs(self, fileName = None):
+        if fileName is None:
+            fileName = os.path.join( os.path.expandvars( os.environ['HOME'] ),
+                                     ".pane",
+                                     "cmtTags-%s.db" % self.rawId.replace(",", "-") )
+        if not os.path.exists( os.path.dirname(fileName) ):
+            os.makedirs( os.path.dirname(fileName) )
+            pass
+
+        self.log.debug( "Saving CMT pkgs list to [%s]..." % fileName )
+        self.cachedCmtPkgs = True
+        f = open( fileName, "wb" )
+        pickle.dump(self.getCmtPkgs(), f)
+        f.close()
+        self.log.info( "Saving CMT pkgs list to [%s]... [OK]" % fileName )
+        
+        return
+    
+    def loadCmtPkgs(self, fileName = None):
+        if fileName == None:
+            fileName = os.path.join( os.path.expandvars( os.environ['HOME'] ),
+                                     ".pane",
+                                     "cmtTags-%s.db" % \
+                                     self.rawId.replace(",", "-")
+                                     )
+        if not os.path.exists( fileName ):
+            self.log.error( "No such file [%s]" % fileName )
+            return -1
+        
+        # we don't want to read a tag-db which is out of date,
+        # hence we compare against the last modified time of SomeProject/cmt/project.cmt
+        projFile = os.path.join( self.getProjectDirs()[0],
+                                 "cmt",
+                                 "project.cmt" )
+        if os.path.getmtime( fileName ) <= \
+           os.path.getmtime( projFile ):
+            self.log.warning( "[%s] is older than [%s]" %
+                              ( fileName, projFile ) )
+            return -1
+        
+        self.log.info( "Loading CMT pkgs list from [%s]..." % fileName )
+
+        f = open( fileName, "r" )
+        self.cmtPkgs = pickle.load(f)
+        f.close()
+        
+        return len(self.cmtPkgs.keys())
+
+    pass # Release
+
+class ReleaseCmp(object):
+
+    def __init__(self, ref, rel, rescan = False):
+        self.ref    = ref
+        self.rel    = rel
+        self.rescan = rescan
+        self.log    = logging.getLogger(GetTagDiff.LoggerName)
+        self.fmt    = "%-45s : %-25s -> %-25s"
+        return
+
+    def cmp(self, ref = None, rel = None):
+        if ref == None:
+            ref = self.ref
+            pass
+        if rel == None:
+            rel = self.rel
+            pass
+
+        self.log.info( "Comparing [%s VS %s]" % (ref.rawId, rel.rawId) )
+
+        if self.rescan:
+            self.log.info( "Rescan has been requested: doing it..." )
+            ref.buildCmtPkgsList()
+            rel.buildCmtPkgsList()
+            pass
+        
+        if ref.loadCmtPkgs() <= 0:
+            self.log.warning( "==> will rebuild cache file..." )
+            ref.buildCmtPkgsList()
+        if rel.loadCmtPkgs() <= 0:
+            self.log.warning( "==> will rebuild cache file..." )
+            rel.buildCmtPkgsList()
+
+        diff = {}
+
+        # first compare the list of packages registered in the reference
+        for pkgName in ref.cmtPkgs.keys():
+
+            # check if it is a container package
+            if ref.isContainerPkg(ref.cmtPkgs[pkgName]):
+                continue
+
+            pkgFullName = ref.getPkgFullName(ref.cmtPkgs[pkgName])
+            
+            if not rel.cmtPkgs.has_key(pkgName):
+                diff[pkgFullName] = { 'ref' : ref.cmtPkgs[pkgName],
+                                      'rel' : CmtPkg( "None",
+                                                      "None-00-00-00",
+                                                      "-" ) }
+                pass
+            else:
+                refVersion = ref.cmtPkgs[pkgName].version
+                version    = rel.cmtPkgs[pkgName].version
+                if refVersion != version:
+                    diff[pkgFullName] = { 'ref' : ref.cmtPkgs[pkgName],
+                                          'rel' : rel.cmtPkgs[pkgName] }
+                pass
+            pass
+
+        # then compare the list of packages registered in the release
+        for pkgName in rel.cmtPkgs.keys():
+
+            # check if it is a container package
+            if rel.isContainerPkg(rel.cmtPkgs[pkgName]):
+                continue
+
+            pkgFullName = rel.getPkgFullName(rel.cmtPkgs[pkgName])
+
+            if not ref.cmtPkgs.has_key(pkgName):
+                diff[pkgFullName] = { 'ref' : CmtPkg( "None",
+                                                      "None-00-00-00",
+                                                      "-" ),
+                                      'rel' : rel.cmtPkgs[pkgName] }
+                pass
+            else:
+                refVersion = ref.cmtPkgs[pkgName].version
+                version    = rel.cmtPkgs[pkgName].version
+                if refVersion != version:
+                    diff[pkgFullName] = { 'ref' : ref.cmtPkgs[pkgName],
+                                          'rel' : rel.cmtPkgs[pkgName] }
+                pass
+            pass
+
+        self.diff = diff
+        self.saveReport()
+        return diff
+
+    def printReport(self):
+        self.log.info( "#" * 80 )
+        self.log.info( "Found [%i] tags which are different !" %
+                       len(self.diff.keys()) )
+        self.log.info( self.fmt % ( "          PKGNAME",
+                                    " PKG-REF",
+                                    " PKG-REL" ) )
+        self.log.info( "-" * 80 )
+        for k in self.diff.keys():
+            pkgFullName = k
+            pkgName = os.path.basename(pkgFullName)
+            self.log.info( self.fmt %
+                           ( pkgFullName,
+                             self.diff[k]['ref'].version.replace(pkgName,""),
+                             self.diff[k]['rel'].version.replace(pkgName,"") )
+                           )
+            pass
+        self.log.info( "-" * 80 )
+        return
+
+    def saveReport(self, fileName=None):
+        refId = self.ref.rawId.replace(",","-")
+        relId = self.rel.rawId.replace(",","-")
+        if fileName == None:
+            fileName = os.path.join( os.getcwd(),
+                                     "tag-diff-%s-vs-%s.out" % \
+                                     ( refId, relId ) )
+            pass
+        out = open( fileName, "w" )
+        lines = []
+        lines += [
+            "-" * 80,
+            "## Found [%i] tags which are different !" % len(self.diff.keys()),
+            self.fmt % ( "##       PKGNAME",
+                         " PKG-REF (%s)" % refId,
+                         " PKG-REL (%s)" % relId ),
+            "-" * 80
+            ]
+        for k in self.diff.keys():
+            pkgName = k
+            lines += [
+                self.fmt %
+                ( pkgName,
+                  self.diff[k]['ref'].version.replace(pkgName,""),
+                  self.diff[k]['rel'].version.replace(pkgName,"") )
+                ]
+            pass
+        lines += [ "-" * 80 ]
+        for line in lines:
+            out.writelines( line + os.linesep )
+            pass
+        out.close()
+        return
+        
+def _installLogger( lvl        = "INFO",
+                    loggerName = GetTagDiff.LoggerName ):
+    # define a Handler which writes DEBUG messages or higher to the sys.stderr
+    logger = logging.StreamHandler()
+    logger.setLevel(logging.DEBUG)
+    # set a format which is simpler for console use
+    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+    # tell the handler to use this format
+    logger.setFormatter(formatter)
+    # add the handler to the root logger
+    logging.getLogger('').addHandler(logger)
+
+    log = logging.getLogger(loggerName)
+    exec ( """
+try:
+ log.setLevel( logging.%s )
+except AttributeError,err:
+ log.warning( 'Invalid logging.Level [%s]' )
+ log.warning( 'setting to [INFO]' )
+ log.setLevel( logging.INFO )
+""" % (lvl, lvl) )
+    return
+
+if __name__ == "__main__":
+
+    parser = OptionParser( usage = "usage: %prog [-R|--ref] 12.0.X,rel_3 [-r|--rel] 12.0.3")
+    parser.add_option( "-R",
+                       "--ref",
+                       dest = "ref",
+                       help = "The description string of the reference release (eg: 12.0.X,rel_3)" )
+
+    parser.add_option( "-r",
+                       "--rel",
+                       dest = "rel",
+                       help = "The description string of the to-be-compared release (eg: 12.0.X,rel_3 or 12.0.3)" )
+
+    
+    parser.add_option( "-f",
+                       "--force-rescan",
+                       action  = "store_true",
+                       dest    = "rescan",
+                       default = False,
+                       help    = "Switch to rescan the whole release or look for tags from a previous db-file" )
+
+    
+    parser.add_option( "-l",
+                       "--loglevel",
+                       type    = "string",
+                       dest    = "logLevel",
+                       default = "INFO",
+#                       choices = [ "DEBUG", "INFO", "WARNING", "ERROR" ],
+                       help    = "Logging message level [DEBUG, INFO, WARNING, ERROR]"
+                       )
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.ref = args[0]
+        pass
+
+    if len(args) > 1 and args[1][0] != "-":
+        options.rel = args[1]
+        pass
+
+    if options.ref == None or options.rel == None:
+        parser.print_help()
+        sys.exit(1)
+        pass
+    
+    ## install the user loglevel
+    _installLogger( loggerName = GetTagDiff.LoggerName,
+                    lvl        = options.logLevel )
+    log = logging.getLogger( GetTagDiff.LoggerName )
+
+    log.info( "#" * 80 )
+    log.info( "## Welcome to getTagDiff" )
+    log.info( "#" * 80 )
+    log.info( "##" )
+
+    relRef = Release(options.ref)
+    rel    = Release(options.rel)
+    log.info( "## Comparing [%s] VS [%s]" % ( relRef.rawId, rel.rawId ) )
+
+    relCmp = ReleaseCmp( relRef, rel, options.rescan )
+    relCmp.cmp()
+    relCmp.printReport()
+    
+    log.info( "...Bye..." )
diff --git a/Tools/PyUtils/bin/gprof2dot b/Tools/PyUtils/bin/gprof2dot
new file mode 100755
index 00000000000..3f79cf7cc9b
--- /dev/null
+++ b/Tools/PyUtils/bin/gprof2dot
@@ -0,0 +1,2896 @@
+#!/usr/bin/env python2
+#
+# Copyright 2008-2009 Jose Fonseca
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Generate a dot graph from the output of several profilers."""
+
+__author__ = "Jose Fonseca"
+
+__version__ = "1.0"
+
+
+import sys
+import math
+import os.path
+import re
+import textwrap
+import optparse
+import xml.parsers.expat
+
+
+try:
+    # Debugging helper module
+    import debug
+except ImportError:
+    pass
+
+
+def times(x):
+    return u"%u\xd7" % (x,)
+
+def percentage(p):
+    return "%.02f%%" % (p*100.0,)
+
+def add(a, b):
+    return a + b
+
+def equal(a, b):
+    if a == b:
+        return a
+    else:
+        return None
+
+def fail(a, b):
+    assert False
+
+
+tol = 2 ** -23
+
+def ratio(numerator, denominator):
+    try:
+        ratio = float(numerator)/float(denominator)
+    except ZeroDivisionError:
+        # 0/0 is undefined, but 1.0 yields more useful results
+        return 1.0
+    if ratio < 0.0:
+        if ratio < -tol:
+            sys.stderr.write('warning: negative ratio (%s/%s)\n' % (numerator, denominator))
+        return 0.0
+    if ratio > 1.0:
+        if ratio > 1.0 + tol:
+            sys.stderr.write('warning: ratio greater than one (%s/%s)\n' % (numerator, denominator))
+        return 1.0
+    return ratio
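+
+# For instance, ratio(3, 4) yields 0.75 and ratio(0, 0) is clamped to 1.0;
+# values outside [0, 1] are clamped, with a warning beyond the 2**-23 tolerance.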
+
+
+class UndefinedEvent(Exception):
+    """Raised when attempting to get an event which is undefined."""
+    
+    def __init__(self, event):
+        Exception.__init__(self)
+        self.event = event
+
+    def __str__(self):
+        return 'unspecified event %s' % self.event.name
+
+
+class Event(object):
+    """Describe a kind of event, and its basic operations."""
+
+    def __init__(self, name, null, aggregator, formatter = str):
+        self.name = name
+        self._null = null
+        self._aggregator = aggregator
+        self._formatter = formatter
+
+    def __eq__(self, other):
+        return self is other
+
+    def __hash__(self):
+        return id(self)
+
+    def null(self):
+        return self._null
+
+    def aggregate(self, val1, val2):
+        """Aggregate two event values."""
+        assert val1 is not None
+        assert val2 is not None
+        return self._aggregator(val1, val2)
+    
+    def format(self, val):
+        """Format an event value."""
+        assert val is not None
+        return self._formatter(val)
+
+
+CALLS = Event("Calls", 0, add, times)
+SAMPLES = Event("Samples", 0, add)
+SAMPLES2 = Event("Samples", 0, add)
+
+TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')')
+TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')')
+TOTAL_TIME = Event("Total time", 0.0, fail)
+TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage)
+
+
+class Object(object):
+    """Base class for all objects in profile which can store events."""
+
+    def __init__(self, events=None):
+        if events is None:
+            self.events = {}
+        else:
+            self.events = events
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        return self is other
+
+    def __contains__(self, event):
+        return event in self.events
+    
+    def __getitem__(self, event):
+        try:
+            return self.events[event]
+        except KeyError:
+            raise UndefinedEvent(event)
+    
+    def __setitem__(self, event, value):
+        if value is None:
+            if event in self.events:
+                del self.events[event]
+        else:
+            self.events[event] = value
+
+
+class Call(Object):
+    """A call between functions.
+    
+    There should be at most one call object for every pair of functions.
+    """
+
+    def __init__(self, callee_id):
+        Object.__init__(self)
+        self.callee_id = callee_id
+        self.ratio = None
+        self.weight = None
+
+
+class Function(Object):
+    """A function."""
+
+    def __init__(self, id, name):
+        Object.__init__(self)
+        self.id = id
+        self.name = name
+        self.module = None
+        self.process = None
+        self.calls = {}
+        self.called = None
+        self.weight = None
+        self.cycle = None
+    
+    def add_call(self, call):
+        if call.callee_id in self.calls:
+            sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id)))
+        self.calls[call.callee_id] = call
+
+    def get_call(self, callee_id):
+        if not callee_id in self.calls:
+            call = Call(callee_id)
+            call[SAMPLES] = 0
+            call[SAMPLES2] = 0
+            call[CALLS] = 0
+            self.calls[callee_id] = call
+        return self.calls[callee_id]
+
+    _parenthesis_re = re.compile(r'\([^()]*\)')
+    _angles_re = re.compile(r'<[^<>]*>')
+    _const_re = re.compile(r'\s+const$')
+
+    def stripped_name(self):
+        """Remove extraneous information from C++ demangled function names."""
+
+        name = self.name
+
+        # Strip function parameters from name by recursively removing paired parentheses
+        while True:
+            name, n = self._parenthesis_re.subn('', name)
+            if not n:
+                break
+
+        # Strip const qualifier
+        name = self._const_re.sub('', name)
+
+        # Strip template parameters from name by recursively removing paired angles
+        while True:
+            name, n = self._angles_re.subn('', name)
+            if not n:
+                break
+
+        return name
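+
+    # For instance (illustrative input), stripped_name() reduces
+    #   'MyClass::method(std::vector<int> const&, int) const'
+    # to 'MyClass::method': parameters, the trailing const qualifier and any
+    # remaining template arguments are removed, in that order.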
+
+    # TODO: write utility functions
+
+    def __repr__(self):
+        return self.name
+
+
+class Cycle(Object):
+    """A cycle made from recursive function calls."""
+
+    def __init__(self):
+        Object.__init__(self)
+        # XXX: Do cycles need an id?
+        self.functions = set()
+
+    def add_function(self, function):
+        assert function not in self.functions
+        self.functions.add(function)
+        # XXX: Aggregate events?
+        if function.cycle is not None:
+            for other in function.cycle.functions:
+                if function not in self.functions:
+                    self.add_function(other)
+        function.cycle = self
+
+
+class Profile(Object):
+    """The whole profile."""
+
+    def __init__(self):
+        Object.__init__(self)
+        self.functions = {}
+        self.cycles = []
+
+    def add_function(self, function):
+        if function.id in self.functions:
+            sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id)))
+        self.functions[function.id] = function
+
+    def add_cycle(self, cycle):
+        self.cycles.append(cycle)
+
+    def validate(self):
+        """Validate the edges."""
+
+        for function in self.functions.itervalues():
+            for callee_id in function.calls.keys():
+                assert function.calls[callee_id].callee_id == callee_id
+                if callee_id not in self.functions:
+                    sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
+                    del function.calls[callee_id]
+
+    def find_cycles(self):
+        """Find cycles using Tarjan's strongly connected components algorithm."""
+
+        # Apply Tarjan's algorithm successively until all functions are visited
+        visited = set()
+        for function in self.functions.itervalues():
+            if function not in visited:
+                self._tarjan(function, 0, [], {}, {}, visited)
+        cycles = []
+        for function in self.functions.itervalues():
+            if function.cycle is not None and function.cycle not in cycles:
+                cycles.append(function.cycle)
+        self.cycles = cycles
+        if 0:
+            for cycle in cycles:
+                sys.stderr.write("Cycle:\n")
+                for member in cycle.functions:
+                    sys.stderr.write("\tFunction %s\n" % member.name)
+    
+    def _tarjan(self, function, order, stack, orders, lowlinks, visited):
+        """Tarjan's strongly connected components algorithm.
+
+        See also:
+        - http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm
+        """
+
+        visited.add(function)
+        orders[function] = order
+        lowlinks[function] = order
+        order += 1
+        pos = len(stack)
+        stack.append(function)
+        for call in function.calls.itervalues():
+            callee = self.functions[call.callee_id]
+            # TODO: use a set to optimize lookup
+            if callee not in orders:
+                order = self._tarjan(callee, order, stack, orders, lowlinks, visited)
+                lowlinks[function] = min(lowlinks[function], lowlinks[callee])
+            elif callee in stack:
+                lowlinks[function] = min(lowlinks[function], orders[callee])
+        if lowlinks[function] == orders[function]:
+            # Strongly connected component found
+            members = stack[pos:]
+            del stack[pos:]
+            if len(members) > 1:
+                cycle = Cycle()
+                for member in members:
+                    cycle.add_function(member)
+        return order
+
+    def call_ratios(self, event):
+        # Aggregate for incoming calls
+        cycle_totals = {}
+        for cycle in self.cycles:
+            cycle_totals[cycle] = 0.0
+        function_totals = {}
+        for function in self.functions.itervalues():
+            function_totals[function] = 0.0
+        for function in self.functions.itervalues():
+            for call in function.calls.itervalues():
+                if call.callee_id != function.id:
+                    callee = self.functions[call.callee_id]
+                    function_totals[callee] += call[event]
+                    if callee.cycle is not None and callee.cycle is not function.cycle:
+                        cycle_totals[callee.cycle] += call[event]
+
+        # Compute the ratios
+        for function in self.functions.itervalues():
+            for call in function.calls.itervalues():
+                assert call.ratio is None
+                if call.callee_id != function.id:
+                    callee = self.functions[call.callee_id]
+                    if callee.cycle is not None and callee.cycle is not function.cycle:
+                        total = cycle_totals[callee.cycle]
+                    else:
+                        total = function_totals[callee]
+                    call.ratio = ratio(call[event], total)
+
+    def integrate(self, outevent, inevent):
+        """Propagate function time ratio allong the function calls.
+
+        Must be called after finding the cycles.
+
+        See also:
+        - http://citeseer.ist.psu.edu/graham82gprof.html
+        """
+
+        # Sanity checking
+        assert outevent not in self
+        for function in self.functions.itervalues():
+            assert outevent not in function
+            assert inevent in function
+            for call in function.calls.itervalues():
+                assert outevent not in call
+                if call.callee_id != function.id:
+                    assert call.ratio is not None
+
+        # Aggregate the input for each cycle 
+        for cycle in self.cycles:
+            total = inevent.null()
+            for function in self.functions.itervalues():
+                total = inevent.aggregate(total, function[inevent])
+            self[inevent] = total
+
+        # Integrate along the edges
+        total = inevent.null()
+        for function in self.functions.itervalues():
+            total = inevent.aggregate(total, function[inevent])
+            self._integrate_function(function, outevent, inevent)
+        self[outevent] = total
+
+    def _integrate_function(self, function, outevent, inevent):
+        if function.cycle is not None:
+            return self._integrate_cycle(function.cycle, outevent, inevent)
+        else:
+            if outevent not in function:
+                total = function[inevent]
+                for call in function.calls.itervalues():
+                    if call.callee_id != function.id:
+                        total += self._integrate_call(call, outevent, inevent)
+                function[outevent] = total
+            return function[outevent]
+    
+    def _integrate_call(self, call, outevent, inevent):
+        assert outevent not in call
+        assert call.ratio is not None
+        callee = self.functions[call.callee_id]
+        subtotal = call.ratio * self._integrate_function(callee, outevent, inevent)
+        call[outevent] = subtotal
+        return subtotal
+
+    def _integrate_cycle(self, cycle, outevent, inevent):
+        if outevent not in cycle:
+
+            # Compute the outevent for the whole cycle
+            total = inevent.null()
+            for member in cycle.functions:
+                subtotal = member[inevent]
+                for call in member.calls.itervalues():
+                    callee = self.functions[call.callee_id]
+                    if callee.cycle is not cycle:
+                        subtotal += self._integrate_call(call, outevent, inevent)
+                total += subtotal
+            cycle[outevent] = total
+            
+            # Compute the time propagated to callers of this cycle
+            callees = {}
+            for function in self.functions.itervalues():
+                if function.cycle is not cycle:
+                    for call in function.calls.itervalues():
+                        callee = self.functions[call.callee_id]
+                        if callee.cycle is cycle:
+                            try:
+                                callees[callee] += call.ratio
+                            except KeyError:
+                                callees[callee] = call.ratio
+            
+            for member in cycle.functions:
+                member[outevent] = outevent.null()
+
+            for callee, call_ratio in callees.iteritems():
+                ranks = {}
+                call_ratios = {}
+                partials = {}
+                self._rank_cycle_function(cycle, callee, 0, ranks)
+                self._call_ratios_cycle(cycle, callee, ranks, call_ratios, set())
+                partial = self._integrate_cycle_function(cycle, callee, call_ratio, partials, ranks, call_ratios, outevent, inevent)
+                assert partial == max(partials.values())
+                assert not total or abs(1.0 - partial/(call_ratio*total)) <= 0.001
+
+        return cycle[outevent]
+
+    def _rank_cycle_function(self, cycle, function, rank, ranks):
+        if function not in ranks or ranks[function] > rank:
+            ranks[function] = rank
+            for call in function.calls.itervalues():
+                if call.callee_id != function.id:
+                    callee = self.functions[call.callee_id]
+                    if callee.cycle is cycle:
+                        self._rank_cycle_function(cycle, callee, rank + 1, ranks)
+
+    def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited):
+        if function not in visited:
+            visited.add(function)
+            for call in function.calls.itervalues():
+                if call.callee_id != function.id:
+                    callee = self.functions[call.callee_id]
+                    if callee.cycle is cycle:
+                        if ranks[callee] > ranks[function]:
+                            call_ratios[callee] = call_ratios.get(callee, 0.0) + call.ratio
+                            self._call_ratios_cycle(cycle, callee, ranks, call_ratios, visited)
+
+    def _integrate_cycle_function(self, cycle, function, partial_ratio, partials, ranks, call_ratios, outevent, inevent):
+        if function not in partials:
+            partial = partial_ratio*function[inevent]
+            for call in function.calls.itervalues():
+                if call.callee_id != function.id:
+                    callee = self.functions[call.callee_id]
+                    if callee.cycle is not cycle:
+                        assert outevent in call
+                        partial += partial_ratio*call[outevent]
+                    else:
+                        if ranks[callee] > ranks[function]:
+                            callee_partial = self._integrate_cycle_function(cycle, callee, partial_ratio, partials, ranks, call_ratios, outevent, inevent)
+                            call_ratio = ratio(call.ratio, call_ratios[callee])
+                            call_partial = call_ratio*callee_partial
+                            try:
+                                call[outevent] += call_partial
+                            except UndefinedEvent:
+                                call[outevent] = call_partial
+                            partial += call_partial
+            partials[function] = partial
+            try:
+                function[outevent] += partial
+            except UndefinedEvent:
+                function[outevent] = partial
+        return partials[function]
+
+    def aggregate(self, event):
+        """Aggregate an event for the whole profile."""
+
+        total = event.null()
+        for function in self.functions.itervalues():
+            try:
+                total = event.aggregate(total, function[event])
+            except UndefinedEvent:
+                return
+        self[event] = total
+
+    def ratio(self, outevent, inevent):
+        assert outevent not in self
+        assert inevent in self
+        for function in self.functions.itervalues():
+            assert outevent not in function
+            assert inevent in function
+            function[outevent] = ratio(function[inevent], self[inevent])
+            for call in function.calls.itervalues():
+                assert outevent not in call
+                if inevent in call:
+                    call[outevent] = ratio(call[inevent], self[inevent])
+        self[outevent] = 1.0
+
+    def prune(self, node_thres, edge_thres):
+        """Prune the profile"""
+
+        # compute the prune ratios
+        for function in self.functions.itervalues():
+            try:
+                function.weight = function[TOTAL_TIME_RATIO]
+            except UndefinedEvent:
+                pass
+
+            for call in function.calls.itervalues():
+                callee = self.functions[call.callee_id]
+
+                if TOTAL_TIME_RATIO in call:
+                    # handle exact cases first
+                    call.weight = call[TOTAL_TIME_RATIO] 
+                else:
+                    try:
+                        # make a safe estimate
+                        call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO]) 
+                    except UndefinedEvent:
+                        pass
+
+        # prune the nodes
+        for function_id in self.functions.keys():
+            function = self.functions[function_id]
+            if function.weight is not None:
+                if function.weight < node_thres:
+                    del self.functions[function_id]
+
+        # prune the edges
+        for function in self.functions.itervalues():
+            for callee_id in function.calls.keys():
+                call = function.calls[callee_id]
+                if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
+                    del function.calls[callee_id]
+    
+    def dump(self):
+        for function in self.functions.itervalues():
+            sys.stderr.write('Function %s:\n' % (function.name,))
+            self._dump_events(function.events)
+            for call in function.calls.itervalues():
+                callee = self.functions[call.callee_id]
+                sys.stderr.write('  Call %s:\n' % (callee.name,))
+                self._dump_events(call.events)
+        for cycle in self.cycles:
+            sys.stderr.write('Cycle:\n')
+            self._dump_events(cycle.events)
+            for function in cycle.functions:
+                sys.stderr.write('  Function %s\n' % (function.name,))
+
+    def _dump_events(self, events):
+        for event, value in events.iteritems():
+            sys.stderr.write('    %s: %s\n' % (event.name, event.format(value)))
+
+
+class Struct:
+    """Masquerade a dictionary with a structure-like behavior."""
+
+    def __init__(self, attrs = None):
+        if attrs is None:
+            attrs = {}
+        self.__dict__['_attrs'] = attrs
+    
+    def __getattr__(self, name):
+        try:
+            return self._attrs[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __setattr__(self, name, value):
+        self._attrs[name] = value
+
+    def __str__(self):
+        return str(self._attrs)
+
+    def __repr__(self):
+        return repr(self._attrs)
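+
+    # For instance (illustrative): s = Struct({'samples': 3}) gives s.samples == 3,
+    # s.missing raises AttributeError, and 's.samples = 4' writes back into
+    # the underlying dict.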
+    
+
+class ParseError(Exception):
+    """Raised when parsing to signal mismatches."""
+
+    def __init__(self, msg, line):
+        self.msg = msg
+        # TODO: store more source line information
+        self.line = line
+
+    def __str__(self):
+        return '%s: %r' % (self.msg, self.line)
+
+
+class Parser:
+    """Parser interface."""
+
+    def __init__(self):
+        pass
+
+    def parse(self):
+        raise NotImplementedError
+
+    
+class LineParser(Parser):
+    """Base class for parsers that read line-based formats."""
+
+    def __init__(self, file):
+        Parser.__init__(self)
+        self._file = file
+        self.__line = None
+        self.__eof = False
+        self.line_no = 0
+
+    def readline(self):
+        line = self._file.readline()
+        if not line:
+            self.__line = ''
+            self.__eof = True
+        else:
+            self.line_no += 1
+        self.__line = line.rstrip('\r\n')
+
+    def lookahead(self):
+        assert self.__line is not None
+        return self.__line
+
+    def consume(self):
+        assert self.__line is not None
+        line = self.__line
+        self.readline()
+        return line
+
+    def eof(self):
+        assert self.__line is not None
+        return self.__eof
+
+
+XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF = range(4)
+
+
+class XmlToken:
+
+    def __init__(self, type, name_or_data, attrs = None, line = None, column = None):
+        assert type in (XML_ELEMENT_START, XML_ELEMENT_END, XML_CHARACTER_DATA, XML_EOF)
+        self.type = type
+        self.name_or_data = name_or_data
+        self.attrs = attrs
+        self.line = line
+        self.column = column
+
+    def __str__(self):
+        if self.type == XML_ELEMENT_START:
+            return '<' + self.name_or_data + ' ...>'
+        if self.type == XML_ELEMENT_END:
+            return '</' + self.name_or_data + '>'
+        if self.type == XML_CHARACTER_DATA:
+            return self.name_or_data
+        if self.type == XML_EOF:
+            return 'end of file'
+        assert 0
+
+
+class XmlTokenizer:
+    """Expat based XML tokenizer."""
+
+    def __init__(self, fp, skip_ws = True):
+        self.fp = fp
+        self.tokens = []
+        self.index = 0
+        self.final = False
+        self.skip_ws = skip_ws
+        
+        self.character_pos = 0, 0
+        self.character_data = ''
+        
+        self.parser = xml.parsers.expat.ParserCreate()
+        self.parser.StartElementHandler  = self.handle_element_start
+        self.parser.EndElementHandler    = self.handle_element_end
+        self.parser.CharacterDataHandler = self.handle_character_data
+    
+    def handle_element_start(self, name, attributes):
+        self.finish_character_data()
+        line, column = self.pos()
+        token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
+        self.tokens.append(token)
+    
+    def handle_element_end(self, name):
+        self.finish_character_data()
+        line, column = self.pos()
+        token = XmlToken(XML_ELEMENT_END, name, None, line, column)
+        self.tokens.append(token)
+
+    def handle_character_data(self, data):
+        if not self.character_data:
+            self.character_pos = self.pos()
+        self.character_data += data
+    
+    def finish_character_data(self):
+        if self.character_data:
+            if not self.skip_ws or not self.character_data.isspace(): 
+                line, column = self.character_pos
+                token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
+                self.tokens.append(token)
+            self.character_data = ''
+    
+    def next(self):
+        size = 16*1024
+        while self.index >= len(self.tokens) and not self.final:
+            self.tokens = []
+            self.index = 0
+            data = self.fp.read(size)
+            self.final = len(data) < size
+            try:
+                self.parser.Parse(data, self.final)
+            except xml.parsers.expat.ExpatError, e:
+                #if e.code == xml.parsers.expat.errors.XML_ERROR_NO_ELEMENTS:
+                if e.code == 3:
+                    pass
+                else:
+                    raise e
+        if self.index >= len(self.tokens):
+            line, column = self.pos()
+            token = XmlToken(XML_EOF, None, None, line, column)
+        else:
+            token = self.tokens[self.index]
+            self.index += 1
+        return token
+
+    def pos(self):
+        return self.parser.CurrentLineNumber, self.parser.CurrentColumnNumber
+
+
+class XmlTokenMismatch(Exception):
+
+    def __init__(self, expected, found):
+        self.expected = expected
+        self.found = found
+
+    def __str__(self):
+        return '%u:%u: %s expected, %s found' % (self.found.line, self.found.column, str(self.expected), str(self.found))
+
+
+class XmlParser(Parser):
+    """Base XML document parser."""
+
+    def __init__(self, fp):
+        Parser.__init__(self)
+        self.tokenizer = XmlTokenizer(fp)
+        self.consume()
+    
+    def consume(self):
+        self.token = self.tokenizer.next()
+
+    def match_element_start(self, name):
+        return self.token.type == XML_ELEMENT_START and self.token.name_or_data == name
+    
+    def match_element_end(self, name):
+        return self.token.type == XML_ELEMENT_END and self.token.name_or_data == name
+
+    def element_start(self, name):
+        while self.token.type == XML_CHARACTER_DATA:
+            self.consume()
+        if self.token.type != XML_ELEMENT_START:
+            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_START, name), self.token)
+        if self.token.name_or_data != name:
+            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_START, name), self.token)
+        attrs = self.token.attrs
+        self.consume()
+        return attrs
+    
+    def element_end(self, name):
+        while self.token.type == XML_CHARACTER_DATA:
+            self.consume()
+        if self.token.type != XML_ELEMENT_END:
+            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_END, name), self.token)
+        if self.token.name_or_data != name:
+            raise XmlTokenMismatch(XmlToken(XML_ELEMENT_END, name), self.token)
+        self.consume()
+
+    def character_data(self, strip = True):
+        data = ''
+        while self.token.type == XML_CHARACTER_DATA:
+            data += self.token.name_or_data
+            self.consume()
+        if strip:
+            data = data.strip()
+        return data
+
+
+class GprofParser(Parser):
+    """Parser for GNU gprof output.
+
+    See also:
+    - Chapter "Interpreting gprof's Output" from the GNU gprof manual
+      http://sourceware.org/binutils/docs-2.18/gprof/Call-Graph.html#Call-Graph
+    - File "cg_print.c" from the GNU gprof source code
+      http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/src/gprof/cg_print.c?rev=1.12&cvsroot=src
+    """
+
+    def __init__(self, fp):
+        Parser.__init__(self)
+        self.fp = fp
+        self.functions = {}
+        self.cycles = {}
+
+    def readline(self):
+        line = self.fp.readline()
+        if not line:
+            sys.stderr.write('error: unexpected end of file\n')
+            sys.exit(1)
+        line = line.rstrip('\r\n')
+        return line
+
+    _int_re = re.compile(r'^\d+$')
+    _float_re = re.compile(r'^\d+\.\d+$')
+
+    def translate(self, mo):
+        """Extract a structure from a match object, while translating the types in the process."""
+        attrs = {}
+        groupdict = mo.groupdict()
+        for name, value in groupdict.iteritems():
+            if value is None:
+                value = None
+            elif self._int_re.match(value):
+                value = int(value)
+            elif self._float_re.match(value):
+                value = float(value)
+            attrs[name] = (value)
+        return Struct(attrs)
+
+    _cg_header_re = re.compile(
+        # original gprof header
+        r'^\s+called/total\s+parents\s*$|' +
+        r'^index\s+%time\s+self\s+descendents\s+called\+self\s+name\s+index\s*$|' +
+        r'^\s+called/total\s+children\s*$|' +
+        # GNU gprof header
+        r'^index\s+%\s+time\s+self\s+children\s+called\s+name\s*$'
+    )
+
+    _cg_ignore_re = re.compile(
+        # spontaneous
+        r'^\s+<spontaneous>\s*$|'
+        # internal calls (such as "mcount")
+        r'^.*\((\d+)\)$'
+    )
+
+    _cg_primary_re = re.compile(
+        r'^\[(?P<index>\d+)\]?' + 
+        r'\s+(?P<percentage_time>\d+\.\d+)' + 
+        r'\s+(?P<self>\d+\.\d+)' + 
+        r'\s+(?P<descendants>\d+\.\d+)' + 
+        r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + 
+        r'\s+(?P<name>\S.*?)' +
+        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
+        r'\s\[(\d+)\]$'
+    )
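+    # Illustrative example of a primary call-graph line matched by _cg_primary_re
+    # (made-up numbers, shown only to document the expected layout):
+    #
+    #     [3]     92.3    0.05    1.15    6655         foo <cycle 1> [3]
+    #
+    # index=3, percentage_time=92.3, self=0.05, descendants=1.15, called=6655,
+    # name='foo', cycle=1.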
+
+    _cg_parent_re = re.compile(
+        r'^\s+(?P<self>\d+\.\d+)?' + 
+        r'\s+(?P<descendants>\d+\.\d+)?' + 
+        r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' + 
+        r'\s+(?P<name>\S.*?)' +
+        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
+        r'\s\[(?P<index>\d+)\]$'
+    )
+
+    _cg_child_re = _cg_parent_re
+
+    _cg_cycle_header_re = re.compile(
+        r'^\[(?P<index>\d+)\]?' + 
+        r'\s+(?P<percentage_time>\d+\.\d+)' + 
+        r'\s+(?P<self>\d+\.\d+)' + 
+        r'\s+(?P<descendants>\d+\.\d+)' + 
+        r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + 
+        r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
+        r'\s\[(\d+)\]$'
+    )
+
+    _cg_cycle_member_re = re.compile(
+        r'^\s+(?P<self>\d+\.\d+)?' + 
+        r'\s+(?P<descendants>\d+\.\d+)?' + 
+        r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' + 
+        r'\s+(?P<name>\S.*?)' +
+        r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
+        r'\s\[(?P<index>\d+)\]$'
+    )
+
+    _cg_sep_re = re.compile(r'^--+$')
+
+    def parse_function_entry(self, lines):
+        parents = []
+        children = []
+
+        while True:
+            if not lines:
+                sys.stderr.write('warning: unexpected end of entry\n')
+                return
+            line = lines.pop(0)
+            if line.startswith('['):
+                break
+        
+            # read function parent line
+            mo = self._cg_parent_re.match(line)
+            if not mo:
+                if self._cg_ignore_re.match(line):
+                    continue
+                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
+            else:
+                parent = self.translate(mo)
+                parents.append(parent)
+
+        # read primary line
+        mo = self._cg_primary_re.match(line)
+        if not mo:
+            sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
+            return
+        else:
+            function = self.translate(mo)
+
+        while lines:
+            line = lines.pop(0)
+            
+            # read function subroutine line
+            mo = self._cg_child_re.match(line)
+            if not mo:
+                if self._cg_ignore_re.match(line):
+                    continue
+                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
+            else:
+                child = self.translate(mo)
+                children.append(child)
+        
+        function.parents = parents
+        function.children = children
+
+        self.functions[function.index] = function
+
+    def parse_cycle_entry(self, lines):
+
+        # read cycle header line
+        line = lines[0]
+        mo = self._cg_cycle_header_re.match(line)
+        if not mo:
+            sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
+            return
+        cycle = self.translate(mo)
+
+        # read cycle member lines
+        cycle.functions = []
+        for line in lines[1:]:
+            mo = self._cg_cycle_member_re.match(line)
+            if not mo:
+                sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line)
+                continue
+            call = self.translate(mo)
+            cycle.functions.append(call)
+        
+        self.cycles[cycle.cycle] = cycle
+
+    def parse_cg_entry(self, lines):
+        if lines[0].startswith("["):
+            self.parse_cycle_entry(lines)
+        else:
+            self.parse_function_entry(lines)
+
+    def parse_cg(self):
+        """Parse the call graph."""
+
+        # skip call graph header
+        while not self._cg_header_re.match(self.readline()):
+            pass
+        line = self.readline()
+        while self._cg_header_re.match(line):
+            line = self.readline()
+
+        # process call graph entries
+        entry_lines = []
+        while line != '\014': # form feed
+            if line and not line.isspace():
+                if self._cg_sep_re.match(line):
+                    self.parse_cg_entry(entry_lines)
+                    entry_lines = []
+                else:
+                    entry_lines.append(line)            
+            line = self.readline()
+    
+    def parse(self):
+        self.parse_cg()
+        self.fp.close()
+
+        profile = Profile()
+        profile[TIME] = 0.0
+        
+        cycles = {}
+        for index in self.cycles.iterkeys():
+            cycles[index] = Cycle()
+
+        for entry in self.functions.itervalues():
+            # populate the function
+            function = Function(entry.index, entry.name)
+            function[TIME] = entry.self
+            if entry.called is not None:
+                function.called = entry.called
+            if entry.called_self is not None:
+                call = Call(entry.index)
+                call[CALLS] = entry.called_self
+                function.called += entry.called_self
+            
+            # populate the function calls
+            for child in entry.children:
+                call = Call(child.index)
+                
+                assert child.called is not None
+                call[CALLS] = child.called
+
+                if child.index not in self.functions:
+                    # NOTE: functions that were never called but were discovered by gprof's
+                    # static call graph analysis don't have a call graph entry, so we need
+                    # to add them here with zero time and zero calls of their own
+                    missing = Function(child.index, child.name)
+                    missing[TIME] = 0.0
+                    missing.called = 0
+                    profile.add_function(missing)
+
+                function.add_call(call)
+
+            profile.add_function(function)
+
+            if entry.cycle is not None:
+                try:
+                    cycle = cycles[entry.cycle]
+                except KeyError:
+                    sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle) 
+                    cycle = Cycle()
+                    cycles[entry.cycle] = cycle
+                cycle.add_function(function)
+
+            profile[TIME] = profile[TIME] + function[TIME]
+
+        for cycle in cycles.itervalues():
+            profile.add_cycle(cycle)
+
+        # Compute derived events
+        profile.validate()
+        profile.ratio(TIME_RATIO, TIME)
+        profile.call_ratios(CALLS)
+        profile.integrate(TOTAL_TIME, TIME)
+        profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
+
+        return profile
+
+
+class CallgrindParser(LineParser):
+    """Parser for valgrind's callgrind tool.
+    
+    See also:
+    - http://valgrind.org/docs/manual/cl-format.html
+    """
+
+    _call_re = re.compile('^calls=\s*(\d+)\s+((\d+|\+\d+|-\d+|\*)\s+)+$')
+
+    def __init__(self, infile):
+        LineParser.__init__(self, infile)
+
+        # Textual positions
+        self.position_ids = {}
+        self.positions = {}
+
+        # Numeric positions
+        self.num_positions = 1
+        self.cost_positions = ['line']
+        self.last_positions = [0]
+
+        # Events
+        self.num_events = 0
+        self.cost_events = []
+
+        self.profile = Profile()
+        self.profile[SAMPLES] = 0
+
+    def parse(self):
+        # read lookahead
+        self.readline()
+
+        self.parse_key('version')
+        self.parse_key('creator')
+        while self.parse_part():
+            pass
+        if not self.eof():
+            sys.stderr.write('warning: line %u: unexpected line\n' % self.line_no)
+            sys.stderr.write('%s\n' % self.lookahead())
+
+        # compute derived data
+        self.profile.validate()
+        self.profile.find_cycles()
+        self.profile.ratio(TIME_RATIO, SAMPLES)
+        self.profile.call_ratios(CALLS)
+        self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return self.profile
+
+    def parse_part(self):
+        if not self.parse_header_line():
+            return False
+        while self.parse_header_line():
+            pass
+        if not self.parse_body_line():
+            return False
+        while self.parse_body_line():
+            pass
+        return True
+
+    def parse_header_line(self):
+        return \
+            self.parse_empty() or \
+            self.parse_comment() or \
+            self.parse_part_detail() or \
+            self.parse_description() or \
+            self.parse_event_specification() or \
+            self.parse_cost_line_def() or \
+            self.parse_cost_summary()
+
+    _detail_keys = set(('cmd', 'pid', 'thread', 'part'))
+
+    def parse_part_detail(self):
+        return self.parse_keys(self._detail_keys)
+
+    def parse_description(self):
+        return self.parse_key('desc') is not None
+
+    def parse_event_specification(self):
+        event = self.parse_key('event')
+        if event is None:
+            return False
+        return True
+
+    def parse_cost_line_def(self):
+        pair = self.parse_keys(('events', 'positions'))
+        if pair is None:
+            return False
+        key, value = pair
+        items = value.split()
+        if key == 'events':
+            self.num_events = len(items)
+            self.cost_events = items
+        if key == 'positions':
+            self.num_positions = len(items)
+            self.cost_positions = items
+            self.last_positions = [0]*self.num_positions
+        return True
+
+    def parse_cost_summary(self):
+        pair = self.parse_keys(('summary', 'totals'))
+        if pair is None:
+            return False
+        return True
+
+    def parse_body_line(self):
+        return \
+            self.parse_empty() or \
+            self.parse_comment() or \
+            self.parse_cost_line() or \
+            self.parse_position_spec() or \
+            self.parse_association_spec()
+
+    __subpos_re = r'(0x[0-9a-fA-F]+|\d+|\+\d+|-\d+|\*)'
+    _cost_re = re.compile(r'^' + 
+        __subpos_re + r'( +' + __subpos_re + r')*' +
+        r'( +\d+)*' +
+    '$')
+
+    def parse_cost_line(self, calls=None):
+        line = self.lookahead().rstrip()
+        mo = self._cost_re.match(line)
+        if not mo:
+            return False
+
+        function = self.get_function()
+
+        if calls is None:
+            # Unlike other aspects, call object (cob) is relative not to the
+            # last call object, but to the caller's object (ob), so update it
+            # when processing a function's cost line
+            self.positions['cob'] = self.positions['ob']
+
+        values = line.split()
+        assert len(values) <= self.num_positions + self.num_events
+
+        positions = values[0 : self.num_positions]
+        events = values[self.num_positions : ]
+        events += ['0']*(self.num_events - len(events))
+
+        for i in range(self.num_positions):
+            position = positions[i]
+            if position == '*':
+                position = self.last_positions[i]
+            elif position[0] in '-+':
+                position = self.last_positions[i] + int(position)
+            elif position.startswith('0x'):
+                position = int(position, 16)
+            else:
+                position = int(position)
+            self.last_positions[i] = position
+
+        events = map(float, events)
+
+        if calls is None:
+            function[SAMPLES] += events[0] 
+            self.profile[SAMPLES] += events[0]
+        else:
+            callee = self.get_callee()
+            callee.called += calls
+    
+            try:
+                call = function.calls[callee.id]
+            except KeyError:
+                call = Call(callee.id)
+                call[CALLS] = calls
+                call[SAMPLES] = events[0]
+                function.add_call(call)
+            else:
+                call[CALLS] += calls
+                call[SAMPLES] += events[0]
+
+        self.consume()
+        return True
+
+    def parse_association_spec(self):
+        line = self.lookahead()
+        if not line.startswith('calls='):
+            return False
+
+        _, values = line.split('=', 1)
+        values = values.strip().split()
+        calls = int(values[0])
+        call_position = values[1:]
+        self.consume()
+
+        self.parse_cost_line(calls)
+
+        return True
+
+    _position_re = re.compile('^(?P<position>[cj]?(?:ob|fl|fi|fe|fn))=\s*(?:\((?P<id>\d+)\))?(?:\s*(?P<name>.+))?')
+
+    _position_table_map = {
+        'ob': 'ob',
+        'fl': 'fl',
+        'fi': 'fl',
+        'fe': 'fl',
+        'fn': 'fn',
+        'cob': 'ob',
+        'cfl': 'fl',
+        'cfi': 'fl',
+        'cfe': 'fl',
+        'cfn': 'fn',
+        'jfi': 'fl',
+    }
+
+    _position_map = {
+        'ob': 'ob',
+        'fl': 'fl',
+        'fi': 'fl',
+        'fe': 'fl',
+        'fn': 'fn',
+        'cob': 'cob',
+        'cfl': 'cfl',
+        'cfi': 'cfl',
+        'cfe': 'cfl',
+        'cfn': 'cfn',
+        'jfi': 'jfi',
+    }
+
+    def parse_position_spec(self):
+        line = self.lookahead()
+        
+        if line.startswith('jump=') or line.startswith('jcnd='):
+            self.consume()
+            return True
+
+        mo = self._position_re.match(line)
+        if not mo:
+            return False
+
+        position, id, name = mo.groups()
+        if id:
+            table = self._position_table_map[position]
+            if name:
+                self.position_ids[(table, id)] = name
+            else:
+                name = self.position_ids.get((table, id), '')
+        self.positions[self._position_map[position]] = name
+
+        self.consume()
+        return True
+
+    def parse_empty(self):
+        if self.eof():
+            return False
+        line = self.lookahead()
+        if line.strip():
+            return False
+        self.consume()
+        return True
+
+    def parse_comment(self):
+        line = self.lookahead()
+        if not line.startswith('#'):
+            return False
+        self.consume()
+        return True
+
+    _key_re = re.compile(r'^(\w+):')
+
+    def parse_key(self, key):
+        pair = self.parse_keys((key,))
+        if not pair:
+            return None
+        key, value = pair
+        return value
+
+    def parse_keys(self, keys):
+        line = self.lookahead()
+        mo = self._key_re.match(line)
+        if not mo:
+            return None
+        key, value = line.split(':', 1)
+        if key not in keys:
+            return None
+        value = value.strip()
+        self.consume()
+        return key, value
+
+    def make_function(self, module, filename, name):
+        # FIXME: module and filename are not being tracked reliably
+        #id = '|'.join((module, filename, name))
+        id = name
+        try:
+            function = self.profile.functions[id]
+        except KeyError:
+            function = Function(id, name)
+            if module:
+                function.module = os.path.basename(module)
+            function[SAMPLES] = 0
+            function.called = 0
+            self.profile.add_function(function)
+        return function
+
+    def get_function(self):
+        module = self.positions.get('ob', '')
+        filename = self.positions.get('fl', '') 
+        function = self.positions.get('fn', '') 
+        return self.make_function(module, filename, function)
+
+    def get_callee(self):
+        module = self.positions.get('cob', '')
+        filename = self.positions.get('cfi', '') 
+        function = self.positions.get('cfn', '') 
+        return self.make_function(module, filename, function)
+
+
+class PerfParser(LineParser):
+    """Parser for linux perf callgraph output.
+
+    It expects output generated with
+
+        perf record -g
+        perf script | gprof2dot.py --format=perf
+    """
+
+    def __init__(self, infile):
+        LineParser.__init__(self, infile)
+        self.profile = Profile()
+
+    def readline(self):
+        # Override LineParser.readline to ignore comment lines
+        while True:
+            LineParser.readline(self)
+            if self.eof() or not self.lookahead().startswith('#'):
+                break
+
+    def parse(self):
+        # read lookahead
+        self.readline()
+
+        profile = self.profile
+        profile[SAMPLES] = 0
+        while not self.eof():
+            self.parse_event()
+
+        # compute derived data
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES2)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+    def parse_event(self):
+        if self.eof():
+            return
+
+        line = self.consume()
+        assert line
+
+        callchain = self.parse_callchain()
+        if not callchain:
+            return
+
+        callee = callchain[0]
+        callee[SAMPLES] += 1
+        self.profile[SAMPLES] += 1
+
+        for caller in callchain[1:]:
+            try:
+                call = caller.calls[callee.id]
+            except KeyError:
+                call = Call(callee.id)
+                call[SAMPLES2] = 1
+                caller.add_call(call)
+            else:
+                call[SAMPLES2] += 1
+
+            callee = caller
+
+    def parse_callchain(self):
+        callchain = []
+        while self.lookahead():
+            function = self.parse_call()
+            if function is None:
+                break
+            callchain.append(function)
+        if self.lookahead() == '':
+            self.consume()
+        return callchain
+
+    call_re = re.compile(r'^\s+(?P<address>[0-9a-fA-F]+)\s+(?P<symbol>.*)\s+\((?P<module>[^)]*)\)$')
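+    # Illustrative example of a callchain frame line matched by call_re
+    # (made-up address and module, shown only to document the expected layout):
+    #
+    #         ffffffff810403d0 do_sys_open ([kernel.kallsyms])
+    #
+    # address='ffffffff810403d0', symbol='do_sys_open', module='[kernel.kallsyms]'.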
+
+    def parse_call(self):
+        line = self.consume()
+        mo = self.call_re.match(line)
+        assert mo
+        if not mo:
+            return None
+
+        function_name = mo.group('symbol')
+        if not function_name:
+            function_name = mo.group('address')
+
+        module = mo.group('module')
+
+        function_id = function_name + ':' + module
+
+        try:
+            function = self.profile.functions[function_id]
+        except KeyError:
+            function = Function(function_id, function_name)
+            function.module = os.path.basename(module)
+            function[SAMPLES] = 0
+            self.profile.add_function(function)
+
+        return function
+
+
+class OprofileParser(LineParser):
+    """Parser for oprofile callgraph output.
+    
+    See also:
+    - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph
+    """
+
+    _fields_re = {
+        'samples': r'(\d+)',
+        '%': r'(\S+)',
+        'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)',
+        'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)',
+        'app name': r'(?P<application>\S+)',
+        'symbol name': r'(?P<symbol>\(no symbols\)|.+?)',
+    }
+
+    def __init__(self, infile):
+        LineParser.__init__(self, infile)
+        self.entries = {}
+        self.entry_re = None
+
+    def add_entry(self, callers, function, callees):
+        try:
+            entry = self.entries[function.id]
+        except KeyError:
+            self.entries[function.id] = (callers, function, callees)
+        else:
+            callers_total, function_total, callees_total = entry
+            self.update_subentries_dict(callers_total, callers)
+            function_total.samples += function.samples
+            self.update_subentries_dict(callees_total, callees)
+    
+    def update_subentries_dict(self, totals, partials):
+        for partial in partials.itervalues():
+            try:
+                total = totals[partial.id]
+            except KeyError:
+                totals[partial.id] = partial
+            else:
+                total.samples += partial.samples
+        
+    def parse(self):
+        # read lookahead
+        self.readline()
+
+        self.parse_header()
+        while self.lookahead():
+            self.parse_entry()
+
+        profile = Profile()
+
+        reverse_call_samples = {}
+        
+        # populate the profile
+        profile[SAMPLES] = 0
+        for _callers, _function, _callees in self.entries.itervalues():
+            function = Function(_function.id, _function.name)
+            function[SAMPLES] = _function.samples
+            profile.add_function(function)
+            profile[SAMPLES] += _function.samples
+
+            if _function.application:
+                function.process = os.path.basename(_function.application)
+            if _function.image:
+                function.module = os.path.basename(_function.image)
+
+            total_callee_samples = 0
+            for _callee in _callees.itervalues():
+                total_callee_samples += _callee.samples
+
+            for _callee in _callees.itervalues():
+                if not _callee.self:
+                    call = Call(_callee.id)
+                    call[SAMPLES2] = _callee.samples
+                    function.add_call(call)
+                
+        # compute derived data
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES2)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+    def parse_header(self):
+        while not self.match_header():
+            self.consume()
+        line = self.lookahead()
+        fields = re.split(r'\s\s+', line)
+        entry_re = r'^\s*' + r'\s+'.join([self._fields_re[field] for field in fields]) + r'(?P<self>\s+\[self\])?$'
+        self.entry_re = re.compile(entry_re)
+        self.skip_separator()
+
+    def parse_entry(self):
+        callers = self.parse_subentries()
+        if self.match_primary():
+            function = self.parse_subentry()
+            if function is not None:
+                callees = self.parse_subentries()
+                self.add_entry(callers, function, callees)
+        self.skip_separator()
+
+    def parse_subentries(self):
+        subentries = {}
+        while self.match_secondary():
+            subentry = self.parse_subentry()
+            subentries[subentry.id] = subentry
+        return subentries
+
+    def parse_subentry(self):
+        entry = Struct()
+        line = self.consume()
+        mo = self.entry_re.match(line)
+        if not mo:
+            raise ParseError('failed to parse', line)
+        fields = mo.groupdict()
+        entry.samples = int(mo.group(1))
+        if 'source' in fields and fields['source'] != '(no location information)':
+            source = fields['source']
+            filename, lineno = source.split(':')
+            entry.filename = filename
+            entry.lineno = int(lineno)
+        else:
+            source = ''
+            entry.filename = None
+            entry.lineno = None
+        entry.image = fields.get('image', '')
+        entry.application = fields.get('application', '')
+        if 'symbol' in fields and fields['symbol'] != '(no symbols)':
+            entry.symbol = fields['symbol']
+        else:
+            entry.symbol = ''
+        if entry.symbol.startswith('"') and entry.symbol.endswith('"'):
+            entry.symbol = entry.symbol[1:-1]
+        entry.id = ':'.join((entry.application, entry.image, source, entry.symbol))
+        entry.self = fields.get('self', None) != None
+        if entry.self:
+            entry.id += ':self'
+        if entry.symbol:
+            entry.name = entry.symbol
+        else:
+            entry.name = entry.image
+        return entry
+
+    def skip_separator(self):
+        while not self.match_separator():
+            self.consume()
+        self.consume()
+
+    def match_header(self):
+        line = self.lookahead()
+        return line.startswith('samples')
+
+    def match_separator(self):
+        line = self.lookahead()
+        return line == '-'*len(line)
+
+    def match_primary(self):
+        line = self.lookahead()
+        return not line[:1].isspace()
+    
+    def match_secondary(self):
+        line = self.lookahead()
+        return line[:1].isspace()
+
+
+class HProfParser(LineParser):
+    """Parser for java hprof output
+    
+    See also:
+    - http://java.sun.com/developer/technicalArticles/Programming/HPROF.html
+    """
+
+    trace_re = re.compile(r'\t(.*)\((.*):(.*)\)')
+    trace_id_re = re.compile(r'^TRACE (\d+):$')
+
+    def __init__(self, infile):
+        LineParser.__init__(self, infile)
+        self.traces = {}
+        self.samples = {}
+
+    def parse(self):
+        # read lookahead
+        self.readline()
+
+        while not self.lookahead().startswith('------'): self.consume()
+        while not self.lookahead().startswith('TRACE '): self.consume()
+
+        self.parse_traces()
+
+        while not self.lookahead().startswith('CPU'):
+            self.consume()
+
+        self.parse_samples()
+
+        # populate the profile
+        profile = Profile()
+        profile[SAMPLES] = 0
+
+        functions = {}
+
+        # build up callgraph
+        for id, trace in self.traces.iteritems():
+            if not id in self.samples: continue
+            mtime = self.samples[id][0]
+            last = None
+
+            for func, file, line in trace:
+                if not func in functions:
+                    function = Function(func, func)
+                    function[SAMPLES] = 0
+                    profile.add_function(function)
+                    functions[func] = function
+
+                function = functions[func]
+                # allocate time to the deepest method in the trace
+                if not last:
+                    function[SAMPLES] += mtime
+                    profile[SAMPLES] += mtime
+                else:
+                    c = function.get_call(last)
+                    c[SAMPLES2] += mtime
+
+                last = func
+
+        # compute derived data
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES2)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+    def parse_traces(self):
+        while self.lookahead().startswith('TRACE '):
+            self.parse_trace()
+
+    def parse_trace(self):
+        l = self.consume()
+        mo = self.trace_id_re.match(l)
+        tid = mo.group(1)
+        last = None
+        trace = []
+
+        while self.lookahead().startswith('\t'):
+            l = self.consume()
+            match = self.trace_re.search(l)
+            if not match:
+                #sys.stderr.write('Invalid line: %s\n' % l)
+                break
+            else:
+                function_name, file, line = match.groups()
+                trace += [(function_name, file, line)]
+
+        self.traces[int(tid)] = trace
+
+    def parse_samples(self):
+        self.consume()
+        self.consume()
+
+        while not self.lookahead().startswith('CPU'):
+            rank, percent_self, percent_accum, count, traceid, method = self.lookahead().split()
+            self.samples[int(traceid)] = (int(count), method)
+            self.consume()
+
+
+class SysprofParser(XmlParser):
+
+    def __init__(self, stream):
+        XmlParser.__init__(self, stream)
+
+    def parse(self):
+        objects = {}
+        nodes = {}
+
+        self.element_start('profile')
+        while self.token.type == XML_ELEMENT_START:
+            if self.token.name_or_data == 'objects':
+                assert not objects
+                objects = self.parse_items('objects')
+            elif self.token.name_or_data == 'nodes':
+                assert not nodes
+                nodes = self.parse_items('nodes')
+            else:
+                self.parse_value(self.token.name_or_data)
+        self.element_end('profile')
+
+        return self.build_profile(objects, nodes)
+
+    def parse_items(self, name):
+        assert name[-1] == 's'
+        items = {}
+        self.element_start(name)
+        while self.token.type == XML_ELEMENT_START:
+            id, values = self.parse_item(name[:-1])
+            assert id not in items
+            items[id] = values
+        self.element_end(name)
+        return items
+
+    def parse_item(self, name):
+        attrs = self.element_start(name)
+        id = int(attrs['id'])
+        values = self.parse_values()
+        self.element_end(name)
+        return id, values
+
+    def parse_values(self):
+        values = {}
+        while self.token.type == XML_ELEMENT_START:
+            name = self.token.name_or_data
+            value = self.parse_value(name)
+            assert name not in values
+            values[name] = value
+        return values
+
+    def parse_value(self, tag):
+        self.element_start(tag)
+        value = self.character_data()
+        self.element_end(tag)
+        if value.isdigit():
+            return int(value)
+        if value.startswith('"') and value.endswith('"'):
+            return value[1:-1]
+        return value
+
+    def build_profile(self, objects, nodes):
+        profile = Profile()
+        
+        profile[SAMPLES] = 0
+        for id, object in objects.iteritems():
+            # Ignore fake objects (process names, modules, "Everything", "kernel", etc.)
+            if object['self'] == 0:
+                continue
+
+            function = Function(id, object['name'])
+            function[SAMPLES] = object['self']
+            profile.add_function(function)
+            profile[SAMPLES] += function[SAMPLES]
+
+        for id, node in nodes.iteritems():
+            # Ignore fake calls
+            if node['self'] == 0:
+                continue
+
+            # Find a non-ignored parent
+            parent_id = node['parent']
+            while parent_id != 0:
+                parent = nodes[parent_id]
+                caller_id = parent['object']
+                if objects[caller_id]['self'] != 0:
+                    break
+                parent_id = parent['parent']
+            if parent_id == 0:
+                continue
+
+            callee_id = node['object']
+
+            assert objects[caller_id]['self']
+            assert objects[callee_id]['self']
+
+            function = profile.functions[caller_id]
+
+            samples = node['self']
+            try:
+                call = function.calls[callee_id]
+            except KeyError:
+                call = Call(callee_id)
+                call[SAMPLES2] = samples
+                function.add_call(call)
+            else:
+                call[SAMPLES2] += samples
+
+        # Compute derived events
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES2)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+
+class SharkParser(LineParser):
+    """Parser for MacOSX Shark output.
+
+    Author: tom@dbservice.com
+    """
+
+    def __init__(self, infile):
+        LineParser.__init__(self, infile)
+        self.stack = []
+        self.entries = {}
+
+    def add_entry(self, function):
+        try:
+            entry = self.entries[function.id]
+        except KeyError:
+            self.entries[function.id] = (function, { })
+        else:
+            function_total, callees_total = entry
+            function_total.samples += function.samples
+    
+    def add_callee(self, function, callee):
+        func, callees = self.entries[function.id]
+        try:
+            entry = callees[callee.id]
+        except KeyError:
+            callees[callee.id] = callee
+        else:
+            entry.samples += callee.samples
+        
+    def parse(self):
+        self.readline()
+        self.readline()
+        self.readline()
+        self.readline()
+
+        match = re.compile(r'(?P<prefix>[|+ ]*)(?P<samples>\d+), (?P<symbol>[^,]+), (?P<image>.*)')
+
+        while self.lookahead():
+            line = self.consume()
+            mo = match.match(line)
+            if not mo:
+                raise ParseError('failed to parse', line)
+
+            fields = mo.groupdict()
+            prefix = len(fields.get('prefix', 0)) / 2 - 1
+
+            symbol = str(fields.get('symbol', 0))
+            image = str(fields.get('image', 0))
+
+            entry = Struct()
+            entry.id = ':'.join([symbol, image])
+            entry.samples = int(fields.get('samples', 0))
+
+            entry.name = symbol
+            entry.image = image
+
+            # adjust the callstack
+            if prefix < len(self.stack):
+                del self.stack[prefix:]
+
+            if prefix == len(self.stack):
+                self.stack.append(entry)
+
+            # if there is an entry above this one on the callstack, it is this function's caller
+            if prefix > 0:
+                self.add_callee(self.stack[prefix - 1], entry)
+                
+            self.add_entry(entry)
+                
+        profile = Profile()
+        profile[SAMPLES] = 0
+        for _function, _callees in self.entries.itervalues():
+            function = Function(_function.id, _function.name)
+            function[SAMPLES] = _function.samples
+            profile.add_function(function)
+            profile[SAMPLES] += _function.samples
+
+            if _function.image:
+                function.module = os.path.basename(_function.image)
+
+            for _callee in _callees.itervalues():
+                call = Call(_callee.id)
+                call[SAMPLES] = _callee.samples
+                function.add_call(call)
+                
+        # compute derived data
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+
+class XPerfParser(Parser):
+    """Parser for CSVs generted by XPerf, from Microsoft Windows Performance Tools.
+    """
+
+    def __init__(self, stream):
+        Parser.__init__(self)
+        self.stream = stream
+        self.profile = Profile()
+        self.profile[SAMPLES] = 0
+        self.column = {}
+
+    def parse(self):
+        import csv
+        reader = csv.reader(
+            self.stream, 
+            delimiter = ',',
+            quotechar = None,
+            escapechar = None,
+            doublequote = False,
+            skipinitialspace = True,
+            lineterminator = '\r\n',
+            quoting = csv.QUOTE_NONE)
+        it = iter(reader)
+        row = reader.next()
+        self.parse_header(row)
+        for row in it:
+            self.parse_row(row)
+                
+        # compute derived data
+        self.profile.validate()
+        self.profile.find_cycles()
+        self.profile.ratio(TIME_RATIO, SAMPLES)
+        self.profile.call_ratios(SAMPLES2)
+        self.profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return self.profile
+
+    def parse_header(self, row):
+        for column in range(len(row)):
+            name = row[column]
+            assert name not in self.column
+            self.column[name] = column
+
+    def parse_row(self, row):
+        fields = {}
+        for name, column in self.column.iteritems():
+            value = row[column]
+            for factory in int, float:
+                try:
+                    value = factory(value)
+                except ValueError:
+                    pass
+                else:
+                    break
+            fields[name] = value
+        
+        process = fields['Process Name']
+        symbol = fields['Module'] + '!' + fields['Function']
+        weight = fields['Weight']
+        count = fields['Count']
+
+        function = self.get_function(process, symbol)
+        function[SAMPLES] += weight * count
+        self.profile[SAMPLES] += weight * count
+
+        stack = fields['Stack']
+        if stack != '?':
+            stack = stack.split('/')
+            assert stack[0] == '[Root]'
+            if stack[-1] != symbol:
+                # XXX: in some cases the sampled function does not appear in the stack
+                stack.append(symbol)
+            caller = None
+            for symbol in stack[1:]:
+                callee = self.get_function(process, symbol)
+                if caller is not None:
+                    try:
+                        call = caller.calls[callee.id]
+                    except KeyError:
+                        call = Call(callee.id)
+                        call[SAMPLES2] = count
+                        caller.add_call(call)
+                    else:
+                        call[SAMPLES2] += count
+                caller = callee
+
+    def get_function(self, process, symbol):
+        function_id = process + '!' + symbol
+
+        try:
+            function = self.profile.functions[function_id]
+        except KeyError:
+            module, name = symbol.split('!', 1)
+            function = Function(function_id, name)
+            function.process = process
+            function.module = module
+            function[SAMPLES] = 0
+            self.profile.add_function(function)
+
+        return function
+
+
+class SleepyParser(Parser):
+    """Parser for GNU gprof output.
+
+    See also:
+    - http://www.codersnotes.com/sleepy/
+    - http://sleepygraph.sourceforge.net/
+    """
+
+    def __init__(self, filename):
+        Parser.__init__(self)
+
+        from zipfile import ZipFile
+
+        self.database = ZipFile(filename)
+
+        self.version_0_7 = 'Version 0.7 required' in self.database.namelist()
+
+        self.symbols = {}
+        self.calls = {}
+
+        self.profile = Profile()
+    
+    _symbol_re = re.compile(
+        r'^(?P<id>\w+)' + 
+        r'\s+"(?P<module>[^"]*)"' + 
+        r'\s+"(?P<procname>[^"]*)"' + 
+        r'\s+"(?P<sourcefile>[^"]*)"' + 
+        r'\s+(?P<sourceline>\d+)$'
+    )
+
+    def parse_symbols(self):
+        if self.version_0_7:
+            symbols_txt = 'Symbols.txt'
+        else:
+            symbols_txt = 'symbols.txt'
+        lines = self.database.read(symbols_txt).splitlines()
+        for line in lines:
+            mo = self._symbol_re.match(line)
+            if mo:
+                symbol_id, module, procname, sourcefile, sourceline = mo.groups()
+    
+                function_id = ':'.join([module, procname])
+
+                try:
+                    function = self.profile.functions[function_id]
+                except KeyError:
+                    function = Function(function_id, procname)
+                    function.module = module
+                    function[SAMPLES] = 0
+                    self.profile.add_function(function)
+
+                self.symbols[symbol_id] = function
+
+    def parse_callstacks(self):
+        if self.version_0_7:
+            callstacks_txt = 'Callstacks.txt'
+        else:
+            callstacks_txt = 'callstacks.txt'
+        lines = self.database.read(callstacks_txt).splitlines()
+        for line in lines:
+            fields = line.split()
+            samples = float(fields[0])
+            callstack = fields[1:]
+
+            callstack = [self.symbols[symbol_id] for symbol_id in callstack]
+
+            callee = callstack[0]
+
+            callee[SAMPLES] += samples
+            self.profile[SAMPLES] += samples
+            
+            for caller in callstack[1:]:
+                try:
+                    call = caller.calls[callee.id]
+                except KeyError:
+                    call = Call(callee.id)
+                    call[SAMPLES2] = samples
+                    caller.add_call(call)
+                else:
+                    call[SAMPLES2] += samples
+
+                callee = caller
+
+    def parse(self):
+        profile = self.profile
+        profile[SAMPLES] = 0
+
+        self.parse_symbols()
+        self.parse_callstacks()
+
+        # Compute derived events
+        profile.validate()
+        profile.find_cycles()
+        profile.ratio(TIME_RATIO, SAMPLES)
+        profile.call_ratios(SAMPLES2)
+        profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO)
+
+        return profile
+
+
+class AQtimeTable:
+
+    def __init__(self, name, fields):
+        self.name = name
+
+        self.fields = fields
+        self.field_column = {}
+        for column in range(len(fields)):
+            self.field_column[fields[column]] = column
+        self.rows = []
+
+    def __len__(self):
+        return len(self.rows)
+
+    def __iter__(self):
+        for values, children in self.rows:
+            fields = {}
+            for name, value in zip(self.fields, values):
+                fields[name] = value
+            children = dict([(child.name, child) for child in children])
+            yield fields, children
+        raise StopIteration
+
+    def add_row(self, values, children=()):
+        self.rows.append((values, children))
+
+
+class AQtimeParser(XmlParser):
+
+    def __init__(self, stream):
+        XmlParser.__init__(self, stream)
+        self.tables = {}
+
+    def parse(self):
+        self.element_start('AQtime_Results')
+        self.parse_headers()
+        results = self.parse_results()
+        self.element_end('AQtime_Results')
+        return self.build_profile(results) 
+
+    def parse_headers(self):
+        self.element_start('HEADERS')
+        while self.token.type == XML_ELEMENT_START:
+            self.parse_table_header()
+        self.element_end('HEADERS')
+
+    def parse_table_header(self):
+        attrs = self.element_start('TABLE_HEADER')
+        name = attrs['NAME']
+        id = int(attrs['ID'])
+        field_types = []
+        field_names = []
+        while self.token.type == XML_ELEMENT_START:
+            field_type, field_name = self.parse_table_field()
+            field_types.append(field_type)
+            field_names.append(field_name)
+        self.element_end('TABLE_HEADER')
+        self.tables[id] = name, field_types, field_names
+
+    def parse_table_field(self):
+        attrs = self.element_start('TABLE_FIELD')
+        type = attrs['TYPE']
+        name = self.character_data()
+        self.element_end('TABLE_FIELD')
+        return type, name
+
+    def parse_results(self):
+        self.element_start('RESULTS')
+        table = self.parse_data()
+        self.element_end('RESULTS')
+        return table
+
+    def parse_data(self):
+        rows = []
+        attrs = self.element_start('DATA')
+        table_id = int(attrs['TABLE_ID'])
+        table_name, field_types, field_names = self.tables[table_id]
+        table = AQtimeTable(table_name, field_names)
+        while self.token.type == XML_ELEMENT_START:
+            row, children = self.parse_row(field_types)
+            table.add_row(row, children)
+        self.element_end('DATA')
+        return table
+
+    def parse_row(self, field_types):
+        row = [None]*len(field_types)
+        children = []
+        self.element_start('ROW')
+        while self.token.type == XML_ELEMENT_START:
+            if self.token.name_or_data == 'FIELD':
+                field_id, field_value = self.parse_field(field_types)
+                row[field_id] = field_value
+            elif self.token.name_or_data == 'CHILDREN':
+                children = self.parse_children()
+            else:
+                raise XmlTokenMismatch("<FIELD ...> or <CHILDREN ...>", self.token)
+        self.element_end('ROW')
+        return row, children
+
+    def parse_field(self, field_types):
+        attrs = self.element_start('FIELD')
+        id = int(attrs['ID'])
+        type = field_types[id]
+        value = self.character_data()
+        if type == 'Integer':
+            value = int(value)
+        elif type == 'Float':
+            value = float(value)
+        elif type == 'Address':
+            value = int(value)
+        elif type == 'String':
+            pass
+        else:
+            assert False
+        self.element_end('FIELD')
+        return id, value
+
+    def parse_children(self):
+        children = []
+        self.element_start('CHILDREN')
+        while self.token.type == XML_ELEMENT_START:
+            table = self.parse_data()
+            assert table.name not in children
+            children.append(table)
+        self.element_end('CHILDREN')
+        return children
+
+    def build_profile(self, results):
+        assert results.name == 'Routines'
+        profile = Profile()
+        profile[TIME] = 0.0
+        for fields, tables in results:
+            function = self.build_function(fields)
+            children = tables['Children']
+            for fields, _ in children:
+                call = self.build_call(fields)
+                function.add_call(call)
+            profile.add_function(function)
+            profile[TIME] = profile[TIME] + function[TIME]
+        profile[TOTAL_TIME] = profile[TIME]
+        profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
+        return profile
+    
+    def build_function(self, fields):
+        function = Function(self.build_id(fields), self.build_name(fields))
+        function[TIME] = fields['Time']
+        function[TOTAL_TIME] = fields['Time with Children']
+        #function[TIME_RATIO] = fields['% Time']/100.0
+        #function[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
+        return function
+
+    def build_call(self, fields):
+        call = Call(self.build_id(fields))
+        call[TIME] = fields['Time']
+        call[TOTAL_TIME] = fields['Time with Children']
+        #call[TIME_RATIO] = fields['% Time']/100.0
+        #call[TOTAL_TIME_RATIO] = fields['% with Children']/100.0
+        return call
+
+    def build_id(self, fields):
+        return ':'.join([fields['Module Name'], fields['Unit Name'], fields['Routine Name']])
+
+    def build_name(self, fields):
+        # TODO: use more fields
+        return fields['Routine Name']
+
+
+class PstatsParser:
+    """Parser python profiling statistics saved with te pstats module."""
+
+    def __init__(self, *filename):
+        import pstats
+        try:
+            self.stats = pstats.Stats(*filename)
+        except ValueError:
+            import hotshot.stats
+            self.stats = hotshot.stats.load(filename[0])
+        self.profile = Profile()
+        self.function_ids = {}
+
+    def get_function_name(self, (filename, line, name)):
+        module = os.path.splitext(filename)[0]
+        module = os.path.basename(module)
+        return "%s:%d:%s" % (module, line, name)
+
+    def get_function(self, key):
+        try:
+            id = self.function_ids[key]
+        except KeyError:
+            id = len(self.function_ids)
+            name = self.get_function_name(key)
+            function = Function(id, name)
+            self.profile.functions[id] = function
+            self.function_ids[key] = id
+        else:
+            function = self.profile.functions[id]
+        return function
+
+    def parse(self):
+        self.profile[TIME] = 0.0
+        self.profile[TOTAL_TIME] = self.stats.total_tt
+        for fn, (cc, nc, tt, ct, callers) in self.stats.stats.iteritems():
+            callee = self.get_function(fn)
+            callee.called = nc
+            callee[TOTAL_TIME] = ct
+            callee[TIME] = tt
+            self.profile[TIME] += tt
+            self.profile[TOTAL_TIME] = max(self.profile[TOTAL_TIME], ct)
+            for fn, value in callers.iteritems():
+                caller = self.get_function(fn)
+                call = Call(callee.id)
+                if isinstance(value, tuple):
+                    for i in xrange(0, len(value), 4):
+                        nc, cc, tt, ct = value[i:i+4]
+                        if CALLS in call:
+                            call[CALLS] += cc
+                        else:
+                            call[CALLS] = cc
+
+                        if TOTAL_TIME in call:
+                            call[TOTAL_TIME] += ct
+                        else:
+                            call[TOTAL_TIME] = ct
+
+                else:
+                    call[CALLS] = value
+                    call[TOTAL_TIME] = ratio(value, nc)*ct
+
+                caller.add_call(call)
+        #self.stats.print_stats()
+        #self.stats.print_callees()
+
+        # Compute derived events
+        self.profile.validate()
+        self.profile.ratio(TIME_RATIO, TIME)
+        self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
+
+        return self.profile
+
+
+class Theme:
+
+    def __init__(self, 
+            bgcolor = (0.0, 0.0, 1.0),
+            mincolor = (0.0, 0.0, 0.0),
+            maxcolor = (0.0, 0.0, 1.0),
+            fontname = "Arial",
+            minfontsize = 10.0,
+            maxfontsize = 10.0,
+            minpenwidth = 0.5,
+            maxpenwidth = 4.0,
+            gamma = 2.2,
+            skew = 1.0):
+        self.bgcolor = bgcolor
+        self.mincolor = mincolor
+        self.maxcolor = maxcolor
+        self.fontname = fontname
+        self.minfontsize = minfontsize
+        self.maxfontsize = maxfontsize
+        self.minpenwidth = minpenwidth
+        self.maxpenwidth = maxpenwidth
+        self.gamma = gamma
+        self.skew = skew
+
+    def graph_bgcolor(self):
+        return self.hsl_to_rgb(*self.bgcolor)
+
+    def graph_fontname(self):
+        return self.fontname
+
+    def graph_fontsize(self):
+        return self.minfontsize
+
+    def node_bgcolor(self, weight):
+        return self.color(weight)
+
+    def node_fgcolor(self, weight):
+        return self.graph_bgcolor()
+
+    def node_fontsize(self, weight):
+        return self.fontsize(weight)
+
+    def edge_color(self, weight):
+        return self.color(weight)
+
+    def edge_fontsize(self, weight):
+        return self.fontsize(weight)
+
+    def edge_penwidth(self, weight):
+        return max(weight*self.maxpenwidth, self.minpenwidth)
+
+    def edge_arrowsize(self, weight):
+        return 0.5 * math.sqrt(self.edge_penwidth(weight))
+
+    def fontsize(self, weight):
+        return max(weight**2 * self.maxfontsize, self.minfontsize)
+
+    def color(self, weight):
+        weight = min(max(weight, 0.0), 1.0)
+    
+        hmin, smin, lmin = self.mincolor
+        hmax, smax, lmax = self.maxcolor
+        
+        if self.skew < 0:
+            raise ValueError("Skew must be greater than 0")
+        elif self.skew == 1.0:
+            h = hmin + weight*(hmax - hmin)
+            s = smin + weight*(smax - smin)
+            l = lmin + weight*(lmax - lmin)
+        else:
+            base = self.skew
+            h = hmin + ((hmax-hmin)*(-1.0 + (base ** weight)) / (base - 1.0))
+            s = smin + ((smax-smin)*(-1.0 + (base ** weight)) / (base - 1.0))
+            l = lmin + ((lmax-lmin)*(-1.0 + (base ** weight)) / (base - 1.0))
+
+        return self.hsl_to_rgb(h, s, l)
+
+    def hsl_to_rgb(self, h, s, l):
+        """Convert a color from HSL color-model to RGB.
+
+        See also:
+        - http://www.w3.org/TR/css3-color/#hsl-color
+        """
+
+        h = h % 1.0
+        s = min(max(s, 0.0), 1.0)
+        l = min(max(l, 0.0), 1.0)
+
+        if l <= 0.5:
+            m2 = l*(s + 1.0)
+        else:
+            m2 = l + s - l*s
+        m1 = l*2.0 - m2
+        r = self._hue_to_rgb(m1, m2, h + 1.0/3.0)
+        g = self._hue_to_rgb(m1, m2, h)
+        b = self._hue_to_rgb(m1, m2, h - 1.0/3.0)
+
+        # Apply gamma correction
+        r **= self.gamma
+        g **= self.gamma
+        b **= self.gamma
+
+        return (r, g, b)
+
+    def _hue_to_rgb(self, m1, m2, h):
+        if h < 0.0:
+            h += 1.0
+        elif h > 1.0:
+            h -= 1.0
+        if h*6 < 1.0:
+            return m1 + (m2 - m1)*h*6.0
+        elif h*2 < 1.0:
+            return m2
+        elif h*3 < 2.0:
+            return m1 + (m2 - m1)*(2.0/3.0 - h)*6.0
+        else:
+            return m1
+
+
+TEMPERATURE_COLORMAP = Theme(
+    mincolor = (2.0/3.0, 0.80, 0.25), # dark blue
+    maxcolor = (0.0, 1.0, 0.5), # saturated red
+    gamma = 1.0
+)
+
+PINK_COLORMAP = Theme(
+    mincolor = (0.0, 1.0, 0.90), # pink
+    maxcolor = (0.0, 1.0, 0.5), # saturated red
+)
+
+GRAY_COLORMAP = Theme(
+    mincolor = (0.0, 0.0, 0.85), # light gray
+    maxcolor = (0.0, 0.0, 0.0), # black
+)
+
+BW_COLORMAP = Theme(
+    minfontsize = 8.0,
+    maxfontsize = 24.0,
+    mincolor = (0.0, 0.0, 0.0), # black
+    maxcolor = (0.0, 0.0, 0.0), # black
+    minpenwidth = 0.1,
+    maxpenwidth = 8.0,
+)
+
+
+class DotWriter:
+    """Writer for the DOT language.
+
+    See also:
+    - "The DOT Language" specification
+      http://www.graphviz.org/doc/info/lang.html
+    """
+
+    strip = False
+    wrap = False
+
+    def __init__(self, fp):
+        self.fp = fp
+
+    def wrap_function_name(self, name):
+        """Split the function name on multiple lines."""
+
+        if len(name) > 32:
+            ratio = 2.0/3.0
+            height = max(int(len(name)/(1.0 - ratio) + 0.5), 1)
+            width = max(len(name)/height, 32)
+            # TODO: break lines in symbols
+            name = textwrap.fill(name, width, break_long_words=False)
+
+        # Take away spaces
+        name = name.replace(", ", ",")
+        name = name.replace("> >", ">>")
+        name = name.replace("> >", ">>") # catch consecutive
+
+        return name
+
+    def graph(self, profile, theme):
+        self.begin_graph()
+
+        fontname = theme.graph_fontname()
+
+        self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125)
+        self.attr('node', fontname=fontname, shape="box", style="filled", fontcolor="white", width=0, height=0)
+        self.attr('edge', fontname=fontname)
+
+        for function in profile.functions.itervalues():
+            labels = []
+            if function.process is not None:
+                labels.append(function.process)
+            if function.module is not None:
+                labels.append(function.module)
+
+            if self.strip:
+                function_name = function.stripped_name()
+            else:
+                function_name = function.name
+            if self.wrap:
+                function_name = self.wrap_function_name(function_name)
+            labels.append(function_name)
+
+            for event in TOTAL_TIME_RATIO, TIME_RATIO:
+                if event in function.events:
+                    label = event.format(function[event])
+                    labels.append(label)
+            if function.called is not None:
+                labels.append(u"%u\xd7" % (function.called,))
+
+            if function.weight is not None:
+                weight = function.weight
+            else:
+                weight = 0.0
+
+            label = '\n'.join(labels)
+            self.node(function.id, 
+                label = label, 
+                color = self.color(theme.node_bgcolor(weight)), 
+                fontcolor = self.color(theme.node_fgcolor(weight)), 
+                fontsize = "%.2f" % theme.node_fontsize(weight),
+            )
+
+            for call in function.calls.itervalues():
+                callee = profile.functions[call.callee_id]
+
+                labels = []
+                for event in TOTAL_TIME_RATIO, CALLS:
+                    if event in call.events:
+                        label = event.format(call[event])
+                        labels.append(label)
+
+                if call.weight is not None:
+                    weight = call.weight
+                elif callee.weight is not None:
+                    weight = callee.weight
+                else:
+                    weight = 0.0
+
+                label = '\n'.join(labels)
+
+                self.edge(function.id, call.callee_id, 
+                    label = label, 
+                    color = self.color(theme.edge_color(weight)), 
+                    fontcolor = self.color(theme.edge_color(weight)),
+                    fontsize = "%.2f" % theme.edge_fontsize(weight), 
+                    penwidth = "%.2f" % theme.edge_penwidth(weight), 
+                    labeldistance = "%.2f" % theme.edge_penwidth(weight), 
+                    arrowsize = "%.2f" % theme.edge_arrowsize(weight),
+                )
+
+        self.end_graph()
+
+    def begin_graph(self):
+        self.write('digraph {\n')
+
+    def end_graph(self):
+        self.write('}\n')
+
+    def attr(self, what, **attrs):
+        self.write("\t")
+        self.write(what)
+        self.attr_list(attrs)
+        self.write(";\n")
+
+    def node(self, node, **attrs):
+        self.write("\t")
+        self.id(node)
+        self.attr_list(attrs)
+        self.write(";\n")
+
+    def edge(self, src, dst, **attrs):
+        self.write("\t")
+        self.id(src)
+        self.write(" -> ")
+        self.id(dst)
+        self.attr_list(attrs)
+        self.write(";\n")
+
+    def attr_list(self, attrs):
+        if not attrs:
+            return
+        self.write(' [')
+        first = True
+        for name, value in attrs.iteritems():
+            if first:
+                first = False
+            else:
+                self.write(", ")
+            self.id(name)
+            self.write('=')
+            self.id(value)
+        self.write(']')
+
+    def id(self, id):
+        if isinstance(id, (int, float)):
+            s = str(id)
+        elif isinstance(id, basestring):
+            if id.isalnum() and not id.startswith('0x'):
+                s = id
+            else:
+                s = self.escape(id)
+        else:
+            raise TypeError
+        self.write(s)
+
+    def color(self, (r, g, b)):
+
+        def float2int(f):
+            if f <= 0.0:
+                return 0
+            if f >= 1.0:
+                return 255
+            return int(255.0*f + 0.5)
+
+        return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)])
+
+    def escape(self, s):
+        s = s.encode('utf-8')
+        s = s.replace('\\', r'\\')
+        s = s.replace('\n', r'\n')
+        s = s.replace('\t', r'\t')
+        s = s.replace('"', r'\"')
+        return '"' + s + '"'
+
+    def write(self, s):
+        self.fp.write(s)
+
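+# The function below is a hypothetical usage sketch (it is not called anywhere
+# in this script, which only drives DotWriter through Main.write_graph further
+# down); it shows how the writer emits a minimal two-node graph.
+def _example_dot_output(fp):
+    dot = DotWriter(fp)
+    dot.begin_graph()
+    dot.attr('node', shape="box", style="filled")
+    dot.node('a', label="main", fontsize="10.00")
+    dot.node('b', label="helper", fontsize="10.00")
+    dot.edge('a', 'b', label="42x", penwidth="1.00")
+    dot.end_graph()
+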
+
+class Main:
+    """Main program."""
+
+    themes = {
+            "color": TEMPERATURE_COLORMAP,
+            "pink": PINK_COLORMAP,
+            "gray": GRAY_COLORMAP,
+            "bw": BW_COLORMAP,
+    }
+
+    def main(self):
+        """Main program."""
+
+        parser = optparse.OptionParser(
+            usage="\n\t%prog [options] [file] ...",
+            version="%%prog %s" % __version__)
+        parser.add_option(
+            '-o', '--output', metavar='FILE',
+            type="string", dest="output",
+            help="output filename [stdout]")
+        parser.add_option(
+            '-n', '--node-thres', metavar='PERCENTAGE',
+            type="float", dest="node_thres", default=0.5,
+            help="eliminate nodes below this threshold [default: %default]")
+        parser.add_option(
+            '-e', '--edge-thres', metavar='PERCENTAGE',
+            type="float", dest="edge_thres", default=0.1,
+            help="eliminate edges below this threshold [default: %default]")
+        parser.add_option(
+            '-f', '--format',
+            type="choice", choices=('prof', 'callgrind', 'perf', 'oprofile', 'hprof', 'sysprof', 'pstats', 'shark', 'sleepy', 'aqtime', 'xperf'),
+            dest="format", default="prof",
+            help="profile format: prof, callgrind, oprofile, hprof, sysprof, shark, sleepy, aqtime, pstats, or xperf [default: %default]")
+        parser.add_option(
+            '-c', '--colormap',
+            type="choice", choices=('color', 'pink', 'gray', 'bw'),
+            dest="theme", default="color",
+            help="color map: color, pink, gray, or bw [default: %default]")
+        parser.add_option(
+            '-s', '--strip',
+            action="store_true",
+            dest="strip", default=False,
+            help="strip function parameters, template parameters, and const modifiers from demangled C++ function names")
+        parser.add_option(
+            '-w', '--wrap',
+            action="store_true",
+            dest="wrap", default=False,
+            help="wrap function names")
+        # add a new option to control skew of the colorization curve
+        parser.add_option(
+            '--skew',
+            type="float", dest="theme_skew", default=1.0,
+            help="skew the colorization curve.  Values < 1.0 give more variety to lower percentages.  Value > 1.0 give less variety to lower percentages")
+        (self.options, self.args) = parser.parse_args(sys.argv[1:])
+
+        if len(self.args) > 1 and self.options.format != 'pstats':
+            parser.error('incorrect number of arguments')
+
+        try:
+            self.theme = self.themes[self.options.theme]
+        except KeyError:
+            parser.error('invalid colormap \'%s\'' % self.options.theme)
+        
+        # set skew on the theme now that it has been picked.
+        if self.options.theme_skew:
+            self.theme.skew = self.options.theme_skew
+
+        if self.options.format == 'prof':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = GprofParser(fp)
+        elif self.options.format == 'callgrind':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = CallgrindParser(fp)
+        elif self.options.format == 'perf':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = PerfParser(fp)
+        elif self.options.format == 'oprofile':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = OprofileParser(fp)
+        elif self.options.format == 'sysprof':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = SysprofParser(fp)
+        elif self.options.format == 'hprof':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = HProfParser(fp)        
+        elif self.options.format == 'pstats':
+            if not self.args:
+                parser.error('at least one file must be specified for pstats input')
+            parser = PstatsParser(*self.args)
+        elif self.options.format == 'xperf':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = XPerfParser(fp)
+        elif self.options.format == 'shark':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = SharkParser(fp)
+        elif self.options.format == 'sleepy':
+            if len(self.args) != 1:
+                parser.error('exactly one file must be specified for sleepy input')
+            parser = SleepyParser(self.args[0])
+        elif self.options.format == 'aqtime':
+            if not self.args:
+                fp = sys.stdin
+            else:
+                fp = open(self.args[0], 'rt')
+            parser = AQtimeParser(fp)
+        else:
+            parser.error('invalid format \'%s\'' % self.options.format)
+
+        self.profile = parser.parse()
+        
+        if self.options.output is None:
+            self.output = sys.stdout
+        else:
+            self.output = open(self.options.output, 'wt')
+
+        self.write_graph()
+
+    def write_graph(self):
+        dot = DotWriter(self.output)
+        dot.strip = self.options.strip
+        dot.wrap = self.options.wrap
+
+        profile = self.profile
+        profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0)
+
+        dot.graph(profile, self.theme)
+
+
+if __name__ == '__main__':
+    Main().main()
diff --git a/Tools/PyUtils/bin/icython.py b/Tools/PyUtils/bin/icython.py
new file mode 100755
index 00000000000..d0d136bb36d
--- /dev/null
+++ b/Tools/PyUtils/bin/icython.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+# @file icython.py: a dead simple cython interpreter
+# @author Sebastien Binet <binet@cern.ch>
+# @date June 2009
+
+__version__ = "$Revision$"
+__doc__ = """A dead simple cython interpreter to try out cython statements"""
+__author__= "Sebastien Binet <binet@cern.ch>"
+
+### stdlib imports ------------------------------------------------------------
+from code import InteractiveConsole
+
+### cython
+import pyximport
+pyximport.install()
+
+### classes -------------------------------------------------------------------
+class CythonInteractiveConsole(InteractiveConsole):
+    """A dead simple Cython interpreter
+    """
+
+    def __init__(self, *args, **kwds):
+        # interactive console is an old-style class
+        InteractiveConsole.__init__(self, *args, **kwds)
+        import sys
+        sys.ps1 = 'cython> '
+        sys.ps2 = '.   ... '
+        self._pyxbuild_dir = kwds.get('pyxbuild_dir', None)
+        self.py_compile = self.compile
+        
+    def runcode(self, code):
+        """%s""" % InteractiveConsole.runcode.__doc__
+        # we need to fix-up that method as we are given a C-ext module
+        # in case of cython (instead of a code-object)
+        import types
+        if isinstance(code, types.ModuleType):
+            # slam the content of the module into our local namespace
+            for k,v in code.__dict__.iteritems():
+                if not k.startswith('__'):
+                    self.locals[k] = v
+        else:
+            return InteractiveConsole.runcode(self, code)
+
+    def runsource(self, source, filename="<input>", symbol="single"):
+        kwds = dict(source=source, filename=filename, symbol=symbol)
+        try:
+            code = self.cython_compile(source, filename, symbol)
+        except (OverflowError, SyntaxError, ValueError):
+            # Case 1: the source is syntactically invalid -> report the error
+            self.showsyntaxerror(filename)
+            return False
+
+        if code is None:
+            # Case 2: the source is incomplete -> ask for more input
+            return True
+
+        # Case 3: the source is complete -> execute it
+        self.runcode(code)
+        return False
+
+    def cython_compile(self, source, filename, symbol):
+        try:
+            # first try normal python...
+            return self.compile(source, filename, symbol)
+        except SyntaxError, py_err:
+            # maybe a cython-construct...
+            try:
+                fname = _maybe_cython_compile(source=source, ctx=self)
+                if fname is None:
+                    # more input needed...
+                    return
+                from pyximport import load_module as cy_load_module
+                mod = cy_load_module("_cython_gen_%s" % id(source), fname)
+                import os
+                os.remove(fname)
+                return mod
+            except Exception, cython_err:
+                raise SyntaxError(cython_err)
+    pass # class CythonInteractiveConsole
+
+### utils ---------------------------------------------------------------------
+def code_to_file(source):
+    import tempfile
+    fd, fname = tempfile.mkstemp(prefix="cython_source_",
+                                 suffix='.pyx')
+    import os
+    os.close(fd)
+    if os.path.exists(fname):
+        os.remove(fname)
+    f = open(fname, 'w')
+    f.write(source)
+    f.flush()
+    f.close()
+    return fname
+
+def _maybe_cython_compile(source, filename="<input>", symbol="single", ctx=None):
+    # Check for source consisting of only blank lines and comments
+    for line in source.split("\n"):
+        line = line.strip()
+        if line and line[0] != '#':
+            break               # Leave it alone
+    else:
+        if symbol != "eval":
+            source = "pass"     # Replace it with a 'pass' statement
+
+    err = err1 = err2 = None
+    code = code1 = code2 = None
+
+    lines = source.split("\n")
+    if lines[-1].strip() != "":
+        return  # last line not blank yet: treat the statement as incomplete
+    
+    from pyximport import pyxbuild
+    #pyxbuild.DEBUG = 1
+    pyx_to_dll = pyxbuild.pyx_to_dll
+
+    # the following is modeled after python:codeop._maybe_compile
+    try:
+        fname = code_to_file(source+"\n ")
+        code = pyx_to_dll(filename=fname,
+                          force_rebuild=1,
+                          pyxbuild_dir=ctx._pyxbuild_dir)
+    except (pyxbuild.DistutilsError, pyxbuild.CCompilerError), err:
+        pass
+
+    try:
+        open(fname, "a").write('\n')
+        code1 = pyx_to_dll(filename=fname,
+                           force_rebuild=1,
+                           pyxbuild_dir=ctx._pyxbuild_dir)
+    except (pyxbuild.DistutilsError, pyxbuild.CCompilerError), err1:
+        pass
+
+    try:
+        open(fname, "a").write('\n')
+        code2 = pyx_to_dll(filename=fname,
+                           force_rebuild=1,
+                           pyxbuild_dir=ctx._pyxbuild_dir)
+    except (pyxbuild.DistutilsError, pyxbuild.CCompilerError), err2:
+        pass
+
+    if code and fname:
+        return fname
+    if not code1 and repr(err1) == repr(err2):
+        raise SyntaxError, err1
+
+### ---------------------------------------------------------------------------
+if __name__ == "__main__":
+    import user # allow the user to inject their own Python customizations...
+    banner = """
+###################################
+# Welcome to ICython,             
+#  an interactive Cython console  
+#  (version=%s)                   
+###################################
+""" % __version__
+    import sys
+    cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
+    banner += "\nPython %s on %s\n%s\n(%s)\n" % (
+        sys.version, sys.platform, cprt,
+        CythonInteractiveConsole.__name__)
+
+    # fix-up distutils, see bug #51501
+    import platform
+    if platform.architecture()[0] == '32bit':
+        import distutils.sysconfig as ds
+        ds.get_config_vars()['CFLAGS'] += ' -m32'
+        ds.get_config_vars()['LDSHARED'] += ' -m32'
+    del platform
+    
+    ns = dict(locals())
+    for k in ('_maybe_cython_compile',
+              'code_to_file', 'cprt', 'banner', 'InteractiveConsole',
+              'CythonInteractiveConsole'
+              ):
+        del ns[k]
+    icython = CythonInteractiveConsole(locals=ns)
+    icython.interact(banner=banner)
diff --git a/Tools/PyUtils/bin/isDSinFAX.py b/Tools/PyUtils/bin/isDSinFAX.py
new file mode 100755
index 00000000000..a85bd5bfea5
--- /dev/null
+++ b/Tools/PyUtils/bin/isDSinFAX.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+import subprocess, threading, os, sys 
+import urllib2,simplejson
+
+import argparse
+ 
+parser = argparse.ArgumentParser(description='Checks if dataset is accessible through FAX.')
+parser.add_argument('dataset', type=str, help='Dataset name')
+parser.add_argument('-af','--accessfile', action='store_const', const='1', help='try to open the first root file of the dataset using root. ')
+parser.add_argument('-aa','--accessall', action='store_const', const='1', help='try to open all the root files of the dataset using root. ')
+
+args = vars(parser.parse_args())
+
+try:
+    import dq2.clientapi.cli.cliutil
+    from dq2.common.cli.DQDashboardTool import DQDashboardTool
+    from dq2.clientapi.cli.cliutil import getDQ2
+    from dq2.filecatalog.lfc.lfcconventions import to_native_lfn
+except ImportError:
+    print "Environment not set [error importing DQ2 dependencies]!"
+    sys.exit(1)
+
+
+class Command(object):
+    
+    def __init__(self, cmd):
+        self.cmd = cmd
+        self.process = None
+    
+    def run(self, timeout):
+        def target():
+#            print 'command started: ', self.cmd
+            self.process = subprocess.Popen(self.cmd, shell=True)
+            self.process.communicate()
+        
+        thread = threading.Thread(target=target)
+        thread.start()
+        
+        thread.join(timeout)
+        if thread.is_alive():
+            print 'Terminating process'
+            self.process.terminate()
+            thread.join()
+        return self.process.returncode
+
+
+#print 'Getting data from AGIS ...'
+
+class site:    
+    name=''
+    host=''
+    port=1094
+     
+    def __init__(self, na, ho):
+        self.name=na
+        ho=ho.replace("root://","")
+        self.host=ho.split(":")[0]
+        if ho.count(":"):
+            self.port=ho.split(":")[1]
+    
+    def prnt(self, what):
+        if (what>=0 and self.redirector!=what): return
+        print  'name:', self.name, '\thost:', self.host, '\tport:', self.port 
+
+sites=[];
+
+try:
+    req = urllib2.Request("http://atlas-agis-api-0.cern.ch/request/service/query/get_se_services/?json&state=ACTIVE&flavour=XROOTD", None)
+    opener = urllib2.build_opener()
+    f = opener.open(req)
+    res=simplejson.load(f)
+    for s in res:
+#        print s["name"], s["rc_site"], s["endpoint"]
+        ns = site( s["rc_site"], s["endpoint"] )
+        sites.append(ns)
+#    print res
+#    print ' got FAX SEs from AGIS.'
+
+except:
+    print "Unexpected error:", sys.exc_info()[0]    
+
+#for s in sites: s.prnt(-1)
+
+allddms=set()
+
+try:
+    req = urllib2.Request("http://atlas-agis-api-0.cern.ch/request/ddmendpoint/query/list/?json&state=ACTIVE", None)
+    opener = urllib2.build_opener()
+    f = opener.open(req)
+    res=simplejson.load(f)
+    for s in res:
+        for c in sites:
+            if s["rc_site"]==c.name:
+ #               print s["rc_site"], s["name"]
+                allddms.add(s["name"])
+                break
+#    print ' got related ddmendpoints from agis.'
+
+except:
+    print "Unexpected error:", sys.exc_info()[0]    
+
+
+DS=args['dataset']
+
+com=Command('dq2-ls -r '+ DS + ' > fax.tmp' )
+com.run(300)
+dsets={}
+cds=''
+f = open('fax.tmp', 'r') 
+for line in f:
+    if line.startswith('Multiple'): break
+    line=line.strip()
+    if line.count(':')==0:continue
+    line=line.split(":")
+  #  print line
+
+    if line[0]=='INCOMPLETE':
+        if len(line[1])==0: continue
+        rep=line[1].split(',')
+        for r in rep:
+            if r in allddms:
+                dsets[cds][0]+=1
+        continue
+  
+    if line[0]=='COMPLETE':
+        if len(line[1])==0: continue
+        rep=line[1].split(',')
+        for r in rep:
+            if r in allddms:
+                dsets[cds][1]+=1 
+        continue  
+    
+    cds=line[0]
+    dsets[cds]=[0,0]
+
+for d  in dsets.keys():
+    print d,'\tcomplete replicas:',dsets[d][1],'\tincomplete:',dsets[d][0]
diff --git a/Tools/PyUtils/bin/lstags b/Tools/PyUtils/bin/lstags
new file mode 100755
index 00000000000..f34bcd54f4d
--- /dev/null
+++ b/Tools/PyUtils/bin/lstags
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+#
+# @file:    lstags
+# @purpose: List the version (tag) of the currently checked out packages.
+#           Optionally compare to the release version and most recent version.
+#           Inspired by BaBar's 'statusrel' command.
+# @author:  Frank Winklmeier
+#
+# $Id: $ 
+
+__version__ = "$Revision$"
+__author__  = "Frank Winklmeier"
+
+import sys
+import os
+from PyCmt import Cmt
+
+def main():
+
+   import logging
+   logging.basicConfig(level = logging.WARNING)
+   log = logging.getLogger("lstags")
+   
+   import optparse
+   parser = optparse.OptionParser(description="List the version of the locally checked out packages.")
+   
+   parser.add_option("-d", "--differ", action="store_true", dest="differ",
+                     help="only show tags that differ")
+   parser.add_option("-r", "--recent", action="store_true", dest="recent",
+                     help="show most recent tag in SVN")
+   parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
+                     help="only print package name (e.g. as input to pkgco)")
+   parser.add_option("-f", "--fullName", action="store_true", dest="fullName",
+                     help="print full package name including all containers")
+   parser.add_option("-c", "--cmtCmd", action="store_true", dest="cmtCmd",
+                     help="display as 'cmt co' command")
+   parser.add_option("-n", "--nosupression", action="store_true", dest="nosup",
+                     help="do not use default package supression list")
+   
+   (opt, args) = parser.parse_args()
+
+   # Some consistency checks
+   if (opt.cmtCmd):
+      opt.fullName = True
+      opt.quiet = True
+      opt.recent = False
+      
+   if (opt.nosup): pkgSupList = []
+   else: pkgSupList = ["WorkArea"]
+
+   testArea = os.environ.get("TestArea")
+   if testArea==None:
+      log.fatal("TestArea environment variable not set. Setup your test release first.")
+      return 1
+   
+   from PyUtils.WorkAreaLib import scan
+   cmtPackages = []
+   cmtPackages.extend( scan( testArea, pkgSupList ) )
+   
+   cmt = Cmt.CmtWrapper()
+   
+   for pkg in cmtPackages:
+      pkgContainer = pkg.path.replace(testArea,"").lstrip("/")
+      fullPkgName = pkgContainer + "/" + pkg.name
+
+      if (opt.differ or not opt.quiet):    # Need to know the release version of pkg
+         testversion = cmt.get_pkg_version(fullPkgName)
+         if testversion==None: testversion = "NOT_FOUND"
+      
+      if (not opt.differ or testversion!=pkg.version):
+         prefix = ""
+         if (opt.fullName): prefix = pkgContainer+"/"
+
+         release = os.environ.get("AtlasVersion","Release")
+         msg = prefix+pkg.version;
+         
+         if (not opt.quiet):
+            msg += "  (%s uses %s)" % (release,testversion)
+            
+         if (opt.recent):
+            headversion = cmt.get_latest_pkg_tag(fullPkgName)
+            if headversion==None: headversion="NONE"
+            msg += "  (most recent %s)" % (headversion)
+
+         if (opt.cmtCmd):
+            msg = "cmt co -r %s %s" % (pkg.version,fullPkgName)
+            
+         print msg
+
+   return 0
+
+
+if __name__ == "__main__":
+   try:
+      sys.exit(main())
+   except KeyboardInterrupt:
+      sys.exit(1)
+      
diff --git a/Tools/PyUtils/bin/magnifyPoolFile.py b/Tools/PyUtils/bin/magnifyPoolFile.py
new file mode 100755
index 00000000000..4f3bd48cfcf
--- /dev/null
+++ b/Tools/PyUtils/bin/magnifyPoolFile.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# $Id: magnifyPoolFile.py,v 1.5 2008-06-27 17:24:13 binet Exp $
+# @file:    magnifyPoolFile.py
+# @purpose: produce a new POOL file with N times the content of an input one.
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    May 2007
+#
+# @example:
+#
+# magnifyPoolFile.py 1000 aod.pool
+# magnifyPoolFile.py 1000 aod.pool my.magnified.aod.pool
+#
+
+__version__ = "$Revision: 1.5 $"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+
+from optparse import OptionParser
+
+if __name__ == "__main__":
+
+    parser = OptionParser(
+        usage = "usage: %prog [-n] nMagnify [-i] input.pool [-o output.pool]"
+        )
+
+    parser.add_option(
+        "-n",
+        dest = "nMagnify",
+        help = "The number of times the input file will be 'replicated'"
+        )
+    parser.add_option(
+        "-i",
+        "--input",
+        dest = "inPoolFile",
+        help = "Path to the input POOL file to be 'replicated'/'magnified'"
+        )
+    parser.add_option(
+        "-o",
+        "--output",
+        dest = "outPoolFile",
+        default = None,
+        help = "Path to the output POOL file containing the replicated data"
+        )
+
+    (options, args) = parser.parse_args()
+
+    if len(args) > 0 and args[0][0] != "-":
+        options.nMagnify = args[0]
+        pass
+
+    if len(args) > 1 and args[1][0] != "-":
+        options.inPoolFile = args[1]
+        pass
+
+    if len(args) > 2 and args[2][0] != "-":
+        options.outPoolFile = args[2]
+        pass
+
+    if not options.nMagnify or \
+       not options.inPoolFile :
+        str(parser.print_help() or "ERROR")
+        sys.exit(1)
+        pass
+    
+    nMagnify = int(options.nMagnify)
+    if nMagnify <= 1:
+        print "ERROR: you have to give an integer > 1 for the magnifier !!"
+        str(parser.print_help() or "ERROR")
+        sys.exit(1)
+        pass
+    
+    inPoolFile = os.path.expandvars(os.path.expanduser(options.inPoolFile))
+
+    if not options.outPoolFile:
+        options.outPoolFile = os.path.join(
+            os.path.dirname(inPoolFile),
+            "magnified."+os.path.basename(inPoolFile)
+            )
+
+    outPoolFile = os.path.expandvars(os.path.expanduser(options.outPoolFile))
+
+    
+    print "#"*80
+    print "## Magnifying POOL files..."
+    print "##  - replicator parameter:",options.nMagnify
+    print "##  - input: ",inPoolFile
+    print "##  - output:",outPoolFile
+    print "##"
+
+    oldArgs = sys.argv
+    sys.argv = sys.argv[:1] + ['-b'] + sys.argv[1:]
+    print "## importing ROOT..."
+    import ROOT
+    print "## importing ROOT... [DONE]"
+    import PyCintex
+    PyCintex.Cintex.Enable()
+
+    import RootUtils.PyROOTFixes
+
+    sys.argv = oldArgs
+    
+    print "## opening input Pool file..."
+    inPoolFile = ROOT.TFile.Open( inPoolFile, "READ" )
+    assert( inPoolFile.IsOpen() )
+    print "## opening input Pool file... [DONE]"
+
+    trees = [ k.ReadObj() for k in inPoolFile.GetListOfKeys() ]
+
+    print "## creating output Pool file..."
+    outPoolFile = ROOT.TFile.Open( outPoolFile, "RECREATE" )
+    assert( outPoolFile.IsOpen() )
+    print "## creating output Pool file... [DONE]"
+    
+    print "## initialize input file trees' branch status..."
+    for tree in trees: tree.SetBranchStatus("*", 0)
+
+    print "## create output trees..."
+    outTrees = [ ]
+    for tree in trees:
+        tree.SetBranchStatus("*", 1)
+        outTrees.append( tree.CloneTree(0) )
+
+    print "## magnifying..."
+    for m in range( nMagnify ):
+        if nMagnify<10 or m % (nMagnify/10) == 0:
+            print "  ... %s" % str(m).zfill(8)
+        for i in range(len(trees)):
+            for j in range(trees[i].GetEntries()):
+                trees[i].GetEntry(j)
+                outTrees[i].Fill()
+    print "## magnifying... [DONE]"
+
+    print "## committing output file..."
+    outPoolFile.Write()
+    print "## committing output file... [DONE]"
+
+    inPoolFile.Close()
+    del inPoolFile
+
+    outPoolFile.Close()
+    del outPoolFile
+
+    print "## Bye."
+    sys.exit(0)
+    
diff --git a/Tools/PyUtils/bin/merge-poolfiles.py b/Tools/PyUtils/bin/merge-poolfiles.py
new file mode 100755
index 00000000000..0ab80663164
--- /dev/null
+++ b/Tools/PyUtils/bin/merge-poolfiles.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+## @file PyUtils/bin/merge-poolfiles.py
+## @purpose take a bunch of input pool files and produce a single one
+##          autoconfiguration is (attempted to be) performed
+## @author Sebastien Binet <binet@cern.ch>
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+__doc__ = """take a bunch of input pool files and produce a single one.
+autoconfiguration is (attempted to be) performed
+"""
+
+import sys
+import os
+
+if __name__ == "__main__":
+   
+    from PyUtils.Logging import logging
+    msg = logging.getLogger('pool-merge')
+    msg.setLevel(logging.INFO)
+    
+    from optparse import OptionParser
+    parser = OptionParser(
+        usage="usage: %prog [-o] out.merged.pool f1.pool f2.pool [...]"
+        )
+    p = parser.add_option
+    p( "-o",
+       "--output",
+       dest = "outfname",
+       default = None,
+       help = "Name of the merged output POOL file" )
+    p( "--evts",
+       dest = "evts",
+       default = -1,
+       help = "Number of events to process (default: %default)" )
+    p( "--logfile",
+       dest = "logfile",
+       default = None,
+       help = "Path to a file where to put athena job's logfile (default: stdout)" )
+    
+    
+    msg.info(':'*40)
+    msg.info('welcome to poolfiles merger version %s', __version__)
+    
+    (options, args) = parser.parse_args()
+
+    fnames = []
+    
+    if len(args) > 0:
+        from os.path import expanduser, expandvars
+        fnames = [ expandvars(expanduser(arg))
+                   for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.outfname is None:
+        # take the first out of fnames
+        options.outfname = fnames[0]
+        fnames = fnames[1:]
+        
+    if len(fnames) == 0:
+        str(parser.print_help() or "")
+        sys.exit(1)
+
+
+    # running merger...
+    from PyUtils.PoolFile import merge_pool_files
+    sc = merge_pool_files(input_files=fnames, output_file=options.outfname,
+                          nevts=options.evts,
+                          msg=msg,
+                          logfile=options.logfile)
+    msg.info('bye')
+    msg.info(':'*40)
+    sys.exit(sc)
+    
diff --git a/Tools/PyUtils/bin/pep8.py b/Tools/PyUtils/bin/pep8.py
new file mode 100755
index 00000000000..0a9fbbc9d82
--- /dev/null
+++ b/Tools/PyUtils/bin/pep8.py
@@ -0,0 +1,1360 @@
+#!/usr/bin/python
+# pep8.py - Check Python source code formatting, according to PEP 8
+# Copyright (C) 2006 Johann C. Rocholl <johann@rocholl.net>
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""
+Check Python source code formatting, according to PEP 8:
+http://www.python.org/dev/peps/pep-0008/
+
+For usage and a list of options, try this:
+$ python pep8.py -h
+
+This program and its regression test suite live here:
+http://github.com/jcrocholl/pep8
+
+Groups of errors and warnings:
+E errors
+W warnings
+100 indentation
+200 whitespace
+300 blank lines
+400 imports
+500 line length
+600 deprecation
+700 statements
+
+You can add checks to this program by writing plugins. Each plugin is
+a simple function that is called for each line of source code, either
+physical or logical.
+
+Physical line:
+- Raw line of text from the input file.
+
+Logical line:
+- Multi-line statements converted to a single line.
+- Stripped left and right.
+- Contents of strings replaced with 'xxx' of same length.
+- Comments removed.
+
+The check function requests physical or logical lines by the name of
+the first argument:
+
+def maximum_line_length(physical_line)
+def extraneous_whitespace(logical_line)
+def blank_lines(logical_line, blank_lines, indent_level, line_number)
+
+The last example above demonstrates how check plugins can request
+additional information with extra arguments. All attributes of the
+Checker object are available. Some examples:
+
+lines: a list of the raw lines from the input file
+tokens: the tokens that contribute to this logical line
+line_number: line number in the input file
+blank_lines: blank lines before this one
+indent_char: first indentation character in this file (' ' or '\t')
+indent_level: indentation (with tabs expanded to multiples of 8)
+previous_indent_level: indentation on previous line
+previous_logical: previous logical line
+
+The docstring of each check function shall be the relevant part of
+text from PEP 8. It is printed if the user enables --show-pep8.
+Several docstrings contain examples directly from the PEP 8 document.
+
+Okay: spam(ham[1], {eggs: 2})
+E201: spam( ham[1], {eggs: 2})
+
+These examples are verified automatically when pep8.py is run with the
+--doctest option. You can add examples for your own check functions.
+The format is simple: "Okay" or error/warning code followed by colon
+and space, the rest of the line is example source code. If you put 'r'
+before the docstring, you can use \n for newline, \t for tab and \s
+for space.
+
+"""
+
+__version__ = '0.6.1'
+
+import os
+import sys
+import re
+import time
+import inspect
+import keyword
+import tokenize
+from optparse import OptionParser
+from fnmatch import fnmatch
+try:
+    frozenset
+except NameError:
+    from sets import ImmutableSet as frozenset
+
+
+DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git'
+DEFAULT_IGNORE = 'E24'
+MAX_LINE_LENGTH = 79
+
+INDENT_REGEX = re.compile(r'([ \t]*)')
+RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)')
+SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)')
+ERRORCODE_REGEX = re.compile(r'[EW]\d{3}')
+DOCSTRING_REGEX = re.compile(r'u?r?["\']')
+WHITESPACE_AROUND_OPERATOR_REGEX = \
+    re.compile('([^\w\s]*)\s*(\t|  )\s*([^\w\s]*)')
+EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
+WHITESPACE_AROUND_NAMED_PARAMETER_REGEX = \
+    re.compile(r'[()]|\s=[^=]|[^=!<>]=\s')
+
+
+WHITESPACE = ' \t'
+
+BINARY_OPERATORS = frozenset(['**=', '*=', '+=', '-=', '!=', '<>',
+    '%=', '^=', '&=', '|=', '==', '/=', '//=', '<=', '>=', '<<=', '>>=',
+    '%',  '^',  '&',  '|',  '=',  '/',  '//',  '<',  '>',  '<<'])
+UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
+OPERATORS = BINARY_OPERATORS | UNARY_OPERATORS
+SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.INDENT,
+                         tokenize.DEDENT, tokenize.NEWLINE])
+E225NOT_KEYWORDS = (frozenset(keyword.kwlist + ['print']) -
+                    frozenset(['False', 'None', 'True']))
+BENCHMARK_KEYS = ('directories', 'files', 'logical lines', 'physical lines')
+
+options = None
+args = None
+
+
+##############################################################################
+# Plugins (check functions) for physical lines
+##############################################################################
+
+
+def tabs_or_spaces(physical_line, indent_char):
+    r"""
+    Never mix tabs and spaces.
+
+    The most popular way of indenting Python is with spaces only.  The
+    second-most popular way is with tabs only.  Code indented with a mixture
+    of tabs and spaces should be converted to using spaces exclusively.  When
+    invoking the Python command line interpreter with the -t option, it issues
+    warnings about code that illegally mixes tabs and spaces.  When using -tt
+    these warnings become errors.  These options are highly recommended!
+
+    Okay: if a == 0:\n        a = 1\n        b = 1
+    E101: if a == 0:\n        a = 1\n\tb = 1
+    """
+    indent = INDENT_REGEX.match(physical_line).group(1)
+    for offset, char in enumerate(indent):
+        if char != indent_char:
+            return offset, "E101 indentation contains mixed spaces and tabs"
+
+
+def tabs_obsolete(physical_line):
+    r"""
+    For new projects, spaces-only are strongly recommended over tabs.  Most
+    editors have features that make this easy to do.
+
+    Okay: if True:\n    return
+    W191: if True:\n\treturn
+    """
+    indent = INDENT_REGEX.match(physical_line).group(1)
+    if indent.count('\t'):
+        return indent.index('\t'), "W191 indentation contains tabs"
+
+
+def trailing_whitespace(physical_line):
+    r"""
+    JCR: Trailing whitespace is superfluous.
+    FBM: Except when it occurs as part of a blank line (i.e. the line is
+         nothing but whitespace). According to Python docs[1] a line with only
+         whitespace is considered a blank line, and is to be ignored. However,
+         matching a blank line to its indentation level avoids mistakenly
+         terminating a multi-line statement (e.g. class declaration) when
+         pasting code into the standard Python interpreter.
+
+         [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
+
+    The warning returned varies depending on whether the line itself is blank,
+    for easier filtering for those who want to indent their blank lines.
+
+    Okay: spam(1)
+    W291: spam(1)\s
+    W293: class Foo(object):\n    \n    bang = 12
+    """
+    physical_line = physical_line.rstrip('\n')    # chr(10), newline
+    physical_line = physical_line.rstrip('\r')    # chr(13), carriage return
+    physical_line = physical_line.rstrip('\x0c')  # chr(12), form feed, ^L
+    stripped = physical_line.rstrip()
+    if physical_line != stripped:
+        if stripped:
+            return len(stripped), "W291 trailing whitespace"
+        else:
+            return 0, "W293 blank line contains whitespace"
+
+
+def trailing_blank_lines(physical_line, lines, line_number):
+    r"""
+    JCR: Trailing blank lines are superfluous.
+
+    Okay: spam(1)
+    W391: spam(1)\n
+    """
+    if physical_line.strip() == '' and line_number == len(lines):
+        return 0, "W391 blank line at end of file"
+
+
+def missing_newline(physical_line):
+    """
+    JCR: The last line should have a newline.
+    """
+    if physical_line.rstrip() == physical_line:
+        return len(physical_line), "W292 no newline at end of file"
+
+
+def maximum_line_length(physical_line):
+    """
+    Limit all lines to a maximum of 79 characters.
+
+    There are still many devices around that are limited to 80 character
+    lines; plus, limiting windows to 80 characters makes it possible to have
+    several windows side-by-side.  The default wrapping on such devices looks
+    ugly.  Therefore, please limit all lines to a maximum of 79 characters.
+    For flowing long blocks of text (docstrings or comments), limiting the
+    length to 72 characters is recommended.
+    """
+    line = physical_line.rstrip()
+    length = len(line)
+    if length > MAX_LINE_LENGTH:
+        try:
+            # The line could contain multi-byte characters
+            if not hasattr(line, 'decode'):   # Python 3
+                line = line.encode('latin-1')
+            length = len(line.decode('utf-8'))
+        except UnicodeDecodeError:
+            pass
+    if length > MAX_LINE_LENGTH:
+        return MAX_LINE_LENGTH, "E501 line too long (%d characters)" % length
+
+
+##############################################################################
+# Plugins (check functions) for logical lines
+##############################################################################
+
+
+def blank_lines(logical_line, blank_lines, indent_level, line_number,
+                previous_logical, previous_indent_level,
+                blank_lines_before_comment):
+    r"""
+    Separate top-level function and class definitions with two blank lines.
+
+    Method definitions inside a class are separated by a single blank line.
+
+    Extra blank lines may be used (sparingly) to separate groups of related
+    functions.  Blank lines may be omitted between a bunch of related
+    one-liners (e.g. a set of dummy implementations).
+
+    Use blank lines in functions, sparingly, to indicate logical sections.
+
+    Okay: def a():\n    pass\n\n\ndef b():\n    pass
+    Okay: def a():\n    pass\n\n\n# Foo\n# Bar\n\ndef b():\n    pass
+
+    E301: class Foo:\n    b = 0\n    def bar():\n        pass
+    E302: def a():\n    pass\n\ndef b(n):\n    pass
+    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
+    E303: def a():\n\n\n\n    pass
+    E304: @decorator\n\ndef a():\n    pass
+    """
+    if line_number == 1:
+        return  # Don't expect blank lines before the first line
+    max_blank_lines = max(blank_lines, blank_lines_before_comment)
+    if previous_logical.startswith('@'):
+        if max_blank_lines:
+            return 0, "E304 blank lines found after function decorator"
+    elif max_blank_lines > 2 or (indent_level and max_blank_lines == 2):
+        return 0, "E303 too many blank lines (%d)" % max_blank_lines
+    elif (logical_line.startswith('def ') or
+          logical_line.startswith('class ') or
+          logical_line.startswith('@')):
+        if indent_level:
+            if not (max_blank_lines or previous_indent_level < indent_level or
+                    DOCSTRING_REGEX.match(previous_logical)):
+                return 0, "E301 expected 1 blank line, found 0"
+        elif max_blank_lines != 2:
+            return 0, "E302 expected 2 blank lines, found %d" % max_blank_lines
+
+
+def extraneous_whitespace(logical_line):
+    """
+    Avoid extraneous whitespace in the following situations:
+
+    - Immediately inside parentheses, brackets or braces.
+
+    - Immediately before a comma, semicolon, or colon.
+
+    Okay: spam(ham[1], {eggs: 2})
+    E201: spam( ham[1], {eggs: 2})
+    E201: spam(ham[ 1], {eggs: 2})
+    E201: spam(ham[1], { eggs: 2})
+    E202: spam(ham[1], {eggs: 2} )
+    E202: spam(ham[1 ], {eggs: 2})
+    E202: spam(ham[1], {eggs: 2 })
+
+    E203: if x == 4: print x, y; x, y = y , x
+    E203: if x == 4: print x, y ; x, y = y, x
+    E203: if x == 4 : print x, y; x, y = y, x
+    """
+    line = logical_line
+    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
+        text = match.group()
+        char = text.strip()
+        found = match.start()
+        if text == char + ' ' and char in '([{':
+            return found + 1, "E201 whitespace after '%s'" % char
+        if text == ' ' + char and line[found - 1] != ',':
+            if char in '}])':
+                return found, "E202 whitespace before '%s'" % char
+            if char in ',;:':
+                return found, "E203 whitespace before '%s'" % char
+
+
+def missing_whitespace(logical_line):
+    """
+    JCR: Each comma, semicolon or colon should be followed by whitespace.
+
+    Okay: [a, b]
+    Okay: (3,)
+    Okay: a[1:4]
+    Okay: a[:4]
+    Okay: a[1:]
+    Okay: a[1:4:2]
+    E231: ['a','b']
+    E231: foo(bar,baz)
+    """
+    line = logical_line
+    for index in range(len(line) - 1):
+        char = line[index]
+        if char in ',;:' and line[index + 1] not in WHITESPACE:
+            before = line[:index]
+            if char == ':' and before.count('[') > before.count(']'):
+                continue  # Slice syntax, no space required
+            if char == ',' and line[index + 1] == ')':
+                continue  # Allow tuple with only one element: (3,)
+            return index, "E231 missing whitespace after '%s'" % char
+
+
+def indentation(logical_line, previous_logical, indent_char,
+                indent_level, previous_indent_level):
+    r"""
+    Use 4 spaces per indentation level.
+
+    For really old code that you don't want to mess up, you can continue to
+    use 8-space tabs.
+
+    Okay: a = 1
+    Okay: if a == 0:\n    a = 1
+    E111:   a = 1
+
+    Okay: for item in items:\n    pass
+    E112: for item in items:\npass
+
+    Okay: a = 1\nb = 2
+    E113: a = 1\n    b = 2
+    """
+    if indent_char == ' ' and indent_level % 4:
+        return 0, "E111 indentation is not a multiple of four"
+    indent_expect = previous_logical.endswith(':')
+    if indent_expect and indent_level <= previous_indent_level:
+        return 0, "E112 expected an indented block"
+    if indent_level > previous_indent_level and not indent_expect:
+        return 0, "E113 unexpected indentation"
+
+
+def whitespace_before_parameters(logical_line, tokens):
+    """
+    Avoid extraneous whitespace in the following situations:
+
+    - Immediately before the open parenthesis that starts the argument
+      list of a function call.
+
+    - Immediately before the open parenthesis that starts an indexing or
+      slicing.
+
+    Okay: spam(1)
+    E211: spam (1)
+
+    Okay: dict['key'] = list[index]
+    E211: dict ['key'] = list[index]
+    E211: dict['key'] = list [index]
+    """
+    prev_type = tokens[0][0]
+    prev_text = tokens[0][1]
+    prev_end = tokens[0][3]
+    for index in range(1, len(tokens)):
+        token_type, text, start, end, line = tokens[index]
+        if (token_type == tokenize.OP and
+            text in '([' and
+            start != prev_end and
+            (prev_type == tokenize.NAME or prev_text in '}])') and
+            # Syntax "class A (B):" is allowed, but avoid it
+            (index < 2 or tokens[index - 2][1] != 'class') and
+            # Allow "return (a.foo for a in range(5))"
+            (not keyword.iskeyword(prev_text))):
+            return prev_end, "E211 whitespace before '%s'" % text
+        prev_type = token_type
+        prev_text = text
+        prev_end = end
+
+
+def whitespace_around_operator(logical_line):
+    """
+    Avoid extraneous whitespace in the following situations:
+
+    - More than one space around an assignment (or other) operator to
+      align it with another.
+
+    Okay: a = 12 + 3
+    E221: a = 4  + 5
+    E222: a = 4 +  5
+    E223: a = 4\t+ 5
+    E224: a = 4 +\t5
+    """
+    for match in WHITESPACE_AROUND_OPERATOR_REGEX.finditer(logical_line):
+        before, whitespace, after = match.groups()
+        tab = whitespace == '\t'
+        offset = match.start(2)
+        if before in OPERATORS:
+            return offset, (tab and "E224 tab after operator" or
+                            "E222 multiple spaces after operator")
+        elif after in OPERATORS:
+            return offset, (tab and "E223 tab before operator" or
+                            "E221 multiple spaces before operator")
+
+
+def missing_whitespace_around_operator(logical_line, tokens):
+    r"""
+    - Always surround these binary operators with a single space on
+      either side: assignment (=), augmented assignment (+=, -= etc.),
+      comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
+      Booleans (and, or, not).
+
+    - Use spaces around arithmetic operators.
+
+    Okay: i = i + 1
+    Okay: submitted += 1
+    Okay: x = x * 2 - 1
+    Okay: hypot2 = x * x + y * y
+    Okay: c = (a + b) * (a - b)
+    Okay: foo(bar, key='word', *args, **kwargs)
+    Okay: baz(**kwargs)
+    Okay: negative = -1
+    Okay: spam(-1)
+    Okay: alpha[:-i]
+    Okay: if not -5 < x < +5:\n    pass
+    Okay: lambda *args, **kw: (args, kw)
+
+    E225: i=i+1
+    E225: submitted +=1
+    E225: x = x*2 - 1
+    E225: hypot2 = x*x + y*y
+    E225: c = (a+b) * (a-b)
+    E225: c = alpha -4
+    E225: z = x **y
+    """
+    parens = 0
+    need_space = False
+    prev_type = tokenize.OP
+    prev_text = prev_end = None
+    for token_type, text, start, end, line in tokens:
+        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
+            # ERRORTOKEN is triggered by backticks in Python 3000
+            continue
+        if text in ('(', 'lambda'):
+            parens += 1
+        elif text == ')':
+            parens -= 1
+        if need_space:
+            if start != prev_end:
+                need_space = False
+            elif text == '>' and prev_text == '<':
+                # Tolerate the "<>" operator, even if running Python 3
+                pass
+            else:
+                return prev_end, "E225 missing whitespace around operator"
+        elif token_type == tokenize.OP and prev_end is not None:
+            if text == '=' and parens:
+                # Allow keyword args or defaults: foo(bar=None).
+                pass
+            elif text in BINARY_OPERATORS:
+                need_space = True
+            elif text in UNARY_OPERATORS:
+                # Allow unary operators: -123, -x, +1.
+                # Allow argument unpacking: foo(*args, **kwargs).
+                if prev_type == tokenize.OP:
+                    if prev_text in '}])':
+                        need_space = True
+                elif prev_type == tokenize.NAME:
+                    if prev_text not in E225NOT_KEYWORDS:
+                        need_space = True
+                else:
+                    need_space = True
+            if need_space and start == prev_end:
+                return prev_end, "E225 missing whitespace around operator"
+        prev_type = token_type
+        prev_text = text
+        prev_end = end
+
+
+def whitespace_around_comma(logical_line):
+    """
+    Avoid extraneous whitespace in the following situations:
+
+    - More than one space around an assignment (or other) operator to
+      align it with another.
+
+    JCR: This should also be applied around comma etc.
+    Note: these checks are disabled by default
+
+    Okay: a = (1, 2)
+    E241: a = (1,  2)
+    E242: a = (1,\t2)
+    """
+    line = logical_line
+    for separator in ',;:':
+        found = line.find(separator + '  ')
+        if found > -1:
+            return found + 1, "E241 multiple spaces after '%s'" % separator
+        found = line.find(separator + '\t')
+        if found > -1:
+            return found + 1, "E242 tab after '%s'" % separator
+
+
+def whitespace_around_named_parameter_equals(logical_line):
+    """
+    Don't use spaces around the '=' sign when used to indicate a
+    keyword argument or a default parameter value.
+
+    Okay: def complex(real, imag=0.0):
+    Okay: return magic(r=real, i=imag)
+    Okay: boolean(a == b)
+    Okay: boolean(a != b)
+    Okay: boolean(a <= b)
+    Okay: boolean(a >= b)
+
+    E251: def complex(real, imag = 0.0):
+    E251: return magic(r = real, i = imag)
+    """
+    parens = 0
+    for match in WHITESPACE_AROUND_NAMED_PARAMETER_REGEX.finditer(
+            logical_line):
+        text = match.group()
+        if parens and len(text) == 3:
+            issue = "E251 no spaces around keyword / parameter equals"
+            return match.start(), issue
+        if text == '(':
+            parens += 1
+        elif text == ')':
+            parens -= 1
+
+
+def whitespace_before_inline_comment(logical_line, tokens):
+    """
+    Separate inline comments by at least two spaces.
+
+    An inline comment is a comment on the same line as a statement.  Inline
+    comments should be separated by at least two spaces from the statement.
+    They should start with a # and a single space.
+
+    Okay: x = x + 1  # Increment x
+    Okay: x = x + 1    # Increment x
+    E261: x = x + 1 # Increment x
+    E262: x = x + 1  #Increment x
+    E262: x = x + 1  #  Increment x
+    """
+    prev_end = (0, 0)
+    for token_type, text, start, end, line in tokens:
+        if token_type == tokenize.NL:
+            continue
+        if token_type == tokenize.COMMENT:
+            if not line[:start[1]].strip():
+                continue
+            if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
+                return (prev_end,
+                        "E261 at least two spaces before inline comment")
+            if (len(text) > 1 and text.startswith('#  ')
+                           or not text.startswith('# ')):
+                return start, "E262 inline comment should start with '# '"
+        else:
+            prev_end = end
+
+
+def imports_on_separate_lines(logical_line):
+    r"""
+    Imports should usually be on separate lines.
+
+    Okay: import os\nimport sys
+    E401: import sys, os
+
+    Okay: from subprocess import Popen, PIPE
+    Okay: from myclass import MyClass
+    Okay: from foo.bar.yourclass import YourClass
+    Okay: import myclass
+    Okay: import foo.bar.yourclass
+    """
+    line = logical_line
+    if line.startswith('import '):
+        found = line.find(',')
+        if found > -1:
+            return found, "E401 multiple imports on one line"
+
+
+def compound_statements(logical_line):
+    r"""
+    Compound statements (multiple statements on the same line) are
+    generally discouraged.
+
+    While sometimes it's okay to put an if/for/while with a small body
+    on the same line, never do this for multi-clause statements. Also
+    avoid folding such long lines!
+
+    Okay: if foo == 'blah':\n    do_blah_thing()
+    Okay: do_one()
+    Okay: do_two()
+    Okay: do_three()
+
+    E701: if foo == 'blah': do_blah_thing()
+    E701: for x in lst: total += x
+    E701: while t < 10: t = delay()
+    E701: if foo == 'blah': do_blah_thing()
+    E701: else: do_non_blah_thing()
+    E701: try: something()
+    E701: finally: cleanup()
+    E701: if foo == 'blah': one(); two(); three()
+
+    E702: do_one(); do_two(); do_three()
+    """
+    line = logical_line
+    found = line.find(':')
+    if -1 < found < len(line) - 1:
+        before = line[:found]
+        if (before.count('{') <= before.count('}') and  # {'a': 1} (dict)
+            before.count('[') <= before.count(']') and  # [1:2] (slice)
+            not re.search(r'\blambda\b', before)):      # lambda x: x
+            return found, "E701 multiple statements on one line (colon)"
+    found = line.find(';')
+    if -1 < found:
+        return found, "E702 multiple statements on one line (semicolon)"
+
+
+def python_3000_has_key(logical_line):
+    """
+    The {}.has_key() method will be removed in a future version of
+    Python. Use the 'in' operator instead, like:
+    d = {"a": 1, "b": 2}
+    if "b" in d:
+        print d["b"]
+    """
+    pos = logical_line.find('.has_key(')
+    if pos > -1:
+        return pos, "W601 .has_key() is deprecated, use 'in'"
+
+
+def python_3000_raise_comma(logical_line):
+    """
+    When raising an exception, use "raise ValueError('message')"
+    instead of the older form "raise ValueError, 'message'".
+
+    The paren-using form is preferred because when the exception arguments
+    are long or include string formatting, you don't need to use line
+    continuation characters thanks to the containing parentheses.  The older
+    form will be removed in Python 3000.
+    """
+    match = RAISE_COMMA_REGEX.match(logical_line)
+    if match:
+        return match.start(1), "W602 deprecated form of raising exception"
+
+
+def python_3000_not_equal(logical_line):
+    """
+    != can also be written <>, but this is an obsolete usage kept for
+    backwards compatibility only. New code should always use !=.
+    The older syntax is removed in Python 3000.
+    """
+    pos = logical_line.find('<>')
+    if pos > -1:
+        return pos, "W603 '<>' is deprecated, use '!='"
+
+
+def python_3000_backticks(logical_line):
+    """
+    Backticks are removed in Python 3000.
+    Use repr() instead.
+    """
+    pos = logical_line.find('`')
+    if pos > -1:
+        return pos, "W604 backticks are deprecated, use 'repr()'"
+
+
+##############################################################################
+# Helper functions
+##############################################################################
+
+
+if '' == ''.encode():
+    # Python 2: implicit encoding.
+    def readlines(filename):
+        return open(filename).readlines()
+else:
+    # Python 3: decode to latin-1.
+    # This function is lazy: it does not read the encoding declaration.
+    # XXX: use tokenize.detect_encoding()
+    def readlines(filename):
+        return open(filename, encoding='latin-1').readlines()
+
+
+def expand_indent(line):
+    """
+    Return the amount of indentation.
+    Tabs are expanded to the next multiple of 8.
+
+    >>> expand_indent('    ')
+    4
+    >>> expand_indent('\\t')
+    8
+    >>> expand_indent('    \\t')
+    8
+    >>> expand_indent('       \\t')
+    8
+    >>> expand_indent('        \\t')
+    16
+    """
+    result = 0
+    for char in line:
+        if char == '\t':
+            result = result // 8 * 8 + 8
+        elif char == ' ':
+            result += 1
+        else:
+            break
+    return result
+
+
+def mute_string(text):
+    """
+    Replace contents with 'xxx' to prevent syntax matching.
+
+    >>> mute_string('"abc"')
+    '"xxx"'
+    >>> mute_string("'''abc'''")
+    "'''xxx'''"
+    >>> mute_string("r'abc'")
+    "r'xxx'"
+    """
+    start = 1
+    end = len(text) - 1
+    # String modifiers (e.g. u or r)
+    if text.endswith('"'):
+        start += text.index('"')
+    elif text.endswith("'"):
+        start += text.index("'")
+    # Triple quotes
+    if text.endswith('"""') or text.endswith("'''"):
+        start += 2
+        end -= 2
+    return text[:start] + 'x' * (end - start) + text[end:]
+
+
+def message(text):
+    """Print a message."""
+    # print >> sys.stderr, options.prog + ': ' + text
+    # print >> sys.stderr, text
+    print(text)
+
+
+##############################################################################
+# Framework to run all checks
+##############################################################################
+
+
+def find_checks(argument_name):
+    """
+    Find all globally visible functions where the first argument name
+    starts with argument_name.
+    """
+    checks = []
+    for name, function in globals().items():
+        if not inspect.isfunction(function):
+            continue
+        args = inspect.getargspec(function)[0]
+        if args and args[0].startswith(argument_name):
+            codes = ERRORCODE_REGEX.findall(inspect.getdoc(function) or '')
+            for code in codes or ['']:
+                if not code or not ignore_code(code):
+                    checks.append((name, function, args))
+                    break
+    checks.sort()
+    return checks
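+
+# A hypothetical illustration of the convention used above: a new logical-line
+# check is registered simply by defining a module-level function whose first
+# argument is named 'logical_line'.  The function name and the W699 code below
+# are made up for the example:
+#
+#     def trailing_todo(logical_line):
+#         """
+#         Hypothetical check: flag TODO markers left in the code.
+#         """
+#         pos = logical_line.find('TODO')
+#         if pos > -1:
+#             return pos, "W699 TODO marker found"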
+
+
+class Checker(object):
+    """
+    Load a Python source file, tokenize it, check coding style.
+    """
+
+    def __init__(self, filename, lines=None):
+        self.filename = filename
+        if filename is None:
+            self.filename = 'stdin'
+            self.lines = lines or []
+        elif lines is None:
+            self.lines = readlines(filename)
+        else:
+            self.lines = lines
+        options.counters['physical lines'] += len(self.lines)
+
+    def readline(self):
+        """
+        Get the next line from the input buffer.
+        """
+        self.line_number += 1
+        if self.line_number > len(self.lines):
+            return ''
+        return self.lines[self.line_number - 1]
+
+    def readline_check_physical(self):
+        """
+        Check and return the next physical line. This method can be
+        used to feed tokenize.generate_tokens.
+        """
+        line = self.readline()
+        if line:
+            self.check_physical(line)
+        return line
+
+    def run_check(self, check, argument_names):
+        """
+        Run a check plugin.
+        """
+        arguments = []
+        for name in argument_names:
+            arguments.append(getattr(self, name))
+        return check(*arguments)
+
+    def check_physical(self, line):
+        """
+        Run all physical checks on a raw input line.
+        """
+        self.physical_line = line
+        if self.indent_char is None and len(line) and line[0] in ' \t':
+            self.indent_char = line[0]
+        for name, check, argument_names in options.physical_checks:
+            result = self.run_check(check, argument_names)
+            if result is not None:
+                offset, text = result
+                self.report_error(self.line_number, offset, text, check)
+
+    def build_tokens_line(self):
+        """
+        Build a logical line from tokens.
+        """
+        self.mapping = []
+        logical = []
+        length = 0
+        previous = None
+        for token in self.tokens:
+            token_type, text = token[0:2]
+            if token_type in SKIP_TOKENS:
+                continue
+            if token_type == tokenize.STRING:
+                text = mute_string(text)
+            if previous:
+                end_line, end = previous[3]
+                start_line, start = token[2]
+                if end_line != start_line:  # different row
+                    prev_text = self.lines[end_line - 1][end - 1]
+                    if prev_text == ',' or (prev_text not in '{[('
+                                            and text not in '}])'):
+                        logical.append(' ')
+                        length += 1
+                elif end != start:  # different column
+                    fill = self.lines[end_line - 1][end:start]
+                    logical.append(fill)
+                    length += len(fill)
+            self.mapping.append((length, token))
+            logical.append(text)
+            length += len(text)
+            previous = token
+        self.logical_line = ''.join(logical)
+        assert self.logical_line.lstrip() == self.logical_line
+        assert self.logical_line.rstrip() == self.logical_line
+
+    def check_logical(self):
+        """
+        Build a line from tokens and run all logical checks on it.
+        """
+        options.counters['logical lines'] += 1
+        self.build_tokens_line()
+        first_line = self.lines[self.mapping[0][1][2][0] - 1]
+        indent = first_line[:self.mapping[0][1][2][1]]
+        self.previous_indent_level = self.indent_level
+        self.indent_level = expand_indent(indent)
+        if options.verbose >= 2:
+            print(self.logical_line[:80].rstrip())
+        for name, check, argument_names in options.logical_checks:
+            if options.verbose >= 4:
+                print('   ' + name)
+            result = self.run_check(check, argument_names)
+            if result is not None:
+                offset, text = result
+                if isinstance(offset, tuple):
+                    original_number, original_offset = offset
+                else:
+                    for token_offset, token in self.mapping:
+                        if offset >= token_offset:
+                            original_number = token[2][0]
+                            original_offset = (token[2][1]
+                                               + offset - token_offset)
+                self.report_error(original_number, original_offset,
+                                  text, check)
+        self.previous_logical = self.logical_line
+
+    def check_all(self, expected=None, line_offset=0):
+        """
+        Run all checks on the input file.
+        """
+        self.expected = expected or ()
+        self.line_offset = line_offset
+        self.line_number = 0
+        self.file_errors = 0
+        self.indent_char = None
+        self.indent_level = 0
+        self.previous_logical = ''
+        self.blank_lines = 0
+        self.blank_lines_before_comment = 0
+        self.tokens = []
+        parens = 0
+        for token in tokenize.generate_tokens(self.readline_check_physical):
+            if options.verbose >= 3:
+                if token[2][0] == token[3][0]:
+                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
+                else:
+                    pos = 'l.%s' % token[3][0]
+                print('l.%s\t%s\t%s\t%r' %
+                    (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
+            self.tokens.append(token)
+            token_type, text = token[0:2]
+            if token_type == tokenize.OP and text in '([{':
+                parens += 1
+            if token_type == tokenize.OP and text in '}])':
+                parens -= 1
+            if token_type == tokenize.NEWLINE and not parens:
+                self.check_logical()
+                self.blank_lines = 0
+                self.blank_lines_before_comment = 0
+                self.tokens = []
+            if token_type == tokenize.NL and not parens:
+                if len(self.tokens) <= 1:
+                    # The physical line contains only this token.
+                    self.blank_lines += 1
+                self.tokens = []
+            if token_type == tokenize.COMMENT:
+                source_line = token[4]
+                token_start = token[2][1]
+                if source_line[:token_start].strip() == '':
+                    self.blank_lines_before_comment = max(self.blank_lines,
+                        self.blank_lines_before_comment)
+                    self.blank_lines = 0
+                if text.endswith('\n') and not parens:
+                    # The comment also ends a physical line.  This works around
+                    # Python < 2.6 behaviour, which does not generate NL after
+                    # a comment which is on a line by itself.
+                    self.tokens = []
+        return self.file_errors
+
+    def report_error(self, line_number, offset, text, check):
+        """
+        Report an error, according to options.
+        """
+        code = text[:4]
+        if ignore_code(code):
+            return
+        if options.quiet == 1 and not self.file_errors:
+            message(self.filename)
+        if code in options.counters:
+            options.counters[code] += 1
+        else:
+            options.counters[code] = 1
+            options.messages[code] = text[5:]
+        if options.quiet or code in self.expected:
+            # Don't care about expected errors or warnings
+            return
+        self.file_errors += 1
+        if options.counters[code] == 1 or options.repeat:
+            message("%s:%s:%d: %s" %
+                    (self.filename, self.line_offset + line_number,
+                     offset + 1, text))
+            if options.show_source:
+                line = self.lines[line_number - 1]
+                message(line.rstrip())
+                message(' ' * offset + '^')
+            if options.show_pep8:
+                message(check.__doc__.lstrip('\n').rstrip())
+
+
+def input_file(filename):
+    """
+    Run all checks on a Python source file.
+    """
+    if options.verbose:
+        message('checking ' + filename)
+    errors = Checker(filename).check_all()
+
+
+def input_dir(dirname, runner=None):
+    """
+    Check all Python source files in this directory and all subdirectories.
+    """
+    dirname = dirname.rstrip('/')
+    if excluded(dirname):
+        return
+    if runner is None:
+        runner = input_file
+    for root, dirs, files in os.walk(dirname):
+        if options.verbose:
+            message('directory ' + root)
+        options.counters['directories'] += 1
+        dirs.sort()
+        # iterate over a copy: removing entries from 'dirs' while iterating
+        # over it would skip the directory following each excluded one
+        for subdir in dirs[:]:
+            if excluded(subdir):
+                dirs.remove(subdir)
+        files.sort()
+        for filename in files:
+            if filename_match(filename) and not excluded(filename):
+                options.counters['files'] += 1
+                runner(os.path.join(root, filename))
+
+
+def excluded(filename):
+    """
+    Check if options.exclude contains a pattern that matches filename.
+    """
+    basename = os.path.basename(filename)
+    for pattern in options.exclude:
+        if fnmatch(basename, pattern):
+            # print basename, 'excluded because it matches', pattern
+            return True
+
+
+def filename_match(filename):
+    """
+    Check if options.filename contains a pattern that matches filename.
+    If options.filename is unspecified, this always returns True.
+    """
+    if not options.filename:
+        return True
+    for pattern in options.filename:
+        if fnmatch(filename, pattern):
+            return True
+
+
+def ignore_code(code):
+    """
+    Check if options.ignore contains a prefix of the error code.
+    If options.select contains a prefix of the error code, do not ignore it.
+    """
+    for select in options.select:
+        if code.startswith(select):
+            return False
+    for ignore in options.ignore:
+        if code.startswith(ignore):
+            return True
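+
+# Hypothetical example of the precedence described above (option values made
+# up for the example): an explicitly selected prefix wins over an ignored one.
+#
+#     options.select = ['E4']
+#     options.ignore = ['E']
+#     ignore_code('E401')   # -> False: 'E4' is explicitly selected
+#     ignore_code('E201')   # -> True:  only the ignored 'E' prefix matches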
+
+
+def reset_counters():
+    for key in list(options.counters.keys()):
+        if key not in BENCHMARK_KEYS:
+            del options.counters[key]
+    options.messages = {}
+
+
+def get_error_statistics():
+    """Get error statistics."""
+    return get_statistics("E")
+
+
+def get_warning_statistics():
+    """Get warning statistics."""
+    return get_statistics("W")
+
+
+def get_statistics(prefix=''):
+    """
+    Get statistics for message codes that start with the prefix.
+
+    prefix='' matches all errors and warnings
+    prefix='E' matches all errors
+    prefix='W' matches all warnings
+    prefix='E4' matches all errors that have to do with imports
+    """
+    stats = []
+    keys = list(options.messages.keys())
+    keys.sort()
+    for key in keys:
+        if key.startswith(prefix):
+            stats.append('%-7s %s %s' %
+                         (options.counters[key], key, options.messages[key]))
+    return stats
+
+
+def get_count(prefix=''):
+    """Return the total count of errors and warnings."""
+    keys = list(options.messages.keys())
+    count = 0
+    for key in keys:
+        if key.startswith(prefix):
+            count += options.counters[key]
+    return count
+
+
+def print_statistics(prefix=''):
+    """Print overall statistics (number of errors and warnings)."""
+    for line in get_statistics(prefix):
+        print(line)
+
+
+def print_benchmark(elapsed):
+    """
+    Print benchmark numbers.
+    """
+    print('%-7.2f %s' % (elapsed, 'seconds elapsed'))
+    for key in BENCHMARK_KEYS:
+        print('%-7d %s per second (%d total)' % (
+            options.counters[key] / elapsed, key,
+            options.counters[key]))
+
+
+def run_tests(filename):
+    """
+    Run all the tests from a file.
+
+    A test file can provide many tests.  Each test starts with a declaration.
+    This declaration is a single line starting with '#:'.
+    It declares the codes of the expected failures, separated by spaces,
+    or 'Okay' if no failure is expected.
+    If the file does not contain such a declaration, it should pass all checks.
+    If the declaration is empty, the following lines are not checked until the
+    next declaration.
+
+    Examples:
+
+     * Only E224 and W701 are expected:         #: E224 W701
+     * The following example is conformant:     #: Okay
+     * Don't check these lines:                 #:
+    """
+    lines = readlines(filename) + ['#:\n']
+    line_offset = 0
+    codes = ['Okay']
+    testcase = []
+    for index, line in enumerate(lines):
+        if not line.startswith('#:'):
+            if codes:
+                # Collect the lines of the test case
+                testcase.append(line)
+            continue
+        if codes and index > 0:
+            label = '%s:%s:1' % (filename, line_offset + 1)
+            codes = [c for c in codes if c != 'Okay']
+            # Run the checker
+            errors = Checker(filename, testcase).check_all(codes, line_offset)
+            # Check if the expected errors were found
+            for code in codes:
+                if not options.counters.get(code):
+                    errors += 1
+                    message('%s: error %s not found' % (label, code))
+            if options.verbose and not errors:
+                message('%s: passed (%s)' % (label, ' '.join(codes)))
+            # Keep showing errors for multiple tests
+            reset_counters()
+        # output the real line numbers
+        line_offset = index
+        # configure the expected errors
+        codes = line.split()[1:]
+        # empty the test case buffer
+        del testcase[:]
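+
+# A hypothetical test file in the '#:' format described above could look like
+# this (content made up for the example):
+#
+#     #: E401
+#     import os, sys
+#     #: Okay
+#     import os
+#     import sys
+#     #:
+#     x=1  # not checked until the next '#:' declaration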
+
+
+def selftest():
+    """
+    Test all check functions with test cases in docstrings.
+    """
+    count_passed = 0
+    count_failed = 0
+    checks = options.physical_checks + options.logical_checks
+    for name, check, argument_names in checks:
+        for line in check.__doc__.splitlines():
+            line = line.lstrip()
+            match = SELFTEST_REGEX.match(line)
+            if match is None:
+                continue
+            code, source = match.groups()
+            checker = Checker(None)
+            for part in source.split(r'\n'):
+                part = part.replace(r'\t', '\t')
+                part = part.replace(r'\s', ' ')
+                checker.lines.append(part + '\n')
+            options.quiet = 2
+            checker.check_all()
+            error = None
+            if code == 'Okay':
+                if len(options.counters) > len(BENCHMARK_KEYS):
+                    codes = [key for key in options.counters.keys()
+                             if key not in BENCHMARK_KEYS]
+                    error = "incorrectly found %s" % ', '.join(codes)
+            elif not options.counters.get(code):
+                error = "failed to find %s" % code
+            # Reset the counters
+            reset_counters()
+            if not error:
+                count_passed += 1
+            else:
+                count_failed += 1
+                if len(checker.lines) == 1:
+                    print("pep8.py: %s: %s" %
+                          (error, checker.lines[0].rstrip()))
+                else:
+                    print("pep8.py: %s:" % error)
+                    for line in checker.lines:
+                        print(line.rstrip())
+    if options.verbose:
+        print("%d passed and %d failed." % (count_passed, count_failed))
+        if count_failed:
+            print("Test failed.")
+        else:
+            print("Test passed.")
+
+
+def process_options(arglist=None):
+    """
+    Process options passed either via arglist or via command line args.
+    """
+    global options, args
+    parser = OptionParser(version=__version__,
+                          usage="%prog [options] input ...")
+    parser.add_option('-v', '--verbose', default=0, action='count',
+                      help="print status messages, or debug with -vv")
+    parser.add_option('-q', '--quiet', default=0, action='count',
+                      help="report only file names, or nothing with -qq")
+    parser.add_option('-r', '--repeat', action='store_true',
+                      help="show all occurrences of the same error")
+    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
+                      help="exclude files or directories which match these "
+                        "comma separated patterns (default: %s)" %
+                        DEFAULT_EXCLUDE)
+    parser.add_option('--filename', metavar='patterns', default='*.py',
+                      help="when parsing directories, only check filenames "
+                        "matching these comma separated patterns (default: "
+                        "*.py)")
+    parser.add_option('--select', metavar='errors', default='',
+                      help="select errors and warnings (e.g. E,W6)")
+    parser.add_option('--ignore', metavar='errors', default='',
+                      help="skip errors and warnings (e.g. E4,W)")
+    parser.add_option('--show-source', action='store_true',
+                      help="show source code for each error")
+    parser.add_option('--show-pep8', action='store_true',
+                      help="show text of PEP 8 for each error")
+    parser.add_option('--statistics', action='store_true',
+                      help="count errors and warnings")
+    parser.add_option('--count', action='store_true',
+                      help="print total number of errors and warnings "
+                        "to standard error and set exit code to 1 if "
+                        "total is not zero")
+    parser.add_option('--benchmark', action='store_true',
+                      help="measure processing speed")
+    parser.add_option('--testsuite', metavar='dir',
+                      help="run regression tests from dir")
+    parser.add_option('--doctest', action='store_true',
+                      help="run doctest on myself")
+    options, args = parser.parse_args(arglist)
+    if options.testsuite:
+        args.append(options.testsuite)
+    if not args and not options.doctest:
+        parser.error('input not specified')
+    options.prog = os.path.basename(sys.argv[0])
+    options.exclude = options.exclude.split(',')
+    for index in range(len(options.exclude)):
+        options.exclude[index] = options.exclude[index].rstrip('/')
+    if options.filename:
+        options.filename = options.filename.split(',')
+    if options.select:
+        options.select = options.select.split(',')
+    else:
+        options.select = []
+    if options.ignore:
+        options.ignore = options.ignore.split(',')
+    elif options.select:
+        # Ignore all checks which are not explicitly selected
+        options.ignore = ['']
+    elif options.testsuite or options.doctest:
+        # For doctest and testsuite, all checks are required
+        options.ignore = []
+    else:
+        # The default choice: ignore controversial checks
+        options.ignore = DEFAULT_IGNORE.split(',')
+    options.physical_checks = find_checks('physical_line')
+    options.logical_checks = find_checks('logical_line')
+    options.counters = dict.fromkeys(BENCHMARK_KEYS, 0)
+    options.messages = {}
+    return options, args
+
+
+def _main():
+    """
+    Parse options and run checks on Python source.
+    """
+    options, args = process_options()
+    if options.doctest:
+        import doctest
+        doctest.testmod(verbose=options.verbose)
+        selftest()
+    if options.testsuite:
+        runner = run_tests
+    else:
+        runner = input_file
+    start_time = time.time()
+    for path in args:
+        if os.path.isdir(path):
+            input_dir(path, runner=runner)
+        elif not excluded(path):
+            options.counters['files'] += 1
+            runner(path)
+    elapsed = time.time() - start_time
+    if options.statistics:
+        print_statistics()
+    if options.benchmark:
+        print_benchmark(elapsed)
+    count = get_count()
+    if count:
+        if options.count:
+            sys.stderr.write(str(count) + '\n')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    _main()
diff --git a/Tools/PyUtils/bin/pkgco.py b/Tools/PyUtils/bin/pkgco.py
new file mode 100755
index 00000000000..bf4a548ea6c
--- /dev/null
+++ b/Tools/PyUtils/bin/pkgco.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+#
+# @file:    pkgco.py
+# @purpose: Checkout a given package. Find container names and release tag
+#           if not explicitly given (inspired by BaBar's 'addpkg' command).
+# @author:  Frank Winklmeier
+#
+# $Id: pkgco,v 1.4 2009/03/25 14:25:46 fwinkl Exp $
+
+__version__ = "$Revision: 1.4 $"
+__author__  = "Frank Winklmeier"
+
+import sys
+import os
+import getopt
+import string
+import subprocess
+from PyCmt import Cmt
+
+try:
+   import multiprocessing as mp
+except ImportError:
+   mp = None
+
+cmt = Cmt.CmtWrapper()
+
+def usage():
+   print """\
+Usage: pkgco.py [OPTION]... PACKAGE...
+
+Checkout PACKAGE from the release. Possible formats:
+  Package                  find container and checkout tag in release
+  Package-X-Y-Z            find container and checkout specified tag
+  Container/Package        checkout tag in release
+  Container/Package-X-Y-Z  checkout specified tag
+
+where OPTION is:
+  -A    checkout the HEAD/trunk of the package(s)
+  -f    FILE contains PACKAGE list (one per line)
+  -s    only show package version, no checkout
+  -r    show most recent version, no checkout
+"""
+   return
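+
+# Hypothetical invocations matching the formats listed above (package names
+# are made up for the example):
+#
+#   pkgco.py MyPackage                   # find container, checkout tag in release
+#   pkgco.py MyPackage-00-01-02          # find container, checkout that tag
+#   pkgco.py -A MyContainer/MyPackage    # checkout the HEAD/trunk
+#   pkgco.py -s MyPackage                # only show the package version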
+
+def findPkg(pkg):
+  """Find package version in release."""
+
+  cmtPkg = cmt.find_pkg(name=pkg)
+  
+  if cmtPkg:
+    return os.path.join(cmtPkg.path,cmtPkg.name)
+  else:
+    raise RuntimeError, "Package '%s' does not exist" % pkg         
+
+
+def svn_tag_equals_trunk(pkg,tag):
+   """Do an SVN diff of pkg/tag with trunk
+
+   Return: True/False
+   """
+
+   env = dict(os.environ)
+   
+   svnroot = env.get("SVNROOT", None)
+   if svnroot is None:
+      raise RuntimeError, "SVNROOT is not set"
+
+   pkg_url = os.path.join(svnroot, pkg)
+   if pkg.startswith('Gaudi'):
+      env['GAUDISVN'] = env.get('GAUDISVN',
+                                'http://svnweb.cern.ch/guest/gaudi')
+      svnroot = env['SVNROOT'] = '${GAUDISVN}/Gaudi'
+      env['SVNTRUNK'] = 'trunk'
+      env['SVNTAGS'] = 'tags'
+      tag_url = '/'.join([svnroot, 'tags', pkg])
+      trunk_url = '/'.join([svnroot, 'trunk', pkg])
+   else:
+      tag_url = '/'.join([svnroot, pkg, 'tags', tag])
+      trunk_url= '/'.join([svnroot, pkg, 'trunk'])
+      pass
+   cmd = 'svn diff %(tag_url)s %(trunk_url)s' % {'tag_url':tag_url,
+                                                 'trunk_url':trunk_url}
+
+   p = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell=True, env=env)
+   stdout,stderr = p.communicate()
+   if stderr!=None:
+      print stderr
+      return False
+   
+   return len(stdout)==0
+
+      
+def checkout(pkg, head, doCheckOut=True, showRecent=False):
+   """Checkout one package."""
+   
+   tag = ""
+   # If "-" in name, tag was given
+   if pkg.find('-') != -1:
+      tag = pkg.split('/')[-1]   # remove container packages
+      pkg = pkg.split('-',1)[0]  # package name
+      
+   # If no "/" in name, need to find full package path
+   if pkg.find('/')==-1: pkg = findPkg(pkg)      
+
+   # Remove leading "/" for CMT checkout
+   pkg = string.lstrip(pkg, "/")
+
+   # special treatment of Gaudi packages...
+   if pkg.startswith('Gaudi'):
+      env = dict(os.environ)
+      env['GAUDISVN'] = env.get('GAUDISVN',
+                                'http://svnweb.cern.ch/guest/gaudi')
+      env['SVNROOT'] = '%(GAUDISVN)s/Gaudi' % env
+      env['SVNTRUNK'] = 'trunk'
+      env['SVNTAGS'] = 'tags'
+
+      env['pkg'] = pkg
+      if head:
+         cmd = 'svn co %(SVNROOT)s/%(SVNTRUNK)s/%(pkg)s %(pkg)s' % env
+      else:
+         if len(tag)==0:
+            tag = cmt.get_pkg_version(pkg)      
+            if tag is None:
+               raise RuntimeError, "Could not find any tag for '%s'" % pkg
+         env['tag'] = tag
+         cmd = 'svn co %(SVNROOT)s/%(SVNTAGS)s/%(pkg)s/%(tag)s %(pkg)s' % env
+         pass
+      #print ">>> [%s]" % cmd
+      if doCheckOut:
+         subprocess.check_call(cmd,
+                               shell=True,
+                               env=env)
+         subprocess.check_call("cd %(pkg)s/cmt && cmt config" % env,
+                               shell=True,
+                               env=env)
+         
+      else:
+         msg = "%s %s" % (tag,pkg)
+         if (showRecent):
+            headversion = cmt.get_latest_pkg_tag(pkg)
+            if not (headversion is None):
+               istrunk = svn_tag_equals_trunk(pkg,headversion)
+            else:
+               istrunk = False
+               headversion="NONE"
+
+            msg += "  (most recent %s %s trunk)" % (headversion, "==" if istrunk else "!=")
+
+
+         print msg
+      return
+   
+   if head:
+      cmd = "cmt co %r" % pkg
+      subprocess.check_call(cmd, shell=True)
+      return
+   
+   if len(tag)==0:
+      tag = cmt.get_pkg_version(pkg)      
+      if tag is None:
+         raise RuntimeError, "Could not find any tag for '%s'" % pkg
+
+   if doCheckOut:
+      subprocess.check_call("cmt co -r %s %s" % (tag,pkg),
+                            shell=True)
+   else:
+      msg = "%s %s" % (tag,pkg)
+      if (showRecent):
+         headversion = cmt.get_latest_pkg_tag(pkg)
+         if headversion!=None:
+            istrunk = svn_tag_equals_trunk(pkg,headversion)
+         else:
+            istrunk = False
+            headversion="NONE"
+
+         msg += "  (most recent %s %s trunk)" % (headversion, "==" if istrunk else "!=")
+
+            
+      print msg
+      
+   return
+   
+def safe_checkout(args):
+   try:
+      checkout(*args)
+   except RuntimeError, e:
+      print e
+      
+def main():
+
+   try:
+      opts,args = getopt.gnu_getopt(sys.argv[1:], "hAsrf:v", ["help","version"])
+   except getopt.GetoptError, e:
+      print e
+      usage()
+      return 1
+
+   # Parse command line
+   head = False
+   pkgFile = None
+   doCheckOut = True
+   showRecent = False
+   for o,a in opts:
+      if o == "-A":
+         head = True
+      elif o == "-f":
+         pkgFile = a
+      elif o == "-s":
+         doCheckOut = False
+      elif o == "-r":
+         showRecent = True
+         doCheckOut = False
+      elif o in ("-h", "--help"):
+         usage()
+         return 0
+      elif o in ("-v", "--version"):
+         print __version__.strip("$")
+         return 0
+      
+   if (pkgFile is None) and (len(args)==0):
+      usage()
+      return 1
+
+   # Read optional file with package tags
+   pkgList = args
+   if pkgFile:
+      try:
+         f = open(pkgFile)
+      except IOError:
+         print "Cannot open file '%s'." % pkgFile
+         return 2
+         
+      for line in f: pkgList.append(line.strip())
+
+   # Checkout packages
+   args = zip(pkgList,
+              [head]*len(pkgList),
+              [doCheckOut]*len(pkgList),
+              [showRecent]*len(pkgList))
+
+   # allow processing multiple packages in parallel
+   if mp and len(pkgList)>1 and doCheckOut:
+      print "enabling parallel checkout..."
+      pool = mp.Pool()
+      res = pool.map_async(safe_checkout, args)
+      res.get()
+   else:
+      map(safe_checkout, args)
+
+   return 0
+
+if __name__ == "__main__":
+   try:   
+      sys.exit(main())
+   except RuntimeError, e:
+      print e
+      sys.exit(1)
+   except KeyboardInterrupt:
+      sys.exit(1)
+      
diff --git a/Tools/PyUtils/bin/pool_extractFileIdentifier.py b/Tools/PyUtils/bin/pool_extractFileIdentifier.py
new file mode 100755
index 00000000000..47c784db058
--- /dev/null
+++ b/Tools/PyUtils/bin/pool_extractFileIdentifier.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    pool_extractFileIdentifier.py
+# @purpose: extract the GUID of a POOL file.
+#           Also greatly speeds up processing time by shrinking LD_LIBRARY_PATH
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    March 2009
+#
+# @example:
+#
+# python pool_extractFileIdentifier.py aod.pool.root
+#
+# if pool_extractFileIdentifier.py has been made 'chmod +x' one can just do:
+# ./pool_extractFileIdentifier.py aod.pool.root
+
+from __future__ import with_statement
+
+def pool_extract(files):
+    print ":: extracting GUID for [%i] files... "% len(files)
+    import os, sys
+    import commands
+    sc,exe = commands.getstatusoutput('which pool_extractFileIdentifier')
+    if sc != 0:
+        print ":: could not find 'pool_extractFileIdentifier' !"
+        print exe
+        return 1
+
+    import PyUtils.Helpers as H
+    with H.restricted_ldenviron(projects=('AtlasCore',)):
+        
+        cmd = "%s %s" % (exe, " ".join(files))
+        sc, out = commands.getstatusoutput(cmd)
+
+    out = os.linesep.join(
+        [o for o in out.splitlines()
+         if not (o.startswith("Warning in <TClass::TClass>: no dictionary for class ") or
+                 o.startswith('Warning in <TEnvRec::ChangeValue>: duplicate entry'))]
+        )
+
+    if sc != 0:
+        print ":: problem running pool_extractFileIdentifier:"
+        print out
+        return sc
+
+    print out
+    print ":: extracting GUID for [%i] files... [done]" % len(files)
+    return sc
+    
+if __name__ == "__main__":
+    import sys
+    from optparse import OptionParser
+    parser = OptionParser(usage="%prog file1.pool [file2.pool [...]]")
+    parser.add_option("-f", "--files",
+                      dest = "files",
+                      help = "(list of) files to extract the GUID(s) from")
+    options, args = parser.parse_args()
+
+    files = list()
+    if len(args) > 0:
+        files = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.files is None and len(files) == 0:
+        str(parser.print_help() or "")
+        print ":: You have to provide at least one POOL file to extract a GUID from:"
+        print " shell> pool_extractFileIdentifier.py aod.pool"
+        sys.exit(1)
+
+    if not (options.files is None):
+        import os
+        for f in options.files.split():
+            f = os.path.expandvars(os.path.expanduser(f))
+            files.append(f)
+
+    sys.exit(pool_extract(files=files))
diff --git a/Tools/PyUtils/bin/pool_insertFileToCatalog.py b/Tools/PyUtils/bin/pool_insertFileToCatalog.py
new file mode 100755
index 00000000000..5aa4b96e46f
--- /dev/null
+++ b/Tools/PyUtils/bin/pool_insertFileToCatalog.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    pool_insertFileToCatalog.py
+# @purpose: insert a POOL file into a POOL file catalog without displaying the
+#           annoying ROOT warnings for missing dictionaries
+#           Also greatly speeds up processing time by shrinking LD_LIBRARY_PATH
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    May 2006
+#
+# @example:
+#
+# python pool_insertFileToCatalog.py aod.pool.root
+#
+# if pool_insertFileToCatalog.py has been made 'chmod +x' one can just do:
+# ./pool_insertFileToCatalog.py aod.pool.root
+from __future__ import with_statement
+
+def pool_insert(files, catalog_name="xmlcatalog_file:PoolFileCatalog.xml"):
+    print ":: inserting [%i] files into pool catalog... (%s)"%(
+        len (files),
+        catalog_name
+        )
+    import os, sys
+    import commands
+    sc,exe = commands.getstatusoutput ('which pool_insertFileToCatalog')
+    if sc != 0:
+        print ":: could not find 'pool_insertFileToCatalog' !"
+        print exe
+        return 1
+
+    import PyUtils.Helpers as H
+    with H.restricted_ldenviron(projects=('AtlasCore',)):
+        os.environ['POOL_CATALOG'] = catalog_name
+        cmd = "%s %s" % (exe, " ".join(files))
+        sc, out = commands.getstatusoutput (cmd)
+        
+    out = os.linesep.join(
+        [o for o in out.splitlines()
+         if not (o.startswith("Warning in <TClass::TClass>: no dictionary for class ") or
+                 o.startswith('Warning in <TEnvRec::ChangeValue>: duplicate entry'))]
+        )
+
+    if sc != 0:
+        print ":: problem running pool_insertFileToCatalog:"
+        print out
+        return 2
+
+    print out
+    print ":: inserting [%i] files into pool catalog... [done]"%len(files)
+    return sc
+    
+if __name__ == "__main__":
+    import sys
+    from optparse import OptionParser
+    parser = OptionParser(usage="%prog file1.pool [file2.pool [...]] [--catalog='xmlcatalog_file:PoolFileCatalog.xml']")
+    parser.add_option("-f", "--files",
+                      dest = "files",
+                      help = "(list of) files to be inserted in the catalog")
+    parser.add_option("--catalog",
+                      dest = "catalog",
+                      default = "xmlcatalog_file:PoolFileCatalog.xml",
+                      help = "catalog connection string")
+    options, args = parser.parse_args()
+
+    files = list()
+    if len(args) > 0:
+        files = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    if options.files is None and len(files) == 0:
+        str(parser.print_help() or "")
+        print ":: You have to provide at least one POOL file to insert:"
+        print " shell> pool_insertFileToCatalog.py aod.pool"
+        sys.exit(1)
+
+    if not (options.files is None):
+        import os
+        for f in options.files.split():
+            f = os.path.expandvars(os.path.expanduser(f))
+            files.append(f)
+
+    sc = pool_insert(files=files, catalog_name=options.catalog)
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/print_auditor_callgraph.py b/Tools/PyUtils/bin/print_auditor_callgraph.py
new file mode 100755
index 00000000000..699c555145f
--- /dev/null
+++ b/Tools/PyUtils/bin/print_auditor_callgraph.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    print_auditor_callgraph.py
+# @purpose: print the callgraph sequence of a job stage (ini/exe/fin), parsing
+#           the logfile of a job run with NameAuditor enabled.
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    May 2008
+#
+# @example:
+# @code
+# print_auditor_callgraph recexcommon.log
+# @endcode
+#
+
+__version__ = "$Revision: 1.1 $"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import os
+import re
+import sys
+
+class Steps:
+    ini = 'Initialization'
+    exe = 'Execute'
+    fin = 'Finalization'
+    ALLOWED = ('ini', 'exe', 'fin')
+    
+def parse_log_file(fname, step=Steps.ini):
+    beg_pat = re.compile(r"NameAuditor.*?About to Enter "\
+                         r"(?P<CompName>.*?) "\
+                         r"%s Method"%step)
+    end_pat = re.compile(r"NameAuditor.*?Just Exited "\
+                         r"(?P<CompName>.*?) "\
+                         r"%s Method"%step)
+
+    stack = 0
+    graph = []
+
+    for l in open(fname, 'r'):
+        l = l.strip()
+        beg = re.match(beg_pat, l)
+        end = re.match(end_pat, l)
+        if not (beg or end):
+            continue
+
+        if beg:
+            component = beg.group('CompName')
+            #print "  "*stack,component
+            stack += 1
+            graph += [ (stack, component) ]
+
+        if end:
+            component = end.group('CompName')
+            #print "  "*stack,component
+            stack -= 1
+
+    return graph
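+
+# Sketch of the kind of NameAuditor log lines the patterns above are meant to
+# match (the exact message layout is assumed here and may differ between
+# releases):
+#
+#   NameAuditor       INFO About to Enter MyAlgorithm Initialization Method
+#   NameAuditor       INFO Just Exited MyAlgorithm Initialization Method
+#
+# Each matched "About to Enter" pushes one level onto the call graph and each
+# "Just Exited" pops it again.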
+
+
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        raise SystemExit(1, "You have to provide a path to a logfile to parse")
+    
+    fname = os.path.expandvars(os.path.expanduser(sys.argv[1]))
+    if not os.path.exists(fname):
+        raise SystemExit(
+            1, "You have to provide a VALID path to a logfile to parse")
+    
+    step = Steps.ini
+    if len(sys.argv) > 2:
+        step = sys.argv[2].lower()
+        if not step in Steps.ALLOWED:
+            raise SystemExit(
+                2, "Invalid step name [%s] allowed=%r"%(step, Steps.ALLOWED))
+
+        step = getattr(Steps,step)
+        
+    gr = parse_log_file(fname,step)
+    #print gr
+    for i in gr:
+        print ".."*(i[0]+1),i[1]
+    
diff --git a/Tools/PyUtils/bin/pyroot.py b/Tools/PyUtils/bin/pyroot.py
new file mode 100755
index 00000000000..7a38e127934
--- /dev/null
+++ b/Tools/PyUtils/bin/pyroot.py
@@ -0,0 +1,185 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+"exec" "`which python`" "-tt" "$0" "$@";
+
+# File: pyroot.py
+# Author: Sebastien Binet (binet@cern.ch)
+
+# This script allows you to run ROOT from python.
+# It has been heavily based (err... stolen) on athena.py from Wim
+
+import user
+
+__version__ = '$Revision$'
+__author__  = 'Sebastien Binet (binet@cern.ch)'
+__doc__     = 'For details about pyroot.py, run "less `which pyroot.py`"'
+
+import sys, os
+import getopt, string
+
+
+### recognized user options __________________________________________________
+_useropts = "bidc:hv"
+_userlongopts = [ "batch", "interactive", "no-display", "debug=", "command=",
+                  "help",  "version",]
+
+
+### explanation of the options -----------------------------------------------
+def _help_and_exit( reason = None ):
+   print """Accepted command line options:
+ -b, --batch                          ...  batch mode
+ -i, --interactive                    ...  interactive mode [default]
+     --no-display                     ...  prompt, but no graphics display
+ -c, --command                        ...  one-liner, runs before any scripts
+ -h, --help                           ...  print this help message
+ -v, --version                        ...  print version number
+ -,-- [arg1,...]                      ...  additional arguments passed directly 
+                                           to user scripts (left untouched)
+ [<file1>.py [<file2>.py [...]]]      ...  scripts to run"""
+
+   sys.exit( 1 )
+
+
+### option handling  ---------------------------------------------------------
+scripts,opts = [],[]
+runBatch = 0                     # batch mode is NOT the default
+display = None                   # only useful in interactive mode: no display
+command = ""                     # optional one-line command
+userOpts = []                    # left-over opts after '-'
+
+
+## emulated gnu getopt for p2.2: collect scripts and options
+for arg in sys.argv[1:]:
+   if arg[-3:] == ".py":
+      scripts.append( arg )
+   elif arg in ('-','--'):     # rest are user opts, save and done
+      userOpts += sys.argv[ sys.argv.index( arg )+1: ]
+      break
+   else:
+      opts.append( arg )
+
+## process user options
+try:
+   optlist, args = getopt.getopt( opts, _useropts, _userlongopts )
+except getopt.error:
+   print sys.exc_value
+   _help_and_exit()
+
+if args:
+   print "Unhandled arguments:", args
+   _help_and_exit()
+
+for opt, arg in optlist:
+   if opt in ("-b", "--batch"):
+      runBatch = 1
+   elif opt in ("-i", "--interactive"):
+      runBatch = 0
+      defOptions = ""
+      if display == None: display = 1
+   elif opt in ("--no-display",):
+      display = 0
+   elif opt in ("-c", "--command"):
+      command = string.strip( arg )
+   elif opt in ("-h", "--help"):
+      _help_and_exit()
+   elif opt in ("-v", "--version"):
+      print __version__
+      sys.exit(0)
+
+if optlist: del opt, arg
+del args, optlist, opts
+del _useropts, _userlongopts, string, getopt
+
+## for the benefit of PyROOT
+if not display and not '-b' in sys.argv:
+   sys.argv = sys.argv[:1] + ['-b'] + sys.argv[1:]
+del display
+
+
+### python interpreter configuration -----------------------------------------
+if not os.getcwd() in sys.path:
+   sys.path = [ os.getcwd() ] + sys.path
+
+if not '' in sys.path:
+   sys.path = [ '' ] + sys.path
+
+sys.ps1 = 'pyroot> '
+fhistory = os.path.expanduser( '~/.pyroot.history' )
+
+## interface setup as appropriate
+if runBatch:
+ # in batch there is no need for stdin
+   if os.isatty( sys.stdin.fileno() ):
+      os.close( sys.stdin.fileno() )
+   if 'PYTHONINSPECT' in os.environ:
+      os.unsetenv('PYTHONINSPECT')
+else:
+   os.environ['PYTHONINSPECT'] = '1'
+ # readline support
+   import rlcompleter, readline
+
+   readline.parse_and_bind( 'tab: complete' )
+   readline.parse_and_bind( 'set show-all-if-ambiguous On' )
+
+ # history support
+   if os.path.exists( fhistory ):
+      readline.read_history_file( fhistory )
+   readline.set_history_length( 1024 )
+
+   del readline, rlcompleter
+
+### ROOT & Cintex loading -----------------------------------------------------
+print sys.ps1+"loading ROOT..."
+import PyCintex
+PyCintex.Cintex.Enable()
+import ROOT
+print sys.ps1+"loading ROOT... [ok]"
+ROOT.gROOT.SetStyle("Plain")
+ROOT.gStyle.SetPalette(1)      # less ugly palette colors
+ROOT.gStyle.SetOptStat(111111) #
+print sys.ps1+"loaded pyroot style... [ok]"
+
+### execution ----------------------------------------------------------------
+if not runBatch:
+   import atexit, readline
+
+ # history support
+   atexit.register( readline.write_history_file, fhistory )
+   del readline, atexit
+
+del fhistory
+
+if command:
+   print sys.ps1+'executing CLI (-c) command: "%s"' % command
+   exec command
+del command
+
+for script in scripts:
+   try:
+      execfile( script )
+   except Exception, e:
+      if isinstance(e,SystemExit):
+         raise
+
+      import traceback
+      traceback.print_exc()
+
+      if runBatch:
+          import sys
+          sys.exit(2)
+
+    # for interactive: drop into prompt
+      break
+
+else:
+ ## only get here if all scripts successfully included
+   del scripts
+
+ ## in batch, then we are done: exit nicely
+   if runBatch:
+       import sys
+       sys.exit(0)
+   else:
+       # done, back to the user
+       print sys.ps1+"entering interactive session..."
+       pass
diff --git a/Tools/PyUtils/bin/setupWorkArea.py b/Tools/PyUtils/bin/setupWorkArea.py
new file mode 100755
index 00000000000..36acaa7c596
--- /dev/null
+++ b/Tools/PyUtils/bin/setupWorkArea.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+#
+# @file: setupWorkArea.py
+# @purpose: fill the stubs of the WorkArea package so that all the local
+#           CMT packages can be recompiled in one go.
+# @date: June 2006
+# @author: Sebastien Binet
+
+# /!\ Warning /!\
+# I am relying on the following assumption:
+#  - you have a working Release environment
+#     ==> a valid CMTPATH environment variable
+#     ==> python-2.4 (this needs to be addressed!!)
+
+# example0:
+# ./setupWorkArea.py
+# ==> will :
+#  - install a CMT package called WorkArea under the first directory found
+#    in the CMTPATH environment variable
+#  - put use statements for all the CMT packages (recursively) found under
+#    the first directory of the CMTPATH environment variable
+
+# example1:
+# ./setupWorkArea.py -i ~/Athena/dev -S ~/cmtSuppressList -w $CMTPATH
+# ==> will :
+#  - install a CMT package called WorkArea under the dir. ~/Athena/dev
+#  - put use statements for all the CMT packages (recursively) found under
+#    the ':' separated list of paths (=$CMTPATH)
+#  - and only if these packages are not found in the cmtSuppressList
+
+# example2:
+# ./setupWorkArea.py -s "['Foo']"
+# - install a CMT package called WorkArea under the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the first dir of $CMTPATH
+# - remove any 'use statement' for packages called "Foo"
+
+# example3:
+# ./setupWorkArea.py --suppress-list "['Foo']"
+# - install a CMT package called WorkArea under the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the first dir of $CMTPATH
+# - remove any 'use statement' for packages called "Foo"
+
+# example4:
+# ./setupWorkArea.py --suppress-list "['Foo']" --runtime Core
+# - install a CMT package called WorkArea under the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the first dir of $CMTPATH
+# - remove any 'use statement' for packages called "Foo"
+# - use only the AtlasCoreRunTime environment
+
+# example5:
+# ./setupWorkArea.py -r Core
+# - install a CMT package called WorkArea under the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the first dir of $CMTPATH
+# - use only the AtlasCoreRunTime environment
+
+# example6:
+# ./setupWorkArea.py -g (or --group-area)
+# - install a CMT package called WorkArea under the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the first dir of $CMTPATH
+# - put use statements for all the CMT packages (recursively) found under
+#   the $GroupArea environment variable
+# Note that one can specify the ':' separated list of Group areas directories:
+# --group-area=${SomeVariable}:${GroupArea}:${SomethingElse}
+# -g ${MyGroupArea}
+
+import sys
+import os
+import getopt
+import string
+
+from PyUtils.Logging import logging
+
+__version__ = "$Revision: 1.7 $"
+
+##########################
+# recognized user options
+##########################
+_useropts = 's:i:hl:S:w:r:gv'
+_userlongopts = [ 'suppress-list=',  'install-dir=',
+                  'help',            'loglevel=',
+                  'suppress-file=',
+                  'work-area=',
+                  'runtime=',
+                  'group-area',
+                  'version' ]
+
+def _usage():
+   print """Accepted command line options (CLI):
+   -s, --suppress-list <list> ...  list of package names to ignore.
+   -S, --suppress-file <file> ...  path to a file containing the suppress list.
+   -i, --install-dir <path>   ...  directory where to install the WorkArea pkg
+   -w, --work-area <dir1:d2>  ...  directories under which the packages for the
+                                   WorkArea pkg are installed.
+   -g, --group-area <dir1:d2> ...  directories under which the packages for the
+                                   GroupArea are looked for.
+                                   If no argument is given, it will try to
+                                   look for the $GroupArea environment
+                                   variable.
+   -r, --runtime <runtimePkg> ...  runtime package one wants to work with.
+                                   Default is AtlasOfflineRunTime.
+                                   Allowed values: core, event, conditions,
+                                                   simulation, reconstruction,
+                                                   trigger, analysis,
+                                                   production, point1,
+                                                   offline
+   -h, --help                 ...  print this help message
+   -l, --loglevel <level>     ...  logging level (DEBUG, INFO, WARNING, ERROR, FATAL)
+   -v, --version              ...  print version number
+   """
+   return
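+
+# Hypothetical examples of the two suppress formats accepted above (package
+# names are made up):
+#
+#   -s / --suppress-list takes a Python list literal, e.g.
+#       setupWorkArea.py -s "['Foo','Bar']"
+#
+#   -S / --suppress-file takes a plain text file with one package name per
+#   line, e.g. a file containing:
+#       Foo
+#       Bar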
+
+from PyUtils.WorkAreaLib import *
+
+def _processOptions( useropts, userlongopts ):
+
+    log = logging.getLogger("WorkAreaMgr")
+
+    runTimePkgAllowedValues = [ "core",
+                                "event",
+                                "conditions",
+                                "simulation",
+                                "reconstruction",
+                                "trigger",
+                                "analysis",
+                                "production",
+                                "point1",
+                                "tier0",
+                                "hlt",
+                                "offline",
+                                "manacore",
+                                ]
+    # defaults
+    workAreas  = []
+    installDir = None
+    runTimePkg = None # --> "offline" or what is in .asetup.save's [summary:AtlasProject]
+    suppressList = []
+    lvl = logging.INFO
+    
+    try:
+        optlist,args = getopt.getopt( sys.argv[1:],
+                                      useropts,
+                                      userlongopts )
+    except getopt.error:
+        log.error( "%s" % sys.exc_value )
+        _usage()
+        sys.exit(2)
+
+    for opt, arg in optlist:
+        if opt in ('-h', '--help' ):
+            _usage()
+            sys.exit()
+        elif opt in ('-v', '--version'):
+            print WORKAREA_VERSION
+            print "By Sebastien Binet"
+            sys.exit()
+        elif opt in ('-i', '--install-dir'):
+            installDir = os.path.expanduser( os.path.expandvars(arg) )
+        elif opt in ('-s', '--suppress-list'):
+            exec( 'suppressList += %s' % arg )
+            #suppressList = arg
+        elif opt in ('-S', '--suppress-file'):
+            suppressFileName = os.path.expanduser( os.path.expandvars(arg) )
+            if os.path.exists( suppressFileName ):
+                suppressFile = open( suppressFileName, 'r' )
+                for line in suppressFile.readlines():
+                    for l in line.splitlines():
+                        suppressList.append( l.strip() )
+                        pass
+                    pass
+                pass
+            else:
+                log.error("Could NOT access this file [%s]" % suppressFileName)
+                pass
+        elif opt in ('-w', '--work-area'):
+            workAreaDirs = os.path.expanduser( os.path.expandvars(arg) )
+            if workAreaDirs.count(os.pathsep) > 0:
+               workAreaDirs = workAreaDirs.split(os.pathsep)
+               pass
+            for workAreaDir in workAreaDirs:
+                if os.path.exists( workAreaDir ):
+                    if os.access(workAreaDir, os.R_OK):
+                        workAreas.append( os.path.abspath(workAreaDir) )
+                        pass
+                    else:
+                        log.error( "Can't read from [%s] !!" % workAreaDir )
+                        pass
+                    pass
+                else:
+                    log.error("Directory does NOT exist [%s] !" % workAreaDir)
+                    pass
+                pass
+        elif opt in ('-g', '--group-area'):
+           if len(arg) == 0:
+              arg = os.environ.get("GroupArea") or ""
+              pass
+           groupAreaDirs = os.path.expanduser( os.path.expandvars(arg) )
+           if groupAreaDirs.count(os.pathsep) > 0:
+              groupAreaDirs = groupAreaDirs.split(os.pathsep)
+              pass
+           for groupAreaDir in groupAreaDirs:
+              if os.path.exists( groupAreaDir ):
+                 if os.access(groupAreaDir, os.R_OK):
+                    workAreas.append( groupAreaDir )
+                    pass
+                 else:
+                    log.error( "Can't read from [%s] !!" % groupAreaDir )
+                    pass
+                 pass
+              else:
+                 log.error("Directory does NOT exist [%s] !" % groupAreaDir)
+                 pass
+              pass
+        elif opt in ('-r', '--runtime'):
+           if arg.lower() in runTimePkgAllowedValues:
+              runTimePkg = arg
+           else:
+              log.error( "Unknown runtime package [%s]" % arg )
+              log.error( "Must be one of: %s" % str(runTimePkgAllowedValues) )
+              pass
+        elif opt in ('-l', '--loglevel'):
+            lvl = string.upper( arg )
+            logLevel = getattr(logging, lvl)
+            log.setLevel(logLevel)
+            del lvl,logLevel
+            pass
+        else:
+            pass
+        pass
+
+    if runTimePkg is None:
+       # try to get it from .asetup.save
+       if os.path.exists('.asetup.save'):
+          import ConfigParser as _cp
+          cfg = _cp.SafeConfigParser()
+          try:
+             cfg.read(['.asetup.save'])
+          except _cp.ParsingError, err:
+             # the .asetup.save file does not, in general, conform to the MS Windows INI file syntax
+             log.debug('got these non-fatal parsing errors:\n%s' % err)
+          else:
+             if (cfg.has_section('summary') and
+                 cfg.has_option('summary', 'AtlasProject')):
+                try:
+                   v = cfg.get('summary', 'AtlasProject')
+                   v = v.lower()
+                   if v.startswith('atlas'):
+                      v = v[len('atlas'):]
+                      runTimePkg = v
+                      log.info('taking runtime package [%s] from .asetup.save',
+                               runTimePkg)
+                except Exception, err:
+                   log.info('got this non-fatal parsing error:\n%s' % err)
+                   log.info('taking runtime package [AtlasOffline] by default')
+                   runTimePkg = None # offline
+
+       # failing to determine runTimePkg,
+       # take it from env-var AtlasProject,
+       # or 'offline'
+
+    return workAreas, installDir, runTimePkg, suppressList
+    
+if __name__ == "__main__":
+
+    msg = logging.getLogger('WorkAreaMgr')
+    msg.setLevel(logging.INFO)
+    
+    ## process user options
+    workAreas,  installDir,  \
+    runTimePkg, suppressList = _processOptions( _useropts, _userlongopts )
+
+    createWorkArea( workAreas, installDir, runTimePkg, suppressList )
+    pass
diff --git a/Tools/PyUtils/bin/tabnanny-checker.py b/Tools/PyUtils/bin/tabnanny-checker.py
new file mode 100755
index 00000000000..33c0e99eaa4
--- /dev/null
+++ b/Tools/PyUtils/bin/tabnanny-checker.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file:    tabnanny-checker.py
+# @purpose: simple command-line utility wrapping tabnanny
+# @author:  Sebastien Binet <binet@cern.ch>
+# @date:    July 2009
+#
+# @example:
+# @code
+# tabnanny-checker
+# tabnanny-checker AtlasCore
+# @endcode
+#
+from __future__ import with_statement
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+import tabnanny
+import StringIO
+
+from optparse import OptionParser
+
+import PyCmt.Logging as _L
+import PyCmt.Cmt as PyCmt
+
+def process_path(p):
+    stdout = sys.stdout
+    output = ''
+    try:
+        sys.stdout = StringIO.StringIO()
+        tabnanny.check(p)
+        output = sys.stdout.getvalue()
+    finally:
+        sys.stdout = stdout
+    return output
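+
+# minimal usage sketch (hypothetical path): process_path captures tabnanny's
+# stdout report for a file or directory and returns it as a string, e.g.
+#   report = process_path('/some/package/python')
+#   if report: print report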
+
+if __name__ == "__main__":
+
+    parser = OptionParser(usage="usage: %prog [options] [--project] AtlasCore")
+    parser.add_option("--project",
+                      dest = "project_name",
+                      default=None,
+                      help = "The name of the project to inspect" )
+
+    sc = 0
+    report = []
+    
+    (options, args) = parser.parse_args()
+    msg = _L.logging.getLogger("TabNanny")
+    msg.setLevel(_L.logging.INFO)
+    msg.info('TabNanny checker')
+    
+    project_names = None
+    if len(args) > 0:
+        project_names = [ arg for arg in args if arg[0] != "-" ]
+        pass
+
+    cmt = PyCmt.CmtWrapper()
+    if options.project_name is None:
+        if project_names is None:
+            # take the top-root project name
+            options.project_name = cmt.projects_dag()[0].name
+        else:
+            # FIXME
+            # take the first one... 
+            options.project_name = str(project_names[0])
+
+    # also allow giving a filename directly: early exit...
+    import os.path as osp
+    if osp.exists(options.project_name):
+        path = options.project_name
+        msg.info(':'*80)
+        msg.info('checking [%s]...', path)
+        output = process_path(path)
+        if len(output) > 0:
+            lines = output.splitlines()
+            for l in lines:
+                print l
+                report.append(l)
+            sc = 1
+        else:
+            msg.info('all good.')
+
+    else:
+        # retrieve the list of project names on which the
+        # selected project depends
+        projects = cmt.project_deps(options.project_name)
+
+        for project_name in projects:
+            project = cmt.projects_tree()[project_name]
+            msg.info(':'*80)
+            msg.info('checking [%s]...', project.name)
+            output = process_path(project.path)
+            if len(output) > 0:
+                lines = output.splitlines()
+                report.append("::: [%s] ::: (%i pbs)" % (
+                    project_name, len(lines)))
+                for l in lines:
+                    print l
+                    report.append(l)
+                sc = 1
+            else:
+                msg.info('all good.')
+        pass
+    
+    with open('tabnanny.report.txt', 'w') as out:
+        print >> out, ':'*80
+        print >> out, "::: TabNanny report"
+        for l in report:
+            print >> out, l
+        print >> out, ':'*80
+        pass
+    
+    msg.info("Bye.")
+    sys.exit(sc)
diff --git a/Tools/PyUtils/bin/tcSubmitTag.py b/Tools/PyUtils/bin/tcSubmitTag.py
new file mode 100755
index 00000000000..43b8bb122b1
--- /dev/null
+++ b/Tools/PyUtils/bin/tcSubmitTag.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+acmd.py tc submit-tag "$@"
diff --git a/Tools/PyUtils/bin/vmem-sz.py b/Tools/PyUtils/bin/vmem-sz.py
new file mode 100755
index 00000000000..9790dd2eb94
--- /dev/null
+++ b/Tools/PyUtils/bin/vmem-sz.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+#@purpose: get the inclusive and exclusive vmem sizes of a library
+
+__author__ = "Sebastien Binet <binet@cern.ch>"
+__doc__    = "get the inclusive and exclusive vmem sizes of a library"
+__version__= "$Revision: 1.2 $"
+
+## std imports
+import argparse
+import ctypes
+import os
+import sys
+import user
+
+## 3rd-party imports
+from PyUtils.Decorators import forking as forking
+from PerfMonComps.PyMonUtils import loaded_libs, pymon
+
+_veto_libs = [
+    'resource.so', # from python std-lib 'resource'...
+    ]
+
+def lib_loader(libname):
+    return ctypes.cdll.LoadLibrary(libname)
+    
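+# note: the 'forking' decorator (from PyUtils.Decorators) runs each decorated
+# call in a forked child process and returns its result, so libraries loaded
+# during a measurement do not stay mapped in the parent process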
+@forking
+def load_lib (libname):
+    _,vmem0,_ = pymon()
+    lib = lib_loader (libname)
+    _,vmem1,_  = pymon()
+    libs = [l for l in loaded_libs()
+            if not os.path.basename(l) in _veto_libs and
+            os.access(l, os.R_OK)]
+    return (vmem0, vmem1, libs)
+
+@forking
+def dep_libs(libname):
+    """retrieve the list of dependencies of a given library
+    both direct and indirect dependencies
+    """
+    bkg_libs = [os.path.basename(l) for l in loaded_libs()]
+    bkg_libs = [l for l in bkg_libs if l not in _veto_libs]
+
+    _,_,libs = load_lib(libname)
+    libs = [os.path.basename(l) for l in libs]
+    return [l for l in libs if l not in bkg_libs and l!=libname]
+    
+@forking
+def analyze (libname):
+    bkg_libs = loaded_libs()
+    #print "::: bkg_libs:",map(os.path.basename, bkg_libs)
+    vmem0,vmem1,libs = load_lib (libname)
+    dVmemOffset = vmem1-vmem0
+    #print "vmem0=%s vmem1=%s libs=%s" % (vmem0, vmem1, len(libs))
+    linked_libs = [os.path.basename(lib)
+                   for lib in libs
+                   if (not (lib in bkg_libs) and
+                       os.path.basename(lib) != os.path.basename(libname)
+                       and os.access(lib, os.R_OK))]
+    #print "::: linked_libs:",linked_libs
+
+    # load all linked-in libs
+    def load_linked_in_libs(linked_libs):
+        all_good = True
+        for l in linked_libs:
+            try:
+                #print " - loading [%s]..." % l
+                lib_loader (l)
+            except Exception, err:
+                print "** problem loading [%s]\n%s" % (l,err)
+                all_good = False
+                pass
+        return all_good
+    #print "--> loading all linked-in libs..."
+    NRETRY = 10
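+    # retry a few times; the 'else' branch of this for-loop only runs if the
+    # loop completes without ever hitting 'break'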
+    for _ in xrange(NRETRY):
+        ok = load_linked_in_libs(linked_libs)
+        if ok:
+            break
+    else:
+        print "** err: could not reliably load all libs (after %s retries)"%NRETRY
+        
+    #print "--> loading all linked-in libs... [done]"
+
+    vmem0,vmem1,libs = load_lib (libname)
+    #print "vmem0=%s vmem1=%s libs=%s" % (vmem0, vmem1, len(libs))
+    dVmemLib = vmem1-vmem0
+    return (dVmemLib, dVmemOffset, len(linked_libs), len(bkg_libs))
+
+@forking
+def analyze_libraries (libnames, detailed=False):
+    display = []
+    _print = display.append
+    lib_stats = {}
+    for libname in libnames:
+        _print(":"*80)
+        bname = os.path.basename(libname)
+        _print(":: inspecting library: %s" % bname)
+        stats = analyze (libname)
+        _print("::  nbr linked: %s" % stats[2])
+        _print("::  dVmem-self: %8.3f Mb" % stats[0])
+        _print("::  dVmem-all : %8.3f Mb" % stats[1])
+        #print "::  bkg-libs: %s" % stats[3]
+        lib_stats[bname] = dict(
+            vmem_self=  stats[0],
+            vmem_all=   stats[1],
+            nbr_linked= stats[2],
+            )
+        if detailed:
+            import operator as _op
+            deps = dep_libs(libname)
+            dep_stats = dict((d, analyze(d)[:2]) for d in deps)
+            dep_vmem = dep_stats.items()
+            dep_vmem.sort(key=_op.itemgetter(1), reverse=True)
+            _print("::  deps: lib-self | lib-all:")
+            for k,v in dep_vmem:
+                _print("::   --> [%8.3f |%8.3f Mb] (%s)" % (v[0], v[1], k))
+            
+    return display,lib_stats
+
+def save_stats (lib_stats, fname=None):
+    if fname is None:
+        fname = "vmem-stats-sz.csv"
+    print ":: saving vmem statistics in [%s]..."%fname
+    import csv, os
+    if os.path.exists (fname):
+        os.remove (fname)
+    o = csv.writer (open(fname, "w"), delimiter=';')
+    map (o.writerow,
+         [ ['nbr libraries', len(lib_stats)],
+           ['lib name', 'dvmem-self (Mb)', 'dvmem-all (Mb)', 'nbr linked-libs'],
+           ])
+    map (o.writerow,
+         [ [os.path.basename(k), v['vmem_self'], v['vmem_all'], v['nbr_linked']]
+           for k,v in lib_stats.iteritems() ]
+         )
+    print ":: saving vmem statistics in [%s]... [done]"%fname
+    
+def main():
+    import sys
+    import os
+
+    parser = argparse.ArgumentParser(
+        description='get the inclusive and exclusive vmem sizes of a library'
+        )
+    _add = parser.add_argument
+    _add('libnames',
+         type=str,
+         nargs='+',
+         help='list of library names to be inspected')
+    _add('--detailed',
+         action='store_true',
+         default=False,
+         help='enable detailed output')
+    args = parser.parse_args()
+
+    libnames = args.libnames
+    doDetailed = args.detailed
+    
+    print ":: inspecting libraries: %s" % libnames
+    display,lib_stats = analyze_libraries (libnames, doDetailed)
+
+    for l in display:
+        print l
+    save_stats (lib_stats)
+    
+    return lib_stats
+
+if __name__ == '__main__':
+    lib_stats = main()
+    
diff --git a/Tools/PyUtils/cmt/requirements b/Tools/PyUtils/cmt/requirements
new file mode 100755
index 00000000000..181a8bc047d
--- /dev/null
+++ b/Tools/PyUtils/cmt/requirements
@@ -0,0 +1,90 @@
+package PyUtils
+
+author Sebastien Binet <binet@cern.ch>
+
+use AtlasPolicy AtlasPolicy-*
+use AtlasPython	AtlasPython-*	External -no_auto_imports
+use AtlasPyROOT	AtlasPyROOT-*	External -no_auto_imports
+use AtlasPyFwdBwdPorts          AtlasPyFwdBwdPorts-*        External    -no_auto_imports
+use PyCmt       PyCmt-*         Tools    -no_auto_imports
+use RootUtils   RootUtils-*     Control  -no_auto_imports
+
+branches python bin
+
+## some handy aliases
+alias checkFile     checkFile.py
+alias checkSG       checkSG.py
+alias checkxAOD     checkxAOD.py
+alias diffPoolFiles diffPoolFiles.py
+alias merge-poolfiles merge-poolfiles.py
+alias checkTag      checkTag.py
+alias setupWorkArea setupWorkArea.py
+alias pyroot        pyroot.py
+alias print_auditor_callgraph print_auditor_callgraph.py
+alias gen_klass     gen_klass.py
+alias build_cmt_pkg_db build_cmt_pkg_db.py
+alias diffConfigs   diffConfigs.py
+alias vmem-sz	    vmem-sz.py
+alias dso-stats	    dso-stats.py
+alias dump-athfile  dump-athfile.py
+alias pkgco         pkgco.py
+alias icython       icython.py
+alias tabnanny-checker tabnanny-checker.py
+alias get-tag-diff  get-tag-diff.py
+alias avn           avn.py
+alias abootstrap-wkarea abootstrap-wkarea.py
+alias tc-submit-tag tcSubmitTag.py
+alias tcSubmitTag   tcSubmitTag.py
+alias acmd          acmd.py
+alias diff-jobo-cfg diff-jobo-cfg.py
+alias filter-and-merge-d3pd filter-and-merge-d3pd.py
+alias diffTAGTree   diffTAGTree.py
+
+private
+
+apply_pattern declare_python_modules files="*.py AthFile scripts"
+apply_pattern declare_scripts files="\
+ -s=$(PyUtils_root)/bin \
+ abootstrap-wkarea.py \
+ acmd.py \
+ atl-gen-athena-d3pd-reader \
+ avn.py \
+ build_cmt_pkg_db.py \
+ checkFile.py \
+ checkPlugins.py \
+ checkSG.py \
+ checkTP.py \
+ checkTag.py \
+ checkxAOD.py \
+ cmtClients.py \
+ diff-athfile \
+ diff-jobo-cfg.py \
+ diffConfigs.py \
+ diffPoolFiles.py \
+ diffTAGTree.py \
+ dlldep.py \
+ dso-stats.py \
+ dump-athfile.py \
+ filter-and-merge-d3pd.py \
+ gen-typereg-dso.py \
+ gen_klass.py \
+ get-tag-diff.py \
+ getTagDiff.py \
+ gprof2dot \
+ icython.py \
+ lstags \
+ magnifyPoolFile.py \
+ merge-poolfiles.py \
+ pep8.py \
+ pkgco.py \
+ pool_extractFileIdentifier.py \
+ pool_insertFileToCatalog.py \
+ print_auditor_callgraph.py \
+ pyroot.py \
+ setupWorkArea.py \
+ tabnanny-checker.py \
+ tcSubmitTag.py \
+ vmem-sz.py \
+"
+
+end_private
diff --git a/Tools/PyUtils/doc/mainpage.h b/Tools/PyUtils/doc/mainpage.h
new file mode 100755
index 00000000000..fec2e2cc9a6
--- /dev/null
+++ b/Tools/PyUtils/doc/mainpage.h
@@ -0,0 +1,141 @@
+/*
+  Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+*/
+
+/**
+@mainpage PyUtils Package
+@author Sebastien Binet
+
+@section PyUtilsIntro Introduction
+
+This package holds various python classes and scripts of general use.
+
+@section PyUtilsOverview Class Overview
+  The PyUtils package contains the following classes:
+
+  - python::PoolFile::PoolFile : a python class to model and extract information about the content of a POOL file. The information is stored in a python::PoolFile::PoolRecord (memory and compressed size, name of the container, number of such items in that POOL file). A script (@c checkFile.py) is provided to easily inspect and dump the content of such POOL files:
+@code
+lxplus> checkFile.py --help
+lxplus> checkFile.py my.aod.pool
+@endcode
+
+  - python::Dso::DsoDb : a python class to gather and store information about the shared libraries (data shared objects, DSOs) an Athena release contains. Information about dictionaries and component factories is gathered from the <i>rootmap</i> files so that queries such as duplicate searches and library capabilities can be issued. A wrapper script (@c checkPlugins.py) is also provided:
+@code
+lxplus> checkPlugins.py --help
+lxplus> checkPlugins.py --check-dict-dups
+@endcode
+
+  - coverage.py : do coverage testing on a python module.  When you run
+regression tests on a python module, coverage.py will flag any lines
+that are not executed by the tests.  The suggested usage is to create
+a separate file for the regression tests.  That test file then ends
+with the lines
+@code
+from PyUtils import coverage
+c = coverage.Coverage ('MOD.NAME')
+c.doctest_cover ()
+@endcode
+where MOD.NAME is the source file that you are testing.
+Put your doctests in this regression test file (any doctests in the
+original source file will also be executed).  After the regression
+tests are run, if there are any lines in the source file that
+were not executed, a message will be printed, and a .cover
+file will be written that calls out the specific lines that
+were not executed.
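+
+For instance, assuming the regression tests live in a (hypothetical) file
+@c test_MyModule.py laid out as above, running it directly prints the missed
+lines and writes the .cover file:
+@code
+lxplus> python test_MyModule.py
+@endcode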
+
+@section PyUtilsScripts Scripts
+  The PyUtils package provides some useful scripts:
+
+  - @c checkFile.py : as mentioned above, this script can inspect a POOL file and dump its 'high level' content on screen. It will tell you what kind of containers have been stored (e.g. @c ElectronContainer, @c TruthParticleContainer,...) but it won't tell you more detailed properties (such as pt,eta distributions). That is however already useful, for example to quickly check that your algorithm did its job if it was supposed to process 100 events and create 100 @c MyZeeBosonContainer objects. <b>Ex:</b>
+@code
+lxplus> checkFile.py atlfast.aod.pool
+## opening file [atlfast.aod.pool]...
+## importing ROOT...
+## importing ROOT... [DONE]
+## opening file [OK]
+File:atlfast.aod.pool
+Size:    56164.401 kb
+Nbr Events: 2000
+
+================================================================================
+     Mem Size       Disk Size        Size/Evt      items  (X) Container Name (X=Tree|Branch)
+================================================================================
+    3662.681 kb      196.654 kb        0.098 kb     2000  (T) DataHeader
+--------------------------------------------------------------------------------
+      75.394 kb        1.835 kb        0.001 kb     2000  (B) MissingET_AtlfastMissingEt
+     117.521 kb        4.528 kb        0.002 kb     2000  (B) TruthParticleContainer_p5_SpclMC
+     430.735 kb       13.803 kb        0.007 kb     2000  (B) EventInfo_p2_McEventInfo
+     185.332 kb       22.253 kb        0.011 kb     2000  (B) INav4MomAssocs_p1_AtlfastMcAodAssocs
+     349.459 kb       32.290 kb        0.016 kb     2000  (B) PhotonContainer_p1_AtlfastPhotonCollection
+     411.476 kb       39.623 kb        0.020 kb     2000  (B) ElectronContainer_p1_AtlfastElectronCollection
+     664.601 kb       64.972 kb        0.032 kb     2000  (B) MuonContainer_p1_AtlfastNonIsoMuonCollection
+     687.872 kb       68.858 kb        0.034 kb     2000  (B) MuonContainer_p1_AtlfastMuonCollection
+    3076.368 kb      861.268 kb        0.431 kb     2000  (B) ParticleJetContainer_p1_AtlfastParticleJetContainer
+  138092.949 kb    53942.971 kb       26.971 kb     2000  (B) McEventCollection_p3_GEN_AOD
+================================================================================
+  147754.388 kb    55249.055 kb       27.625 kb     2000  TOTAL (POOL containers)
+================================================================================
+## Bye.
+@endcode
+
+  - @c magnifyPoolFile.py : even if @c checkFile.py is rather useful, it relies on ROOT to provide sensible information. It may happen however that (for ROOT technical reasons) the reported disk sizes are inaccurate. This ROOT shortcoming can be worked around via the @c magnifyPoolFile.py script, which creates a new POOL file whose content is the same as an input POOL file, only replicated many times. The information displayed by @c checkFile.py for this magnified POOL file should be more accurate.
+@code
+lxplus> magnifyPoolFile.py --help
+lxplus> magnifyPoolFile.py 100 my.input.pool magnified.pool
+lxplus> checkFile.py magnified.pool
+@endcode
+
+  - @c diffPoolFiles.py : a little script to compare the content of two POOL files. It checks that the two given POOL files have the same container names and, for each matching container name, that the memory sizes are the same.
+@code
+lxplus> diffPoolFiles.py --help
+lxplus> diffPoolFiles.py mc1.event.pool mc2.event.pool
+## opening file [mc1.event.pool]...
+## importing ROOT...
+## importing ROOT... [DONE]
+## opening file [OK]
+## opening file [mc2.event.pool]...
+## importing ROOT...
+## importing ROOT... [DONE]
+## opening file [OK]
+================================================================================
+::: Comparing POOL files...
+ chk : mc1.event.pool
+ ref : mc2.event.pool
+--------------------------------------------------------------------------------
+::: comparing common content (mem-size)...
+ [OK]      430.735 kb                 EventInfo_p2_McEventInfo
+ [OK]   196782.670 kb                 McEventCollection_p3_GEN_EVENT
+================================================================================
+## Comparison : [OK]
+@endcode
+
+  - @c cmtClients.py : a wrapper around the slow 'cmt show clients' command. It provides the same functionality as the CMT command, only much faster. It also allows you to check out all the clients from CVS in one go (useful to check that a change in your package does not break compilation of your clients):
+@code
+lxplus> cmtClients.py --help
+lxplus> cmtClients.py AthenaKernel
+PyCmt   : INFO     cmt show clients [AthenaKernel]
+PyCmt   : INFO     building dependencies...
+PyCmt   : INFO     projects used: ['AtlasCore', 'AtlasConditions', 'AtlasEvent', 'AtlasReconstruction', 'AtlasTrigger', 'AtlasAnalysis', 'AtlasSimulation', 'AtlasOffline', 'AtlasProduction']
+PyCmt   : INFO     building packages db...
+PyCmt   : INFO     building packages dependency tree...
+PyCmt   : INFO     => [PhysicsAnalysis/AnalysisCommon/AnalysisAssociation] (AnalysisAssociation-00-04-01)
+PyCmt   : INFO     => [PhysicsAnalysis/AnalysisCommon/AnalysisUtils] (AnalysisUtils-00-02-01)
+[...snip...]
+PyCmt   : INFO     Found [149] clients for [AthenaKernel]
+@endcode
+
+@ref used_PyUtils
+
+@ref requirements_PyUtils
+
+*/
+
+/**
+@page used_PyUtils Used Packages
+@htmlinclude used_packages.html
+*/
+
+/**
+@page requirements_PyUtils Requirements
+@include requirements
+*/
diff --git a/Tools/PyUtils/python/AmiLib.py b/Tools/PyUtils/python/AmiLib.py
new file mode 100644
index 00000000000..bbaa2e3efa0
--- /dev/null
+++ b/Tools/PyUtils/python/AmiLib.py
@@ -0,0 +1,524 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.AmiLib
+# @purpose a set of functions and helpers to talk to the TagCollector
+
+__version__ = "$Revision: 538932 $"
+__author__ = "Sebastien Binet"
+__doc__ = "a set of functions and helpers to talk to AMI and the TagCollector"
+
+__all__ = [
+    'ami_todict',
+    'Client',
+    ]
+
+if 0:
+    import xml.etree.cElementTree as ET
+    from pyAMI.pyAMI import *
+    amiclient = AMI(certAuth = True)
+
+    import PyUtils.xmldict as _x
+
+    clients_cmd = 'TCListPackageVersionClient  -processingStep=production -project=TagCollector   -groupName=AtlasOffline -releaseName=17.0.1 -fullPackageName=/AtlasTest/AthenaMPTest -repositoryName=AtlasOfflineRepository'.split(' ')
+    
+    rec_cmd = 'TCFormGetDependencyPackageVersionTree -expandedPackageID="*" -expandedTopContainerPackage="*" -groupName="AtlasProduction" -processingStep="production" -project="TagCollector" -releaseName="15.7.0"'.replace('"','').split(' ')
+    res = amiclient.execute(rec_cmd)
+    dd = _x.xml2dict(ET.fromstring(res.output('xml')))
+    dd['AMIMessage']['Result']['tree']
+
+
+    # all the leaf packages in AtlasProduction and its dependencies
+    cmd = """
+    TCFormGetDependencyPackageVersionTree -expandedPackageID=* 
+    -expandedTopContainerPackage=* -groupName=AtlasProduction 
+    -processingStep=production -project=TagCollector -releaseName=15.7.0
+    """.replace("\n","").split()
+    res = amiclient.execute(cmd)
+    d = _x.xml2dict(ET.fromstring(res.output('xml')))
+
+    # only the leaf packages in groupName="AtlasProduction"
+    cmd = """
+    TCFormGetPackageVersionTree -expandedPackageID='*' 
+    -expandedTopContainerPackage='*' -groupName='AtlasProduction' 
+    -processingStep='production' -project='TagCollector' -releaseName='15.7.0'
+    """.replace("\n","").split()
+    res = amiclient.execute(cmd)
+    d = _x.xml2dict(ET.fromstring(res.output('xml')))
+
+
+
+    # all the leaf packages in AtlasCore and its dependencies
+    cmd = """
+    TCFormGetDependencyPackageVersionTree
+    -expandedPackageID=* 
+    -expandedTopContainerPackage=*
+    -groupName=AtlasCore 
+    -processingStep=production
+    -project=TagCollector
+    -releaseName=15.7.0
+    """.replace("\n","").split()
+    res = amiclient.execute(cmd)
+    d = _x.xml2dict(ET.fromstring(res.output('xml')))
+
+    # ami dataset:
+    cmd = """
+    GetDatasetInfo 
+    -logicalDatasetName=data09_900GeV.00142404.physics_RNDM.merge.AOD.f193_m320
+    """.replace("\n","").split()
+    res = amiclient.execute(cmd)
+    d = _x.xml2dict(ET.fromstring(res.output('xml')))
+
+    """
+    [amiCommand] 
+    -logicalFileName=data09_1beam.00140536.physics_L1Calo.merge.HIST.f170_m255._0001.1 
+
+    [amiCommand] GetDatasetInfo 
+    -logicalDatasetName=mc08.105609.Pythia_Zprime_tt2000.merge.AOD.e393_s462_s520_r635_t53 
+
+    amiCommand=["GetDatasetInfo","logicalDatasetName=adatasetname"]
+    result=amiclient.execute(amiCommand)
+
+
+    or
+    amiCommand=["GetDatasetInfo","logicalFileName=aFilename"]
+    result=amiclient.execute(amiCommand)
+    """
+    def dsinfo(n):
+        import PyUtils.AmiLib as A
+        import xml.etree.cElementTree as ET
+        import PyUtils.xmldict as _x
+        c = A.Client()
+        try:
+            res = c.exec_cmd(cmd="GetDatasetInfo", logicalFileName=n)
+            dd = _x.xml2dict(ET.fromstring(res.output('xml')))
+            return dd['AMIMessage']['Result']
+        except PyAmi.AMI_Error:
+            # maybe a logical dataset name ?
+            res = c.exec_cmd(cmd="GetDatasetInfo", logicalDatasetName=n)
+            dd = _x.xml2dict(ET.fromstring(res.output('xml')))
+            return dd['AMIMessage']['Result']
+            
+### imports -------------------------------------------------------------------
+import os
+import sys
+
+import pyAMI.client as PyAmi
+import pyAMI.auth as PyAmiAuth
+
+from PyUtils.xmldict import xml2dict
+
+### globals -------------------------------------------------------------------
+
+### functions -----------------------------------------------------------------
+def ami_todict(res):
+    return res.to_dict()
+    
+def xmlstr_todict(s):
+    import PyUtils.xmldict as _x
+    import xml.etree.cElementTree as ET
+    return  _x.xml2dict(ET.fromstring(s))
+
+### classes -------------------------------------------------------------------
+class Client(object):
+
+    _instance = None
+    
+    @staticmethod
+    def instance():
+        if Client._instance is None:
+            c = PyAmi.AMI()
+            import os.path as osp
+            if not osp.exists(PyAmiAuth.AMI_CONFIG):
+                PyAmiAuth.create_auth_config()
+                pass
+            c.read_config(PyAmiAuth.AMI_CONFIG)
+            Client._instance = c
+        return Client._instance
+
+    def __init__(self, certAuth=True, dry_run=False):
+        self._client = PyAmi.AMI()
+        import os.path as osp
+        if not osp.exists(PyAmiAuth.AMI_CONFIG):
+            PyAmiAuth.create_auth_config()
+            pass
+        self._client.read_config(PyAmiAuth.AMI_CONFIG)
+        import PyUtils.Logging as L
+        self.msg = L.logging.getLogger('ami-client')
+        self.msg.setLevel(L.logging.INFO)
+        self.dry_run = dry_run
+        return
+
+    def exec_cmd(self, cmd, **args):
+        """execute an AMI command"""
+        if 'args' in args and len(args)==1:
+            args = args['args']
+        # add some defaults
+        args.setdefault('project', 'TagCollector')
+        args.setdefault('processingStep', 'production')
+        args.setdefault('repositoryName', 'AtlasOfflineRepository')
+
+        # transform into an AMI command string
+        ami_cmd = map(
+            lambda a,b: "%s=%s" % (a,b,),
+            args.keys(),
+            args.values()
+            )
+        ami_cmd.insert(0, cmd)
+
+        self.msg.debug('ami_cmd: %s', ami_cmd)
+        if self.dry_run:
+            return True
+
+        # execute
+        ## try:
+        ##     result = self._client.execute(ami_cmd)
+        ##     return result
+        ## except Exception, err:
+        ##     if self.reraise:
+        ##         raise
+        ##     self.msg.error('caught an exception:\n%s', err)
+        ##     return
+        return self._client.execute(ami_cmd)
+    
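+    # usage sketch (hypothetical package path and release): keyword arguments
+    # are turned into 'key=value' tokens appended after the command name
+    #   client = Client(dry_run=True)
+    #   client.exec_cmd(cmd='TCGetPackageVersionHistory',
+    #                   fullPackageName='/Tools/PyUtils',
+    #                   releaseName='17.0.1')
+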
+    def find_pkg(self, pkg, check_tag=True, cbk_fct=None):
+        """Find the full path name of a package.
+        @return (pkg,tag) tuple
+        """
+        
+        # if '-' is in the package string, a tag was given.
+        if '-' in pkg:
+            tag = pkg.split('/')[-1]
+            pkg = pkg.split('-',1)[0]
+        elif check_tag:
+            raise ValueError('no tag was given for [%s]' % (pkg,))
+        else:
+            tag = None
+
+        orig_pkg = pkg
+        # check if we need to find the full pkg path
+        if '/' in pkg:
+            pkg = pkg.split('/')[-1]
+
+        args = {
+            'glite': (
+                "select packages.path,packages.packageName,packages.archive "
+                "where repositories.repositoryName='AtlasOfflineRepository' "
+                "and packages.packageName='%s' and packages.archive=0" % pkg
+                ),
+            }
+
+        result = self.exec_cmd(cmd='SearchQuery', args=args)
+        if not result:
+            raise RuntimeError(
+                'could not resolve [%s] to full package path' %
+                (pkg,)
+                )
+        res_dict = result.to_dict()
+        if not 'Element_Info' in res_dict:
+            raise RuntimeError(
+                'could not resolve [%s] to full package path' %
+                (pkg,)
+                )
+            
+        pkg_list = []
+        for v in res_dict['Element_Info'].values():
+            pkg_list.append(v) # += [v['path'] + v['packageName']]
+
+        idx = 0
+        if len(pkg_list) == 0:
+            raise RuntimeError('package [%s] does not exist' % pkg)
+
+        elif len(pkg_list)>1:
+            ambiguous = True
+            if '/' in orig_pkg:
+                pkg_candidates = []
+                for i,v in enumerate(pkg_list):
+                    if orig_pkg in v['path']+v['packageName']:
+                        pkg_candidates.append(i)
+                if len(pkg_candidates) == 1:
+                    idx = pkg_candidates[0]
+                    pkg = pkg_list[idx]
+                    ambiguous = False
+                pass
+
+            if ambiguous:
+                self.msg.info('multiple packages found for [%s]:', pkg)
+                for i,v in enumerate(pkg_list):
+                    self.msg.info(' %i) %s', i, v['path']+v['packageName'])
+                if cbk_fct:
+                    try:
+                        n = cbk_fct()
+                    except StopIteration:
+                        raise RuntimeError(
+                            'multiple packages found for [%s]' % pkg
+                            )
+                    idx = n
+                    pkg = pkg_list[n]
+                else:
+                    raise RuntimeError('multiple packages found for [%s]' % pkg)
+
+        else:
+            idx = 0
+            pkg = pkg_list[0]
+
+        # normalize...
+        if pkg['path'][0] != '/':
+            pkg['path'] = '/%s' % pkg['path']
+
+        pkg.update(dict(packagePath=pkg['path'],
+                        id=str(pkg['AMIELEMENTID'])))
+        if tag:
+            pkg['packageTag'] = tag
+        del pkg['path']
+        del pkg['AMIELEMENTID']
+        
+        # remove all unicode strings...
+        _pkg = dict(pkg)
+        pkg = {}
+        
+        for k,v in _pkg.iteritems():
+            if isinstance(v, basestring):
+                v = str(v)
+            pkg[str(k)] = v
+        
+        
+        ## if tag is None:
+        ##     tag = tag_list[idx]
+            
+        ## print "-"*80
+        ## print res_dict
+        ## print "-"*80
+        return pkg
+
+    def get_project_of_pkg(self, pkg, release):
+        """
+        retrieve the list of projects from AMI for a given release and package
+        """
+        pkg = self.find_pkg(pkg,check_tag=False)
+        
+        projects = []
+        full_pkg_name = pkg['packagePath']+pkg['packageName'] # pkg['packageTag']
+        try:
+            res = self.exec_cmd(cmd='TCGetPackageVersionHistory',
+                                fullPackageName=full_pkg_name,
+                                releaseName=release)
+            rows = res.rows()
+            if isinstance(rows, dict):
+                rows = [rows]
+            # print "---"
+            # print list(rows)
+            # print "---"
+            for row in rows:
+                projects.append(row.get('groupName'))
+            if not projects:
+                self.msg.error(
+                    "no project found for package [%s] and release [%s]",
+                    full_pkg_name,
+                    release)
+        except PyAmi.AMI_Error, err:
+            pass
+        return projects
+
+    def get_version_of_pkg(self, pkg, release):
+        """
+        retrieve the list of versions from AMI for a given release and package
+        """
+        pkg = self.find_pkg(pkg,check_tag=False)
+        
+        versions = []
+        full_pkg_name = pkg['packagePath']+pkg['packageName'] # pkg['packageTag']
+        try:
+            res = self.exec_cmd(cmd='TCGetPackageVersionHistory',
+                                fullPackageName=full_pkg_name,
+                                releaseName=release)
+            rows = res.rows()
+            if isinstance(rows, dict):
+                rows = [rows]
+            ## print "---"
+            ## print list(rows)
+            ## print "---"
+            for row in rows:
+                versions.append(row.get('packageTag'))
+            if not versions:
+                self.msg.error(
+                    "no version found for package [%s] and release [%s]",
+                    full_pkg_name,
+                    release)
+        except PyAmi.AMI_Error, err:
+            pass
+        return versions
+
+
+    def get_version_of_pkg_with_deps(self, pkg, project, release):
+        """
+        retrieve the package version from AMI, taking into account project dependencies
+        """
+        
+        versions = []
+        try:
+            res = self.exec_cmd(cmd='TCSearchPackageVersion',
+                                keyword=pkg,
+                                groupName=project,
+                                withDep=True,
+                                releaseName=release)
+            rows = res.rows()
+            if isinstance(rows, dict):
+                rows = [rows]
+
+            for row in rows:
+                packageTag = row.get('packageTag', None)
+                fullPackageName = row.get('fullPackageName', None)
+                groupName = row.get('groupName', None)
+                releaseName = row.get('releaseName', None)
+                versions.append((groupName,releaseName,fullPackageName,packageTag))
+                
+            # If more than one result, match full package name
+            if len(versions)>1:
+                pkg = self.find_pkg(pkg, check_tag=False)
+                full_pkg_name = pkg['packagePath']+pkg['packageName']
+                versions = filter(lambda v:v[2]==full_pkg_name, versions)
+
+            if len(versions)==0:
+                self.msg.error(
+                    "no version found for package [%s] and release [%s]",
+                    pkg,
+                    release)
+
+        except PyAmi.AMI_Error, err:
+            pass
+            
+        return versions
+    
+    def get_project_tree(self, project, release, recursive=False):
+        """return the dependency tree of packages for a given project
+        and a given release
+        if ``recursive`` it will also visit the project dependencies of
+        ``project``
+        """
+        cmd = 'TCFormGetPackageVersionTree'
+        if recursive:
+            cmd = 'TCFormGetDependencyPackageVersionTree'
+        result = self.exec_cmd(
+            cmd=cmd,
+            expandedPackageID='*',
+            expandedTopContainerPackage='*',
+            groupName=project,
+            processingStep='production',
+            project='TagCollector',
+            releaseName=release,
+            )
+        if not result:
+            raise RuntimeError(
+                "Could not retrieve the dependency tree for project [%s]"
+                " and release [%s]" % (project, release,)
+                )
+        import xml.etree.cElementTree as ET
+        d = result.to_dict()
+
+        out = d
+        abs_path = ('AMIMessage', 'Result', 'tree', 'treeBranch',)
+        for i,k in enumerate(abs_path):
+            if not k in out:
+                raise RuntimeError(
+                    'malformed answer from AMI (no [%s] key)' % k
+                    )
+            out = out[k]
+        return out
+
+    def get_open_releases(self, project):
+        return self.get_releases(project, lambda x : x!='terminated')
+        
+    def get_releases(self, project, relStatusCond=lambda x : True):        
+        """return the list of open releases for a given ``project``"""
+        args = {
+           'groupName' : project,
+            'expandedRelease': '*',
+           }
+
+        result = self.exec_cmd(cmd='TCFormGetReleaseTreeDevView', args=args)
+        if not result:
+            raise RuntimeError(
+                "Could not find open releases in project %s" % project
+                )
+
+        rxml = result.output('xml')
+        import xml.etree.cElementTree as ET
+   
+        try:
+            reltree = ET.fromstring(
+                rxml
+                ).find("Result").find("tree")
+            releases = [ r.get("releaseName") 
+                         for r in reltree.getiterator("treeBranch") 
+                         if relStatusCond(r.get("status")) ]
+
+            # Filter all special purpose releases (e.g. -MIG, -SLHC)
+            releases = filter(lambda x: x.count("-")==0, releases)
+        except Exception, e:
+            self.msg.error(e.message)
+            raise RuntimeError(
+                'Could not parse result of TCFormGetReleaseTreeDevView:\n%s' % rxml
+                )
+
+        # Sort by release number
+        releases.sort(key=lambda x: [int(y) if y.isdigit() else 0 for y in x.split('.')])
+        return releases
+
+    def get_clients(self, project, release, full_pkg_name):
+        """return the list of clients (full-pkg-name, version) of
+        `full_pkg_name` for project `project` and release `release`
+        """
+        args = {
+            'groupName': project, # AtlasOffline, AtlasEvent, ...
+            'releaseName': release,
+            }
+        if full_pkg_name[0] != "/":
+            full_pkg_name = "/"+full_pkg_name
+        args['fullPackageName'] = full_pkg_name
+        
+        result = self.exec_cmd(cmd="TCListPackageVersionClient", args=args)
+        if not result:
+            raise RuntimeError(
+                'error executing TCListPackageVersionClient'
+                )
+        
+        rxml = result.output('xml')
+        import xml.etree.cElementTree as ET
+        try:
+            rows = xml2dict(ET.fromstring(rxml))['AMIMessage']["Result"]["rowset"]['row']
+        except Exception, e:
+            self.msg.error(e.message)
+            raise RuntimeError(
+                'could not parse result of TCListPackageVersionClient:\n%s' % rxml
+                )
+
+        if not isinstance(rows, (tuple,list)):
+            rows = [rows]
+            
+        clients = []
+        for row in rows:
+            fields = row['field']
+            client_name = None
+            client_vers = None
+            release_vers = None
+            group_name = None
+            for f in fields:
+                if f['name'] == 'fullPackageName':
+                    client_name = f['_text']
+                elif f['name'] == 'packageTag':
+                    client_vers = f['_text']
+                elif f['name'] == 'releaseName':
+                    release_vers = f['_text']
+                elif f['name'] == 'groupName':
+                    group_name = f['_text']
+            if client_name is None or client_vers is None:
+                self.msg.warning("could not find client-info for:\n%s", fields)
+            else:
+                if client_name[0] == '/':
+                    client_name = client_name[1:]
+                clients.append((client_name, client_vers, release_vers, group_name))
+        return clients
+    
+    pass # Client
+
diff --git a/Tools/PyUtils/python/AthFile/__init__.py b/Tools/PyUtils/python/AthFile/__init__.py
new file mode 100644
index 00000000000..38008220272
--- /dev/null
+++ b/Tools/PyUtils/python/AthFile/__init__.py
@@ -0,0 +1,199 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/python/AthFile/__init__.py
+# @purpose a simple abstraction of a file to retrieve information out of it
+# @author Sebastien Binet <binet@cern.ch>
+# @date October 2008
+from __future__ import with_statement
+
+__doc__ = "a simple abstraction of a file to retrieve informations out of it"
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+### imports -------------------------------------------------------------------
+import os
+import imp
+import hashlib
+
+__all__        = []
+__pseudo_all__ = [
+    'AthFile',
+    'ftype',
+    'fopen',
+    'exists',
+    'server',
+    ]
+
+import PyUtils.Decorators as _decos
+import impl as _impl
+import tests as _tests
+AthFile = _impl.AthFile
+
+from PyCmt.decorator import decorator as _dec
+@_dec
+def _update_cache(fct, *args):
+    res = fct(*args)
+    import PyUtils.AthFile as af
+    if af.server._do_pers_cache:
+        try:
+            af.server.load_cache()
+        except Exception:
+            pass
+    return res
+
+### classes -------------------------------------------------------------------
+import types
+class ModuleFacade(types.ModuleType):
+    """a helper class to manage the instantiation of the ``AthFileMgr`` and
+    ``AthFileServer`` objects and allow attribute-like access to methods
+    (stolen from PyRoot)
+    """
+    def __init__( self, module ):
+        types.ModuleType.__init__(self, module.__name__)
+        self.__dict__['module'] = module
+        self.__dict__[ '__doc__'  ] = module.__doc__
+        self.__dict__[ '__name__' ] = module.__name__
+        self.__dict__[ '__file__' ] = module.__file__
+
+        self.__dict__['_tests'] = _tests
+        self.__dict__['_impl']  = _impl
+        self.__dict__['_guess_file_type'] = _guess_file_type
+
+        self.__dict__['server'] = _impl.g_server
+        
+        import atexit
+        atexit.register(self.shutdown)
+        del atexit
+        
+    def __getattr__(self, k):
+        if k in self.__dict__:
+            return self.__dict__.get(k)
+        if k.startswith('__'):
+            return types.ModuleType.__getattribute__(self, k)
+        return object.__getattribute__(self, k)
+    
+    def restart_server(self):
+        return
+    
+    def shutdown(self):
+        #self.server._cleanup_pyroot()
+        return
+    
+    @property
+    def msg(self):
+        return self.server.msg()
+    
+    @property
+    def cache(self):
+        return self.server.cache()
+
+    @property
+    def save_cache(self):
+        return self.server.save_cache
+
+    @property
+    def load_cache(self):
+        return self.server.load_cache
+
+    @property
+    def flush_cache(self):
+        return self.server.flush_cache
+    
+    @_decos.forking
+    def ftype(self, fname):
+        return self.server.ftype(fname)
+
+    @_decos.forking
+    def fname(self, fname):
+        return self.server.fname(fname)
+
+    @_decos.forking
+    def exists(self, fname):
+        return self.server.exists(fname)
+
+    @property
+    def tests(self):
+        return self._tests
+
+    @_update_cache  # also decorate with _update_cache to pick-up the changes 
+    @_decos.forking # from the forked athfile server...
+    def fopen(self, fnames, evtmax=1):
+        """
+        helper function to create @c AthFile instances
+        @param `fnames` name of the file (or a list of names of files) to inspect
+        @param `evtmax` number of events to process (for each file)
+        
+        Note that if `fnames` is a list of filenames, then `fopen` returns a list
+        of @c AthFile instances.
+        """
+        if isinstance(fnames, (list, tuple)):
+            infos = []
+            for fname in fnames:
+                info = self.server.fopen(fname, evtmax)
+                infos.append(info)
+                pass
+            return infos
+        return self.server.fopen(fnames, evtmax)
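+        # e.g. (hypothetical file name):
+        #   af = PyUtils.AthFile.fopen('my.aod.pool')
+        #   print af.nentries, af.run_number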
+
+    @_update_cache  # also decorate with _update_cache to pick-up the changes 
+    @_decos.forking # from the forked athfile server...
+    def pfopen(self, fnames, evtmax=1):
+        """
+        helper function to create @c AthFile instances
+        @param `fnames` name of the file (or a list of names of files) to inspect
+        @param `evtmax` number of events to process (for each file)
+        
+        Note that if `fnames` is a list of filenames, then `pfopen` returns a list
+        of @c AthFile instances.
+
+        This is a parallel (multi-threaded) version of ``fopen``.
+        """
+        return self.server.pfopen(fnames, evtmax)
+
+    ## def __del__(self):
+    ##     self._mgr.shutdown()
+    ##     return super(ModuleFacade, self).__del__()
+    
+    pass # class ModuleFacade
+
+###
+
+def _guess_file_type(fname, msg):
+    """guess the type of an input file (bs,rdo,esd,aod,...)
+    """
+    input_type = None
+    import PyUtils.AthFile as af
+    try:
+        file_type,file_name = af.ftype(fname)
+    except Exception:
+        raise # for now
+    if file_type == 'bs':
+        input_type = 'bs'
+    elif file_type == 'pool':
+        import PyUtils.PoolFile as pf
+        stream_names = pf.extract_stream_names(fname)
+        stream_names = [s.lower() for s in stream_names]
+        if len(stream_names) > 1:
+            msg.warning('got many stream names: %r', stream_names)
+            msg.warning('only considering the 1st one...')
+        elif len(stream_names) <= 0:
+            msg.warning('got an empty list of stream names')
+            raise SystemExit(1)
+        stream_name = stream_names[0]
+        input_type = {
+            'stream1':    'rdo',
+            'streamesd' : 'esd',
+            'streamaod' : 'aod',
+            # FIXME: TODO: TAG, DPD
+            }.get(stream_name, 'aod')
+
+    else:
+        msg.error('unknown file type (%s) for file [%s]',
+                  file_type, file_name)
+    return input_type
+
+
+### exec at import ------------------------------------------------------------
+import sys
+sys.modules[ __name__ ] = ModuleFacade( sys.modules[ __name__ ] )
+del ModuleFacade
diff --git a/Tools/PyUtils/python/AthFile/impl.py b/Tools/PyUtils/python/AthFile/impl.py
new file mode 100644
index 00000000000..fec6683c034
--- /dev/null
+++ b/Tools/PyUtils/python/AthFile/impl.py
@@ -0,0 +1,1441 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/python/AthFile/impl.py
+# @purpose a simple abstraction of a file to retrieve information out of it
+# @author Sebastien Binet <binet@cern.ch>
+# @date November 2009
+
+from __future__ import with_statement
+
+__version__ = "$Revision: 588873 $"
+__author__  = "Sebastien Binet"
+__doc__ = "implementation of AthFile-server behind a set of proxies to isolate environments"
+
+import errno
+import os
+import subprocess
+import sys
+
+import PyUtils.Helpers as H
+from PyUtils.Helpers    import ShutUp
+from .timerdecorator import timelimit, TimeoutError
+
+# see bug #95942 for the excruciating details
+try:
+    from AthenaCommon.Include import excludeTracePattern
+    excludeTracePattern.append("*cache.ascii.gz")
+    del excludeTracePattern
+except:
+    pass
+
+### globals -------------------------------------------------------------------
+DEFAULT_AF_CACHE_FNAME = os.environ.get('DEFAULT_AF_CACHE_FNAME',
+                                        'athfile-cache.ascii.gz')
+
+DEFAULT_AF_TIMEOUT = 20
+'''Default timeout for commands to be completed.'''
+
+### utils ----------------------------------------------------------------------
+
+def _get_real_ext(fname):
+    """little helper to get the 'real' extension of a filename, handling 'fake' extensions (e.g. foo.ascii.gz -> .ascii)"""
+    se = os.path.splitext
+    f,ext = se(fname)
+    if ext in ('.gz',):
+        _,ext = se(f)
+    return ext
+
+def _my_open(name, mode='r', bufsiz=-1):
+    """helper method to handle gzipped or not files.
+    if `name` ends with '.gz' the correct gzip.open function will be called.
+    """
+    f,ext = os.path.splitext(name)
+    if ext in ('.gz',):
+        import gzip
+        def gzip_exit(self, type, value, traceback):
+            return self.close()
+        def gzip_enter(self):
+            return self
+        gzip.GzipFile.__exit__ = gzip_exit
+        gzip.GzipFile.__enter__= gzip_enter
+        return gzip.open(name, mode)
+    else:
+        return open(name, mode, bufsiz)
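+
+# usage sketch: plain and gzipped files are read transparently, e.g. with the
+# default cache file name defined above:
+#   with _my_open('athfile-cache.ascii.gz') as f:
+#       data = f.read()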
+    
+def _find_file(filename, pathlist, access):
+    """Find <filename> with rights <access> through <pathlist>."""
+    import os
+    import os.path as osp
+    # special case for those filenames that already contain a path
+    if osp.dirname(filename):
+        if os.access(filename, access):
+            return filename
+
+    # test the file name in all possible paths until first found
+    for path in pathlist:
+        f = osp.join(path, filename)
+        if os.access(f, access):
+            return f
+
+    # no such accessible file available
+    return None
+
+def _setup_ssl(msg, root):
+    x509_proxy = os.environ.get('X509_USER_PROXY', '')
+    if x509_proxy:
+        # setup proper credentials
+        root.TSSLSocket.SetUpSSL(
+            x509_proxy,
+            "/etc/grid-security/certificates",
+            x509_proxy,
+            x509_proxy)
+    else:
+        msg.warning("protocol https is requested but no X509_USER_PROXY was found! (opening the file might fail.)")
+        pass
+    return
+    
+def _create_file_infos():
+    """simple helper function to create consistent dicts for the
+    fileinfos attribute of AthFile
+    """
+    d = {
+        'file_md5sum': None,
+        'file_name': None,
+        'file_size': -1,
+        'file_type': None,
+        'file_guid': None,
+        'nentries' : 0, # to handle empty files
+        'run_number': [],
+        'run_type': [],
+        'evt_type': [],
+        'evt_number': [],
+        'lumi_block': [],
+        'beam_energy': [],
+        'beam_type':   [],
+        'stream_tags': [],
+        'metadata_items': None,
+        'eventdata_items': None,
+        'stream_names': None,
+        'geometry': None,
+        'conditions_tag': None,
+        'det_descr_tags': None,
+        ##
+        'metadata': None,
+        'tag_info': None,
+        }
+    return d
+
+def ami_dsinfos(dsname):
+    """a helper function to query AMI for informations about a dataset name.
+    `dsname` can be either a logical dataset name (a bag of files) or a
+    logical filename.
+    """
+    import PyUtils.AmiLib as A
+    import PyUtils.xmldict as _x
+    import xml.etree.cElementTree as ET
+
+    # keep order of tokens !
+    for token in ('ami://', '//', '/'):
+        if dsname.startswith(token):
+            dsname = dsname[len(token):]
+            pass
+        pass
+    
+    ami = A.Client()
+    try:
+        res = ami.exec_cmd(cmd="GetDatasetInfo", logicalFileName=dsname)
+    except A.PyAmi.AMI_Error:
+        # maybe a logical dataset name then ?
+        res = ami.exec_cmd(cmd="GetDatasetInfo", logicalDatasetName=dsname)
+    res = _x.xml2dict(ET.fromstring(res.transform('xml')))
+    data = res['AMIMessage']['Result']
+        
+    # keep only the interesting information...
+    rowset = data['rowset']
+    if isinstance(rowset, list):
+        fields = rowset[-1]['row']['field']
+    else:
+        fields = rowset['row']['field']
+
+    # translate into athfile-infos format
+    af_infos = _create_file_infos()
+    for i in fields:
+        if not ('name' in i and '_text' in i):
+            continue
+        k = i['name']
+        v = i['_text']
+
+        if v.lower() == 'none':
+            v = None
+            
+        if k == 'logicalDatasetName':
+            af_infos['file_name'] = 'ami://'+v
+
+        elif k == 'totalEvents':
+            af_infos['nentries'] = int(v)
+
+        elif k == 'runNumber':
+            af_infos['run_number'] = [int(v)]
+            
+        elif k == 'geometryVersion':
+            af_infos['geometry'] = v
+
+        elif k == 'conditionsTag':
+            af_infos['conditions_tag'] = v
+
+        elif k == 'beamType':
+            af_infos['beam_type'] = [v]
+            
+        ## elif k == 'fileType':
+        ##     af_infos['file_type'] = v
+
+        elif k == 'dataType':
+            af_infos['file_type'] = 'bs' if v.lower() == 'raw' else 'pool'
+            stream_name = 'Stream' + v.upper()
+            af_infos['stream_names'] = [stream_name]
+
+        elif k == 'streamName':
+            stream_type,stream_name = v.split('_')
+            af_infos['stream_tags'] = [
+                {'obeys_lbk': None,
+                 'stream_type': stream_type,
+                 'stream_name': stream_name}
+                ]
+    # FIXME !!
+    af_infos['file_guid'] = af_infos['file_name']
+    # FIXME !!
+    if not af_infos['run_number']:
+        dsname = af_infos['file_name']
+        idx = [i for i,j in enumerate(fields)
+               if j['name']=='contained_dataset']
+        if len(idx)==1:
+            #try to extract run-number from the name of the first dataset
+            dsname = fields[idx[0]]['_text'].split(';')[0]
+            try:
+                # an AMI dsname is of the form:
+                # (project_name).(run_nbr).[...]
+                run_number = dsname.split('.')[1]
+                af_infos['run_number'] = [int(run_number)]
+            except ValueError:
+                #af_infos['run_number'] = ['N/A']
+                pass
+                
+        else:
+            try:
+                # an AMI dsname is of the form:
+                # (project_name).(run_nbr).[...]
+                run_number = dsname.split('.')[1]
+                af_infos['run_number'] = [int(run_number)]
+            except ValueError:
+                #af_infos['run_number'] = ['N/A']
+                pass
+        pass
+    
+    return af_infos
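+
+# usage sketch (dataset name as in the AmiLib GetDatasetInfo example):
+#   infos = ami_dsinfos('data09_900GeV.00142404.physics_RNDM.merge.AOD.f193_m320')
+#   print infos['nentries'], infos['run_number']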
+        
+### classes -------------------------------------------------------------------
+class AthFile (object):
+    """A handle to an athena file (POOL,ROOT or ByteStream)
+    """
+    __slots__ = ('fileinfos',)
+    
+    @staticmethod
+    def from_infos(infos):
+        o = AthFile()
+        o.fileinfos = _create_file_infos() # ensure basic layout
+        o.fileinfos.update(infos.copy())
+        return o
+
+    @staticmethod
+    def from_fname(fname):
+        import PyUtils.AthFile as af
+        return af.fopen(fname)
+
+    @property
+    def name(self):
+        return self.fileinfos['file_name']
+    
+    @property
+    def nentries(self):
+        return self.fileinfos['nentries']
+    
+    @property
+    def infos(self):
+        return self.fileinfos
+
+    @property
+    def run_number (self):
+        """return the list of unique run-numbers the @c AthFile contains"""
+        return list(set(self.infos['run_number']))
+    # backward compatibility
+    run_numbers = run_number
+    
+    @property
+    def evt_number (self):
+        """return the list of unique evt-numbers the @c AthFile contains"""
+        return list(set(self.infos['evt_number']))
+    
+    @property
+    def lumi_block (self):
+        """return the list of unique lumi-block nbrs the @c AthFile contains
+        """
+        return list(set(self.infos['lumi_block']))
+    
+    @property
+    def run_type (self):
+        """return the list of unique run-types the @c AthFile contains"""
+        return list(set(self.infos['run_type']))
+    
+    @property
+    def beam_type (self):
+        """return the list of unique beam-types the @c AthFile contains"""
+        return list(set(self.infos['beam_type']))
+    
+    @property
+    def beam_energy (self):
+        """return the list of unique beam-energies the @c AthFile contains"""
+        return list(set(self.infos['beam_energy']))
+    
+    pass # AthFile class
+
+class AthFileServer(object):
+    """the object serving AthFile requests
+    """
+    
+    def __init__(self):
+
+        import PyUtils.Logging as _L
+        self._msg = _L.logging.getLogger("AthFile")
+        self.set_msg_lvl(_L.logging.INFO)
+        
+        if os.environ.get('ATHFILE_DEBUG', '0') == '1':
+            self.set_msg_lvl(_L.logging.VERBOSE)
+            pass
+        
+        self.msg().debug('importing ROOT...')
+        import PyUtils.RootUtils as ru
+        self.pyroot = ru.import_root()
+        try:
+            ru._pythonize_tfile()
+        except Exception, err:
+            self.msg().warning('problem during TFile pythonization:\n%s', err)
+            
+        self.msg().debug('importing ROOT... [done]')
+
+        # a cache of already processed requests
+        self._cache = {}
+        self._do_pers_cache = True
+        self.enable_pers_cache()
+        return
+
+    # make the _peeker on-demand to get an up-to-date os.environ
+    @property
+    def _peeker(self):
+        return FilePeeker(self)
+    
+    def _cleanup_pyroot(self):
+        import PyUtils.RootUtils as ru
+        root = ru.import_root()
+        tfiles = root.gROOT.GetListOfFiles()[:]
+        for i,f in enumerate(tfiles):
+            try:
+                if f:
+                    f.Close()
+                    del f
+            except Exception,err:
+                self._msg.info('could not close a TFile:\n%s', err)
+                pass
+        tfiles[:] = []
+
+    def msg(self):
+        return self._msg
+    
+    def set_msg_lvl(self, lvl):
+        self.msg().setLevel(lvl)
+        
+    def _md5_for_file(self, f, block_size=2**20, do_fast_md5=True):
+        """helper function to calculate a MD5 checksum
+        ``f`` can be filename, an open python file or an open TFile
+        """
+        import hashlib
+        md5 = hashlib.md5()
+        do_close = False
+        if isinstance(f, basestring):
+            protocol,fname = self.fname(f)
+            f = self._root_open(fname)
+            do_close = True
+            if f is None or not f:
+                raise IOError(errno.ENOENT,
+                              "No such file or directory",
+                              fname)
+
+        assert hasattr(f, 'read'), \
+               "'f' must be a file-like object. (f=%r, type=%s)"%(
+            f,type(f),
+            )
+        orig_pos = f.tell()
+        f.seek(0)
+        try:
+            while True:
+                data = f.read(block_size)
+                if not data:
+                    break
+                md5.update(data)
+                if do_fast_md5:
+                    break
+        finally:
+            f.seek(orig_pos)
+        if do_close:
+            f.Close()
+        return md5.hexdigest()
+    
+    def _root_open(self, fname):
+        import PyUtils.Helpers as H
+        # speed-up by tampering LD_LIBRARY_PATH to not load reflex-dicts
+        import re
+        with H.restricted_ldenviron(projects=['AtlasCore']):
+            with H.ShutUp(filters=[
+                re.compile(
+                    'TClass::TClass:0: RuntimeWarning: no dictionary for.*'),
+                re.compile(
+                    'Warning in <TEnvRec::ChangeValue>: duplicate entry.*'
+                    ),
+                ]):
+                root_open = self.pyroot.TFile.Open
+                protocol, _ = self.fname(fname)
+                if protocol == 'https':
+                    _setup_ssl(self.msg, self.pyroot)
+                    root_open = self.pyroot.TWebFile
+                    pass
+                f = root_open(fname+"?filetype=raw", "read")
+                if f is None or not f:
+                    raise IOError(errno.ENOENT, 'No such file or directory',
+                                  fname)
+                return f
+        return
+
+    def pfopen(self, fnames, evtmax=1):
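+        """same as `fopen` but, when `fnames` is a list or tuple, the files
+        are inspected in parallel via a small thread pool (at most 4 workers).
+        """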
+        if isinstance(fnames, (list, tuple)):
+            self.msg().debug("using mp.pool... (files=%s)" % len(fnames))
+            fct = _do_fopen
+            do_pers_cache = self._do_pers_cache
+            self.disable_pers_cache()
+            import multiprocessing as mp
+            from multiprocessing.pool import ThreadPool
+            # Never run more than 4 parallel instances
+            pool_sz = min(mp.cpu_count(), 4)
+            pool = ThreadPool(pool_sz)
+
+            infos = None
+            try:
+                setattr(self, '_evtmax', evtmax)
+                infos = pool.map(fct, fnames)
+            finally:
+                delattr(self, '_evtmax')
+                if do_pers_cache:
+                    self.enable_pers_cache()
+                    pass
+                pass
+            # collect back infos into ourself
+            for f in infos:
+                fname = f.infos['file_name']
+                self._cache[fname] = f
+                pass
+            # synchronize once
+            try:
+                self._sync_pers_cache()
+            except Exception, err:
+                self.msg().info('could not synchronize the persistent cache:\n%s', err)
+                pass
+                
+            return infos
+        return self._fopen_file(fnames, evtmax)
+        
+    def fopen(self, fnames, evtmax=1):
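+        """return an `AthFile` handle for `fnames` (or a list of handles when
+        `fnames` is a list or tuple), inspecting at most `evtmax` events.
+        """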
+        if isinstance(fnames, (list, tuple)):
+            infos = []
+            for fname in fnames:
+                info = self._fopen_file(fname, evtmax)
+                infos.append(info)
+            return infos
+        return self._fopen_file(fnames, evtmax)
+        
+    def _fopen_stateless(self, fname, evtmax):
+        msg = self.msg()
+        cache = dict(self._cache)
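+        # build a temporary reverse index (md5sum/guid -> cached file name) so
+        # that identical local files reached through different names or paths
+        # can still be served from the cache; these extra keys are removed
+        # again further down before the cache is returned.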
+        fids = []
+        for k,v in cache.iteritems():
+            v = v.infos
+            fid = v.get('file_md5sum', v['file_guid'])
+            if fid:
+                fids.append((fid,k))
+            pass
+        for v in fids:
+            fid, k = v
+            cache[fid] = k
+            pass
+        
+        protocol, fname = self.fname(fname)
+        if protocol in ('fid', 'lfn'):
+            protocol, fname = self.fname(fname)
+
+        use_cache = False
+        sync_cache = True
+        if protocol in ('', 'file'):
+            fid = self.md5sum(fname)
+            fid_in_cache = fid in cache
+            # also check the cached name in case 2 identical files
+            # are named differently or under different paths
+            fid_match_fname = cache.get(fid,None) == fname
+            if fid_in_cache and fid_match_fname:
+                use_cache = True
+                sync_cache = False
+                msg.debug('fetched [%s] from cache (md5sum is a match)', fname)
+                f = cache[cache[fid]]
+        elif protocol in ('ami',):
+            use_cache = True
+            sync_cache = True # yes, we want to update the pers. cache
+            # take data from AMI
+            infos = ami_dsinfos(fname[len('ami://'):])
+            msg.debug('fetched [%s] from cache', fname)
+            fid = infos.get('file_md5sum', infos['file_guid'])
+            cache[fid] = fname
+            f = AthFile.from_infos(infos)
+            cache[fname] = f
+            # hysteresis...
+            cache[infos['file_name']] = f
+        else:
+            # use the cache indexed by name rather than md5sums to
+            # skip one TFile.Open...
+            # Note: we assume files on mass storage systems do not
+            # change very often.
+            if fname in self._cache:
+                use_cache = True
+                sync_cache = False
+                msg.debug('fetched [%s] from cache', fname)
+                f = cache[fname]
+
+        if not use_cache:
+            msg.info("opening [%s]...", fname)
+            infos = self._peeker(fname, evtmax)
+            f = AthFile.from_infos(infos)
+            cache[fname] = f
+            # hysteresis...
+            cache[infos['file_name']] = f
+            sync_cache = True
+            pass
+
+        # remove the fids we added...
+        for v in fids:
+            fid, k = v
+            # in case there were duplicate fids
+            try:             del cache[fid]
+            except KeyError: pass
+            pass
+
+        return (fname, cache, sync_cache)
+
+    def _fopen_file(self, fname, evtmax):
+        msg = self.msg()
+        fname, cache, sync_cache = self._fopen_stateless(fname, evtmax)
+        if sync_cache:
+            try:
+                self._cache = cache
+                self._sync_pers_cache()
+            except Exception,err:
+                msg.info('could not synchronize the persistent cache:\n%s', err)
+            pass
+        return self._cache[fname]
+    
+    def md5sum(self, fname):
+        """return the md5 checksum of file ``fname``
+        """
+        if isinstance(fname, basestring):
+            protocol,fname = self.fname(fname)
+        
+        md5 = self._md5_for_file(fname)
+        return md5
+    
+    @timelimit(timeout=DEFAULT_AF_TIMEOUT)
+    def fname(self, fname):
+        """take a file name, return the pair (protocol, 'real' file name)
+        """
+        import os.path as osp
+        fname = osp.expanduser(osp.expandvars(fname))
+
+        msg = self.msg()
+        
+        def _normalize_uri(uri):
+            if uri.startswith('/'):
+                return 'file:'+uri
+            return uri
+        
+        from urlparse import urlsplit
+        url = urlsplit(_normalize_uri(fname))
+        protocol = url.scheme
+        def _normalize(fname):
+            import os.path as osp
+            from posixpath import normpath
+            fname = normpath(fname)
+            #fname = osp.realpath(osp.abspath(normpath(fname)))
+            if fname.startswith('//'): fname = fname[1:]
+            return fname
+
+        if protocol in ('', 'file', 'pfn'):
+            protocol = ''
+            fname = _normalize(url.path)
+
+            ## hack for '/castor/cern.ch/...' paths
+            if fname.startswith('/castor/'):
+                protocol = 'rfio'
+                fname = protocol + ':' + fname
+                
+        elif protocol in ('rfio', 'castor'):
+            protocol = 'rfio'
+            fname = _normalize(url.path)
+            fname = protocol+':'+fname
+
+        elif protocol in ('root','dcap', 'dcache', 'http', 'https'):
+            #fname = fname
+            pass
+
+        elif protocol in ('gsidcap',):
+            protocol = 'gfal:gsidcap'
+            pass
+        
+        elif protocol in ('lfn','fid',):
+            # percolate through the PoolFileCatalog
+            from PyUtils.PoolFile import PoolFileCatalog as pfc
+            fname = pfc().pfn(protocol+':'+url.path)
+            pass
+
+        elif protocol in ('ami',):
+            # !! keep order of tokens !
+            for token in ('ami:', '//', '/'):
+                if fname.startswith(token):
+                    fname = fname[len(token):]
+            fname = 'ami://' + fname
+            pass
+        
+        else:
+            msg.warning('unknown protocol [%s]. we\'ll just return our input',
+                        protocol)
+            #fname = fname
+            pass
+        
+        return (protocol, fname)
+
+    def cache(self):
+        return self._cache
+
+    def enable_pers_cache(self):
+        """configure the file server to write out the persistent cache
+        of inspected files.
+        """
+        # first disable previous cache, if any, to prevent hysteresis...
+        self.disable_pers_cache()
+        msg = self.msg()
+        self._do_pers_cache = True
+        
+        import os
+        fname = DEFAULT_AF_CACHE_FNAME
+        if (fname and
+            os.path.exists(fname) and
+            os.access(fname, os.R_OK)):
+            msg.info('loading cache from [%s]...', fname)
+            try:
+                self.load_cache(fname)
+                msg.info('loading cache from [%s]... [done]', fname)
+            except TimeoutError:
+                msg.info('loading cache timed out!')
+        return
+
+    def disable_pers_cache(self):
+        """configure the file server to NOT write out the persistent cache
+        of inspected files.
+        if the persistent cache wasn't enabled, this is a no-op.
+        """
+        self._do_pers_cache = False
+        return
+    
+    def _sync_pers_cache(self):
+        if not self._do_pers_cache:
+            return
+        msg = self.msg()
+        import os
+        fname = DEFAULT_AF_CACHE_FNAME
+        if not fname:
+            # protect against empty or invalid (None) cache file names
+            return
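+        # write to a process-unique temporary file first and only then rename
+        # it over the real cache file, so that concurrent AthFile processes do
+        # not corrupt each other's persistent cache.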
+        import uuid
+        pid = str(os.getpid())+'-'+str(uuid.uuid4())
+        fname_,fname_ext = os.path.splitext(fname)
+        if fname_ext in ('.gz',):
+            fname_,fname_ext = os.path.splitext(fname_)
+            pid_fname = fname_ + '.' + pid + fname_ext + ".gz"
+        else:
+            pid_fname = fname_ + '.' + pid + fname_ext
+        msg.debug('synch-ing cache to [%s]...', fname)
+        try:
+            msg.debug('writing to [%s]...', pid_fname)
+            self.save_cache(pid_fname)
+            if os.path.exists(pid_fname):
+                # should be atomic on most FS...
+                os.rename(pid_fname, fname)
+            else:
+                msg.warning("could not save to [%s]", pid_fname)
+            msg.debug('synch-ing cache to [%s]... [done]', fname)
+        except Exception,err:
+            msg.debug('synch-ing cache to [%s]... [failed]', fname)
+            msg.debug('reason:\n%s', err)
+            pass
+        return
+
+    # dead-lock on self.msg (I think!)...
+    #@timelimit(timeout=DEFAULT_AF_TIMEOUT)
+    def load_cache(self, fname=DEFAULT_AF_CACHE_FNAME):
+        """load file informations from a cache file.
+        the back-end (JSON, ASCII, pickle, ...) is inferred from the
+        extension of the `fname` parameter.
+        defaults to py-ASCII.
+        """
+        import os
+        import os.path as osp
+        msg = self.msg()
+
+        ext = _get_real_ext(osp.basename(fname))
+        if len(ext) == 0:
+            # illegal file...
+            msg.info('load_cache: invalid file [%s]', fname)
+            return
+
+        ext = ext[1:] if ext[0]=='.' else ext
+        try:
+            loader = getattr(self, '_load_%s_cache'%ext)
+        except AttributeError:
+            msg.info('load_cache: could not find a suitable backend for '
+                     'extension [.%s] => using [ascii]', ext)
+            loader = self._load_ascii_cache
+
+        try:
+            search_path = os.environ.get('DATAPATH',os.getcwd())
+            search_path = search_path.split(os.pathsep)
+            fname = _find_file(osp.expanduser(osp.expandvars(fname)),
+                             search_path,
+                             os.R_OK) or fname
+        except ImportError:
+            # not enough karma... tough luck!
+            pass
+
+        # ensure one can read that file...
+        with open(fname, 'r') as file_handle:
+            pass
+
+        msg.debug('loading cache from [%s]...', fname)
+        cache = {}
+        try:
+            cache = loader(fname)
+        except Exception, err:
+            msg.info("problem loading cache from [%s]!", fname)
+            msg.info(repr(err))
+            pass
+
+        self._cache.update(cache)
+        msg.debug('loading cache from [%s]... [done]', fname)
+
+    def save_cache(self, fname=DEFAULT_AF_CACHE_FNAME):
+        """save file informations into a cache file.
+        the back-end (JSON, ASCII, pickle, ...) is inferred from the
+        extension of the `fname` parameter.
+        falls back to py-ASCII.
+        """
+        msg = self.msg()
+        import os
+        if os.path.exists(fname):
+            os.rename(fname, fname+'.bak')
+        ext = _get_real_ext(fname)
+        ext = ext[1:] # drop the dot
+        try:
+            saver = getattr(self, '_save_%s_cache'%ext)
+        except AttributeError:
+            msg.info('save_cache: could not find a suitable backend for '
+                     'extension [.%s] => using [ascii]', ext)
+            saver = self._save_ascii_cache
+        try:
+            saver(fname)
+        except IOError,err:
+            import errno
+            if err.errno != errno.EACCES:
+                raise
+            else:
+                msg.info('could not save cache in [%s]', fname)
+        except Exception,err:
+            msg.warning('could not save cache into [%s]:\n%s', fname, err)
+        return
+    
+    def _load_pkl_cache(self, fname):
+        """load file informations from pickle/shelve 'fname'"""
+        try: import cPickle as pickle
+        except ImportError: import pickle
+        import shelve
+        db = shelve.open(fname, protocol=pickle.HIGHEST_PROTOCOL)
+        return db['fileinfos_cache'].copy()
+
+    def _save_pkl_cache(self, fname):
+        """save file informations into pickle/shelve 'fname'"""
+        try: import cPickle as pickle
+        except ImportError: import pickle
+        import shelve
+        db = shelve.open(fname, protocol=pickle.HIGHEST_PROTOCOL)
+        db['fileinfos_cache'] = self._cache.copy()
+        db.close()
+        return
+    
+    def _load_json_cache(self, fname):
+        """load file informations from a JSON file"""
+        try:
+            import simplejson as json
+        except ImportError:
+            import json
+        with _my_open(fname) as fd:
+            cache = json.load(fd)
+        return dict((k,AthFile.from_infos(v)) for k,v in cache)
+        
+    def _save_json_cache(self, fname):
+        """save file informations using JSON"""
+        try:
+            import simplejson as json
+        except ImportError:
+            import json
+        cache = self._cache
+        with _my_open(fname, 'w') as fd:
+            json.dump([(k, cache[k].fileinfos) for k in cache],
+                      fd,
+                      indent=2,
+                      sort_keys=True)
+        return
+    
+    def _load_ascii_cache(self, fname):
+        """load file informations from a pretty-printed python code"""
+        dct = {}
+        ast = compile(_my_open(fname).read(), fname, 'exec')
+        exec ast in dct,dct
+        del ast
+        try:
+            cache = dct['fileinfos']
+        except Exception, err:
+            raise
+        finally:
+            del dct
+        return dict((k,AthFile.from_infos(v)) for k,v in cache)
+    
+    def _save_ascii_cache(self, fname):
+        """save file informations into pretty-printed python code"""
+        from pprint import pprint
+        cache = self._cache
+        with _my_open(fname, 'w') as fd:
+            print >> fd, "# this is -*- python -*-"
+            print >> fd, "# this file has been automatically generated."
+            print >> fd, "fileinfos = ["
+            fd.flush()
+            for k in cache:
+                print >> fd, "\n## new-entry"
+                pprint((k, cache[k].fileinfos),
+                       stream=fd,
+                       width=120)
+                fd.flush()
+                print >> fd, ", "
+            print >> fd, "]"
+            print >> fd, "### EOF ###"
+            fd.flush()
+        return
+    
+    def _load_db_cache(self, fname):
+        """load file informations from a sqlite file"""
+        import PyUtils.dbsqlite as dbsqlite
+        cache = dbsqlite.open(fname)
+        d = {}
+        for k,v in cache.iteritems():
+            d[k] = AthFile.from_infos(v)
+        return d
+        
+    def _save_db_cache(self, fname):
+        """save file informations using sqlite"""
+        import PyUtils.dbsqlite as dbsqlite
+        db = dbsqlite.open(fname,flags='w')
+        cache = self._cache
+        for k in cache:
+            db[k] = cache[k].fileinfos
+        db.close()
+        return
+    
+    def flush_cache(self):
+        self._cache = {}
+        return
+
+    @timelimit(timeout=DEFAULT_AF_TIMEOUT)
+    def ftype(self, fname):
+        """
+        returns the type of a file ('pool' or 'bs') together with its
+        canonical name. `fname` can be a string or a `ROOT.TFile` handle.
+
+        example:
+        >>> import PyUtils.AthFile as af
+        >>> af.ftype ('castor:/castor/cern.ch/foo.pool')
+        ('pool', 'rfio:/castor/cern.ch/foo.pool')
+        
+        >>> af.ftype ('LFN:ttbar.pool')
+        ('pool', '/afs/cern.ch/somewhere/ttbar.pool')
+        
+        >>> af.ftype ('rfio:/castor/cern.ch/bs.data')
+        ('bs', 'rfio:/castor/cern.ch/bs.data')
+        
+        """
+        msg = self.msg()
+        import os
+        import os.path as osp
+
+        _is_root_file = None
+        do_close = True
+        if isinstance(fname, basestring):
+            if not self.exists(fname):
+                import errno
+                raise IOError(
+                    errno.ENOENT,
+                    'No such file or directory',
+                    fname
+                    )
+            protocol,fname = self.fname(fname)
+            if protocol == 'ami':
+                # FIXME: what (else) can we do ?
+                ami_infos = self.fopen(fname).infos
+                return ami_infos['file_type'], fname
+
+            root = self.pyroot
+            f = self._root_open(fname)
+        else:
+            do_close = False
+            f = fname
+            
+        _is_root_file= bool(f and f.IsOpen() and 'root' in f.read(10))
+        if f and do_close:
+            f.Close()
+            del f
+
+        ftype = 'pool' if _is_root_file else 'bs'
+        return (ftype, fname)
+
+    @timelimit(timeout=DEFAULT_AF_TIMEOUT)
+    def exists(self, fname):
+        """helper function to test if a fiven `fname` exists.
+
+        handles local filesystems as well as RFIO.
+        usage example:
+        >>> import PyUtils.AthFile as af
+        >>> af.exists('/castor/cern.ch/user/b/binet/reffiles/14.1.0.x/AllBasicSamples.AOD.pool.root')
+        False
+        >>> af.exists('rfio:/castor/cern.ch/user/b/binet/reffiles/14.1.0.x/AllBasicSamples.AOD.pool.root')
+        True
+        >>> af.exists('castor:/castor/cern.ch/user/b/binet/reffiles/14.1.0.x/AllBasicSamples.AOD.pool.root')
+        True
+        >>> # you need a valid PoolFileCatalog.xml file for this to work:
+        >>> af.exists('LFN:top_CSC-01-02-00_RDO_extract.pool')
+        True
+        >>> af.exists('/afs/cern.ch/atlas/offline/ReleaseData/v2/testfile/calib1_csc11.005200.T1_McAtNlo_Jimmy.digit.RDO.v12000301_tid003138._00016_extract_10evt.pool.root')
+        True
+        """
+        import os
+
+        msg = self.msg()
+
+        def _root_exists(fname):
+            exists = False
+            f = None
+            try:
+                f = self._root_open(fname)
+                exists = f and f.IsOpen()
+            except Exception:
+                # swallow...
+                pass
+            finally:
+                if f:
+                    f.Close()
+                    del f
+            return bool(exists)
+
+        protocol,fname = self.fname(fname)
+
+        if protocol in ('fid', 'lfn'):
+            return self.exists(fname)
+
+        elif protocol in ('ami',):
+            # FIXME: what else can we do ?
+            try:
+                infos = ami_dsinfos(fname)
+                return True
+            except Exception:
+                return False
+        
+        ## elif protocol in ('dcap', 'dcache', 'gfal:gsidcap'):
+        ##     ## FIXME: temporary hack. remove when ROOT bug #57409 is fixed.
+        ##     if protocol == 'dcap':
+        ##         fname = fname[len('dcap:'):]
+        ##     elif protocol == 'dcache':
+        ##         fname = fname[len('dcache:'):]
+        ##     else:
+        ##         pass
+        ##     ## FIXME -end
+        ##     return _root_exists(fname)
+
+        else:
+            return _root_exists(fname)
+        # un-reachable
+        return False
+
+    pass # class AthFileServer
+
+class FilePeeker(object):
+    def __init__(self, server):
+        self.server= server
+        self.msg   = server.msg
+        self.pyroot= server.pyroot
+        self._sub_env = dict(os.environ)
+        # prevent ROOT from looking into $HOME for .rootrc files
+        # we carefully (?) set this environment variable *only* in the
+        # subprocess to not stomp on the toes of our parent one which is
+        # user-driven (and might need user-customized macros or configurations)
+        self._sub_env['ROOTENV_NO_HOME'] = '1'
+
+        # prevent athena-mp from running inadvertently...
+        self._sub_env['ATHENA_PROC_NUMBER'] ='0'
+
+        # prevent athena from running in interactive mode (and freezing)
+        if 'PYTHONINSPECT' in self._sub_env:
+            del self._sub_env['PYTHONINSPECT']
+
+        # prevent athena from running under igprof
+        for k in ('LD_PRELOAD', 'IGPROF'):
+            if k in self._sub_env:
+                del self._sub_env[k]
+
+    def _root_open(self, fname, raw=False):
+        import PyUtils.Helpers as H
+        with H.restricted_ldenviron(projects=['AtlasCore']):
+            root = self.pyroot
+            import re
+            with H.ShutUp(filters=[
+                re.compile('TClass::TClass:0: RuntimeWarning: no dictionary for class.*'),
+                re.compile("Error in <T.*?File::Init>:.*? not a ROOT file")]):
+                # for AttributeListLayout which uses CINT for its dict...
+                # first try the APR version
+                ooo = root.gSystem.Load('libRootCollection')
+                if ooo < 0:
+                    # then try the POOL one
+                    root.gSystem.Load('liblcg_RootCollection')
+                root_open = root.TFile.Open
+
+                # we need to get back the protocol b/c of the special
+                # case of secure-http which needs to open TFiles as TWebFiles...
+                protocol, _ = self.server.fname(fname)
+                if protocol == 'https':
+                    _setup_ssl(self.msg(), root)
+                    root_open = root.TWebFile
+                if raw:
+                    f = root_open(fname+'?filetype=raw', 'READ')
+                else:
+                    f = root_open(fname, 'READ')
+                if f is None or not f:
+                    raise IOError(errno.ENOENT,
+                                  'No such file or directory',
+                                  fname)
+                return f
+        
+    def _is_tag_file(self, fname, evtmax):
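+        # peek at the TAG-specific keys ('Schema', 'CollectionMetadata' and
+        # 'POOLCollectionTree') to decide whether `fname` is a TAG file and,
+        # if so, collect the event-reference column name, the collection GUID
+        # and the run/event numbers of (at most) the first `evtmax` rows.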
+        is_tag = False
+        tag_ref= None
+        tag_guid=None
+        nentries = 0
+        runs=[]
+        evts=[]
+        import PyUtils.Helpers as H
+        with H.restricted_ldenviron(projects=['AtlasCore']):
+            root = self.pyroot
+            do_close = True
+            if isinstance(fname, basestring):
+                f = self._root_open(fname, raw=False)
+            else:
+                f = fname
+                do_close = False
+            schema = f.Get('Schema') if f else None
+            if schema:
+                is_tag  = True
+                # note: we used to use .rstrip('\0') b/c of the change in
+                # semantics in PyROOT (char[] and const char* may not mean
+                # the same thing)
+                # see https://savannah.cern.ch/bugs/?100920 for the gory details
+                # but in the end, we use ctypes...
+                # see https://savannah.cern.ch/bugs/?101200 for the gory details
+                import ctypes
+                tag_ref = str(ctypes.c_char_p(schema.m_eventRefColumnName).value)
+            del schema
+            metadata= f.Get('CollectionMetadata') if f else None
+            if metadata:
+                nbytes = metadata.GetEntry(0)
+                # note: we used to use .rstrip('\0') b/c of the change in
+                # semantics in PyROOT (char[] and const char* may not mean
+                # the same thing)
+                # see https://savannah.cern.ch/bugs/?100920 for the gory details
+                # but in the end, we use ctypes...
+                # see https://savannah.cern.ch/bugs/?101200 for the gory details
+                # 
+                # make sure it is what we think it is
+                import ctypes
+                key_name = str(ctypes.c_char_p(metadata.Key).value)
+                assert key_name == 'POOLCollectionID' 
+                tag_guid = str(ctypes.c_char_p(metadata.Value).value)
+            del metadata
+            coll_tree = f.Get('POOLCollectionTree') if f else None
+            if coll_tree:
+                nentries = coll_tree.GetEntries()
+                if evtmax in (-1, None):
+                    evtmax = nentries
+                evtmax = int(evtmax)
+                for row in xrange(evtmax):
+                    if coll_tree.GetEntry(row) < 0:
+                        break
+                    runnbr = coll_tree.RunNumber
+                    runs.append(runnbr)
+                    evtnbr = coll_tree.EventNumber
+                    evts.append(evtnbr)
+            del coll_tree
+            if f and do_close:
+                f.Close()
+                del f
+        return (is_tag, tag_ref, tag_guid, nentries, runs, evts)
+
+    def _is_empty_pool_file(self, fname):
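+        # a POOL file without a 'CollectionTree' key is considered event-less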
+        is_empty = False
+        import PyUtils.Helpers as H
+        with H.restricted_ldenviron(projects=['AtlasCore']):
+            root = self.pyroot
+            do_close = True
+            if isinstance(fname, basestring):
+                f = self._root_open(fname, raw=False)
+            else:
+                f = fname
+                do_close = False
+            payload = f.Get('CollectionTree') if f else None
+            if payload:
+                is_empty = False
+            else:
+                is_empty = True
+            del payload
+
+            if f and do_close:
+                f.Close()
+                del f
+        return is_empty
+     
+    def _process_call(self, fname, evtmax, projects=['AtlasCore']):
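+        # overall strategy: open the file in 'raw' mode to get its size, md5
+        # and type; for POOL files, either read the TAG metadata directly or
+        # spawn a separate athena job (athfile_peeker.py) whose peeked
+        # information is written to a temporary file and read back here;
+        # bytestream files are handled by _process_bs_file.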
+        msg = self.msg()
+        import PyUtils.Helpers as H
+        f = _create_file_infos()
+        protocol, _ = self.server.fname(fname)
+        f_raw  = self._root_open(fname, raw=True)
+        if f_raw is None or not f_raw:
+            raise IOError(
+                errno.ENOENT,
+                'No such file or directory',
+                fname)
+        f_root = f_raw
+        try:
+            file_type, file_name = self.server.ftype(f_raw)
+            import os
+
+            protocol,file_name = self.server.fname(fname)
+            f['file_md5sum'] = self.server.md5sum(f_raw)
+            f['file_name'] = file_name
+            f['file_type'] = file_type
+            f['file_size'] = f_raw.GetSize()
+            if file_type == 'pool':
+                f_root = self._root_open(fname, raw=False)
+                # POOL files are most nutritious when known to PoolFileCatalog.xml
+                # FIXME: best would be to do that in athfile_peeker.py but
+                #        athena.py closes sys.stdin when in batch, which confuses
+                #        PyCmt.Cmt:subprocess.getstatusoutput
+                cmd = ['pool_insertFileToCatalog.py',
+                       file_name,]
+                subprocess.call(cmd, env=self._sub_env)
+                #
+                with H.restricted_ldenviron(projects=None):
+                    is_tag, tag_ref, tag_guid, nentries, runs, evts = self._is_tag_file(f_root, evtmax)
+                    if is_tag:
+                        f['stream_names'] = ['TAG']
+                        f['file_guid'] = tag_guid
+                        f['nentries'] = nentries
+                        f['run_number'] = runs
+                        f['evt_number'] = evts
+                    else:
+                        import tempfile
+                        #'peeker_%i.pkl' % os.getpid()
+                        fd_pkl,out_pkl_fname = tempfile.mkstemp(suffix='.pkl')
+                        #out_pkl_fname = 'peeked.out.pkl'
+                        import os
+                        os.close(fd_pkl)
+                        if os.path.exists(out_pkl_fname):
+                            os.remove(out_pkl_fname)
+                        import AthenaCommon.ChapPy as api
+                        app = api.AthenaApp(cmdlineargs=["--nprocs=0"])
+                        app << """
+                            FNAME = %s
+                            """ % str([file_name])
+                        app << """
+                            import os
+                            # prevent athena-mp from running in child processes
+                            os.putenv('ATHENA_PROC_NUMBER','0')
+    
+                            # prevent athena from running in interactive mode (and freezing)
+                            if 'PYTHONINSPECT' in os.environ:
+                                del os.environ['PYTHONINSPECT']
+            
+
+                            include('AthenaPython/athfile_peeker.py')
+                            from AthenaCommon.AlgSequence import AlgSequence
+                            job = AlgSequence()
+                            # we don't really need this...
+                            job.peeker.outfname='%(outfname)s'
+                            job.peeker.infname='%(infname)s'
+
+                            # metadata + taginfo
+                            import IOVDbSvc.IOVDb
+
+                            # evt-max
+                            theApp.EvtMax = %(evtmax)i
+                            """ % {
+                            'infname' : file_name,
+                            'outfname': out_pkl_fname,
+                            'evtmax': evtmax,
+                            }
+                        import os
+                        import uuid
+                        stdout_fname = (
+                            'athfile-%i-%s.log.txt' %
+                            (os.getpid(), uuid.uuid4())
+                            )
+                        stdout = open(stdout_fname, "w")
+                        print >> stdout,"="*80
+                        print >> stdout,self._sub_env
+                        print >> stdout,"="*80
+                        stdout.flush()
+                        sc = app.run(stdout=stdout, env=self._sub_env)
+                        stdout.flush()
+                        stdout.close()
+                        import AthenaCommon.ExitCodes as ath_codes
+                        if sc == 0:
+                            #import shelve
+                            import PyUtils.dbsqlite as dbsqlite
+                            msg.info('extracting infos from [%s]...',
+                                     out_pkl_fname)
+                            db = dbsqlite.open(out_pkl_fname)
+                            msg.info('keys: %s',db.keys())
+                            f.update(db['fileinfos'])
+                            db.close()
+                            msg.info('extracting infos from [%s]... [ok]',
+                                     out_pkl_fname)
+                            import os
+                            os.remove(stdout.name)
+                        else:
+                            # maybe an empty file
+                            # trust but verify
+                            if not self._is_empty_pool_file(f_root):
+                                # actually a problem in athena !
+                                from textwrap import dedent
+                                err = dedent("""
+                                %s
+                                problem running chappy!
+                                code: [%s (%s)]
+                                what: [%s]
+                                => corrupted input file ?
+                                %s
+                                logfile: [%s]
+                                """% (":"*25,
+                                      sc,errno.errorcode.get(sc,sc),
+                                      ath_codes.codes.get(sc,sc),
+                                      ":"*25,
+                                      stdout.name
+                                      ))
+                                msg.error(err)
+                                raise IOError(sc, err)
+                            msg.info('athena failed to initialize.')
+                            msg.info('=> probably an empty input POOL file')
+                        ## if os.path.exists(out_pkl_fname):
+                        ##     os.remove(out_pkl_fname)
+                    # TAG-file
+                    # app.exit()
+            else: # bytestream
+                bs_fileinfos = self._process_bs_file(file_name,
+                                                     evtmax=evtmax,
+                                                     full_details=False)
+                del bs_fileinfos['file_name']
+                del bs_fileinfos['file_size']
+                del bs_fileinfos['file_type']
+                del bs_fileinfos['file_md5sum']
+                f.update(bs_fileinfos)
+        finally:
+            try:
+                f_raw.Close()
+                f_root.Close()
+                del f_raw
+                del f_root
+            except Exception,err:
+                msg.warning(
+                    'problem while closing raw and root file handles:\n%s',
+                    err
+                    )
+        return f
+
+    def __call__(self, fname, evtmax):
+        import re
+        import PyUtils.Helpers as H
+        with H.ShutUp(filters=[re.compile('.*')]):
+            try:
+                f = self._process_call(fname, evtmax, projects=['AtlasCore'])
+            except Exception,err:
+                # give it another chance but with the full environment
+                f = self._process_call(fname, evtmax, projects=None)
+
+        return f
+
+    def _process_bs_file (self, fname, evtmax=1, full_details=True):
+        msg = self.msg()
+        import eformat as ef
+
+        data_reader = ef.EventStorage.pickDataReader(fname)
+        assert data_reader, \
+               'problem picking a data reader for file [%s]'%fname
+
+        beam_type   = '<beam-type N/A>'
+        try:
+            beam_type = data_reader.beamType()
+        except Exception,err:
+            msg.warning ("problem while extracting beam-type information")
+            pass
+
+        beam_energy = '<beam-energy N/A>'
+        try:
+            beam_energy = data_reader.beamEnergy()
+        except Exception,err:
+            msg.warning ("problem while extracting beam-type information")
+            pass
+
+        bs = ef.istream(fname)
+
+        file_infos = _create_file_infos()
+        infos = []; _append = infos.append
+        nentries = bs.total_events
+        file_infos['nentries'] = nentries
+        import uuid
+        def _uuid():
+            return str(uuid.uuid4()).upper()
+        bs_metadata = {}
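+        # the free metadata strings are usually of the form 'key=value', with
+        # a few well-known prefixes ('Event type:', 'GeoAtlas:',
+        # 'IOVDbGlobalTag:') which are decoded explicitly below.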
+        for md in data_reader.freeMetaDataStrings():
+            if md.startswith('Event type:'):
+                k = 'evt_type'
+                v = []
+                if 'is sim' in md:   v.append('IS_SIMULATION')
+                else:                v.append('IS_DATA')
+                if 'is atlas' in md: v.append('IS_ATLAS')
+                else:                v.append('IS_TESTBEAM')
+                if 'is physics' in md: v.append('IS_PHYSICS')
+                else:                  v.append('IS_CALIBRATION')
+                bs_metadata[k] = tuple(v)
+            elif md.startswith('GeoAtlas:'):
+                k = 'geometry'
+                v = md.split('GeoAtlas:')[1].strip()
+                bs_metadata[k] = v
+            elif md.startswith('IOVDbGlobalTag:'):
+                k = 'conditions_tag'
+                v = md.split('IOVDbGlobalTag:')[1].strip()
+                bs_metadata[k] = v
+            elif '=' in md:
+                k,v = md.split('=')
+                bs_metadata[k] = v
+
+        # for bwd/fwd compat...
+        # see: https://savannah.cern.ch/bugs/?73208
+        for key_name,fct_name in (
+            ('GUID','GUID'),
+            ('Stream','stream'),
+            ('Project', 'projectTag'),
+            ('LumiBlock', 'lumiblockNumber'),
+            ('run_number', 'runNumber'),
+            ):
+            if key_name in bs_metadata:
+                # no need: already in bs metadata dict
+                continue
+            if hasattr(data_reader, fct_name):
+                v = getattr(data_reader, fct_name)()
+                bs_metadata[key_name] = v
+        # for bwd/fwd compat... -- END
+            
+        file_infos['file_guid'] = bs_metadata.get('GUID', _uuid())
+        file_infos['evt_type']  = bs_metadata.get('evt_type', [])
+        file_infos['geometry']  = bs_metadata.get('geometry', None)
+        file_infos['conditions_tag'] = bs_metadata.get('conditions_tag', None)
+        file_infos['bs_metadata'] = bs_metadata
+
+        if not data_reader.good():
+            # event-less file...
+            file_infos['run_number'].append(bs_metadata.get('run_number', 0))
+            file_infos['lumi_block'].append(bs_metadata.get('LumiBlock', 0))
+            # FIXME: not sure how to do that...
+            #stream_tags=[dict(stream_type=bs_metadata.get('Stream',''),
+            #                  stream_name=bs_metadata.get('Project', ''),
+            #                  obeys_lbk="N/A")]
+            #file_infos['stream_tags'].extend(stream_tags)
+            return file_infos
+        
+        if evtmax == -1:
+            evtmax = nentries
+            
+        ievt = iter(bs)
+        for i in xrange(evtmax):
+            try:
+                evt = ievt.next()
+                evt.check() # may raise a RuntimeError
+                stream_tags = [dict(stream_type=tag.type,
+                                    stream_name=tag.name,
+                                    obeys_lbk=bool(tag.obeys_lumiblock))
+                               for tag in evt.stream_tag()]
+                file_infos['run_number'].append(evt.run_no())
+                file_infos['evt_number'].append(evt.global_id())
+                file_infos['lumi_block'].append(evt.lumi_block())
+                file_infos['run_type'].append(ef.helper.run_type2string(evt.run_type()))
+                file_infos['beam_type'].append(beam_type)
+                file_infos['beam_energy'].append(beam_energy)
+                file_infos['stream_tags'].extend(stream_tags)
+
+            except RuntimeError, err:
+                print "** WARNING ** detected a corrupted bs-file:\n",err
+        """
+        detailed dump how-to:
+        ---------------------
+        import eformat as ef
+        import eformat.dump as edump
+        edump.event_callback.append (('.+', edump.fullevent_handler))
+        edump.dump (stream=ef.istream(fname), skip=0, total=0)
+        """
+        return file_infos
+
+    pass # class FilePeeker
+
+### globals
+g_server = AthFileServer()
+
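+# module-level helper so that AthFileServer.pfopen can map it over a
+# ThreadPool; the per-call `evtmax` is passed through the transient
+# `_evtmax` attribute which pfopen temporarily sets on the server.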
+def _do_fopen(fname):
+    self = g_server
+    evtmax= getattr(g_server, '_evtmax', 1)
+    return self._fopen_file(fname, evtmax)
diff --git a/Tools/PyUtils/python/AthFile/tests.py b/Tools/PyUtils/python/AthFile/tests.py
new file mode 100644
index 00000000000..d624a819079
--- /dev/null
+++ b/Tools/PyUtils/python/AthFile/tests.py
@@ -0,0 +1,479 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/python/AthFile/tests.py
+# @purpose unit tests for the PyUtils.AthFile file-inspection API
+# @author Sebastien Binet <binet@cern.ch>
+# @date October 2008
+from __future__ import with_statement
+
+import unittest, sys
+
+verbose = False
+
+def _compare_fileinfos(af, fileinfos):
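+    # compare a fixed subset of the fileinfos keys; list- or tuple-valued
+    # entries are sorted first so that ordering differences do not trigger
+    # spurious failures.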
+    all_good = True
+    err_log = []
+    for k in ('file_md5sum',
+              'file_name',
+              'file_type',
+              'file_guid',
+              'nentries',
+              'run_number',
+              'run_type',
+              'evt_type',
+              'evt_number',
+              'lumi_block',
+              'beam_energy',
+              'beam_type',
+              'stream_tags',
+              'metadata_items',
+              'eventdata_items',
+              'stream_names',
+              'geometry',
+              'conditions_tag',
+              'det_descr_tags',
+              #'metadata',
+              'tag_info',
+              ):
+        chk = af.fileinfos[k]
+        ref = fileinfos[k]
+        if isinstance(chk, (list,tuple)):
+            chk = sorted(chk)
+        if isinstance(ref, (list,tuple)):
+            ref = sorted(ref)
+        if chk != ref:
+            all_good = False
+            err_log.append("key [%s] miscompare:\nref:%s\nchk:%s"%(k,ref,chk))
+    assert all_good, "\n".join(err_log)
+
+class AthFileTest(unittest.TestCase):
+
+    def setUp(self):
+        import PyUtils.AthFile as af
+        import PyUtils.Logging as L
+        af.msg.setLevel(L.logging.WARNING)
+        # we don't want to accidentally read information from a sticky cache
+        af.server.flush_cache()
+
+    def tearDown(self):
+        # we don't want to accidentally write information to a sticky cache
+        import PyUtils.AthFile as af
+        af.server.flush_cache()
+
+    def test001(self):
+        """test cosmics POOL file"""
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root'
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root')
+        assert af.exists(fname)
+
+        f1 = af.fopen(fname)
+        if verbose:
+            print "::: f1.fileinfos:"
+            print f1.fileinfos
+        f1_ref = {'file_md5sum':'36ff1ef242bd3240227016e71e241a89', 'metadata_items': [('EventStreamInfo', 'StreamESD'), ('LumiBlockCollection', 'LumiBlocks'), ('DataHeader', ';00;MetaDataSvc'), ('IOVMetaDataContainer', '/GLOBAL/DETSTATUS/LBSUMM')], 'stream_names': ['StreamESD'], 'run_type': ['N/A'], 'stream_tags': [{'obeys_lbk': True, 'stream_type': 'physics', 'stream_name': 'IDCosmic'}], 'tag_info': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPermanent-01', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', '/MUONALIGN/MDT/ENDCAP/SIDEC': 'MuonAlignMDTEndCapCAlign-REPRO-08', '/MUONALIGN/MDT/BARREL': 'MuonAlignMDTBarrelAlign-0100-SEC0109', 'GeoAtlas': 'ATLAS-GEO-02-01-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections.topophioff-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections.topogap-v1', 'AtlasRelease': 'AtlasTier0-15.0.0.4', 'IOVDbGlobalTag': 'COMCOND-ES1C-001-00', '/MUONALIGN/TGC/SIDEA': 'MuonAlignTGCEndCapAAlign-REPRO-01', '/SCT/DAQ/Calibration/NoiseOccupancyDefects': 'HEAD', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections.etaoff-v4_1', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections.trcorr-v5', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/MUONALIGN/MDT/ENDCAP/SIDEA': 'MuonAlignMDTEndCapAAlign-REPRO-08', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-CSC05-BERT', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections.topolw-v1', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-CSC05-BERT', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFrac-CSC05-BERT', '/MUONALIGN/TGC/SIDEC': 'MuonAlignTGCEndCapCAlign-REPRO-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-CSC05-BERT', '/SCT/DAQ/Calibration/NPtGainDefects': 'HEAD', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections.etamod-v4', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections.phimod-v4', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections.rfac-v4', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections.calhits-v5', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections.phioff-v4', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections.topoetaoff-v1', '/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections.gap-v4', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections.topophimod-v1'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root', 'file_guid': '5A6CD469-D01D-DE11-82E4-000423D67746', 'beam_type': ['N/A'], 'lumi_block': [1L], 'conditions_tag': 'COMCOND-ES1C-001-00', 'det_descr_tags': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPermanent-01', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', '/MUONALIGN/MDT/ENDCAP/SIDEC': 'MuonAlignMDTEndCapCAlign-REPRO-08', '/MUONALIGN/MDT/BARREL': 'MuonAlignMDTBarrelAlign-0100-SEC0109', 'GeoAtlas': 'ATLAS-GEO-02-01-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections.topophioff-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections.topogap-v1', 'AtlasRelease': 'AtlasTier0-15.0.0.4', 'IOVDbGlobalTag': 'COMCOND-ES1C-001-00', '/MUONALIGN/TGC/SIDEA': 'MuonAlignTGCEndCapAAlign-REPRO-01', '/SCT/DAQ/Calibration/NoiseOccupancyDefects': 'HEAD', 
'/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections.etaoff-v4_1', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections.trcorr-v5', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/MUONALIGN/MDT/ENDCAP/SIDEA': 'MuonAlignMDTEndCapAAlign-REPRO-08', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-CSC05-BERT', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections.topolw-v1', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-CSC05-BERT', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFrac-CSC05-BERT', '/MUONALIGN/TGC/SIDEC': 'MuonAlignTGCEndCapCAlign-REPRO-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-CSC05-BERT', '/SCT/DAQ/Calibration/NPtGainDefects': 'HEAD', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections.etamod-v4', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections.phimod-v4', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections.rfac-v4', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections.calhits-v5', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections.phioff-v4', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections.topoetaoff-v1', '/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections.gap-v4', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections.topophimod-v1'}, 'nentries': 10L, 'eventdata_items': [('EventInfo', 'ByteStreamEventInfo'), ('PixelRDO_Container', 'PixelRDOs'), ('SCT_RDO_Container', 'SCT_RDOs'), ('TRT_RDO_Container', 'TRT_RDOs'), ('InDet::PixelClusterContainer', 'PixelClusters'), ('InDet::SCT_ClusterContainer', 'SCT_Clusters'), ('BCM_RDO_Container', 'BCM_RDOs'), ('LArDigitContainer', 'LArDigitContainer_IIC'), ('LArDigitContainer', 'LArDigitContainer_Thinned'), ('CaloCellContainer', 'AllCalo'), ('CaloTowerContainer', 'CombinedTower'), ('CaloClusterContainer', 'CaloCalTopoCluster'), ('CaloClusterContainer', 'CaloTopoCluster'), ('CaloClusterContainer', 'EMTopoCluster430'), ('CaloClusterContainer', 'LArClusterEM'), ('CaloClusterContainer', 'LArClusterEM7_11Nocorr'), ('CaloClusterContainer', 'LArClusterEMFrwd'), ('CaloClusterContainer', 'LArClusterEMSofte'), ('CaloClusterContainer', 'LArMuClusterCandidates'), ('CaloClusterContainer', 'MuonClusterCollection'), ('CaloClusterContainer', 'Tau1P3PCellCluster'), ('CaloClusterContainer', 'Tau1P3PCellEM012ClusterContainer'), ('CaloClusterContainer', 'Tau1P3PPi0ClusterContainer'), ('CaloClusterContainer', 'egClusterCollection'), ('TileDigitsContainer', 'TileDigitsFlt'), ('TileCellContainer', 'MBTSContainer'), ('TileL2Container', 'TileL2Cnt'), ('TileMuContainer', 'TileMuObj'), ('TileCosmicMuonContainer', 'TileCosmicMuonHT'), ('ElectronContainer', 'ElectronAODCollection'), ('ElectronContainer', 'ElectronCollection'), ('PhotonContainer', 'PhotonAODCollection'), ('PhotonContainer', 'PhotonCollection'), ('ElectronContainer', 'egammaForwardCollection'), ('ElectronContainer', 'softeCollection'), ('Analysis::TauJetContainer', 'TauRecContainer'), ('JetKeyDescriptor', 'JetKeyMap'), ('MissingETSig', 'METSig'), ('MissingEtCalo', 'MET_Base'), ('MissingEtCalo', 'MET_Base0'), ('MissingEtCalo', 'MET_Calib'), ('MissingET', 'MET_CellOut'), ('MissingET', 'MET_CellOut_MiniJet'), ('MissingEtCalo', 'MET_CorrTopo'), ('MissingET', 'MET_Cryo'), ('MissingET', 'MET_CryoCone'), ('MissingET', 
'MET_Final'), ('MissingEtCalo', 'MET_LocHadTopo'), ('MissingET', 'MET_LocHadTopoObj'), ('MissingET', 'MET_Muon'), ('MissingET', 'MET_MuonBoy'), ('MissingET', 'MET_MuonBoy_Spectro'), ('MissingET', 'MET_MuonBoy_Track'), ('MissingET', 'MET_RefEle'), ('MissingET', 'MET_RefFinal'), ('MissingET', 'MET_RefGamma'), ('MissingET', 'MET_RefJet'), ('MissingET', 'MET_RefMuon'), ('MissingET', 'MET_RefMuon_Track'), ('MissingET', 'MET_RefTau'), ('MissingEtCalo', 'MET_Topo'), ('MissingET', 'MET_TopoObj'), ('MissingET', 'ObjMET_Elec'), ('MissingET', 'ObjMET_Final'), ('MissingET', 'ObjMET_IdTrk'), ('MissingET', 'ObjMET_Jet'), ('MissingET', 'ObjMET_MiniJet'), ('MissingET', 'ObjMET_Muon'), ('MissingET', 'ObjMET_Rest'), ('MissingET', 'ObjMET_TauJet'), ('Trk::SegmentCollection', 'ConvertedMBoySegments'), ('Trk::SegmentCollection', 'MooreSegments'), ('Trk::SegmentCollection', 'MuGirlSegments'), ('TrackCollection', 'CombinedInDetTracks'), ('TrackCollection', 'CombinedInDetTracks_CTB'), ('TrackCollection', 'Combined_Tracks'), ('TrackCollection', 'ConvertedMBoyMuonSpectroOnlyTracks'), ('TrackCollection', 'ConvertedMBoyTracks'), ('TrackCollection', 'ConvertedMuIdCBTracks'), ('TrackCollection', 'ConvertedMuTagTracks'), ('TrackCollection', 'ConvertedStacoTracks'), ('TrackCollection', 'MooreExtrapolatedTracks'), ('TrackCollection', 'MooreTracks'), ('TrackCollection', 'MuGirlRefittedTracks'), ('TrackCollection', 'MuTagIMOTracks'), ('TrackCollection', 'MuidExtrapolatedTracks'), ('TrackCollection', 'ResolvedPixelTracks_CTB'), ('TrackCollection', 'ResolvedSCTTracks_CTB'), ('TrackCollection', 'TRTStandaloneTRTTracks_CTB'), ('InDet::PixelGangedClusterAmbiguities', 'PixelClusterAmbiguitiesMap'), ('LArFebErrorSummary', 'LArFebErrorSummary'), ('ComTime', 'TRT_Phase'), ('Analysis::TauDetailsContainer', 'TauRecDetailsContainer'), ('Analysis::TauDetailsContainer', 'TauRecExtraDetailsContainer'), ('Analysis::MuonContainer', 'CaloESDMuonCollection'), ('Analysis::MuonContainer', 'CaloESDMuonCollection2'), ('Analysis::MuonContainer', 'CaloMuonCollection'), ('Analysis::MuonContainer', 'MuGirlLowBetaCollection'), ('Analysis::MuonContainer', 'MuidESDMuonCollection'), ('Analysis::MuonContainer', 'MuidMuonCollection'), ('Analysis::MuonContainer', 'StacoESDMuonCollection'), ('Analysis::MuonContainer', 'StacoMuonCollection'), ('MissingETSigHypoContainer', 'EtMissHypoCollection'), ('TRT_BSIdErrContainer', 'TRT_ByteStreamIdErrs'), ('InDet::TRT_DriftCircleContainer', 'TRT_DriftCircles'), ('MissingETSigObjContainer', 'EtMissObjCollection'), ('Muon::MdtPrepDataContainer', 'MDT_DriftCircles'), ('JetCollection', 'Cone4H1TopoJets'), ('JetCollection', 'Cone4H1TowerJets'), ('JetCollection', 'Cone7H1TowerJets'), ('egDetailContainer', 'SofteDetailContainer'), ('egDetailContainer', 'egDetailAOD'), ('egDetailContainer', 'egDetailContainer'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollection'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollectionNextBC'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollectionPriorBC'), ('Muon::RpcPrepDataContainer', 'RPC_Measurements'), ('CaloShowerContainer', 'CaloCalTopoCluster_Data'), ('CaloShowerContainer', 'CaloTopoCluster_Data'), ('CaloShowerContainer', 'EMTopoCluster430_Data'), ('CaloShowerContainer', 'LArClusterEM7_11Nocorr_Data'), ('CaloShowerContainer', 'LArClusterEMSofte_Data'), ('CaloShowerContainer', 'LArClusterEM_Data'), ('CaloShowerContainer', 'LArMuClusterCandidates_Data'), ('CaloShowerContainer', 'MuonClusterCollection_Data'), ('CaloShowerContainer', 'Tau1P3PCellCluster_Data'), 
('CaloShowerContainer', 'Tau1P3PCellEM012ClusterContainer_Data'), ('CaloShowerContainer', 'Tau1P3PPi0ClusterContainer_Data'), ('CaloShowerContainer', 'egClusterCollection_Data'), ('InDetBSErrContainer', 'PixelByteStreamErrs'), ('InDetBSErrContainer', 'SCT_ByteStreamErrs'), ('TRT_BSErrContainer', 'TRT_ByteStreamErrs'), ('CaloCellLinkContainer', 'CaloCalTopoCluster_Link'), ('CaloCellLinkContainer', 'CaloTopoCluster_Link'), ('CaloCellLinkContainer', 'EMTopoCluster430_Link'), ('CaloCellLinkContainer', 'LArClusterEM7_11Nocorr_Link'), ('CaloCellLinkContainer', 'LArClusterEMSofte_Link'), ('CaloCellLinkContainer', 'LArClusterEM_Link'), ('CaloCellLinkContainer', 'LArMuClusterCandidates_Link'), ('CaloCellLinkContainer', 'MuonClusterCollection_Link'), ('CaloCellLinkContainer', 'Tau1P3PCellCluster_Link'), ('CaloCellLinkContainer', 'Tau1P3PCellEM012ClusterContainer_Link'), ('CaloCellLinkContainer', 'Tau1P3PPi0ClusterContainer_Link'), ('CaloCellLinkContainer', 'egClusterCollection_Link'), ('Rec::MuonSpShowerContainer', 'MuonSpShowers'), ('Rec::TrackParticleContainer', 'Combined_TrackParticles'), ('Rec::TrackParticleContainer', 'MooreTrackParticles'), ('Rec::TrackParticleContainer', 'MuGirlRefittedTrackParticles'), ('Rec::TrackParticleContainer', 'MuTagIMOTrackParticles'), ('Rec::TrackParticleContainer', 'MuTagTrackParticles'), ('Rec::TrackParticleContainer', 'MuidExtrTrackParticles'), ('Rec::TrackParticleContainer', 'MuonboyMuonSpectroOnlyTrackParticles'), ('Rec::TrackParticleContainer', 'MuonboyTrackParticles'), ('Rec::TrackParticleContainer', 'StacoTrackParticles'), ('Rec::TrackParticleContainer', 'TrackParticleCandidate'), ('Muon::TgcPrepDataContainer', 'TGC_Measurements'), ('Muon::TgcPrepDataContainer', 'TGC_MeasurementsNextBC'), ('Muon::TgcPrepDataContainer', 'TGC_MeasurementsPriorBC'), ('MuonCaloEnergyContainer', 'MuonCaloEnergyCollection'), ('DataHeader', 'StreamESD')], 'run_number': [91900L], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-GEO-02-01-00', 'evt_number': [2244L], 'evt_type': ('IS_DATA', 'IS_ATLAS', 'IS_PHYSICS'), 'metadata': {'/GLOBAL/DETSTATUS/LBSUMM': []}}
+        _compare_fileinfos(f1,f1_ref)
+        assert f1.run_number==f1_ref['run_number']
+        assert f1.evt_number==f1_ref['evt_number']
+        assert f1.lumi_block==f1_ref['lumi_block']
+        assert f1.run_type  ==f1_ref['run_type']
+        assert f1.beam_type ==f1_ref['beam_type']
+        assert f1.beam_energy==f1_ref['beam_energy']
+        pass # test1
+
+    def test002(self):
+        """test bytestream file on CASTOR"""
+        if 1:
+            return # FIXME
+    
+        import PyUtils.AthFile as af
+        fname = 'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data'
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('bs', 'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data')
+        
+        f2 = af.fopen(fname)
+        if verbose:
+            print "::: f2.fileinfos:"
+            print f2.fileinfos
+        f2_ref = {'file_md5sum':'e3e301bca63e4b5acb3b3cba43127ff9', 'metadata_items': None, 'stream_names': None, 'run_type': ['TEST'], 'stream_tags': [{'obeys_lbk': True, 'stream_type': 'physics', 'stream_name': 'IDCosmic'}, {'obeys_lbk': False, 'stream_type': 'calibration', 'stream_name': 'IDTracks'}], 'tag_info': None, 'file_type': 'bs', 'file_name': 'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data', 'file_guid': '7B1EABBD-12E0-4184-ABF0-84EB677D92E7', 'beam_type': [0], 'lumi_block': [54], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 417, 'eventdata_items': None, 'run_number': [92226], 'beam_energy': [0], 'geometry': None, 'evt_number': [8349492], 'evt_type': [], 'metadata': None}
+        _compare_fileinfos(f2,f2_ref)
+        assert f2.run_number==f2_ref['run_number']
+        assert f2.evt_number==f2_ref['evt_number']
+        assert f2.lumi_block==f2_ref['lumi_block']
+        assert f2.run_type  ==f2_ref['run_type']
+        assert f2.beam_type ==f2_ref['beam_type']
+        assert f2.beam_energy==f2_ref['beam_energy']
+
+    def test003(self):
+        """test RDO POOL file"""
+        import PyUtils.AthFile as af
+        fname = '/afs/cern.ch/atlas/offline/ReleaseData/v3/testfile/valid1.005200.T1_McAtNlo_Jimmy.digit.RDO.e322_s488_d151_tid039414_RDO.039414._00001_extract_10evt.pool.root'
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', '/afs/cern.ch/atlas/offline/ReleaseData/v3/testfile/valid1.005200.T1_McAtNlo_Jimmy.digit.RDO.e322_s488_d151_tid039414_RDO.039414._00001_extract_10evt.pool.root')
+        
+        f3 = af.fopen(fname)
+        if verbose:
+            print "::: f3.fileinfos:"
+            print f3.fileinfos
+        f3_ref = {'file_md5sum':'85f7b3d2da72cb387a8345091c2e00ca','metadata_items': [('EventStreamInfo', 'Stream1'), ('DataHeader', ';00;MetaDataSvc'), ('IOVMetaDataContainer', '/Digitization/Parameters'), ('IOVMetaDataContainer', '/Simulation/Parameters')], 'stream_names': ['Stream1'], 'run_type': ['N/A'], 'stream_tags': [], 'tag_info': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPerm-02_test', '/TRT/Cond/Status': 'TrtStrawStatus-02', '/LAR/Identifier/FebRodAtlas': 'FebRodAtlas-005', '/LAR/ElecCalibMC': 'LARElecCalibMC-CSC02-J-QGSP_BERT', 'GeoAtlas': 'ATLAS-GEO-02-01-00', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', 'AtlasRelease': 'AtlasProduction-14.2.25.3', 'TGC_support': 'TGC Big Wheel', '/GLOBAL/BField/Map': 'BFieldMap-000', 'IOVDbGlobalTag': 'OFLCOND-SIM-00-00-00', 'MDT_support': 'MDT Big Wheel', '/LAR/Identifier/OnOffIdAtlas': 'OnOffIdAtlas-012'}, 'file_type': 'pool', 'file_name': '/afs/cern.ch/atlas/offline/ReleaseData/v3/testfile/valid1.005200.T1_McAtNlo_Jimmy.digit.RDO.e322_s488_d151_tid039414_RDO.039414._00001_extract_10evt.pool.root', 'file_guid': 'E29E4282-D8ED-DD11-8435-000423D59D52', 'beam_type': ['N/A'], 'lumi_block': [0L], 'conditions_tag': 'OFLCOND-SIM-00-00-00', 'det_descr_tags': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPerm-02_test', '/TRT/Cond/Status': 'TrtStrawStatus-02', '/LAR/Identifier/FebRodAtlas': 'FebRodAtlas-005', '/LAR/ElecCalibMC': 'LARElecCalibMC-CSC02-J-QGSP_BERT', 'GeoAtlas': 'ATLAS-GEO-02-01-00', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', 'AtlasRelease': 'AtlasProduction-14.2.25.3', 'TGC_support': 'TGC Big Wheel', '/GLOBAL/BField/Map': 'BFieldMap-000', 'IOVDbGlobalTag': 'OFLCOND-SIM-00-00-00', 'MDT_support': 'MDT Big Wheel', '/LAR/Identifier/OnOffIdAtlas': 'OnOffIdAtlas-012'}, 'nentries': 10L, 'eventdata_items': [('EventInfo', 'McEventInfo'), ('PixelRDO_Container', 'PixelRDOs'), ('SCT_RDO_Container', 'SCT_RDOs'), ('TRT_RDO_Container', 'TRT_RDOs'), ('InDetSimDataCollection', 'BCM_SDO_Map'), ('InDetSimDataCollection', 'PixelSDO_Map'), ('InDetSimDataCollection', 'SCT_SDO_Map'), ('InDetSimDataCollection', 'TRT_SDO_Map'), ('BCM_RDO_Container', 'BCM_RDOs'), ('LArDigitContainer', 'LArDigitContainer_MC_Thinned'), ('LArRawChannelContainer', 'LArRawChannels'), ('LArTTL1Container', 'LArTTL1EM'), ('LArTTL1Container', 'LArTTL1HAD'), ('TileRawChannelContainer', 'TileRawChannelCnt'), ('TileTTL1Container', 'TileTTL1Cnt'), ('TileTTL1Container', 'TileTTL1MBTS'), ('TileHitVector', 'MBTSHits'), ('CscRawDataContainer', 'CSCRDO'), ('TgcRdoContainer', 'TGCRDO'), ('MdtCsmContainer', 'MDTCSM'), ('RpcPadContainer', 'RPCPAD'), ('ROIB::RoIBResult', 'RoIBResult'), ('CTP_RDO', 'CTP_RDO'), ('DataVector<LVL1::JetElement>', 'JetElements'), ('DataVector<LVL1::TriggerTower>', 'TriggerTowers'), ('MuCTPI_RDO', 'MUCTPI_RDO'), ('McEventCollection', 'TruthEvent'), ('DataVector<LVL1::JEMEtSums>', 'JEMEtSums'), ('MuonSimDataCollection', 'MDT_SDO'), ('MuonSimDataCollection', 'RPC_SDO'), ('MuonSimDataCollection', 'TGC_SDO'), ('DataVector<LVL1::CPMTower>', 'CPMTowers'), ('DataVector<LVL1::CPMHits>', 'CPMHits'), ('DataVector<LVL1::CMMEtSums>', 'CMMEtSums'), ('DataVector<LVL1::JEMRoI>', 'JEMRoIs'), ('LVL1::CMMRoI', 'CMMRoIs'), ('DataVector<LVL1::JEMHits>', 'JEMHits'), ('DataVector<LVL1::CPMRoI>', 'CPMRoIs'), ('DataVector<LVL1::CMMJetHits>', 'CMMJetHits'), ('DataVector<LVL1::CMMCPHits>', 'CMMCPHits'), ('CscSimDataCollection', 'CSC_SDO'), ('TrackRecordCollection', 'CaloEntryLayer'), ('TrackRecordCollection', 'MuonEntryLayer'), 
('TrackRecordCollection', 'MuonExitLayer'), ('CaloCalibrationHitContainer', 'LArCalibrationHitActive'), ('CaloCalibrationHitContainer', 'LArCalibrationHitDeadMaterial'), ('CaloCalibrationHitContainer', 'LArCalibrationHitInactive'), ('DataHeader', 'Stream1')], 'run_number': [5200L], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-GEO-02-01-00', 'evt_number': [30002L], 'evt_type': ('IS_SIMULATION', 'IS_ATLAS', 'IS_PHYSICS'), 'metadata': {'/Digitization/Parameters': {'physicsList': 'QGSP_BERT', 'N_beamGasInputFiles': 0, 'doBeamHalo': False, 'N_cavernInputFiles': 0, 'overrideMetadata': False, 'numberOfBeamHalo': 1.0, 'doCavern': False, 'IOVDbGlobalTag': 'default', 'N_beamHaloInputFiles': 0, 'initialBunchCrossing': -36, 'doCaloNoise': True, 'N_minBiasInputFiles': 0, 'numberOfCollisions': 0.0, 'rndmSvc': 'AtRanluxGenSvc', 'rndmSeedList': ['BCM_Digitization 49261511 105132395', 'PixelDigitization 10513240 492615105', 'SCT_Digitization 49261511 105132395', 'TRT_ElectronicsNoise 124 346', 'TRT_Noise 1235 3457', 'TRT_ThresholdFluctuations 12346 34568', 'TRT_ProcessStraw 123457 345679', 'TRT_SimDriftTime 1234568 3456790', 'TRT_PAI 12345679 34567891', 'TRT_FakeConditions 123456790 345678902', 'LArDigitization 1235 5679', 'Tile_HitVecToCnt 4789900 989240513', 'Tile_DigitsMaker 4789900 989240513', 'CSC_Digitization 49261511 105132395', 'MDTResponse 49261511 105132395', 'MDT_Digitization 49261511 105132395', 'MDT_DigitizationTwin 393242562 857132382', 'TGC_Digitization 49261511 105132395', 'RPC_Digitization 49261511 105132395', 'CscDigitToCscRDOTool 49261511 105132395', 'Tile_HitToTTL1 4789900 989240513', 'CTPSimulation 1979283044 1924452190'], 'numberOfCavern': 2, 'doMuonNoise': True, 'doInDetNoise': True,'numberOfBeamGas': 1.0, 'finalBunchCrossing': 32, 'doBeamGas': False, 'doMinimumBias': False, 'bunchSpacing': 25, 'DetDescrVersion': 'ATLAS-GEO-02-01-00', 'lvl1TriggerMenu': 'lumi1E31_no_Bphysics_no_prescale', 'rndmSeedOffset2': 1, 'rndmSeedOffset1': 1}, '/Simulation/Parameters': {'EtaPhiStatus': True, 'PhysicsList': 'QGSP_BERT', 'CalibrationRun': 'DeadLAr', 'SimLayout': 'ATLAS-GEO-02-01-00', 'DoLArBirk': False, 'LArParameterization': 0, 'MagneticField': 'OracleDB', 'WorldRRange': 'default', 'SeedsG4': 'default', 'NeutronTimeCut': 150.0, 'WorldZRange': 'default', 'Seeds': 'default', 'G4Version': 'geant4.9.1.patch03.atlas01', 'RunType': 'atlas', 'VertexStatus': True, 'IOVDbGlobalTag': 'default', 'VRangeStatus': True}}}
+        _compare_fileinfos(f3,f3_ref)
+        assert f3.run_number==f3_ref['run_number']
+        assert f3.evt_number==f3_ref['evt_number']
+        assert f3.lumi_block==f3_ref['lumi_block']
+        assert f3.run_type  ==f3_ref['run_type']
+        assert f3.beam_type ==f3_ref['beam_type']
+        assert f3.beam_energy==f3_ref['beam_energy']
+
+    def test004(self):
+        """test empty POOL file"""
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/empty-file.pool'
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/empty-file.pool')
+        
+        f4 = af.fopen(fname)
+        if verbose:
+            print "::: f4.fileinfos:"
+            print f4.fileinfos
+        f4_ref = {'file_md5sum':'519643438bf3a0e7a1e637463d73d9e9','metadata_items': [('DataHeader', ';00;MetaDataSvc'), ('EventBookkeeperCollection', 'EventBookkeepers'), ('EventBookkeeperCollection', 'EventSelector.Counter'), ('EventStreamInfo', 'DPD_EGAMTAUCOMM'), ('IOVMetaDataContainer', '/GLOBAL/DETSTATUS/LBSUMM'), ('IOVMetaDataContainer', '/TRIGGER/HLT/HltConfigKeys'), ('IOVMetaDataContainer', '/TRIGGER/HLT/Menu'), ('IOVMetaDataContainer', '/TRIGGER/LVL1/Lvl1ConfigKey'), ('IOVMetaDataContainer', '/TRIGGER/LVL1/Menu'), ('IOVMetaDataContainer', '/TRIGGER/LVL1/Prescales'), ('IOVMetaDataContainer', '/TagInfo'), ('LumiBlockCollection', 'IncompleteLumiBlocks')], 'stream_names': ['DPD_EGAMTAUCOMM'], 'run_type': ['N/A'], 'stream_tags': [], 'tag_info': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPerm-02_test', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', 'GeoAtlas': 'ATLAS-GEO-03-00-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections.topophioff-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections.topogap-v1', 'AtlasRelease': 'any', 'IOVDbGlobalTag': 'COMCOND-ES1C-000-00', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections.etaoff-v4_1', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections.phioff-v4', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-003-01', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections.topolw-v1', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-003-01', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFRac-003-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-003-01', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections.phimod-v4', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections.etamod-v4', 'AMITag': 'f57', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections.rfac-v4', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections.calhits-v5', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections.trcorr-v5', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections.topoetaoff-v1', '/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections.gap-v4', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections.topophimod-v1'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/empty-file.pool', 'file_guid': 'CC6B79F4-043E-DE11-BD81-000423D67862', 'conditions_tag': 'COMCOND-ES1C-000-00', 'beam_type': ['N/A'], 'lumi_block': [], 'det_descr_tags': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPerm-02_test', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', 'GeoAtlas': 'ATLAS-GEO-03-00-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections.topophioff-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections.topogap-v1', 'AtlasRelease': 'any', 'IOVDbGlobalTag': 'COMCOND-ES1C-000-00', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections.etaoff-v4_1', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections.phioff-v4', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/CALO/HadCalibration/CaloOutOfClusterPi0': 
'CaloHadOOCCorrPi0-003-01', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections.topolw-v1', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-003-01', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFRac-003-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-003-01', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections.phimod-v4', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections.etamod-v4', 'AMITag': 'f57', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections.rfac-v4', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections.calhits-v5', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections.trcorr-v5', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections.topoetaoff-v1', '/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections.gap-v4', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections.topophimod-v1'}, 'nentries': 0, 'eventdata_items': [], 'run_number': [], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-GEO-03-00-00', 'evt_number': [], 'evt_type': [], 'metadata': None}
+
+        f4.fileinfos['tag_info']['AtlasRelease'] = 'any'
+        f4.fileinfos['det_descr_tags']['AtlasRelease'] = 'any'
+        _compare_fileinfos(f4,f4_ref)
+        assert f4.run_number==f4_ref['run_number']
+        assert f4.evt_number==f4_ref['evt_number']
+        assert f4.lumi_block==f4_ref['lumi_block']
+        assert f4.run_type  ==f4_ref['run_type']
+        assert f4.beam_type ==f4_ref['beam_type']
+        assert f4.beam_energy==f4_ref['beam_energy']
+
+        return
+
+    def test005(self):
+        """test (old) HITS POOL file"""
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/calib1_csc11.005200.T1_McAtNlo_Jimmy.simul.HITS.v12003104_tid004131._00069.pool.root.10'
+
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/calib1_csc11.005200.T1_McAtNlo_Jimmy.simul.HITS.v12003104_tid004131._00069.pool.root.10')
+        
+        f5 = af.fopen(fname)
+        if verbose:
+            print "::: f5.fileinfos:"
+            print f5.fileinfos
+        f5_ref = {'file_md5sum':'b109aa2689abeb8aa282605c29087d64', 'metadata_items': [], 'stream_names': ['Stream1'], 'run_type': ['N/A'], 'stream_tags': [], 'tag_info': {'AtlasRelease': 'AtlasOffline-12.0.31', 'GeoAtlas': 'ATLAS-CSC-01-02-00', 'IOVDbGlobalTag': 'OFLCOND-CSC-00-01-00'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/calib1_csc11.005200.T1_McAtNlo_Jimmy.simul.HITS.v12003104_tid004131._00069.pool.root.10', 'file_guid': '00C5C040-EB75-DB11-9308-00E0812B9987', 'beam_type': ['N/A'], 'lumi_block': [0L], 'conditions_tag': 'OFLCOND-CSC-00-01-00', 'det_descr_tags': {'AtlasRelease': 'AtlasOffline-12.0.31', 'GeoAtlas': 'ATLAS-CSC-01-02-00', 'IOVDbGlobalTag': 'OFLCOND-CSC-00-01-00'}, 'nentries': 50L, 'eventdata_items': [('EventInfo', 'McEventInfo'), ('SiHitCollection', 'PixelHits'), ('SiHitCollection', 'SCT_Hits'), ('LArHitContainer', 'LArHitEMB'), ('LArHitContainer', 'LArHitEMEC'), ('LArHitContainer', 'LArHitFCAL'), ('LArHitContainer', 'LArHitHEC'), ('TileHitVector', 'MBTSHits'), ('TileHitVector', 'TileHitVec'), ('RPCSimHitCollection', 'RPC_Hits'), ('TGCSimHitCollection', 'TGC_Hits'), ('CSCSimHitCollection', 'CSC_Hits'), ('MDTSimHitCollection', 'MDT_Hits'), ('McEventCollection', 'TruthEvent'), ('TRTUncompressedHitCollection', 'TRTUncompressedHits'), ('TrackRecordCollection', 'CaloEntryLayer'), ('TrackRecordCollection', 'MuonEntryLayer'), ('TrackRecordCollection', 'MuonExitLayer'), ('CaloCalibrationHitContainer', 'LArCalibrationHitActive'), ('CaloCalibrationHitContainer', 'LArCalibrationHitDeadMaterial'), ('CaloCalibrationHitContainer', 'LArCalibrationHitInactive'), ('CaloCalibrationHitContainer', 'TileCalibrationCellHitCnt'), ('CaloCalibrationHitContainer', 'TileCalibrationDMHitCnt'), ('DataHeader', 'Stream1')], 'run_number': [5200L], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-CSC-01-02-00', 'evt_number': [6136L], 'evt_type': ('IS_SIMULATION', 'IS_ATLAS', 'IS_PHYSICS'), 'metadata': {}}
+        _compare_fileinfos(f5,f5_ref)
+        assert f5.run_number==f5_ref['run_number']
+        assert f5.evt_number==f5_ref['evt_number']
+        assert f5.lumi_block==f5_ref['lumi_block']
+        assert f5.run_type  ==f5_ref['run_type']
+        assert f5.beam_type ==f5_ref['beam_type']
+        assert f5.beam_energy==f5_ref['beam_energy']
+        return
+
+    def test006(self):
+        """test old EVGEN POOL file"""
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/testSim.0011.mu_pt5_eta60.EVGEN.pool.root'
+
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/testSim.0011.mu_pt5_eta60.EVGEN.pool.root')
+        f6 = af.fopen(fname)
+        if verbose:
+            print "::: f6.fileinfos:"
+            print f6.fileinfos
+        f6_ref = {'file_md5sum':'b6b58e325235b4fbbf0aebd5e028ab08', 'metadata_items': [], 'stream_names': ['Stream1'], 'run_type': ['N/A'], 'stream_tags': [], 'tag_info': {'AtlasRelease': 'any'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/testSim.0011.mu_pt5_eta60.EVGEN.pool.root', 'file_guid': 'ACC40752-51BB-DB11-8437-000423D65662', 'beam_type': ['N/A'], 'lumi_block': [0L], 'conditions_tag': None, 'det_descr_tags': {'AtlasRelease': 'any'}, 'nentries': 1053L, 'eventdata_items': [('EventInfo', 'McEventInfo'), ('McEventCollection', 'GEN_EVENT'), ('DataHeader', 'Stream1')], 'run_number': [11L], 'beam_energy': ['N/A'], 'geometry': None, 'evt_number': [1L], 'evt_type': ('IS_SIMULATION', 'IS_ATLAS', 'IS_PHYSICS'), 'metadata': {}}
+        f6.fileinfos['tag_info']['AtlasRelease'] = 'any'
+        f6.fileinfos['det_descr_tags']['AtlasRelease'] = 'any'
+        _compare_fileinfos(f6,f6_ref)
+        assert f6.run_number==f6_ref['run_number']
+        assert f6.evt_number==f6_ref['evt_number']
+        assert f6.lumi_block==f6_ref['lumi_block']
+        assert f6.run_type  ==f6_ref['run_type']
+        assert f6.beam_type ==f6_ref['beam_type']
+        assert f6.beam_energy==f6_ref['beam_energy']
+        return
+
+    def test007(self):
+        """test ESD commissionning POOL file"""
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.commissionning.15.2.0.pool'
+
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.commissionning.15.2.0.pool')
+        
+        f7 = af.fopen(fname)
+        if verbose:
+            print "::: f7.fileinfos:"
+            print f7.fileinfos
+        f7_ref = {'file_md5sum':'c52c2056f049094abe559af10216937c', 'metadata_items': [('EventStreamInfo', 'StreamESD'), ('LumiBlockCollection', 'LumiBlocks'), ('DataHeader', ';00;MetaDataSvc'), ('IOVMetaDataContainer', '/GLOBAL/DETSTATUS/LBSUMM'), ('IOVMetaDataContainer', '/TagInfo')], 'stream_names': ['StreamESD'], 'run_type': ['N/A'], 'stream_tags': [{'obeys_lbk': True, 'stream_type': 'physics', 'stream_name': 'IDCosmic'}], 'tag_info': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPermanent-01', '/GLOBAL/BTagCalib/IP3D': 'BTagCalib-03-00', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', '/MUONALIGN/MDT/ENDCAP/SIDEC': 'MuonAlignMDTEndCapCAlign-REPRO-08', '/MUONALIGN/MDT/BARREL': 'MuonAlignMDTBarrelAlign-0100-SEC0109', '/CALO/H1Weights/H1WeightsCone4Topo': 'CaloH1WeightsCone4Topo-02-000', '/TILE/OFL01/CALIB/LAS/LIN': 'TileOfl01CalibLasLin-HLT-UPD1-00', 'GeoAtlas': 'ATLAS-GEO-03-00-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections-00-02-00-DC3-v2', 'AtlasRelease': 'any', 'IOVDbGlobalTag': 'COMCOND-ES1C-001-01', '/MUONALIGN/TGC/SIDEA': 'MuonAlignTGCEndCapAAlign-REPRO-01', '/MUONALIGN/TGC/SIDEC': 'MuonAlignTGCEndCapCAlign-REPRO-01', '/CALO/CaloSwClusterCorrections/larupdate': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/clcon': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/CES': 'TileOfl01CalibCes-HLT-UPD1-01', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SV1': 'BTagCalib-03-00', '/MUONALIGN/MDT/ENDCAP/SIDEA': 'MuonAlignMDTEndCapAAlign-REPRO-08', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-CSC05-BERT', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-CSC05-BERT', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFrac-CSC05-BERT', '/GLOBAL/BTagCalib/JetProb': 'BTagCalib-03-00', '/CALO/EMTopoClusterCorrections/larupdate': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/GLOBAL/BTagCalib/SoftEl': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/lwc': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/EMS': 'TileOfl01CalibEms-HLT-UPD1-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-CSC05-BERT', '/TILE/OFL01/CALIB/CIS/FIT/LIN': 'TileOfl01CalibCisFitLin-HLT-UPD1-00', '/GLOBAL/BTagCalib/IP2D': 'BTagCalib-03-00', '/GLOBAL/BTagCalib/JetFitter': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SoftMu': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/EMTopoClusterCorrections/topoetaoffsw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', 
'/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections-00-02-00-DC3-v2'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/esd.commissionning.15.2.0.pool', 'file_guid': '487184A1-9343-DE11-AACC-001E4F3E5C1F', 'beam_type': ['N/A'], 'lumi_block': [1L], 'conditions_tag': 'COMCOND-ES1C-001-01', 'det_descr_tags': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPermanent-01', '/GLOBAL/BTagCalib/IP3D': 'BTagCalib-03-00', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', '/MUONALIGN/MDT/ENDCAP/SIDEC': 'MuonAlignMDTEndCapCAlign-REPRO-08', '/MUONALIGN/MDT/BARREL': 'MuonAlignMDTBarrelAlign-0100-SEC0109', '/CALO/H1Weights/H1WeightsCone4Topo': 'CaloH1WeightsCone4Topo-02-000', '/TILE/OFL01/CALIB/LAS/LIN': 'TileOfl01CalibLasLin-HLT-UPD1-00', 'GeoAtlas': 'ATLAS-GEO-03-00-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections-00-02-00-DC3-v2', 'AtlasRelease': 'any', 'IOVDbGlobalTag': 'COMCOND-ES1C-001-01', '/MUONALIGN/TGC/SIDEA': 'MuonAlignTGCEndCapAAlign-REPRO-01', '/MUONALIGN/TGC/SIDEC': 'MuonAlignTGCEndCapCAlign-REPRO-01', '/CALO/CaloSwClusterCorrections/larupdate': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/clcon': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/CES': 'TileOfl01CalibCes-HLT-UPD1-01', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SV1': 'BTagCalib-03-00', '/MUONALIGN/MDT/ENDCAP/SIDEA': 'MuonAlignMDTEndCapAAlign-REPRO-08', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-CSC05-BERT', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-CSC05-BERT', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFrac-CSC05-BERT', '/GLOBAL/BTagCalib/JetProb': 'BTagCalib-03-00', '/CALO/EMTopoClusterCorrections/larupdate': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/GLOBAL/BTagCalib/SoftEl': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/lwc': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/EMS': 'TileOfl01CalibEms-HLT-UPD1-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-CSC05-BERT', '/TILE/OFL01/CALIB/CIS/FIT/LIN': 'TileOfl01CalibCisFitLin-HLT-UPD1-00', '/GLOBAL/BTagCalib/IP2D': 'BTagCalib-03-00', '/GLOBAL/BTagCalib/JetFitter': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SoftMu': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/EMTopoClusterCorrections/topoetaoffsw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', 
'/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/EMTopoClusterCorrections/topophimod': 'EMTopoClusterCorrections-00-02-00-DC3-v2'}, 'nentries': 10L, 'eventdata_items': [('EventInfo', 'ByteStreamEventInfo'), ('PixelRDO_Container', 'PixelRDOs'), ('SCT_RDO_Container', 'SCT_RDOs'), ('TRT_RDO_Container', 'TRT_RDOs'), ('InDet::PixelClusterContainer', 'PixelClusters'), ('InDet::SCT_ClusterContainer', 'SCT_Clusters'), ('BCM_RDO_Container', 'BCM_RDOs'), ('LArDigitContainer', 'LArDigitContainer_IIC'), ('LArDigitContainer', 'LArDigitContainer_Thinned'), ('CaloCellContainer', 'AllCalo'), ('CaloTowerContainer', 'CombinedTower'), ('CaloClusterContainer', 'CaloCalTopoCluster'), ('CaloClusterContainer', 'CaloTopoCluster'), ('CaloClusterContainer', 'EMTopoCluster430'), ('CaloClusterContainer', 'LArClusterEM'), ('CaloClusterContainer', 'LArClusterEM7_11Nocorr'), ('CaloClusterContainer', 'LArClusterEMFrwd'), ('CaloClusterContainer', 'LArClusterEMSofte'), ('CaloClusterContainer', 'LArMuClusterCandidates'), ('CaloClusterContainer', 'MuonClusterCollection'), ('CaloClusterContainer', 'Tau1P3PCellCluster'), ('CaloClusterContainer', 'Tau1P3PCellEM012ClusterContainer'), ('CaloClusterContainer', 'Tau1P3PPi0ClusterContainer'), ('CaloClusterContainer', 'egClusterCollection'), ('TileDigitsContainer', 'TileDigitsFlt'), ('TileCellContainer', 'MBTSContainer'), ('TileL2Container', 'TileL2Cnt'), ('TileMuContainer', 'TileMuObj'), ('TileCosmicMuonContainer', 'TileCosmicMuonHT'), ('ElectronContainer', 'ElectronAODCollection'), ('ElectronContainer', 'ElectronCollection'), ('PhotonContainer', 'PhotonAODCollection'), ('PhotonContainer', 'PhotonCollection'), ('ElectronContainer', 'egammaForwardCollection'), ('ElectronContainer', 'softeCollection'), ('Analysis::TauJetContainer', 'TauRecContainer'), ('JetKeyDescriptor', 'JetKeyMap'), ('MissingEtCalo', 'MET_Base'), ('MissingEtCalo', 'MET_Base0'), ('MissingEtCalo', 'MET_Calib'), ('MissingET', 'MET_CellOut'), ('MissingEtCalo', 'MET_CorrTopo'), ('MissingET', 'MET_Cryo'), ('MissingET', 'MET_CryoCone'), ('MissingET', 'MET_Final'), ('MissingEtCalo', 'MET_LocHadTopo'), ('MissingET', 'MET_LocHadTopoObj'), ('MissingET', 'MET_Muon'), ('MissingET', 'MET_MuonBoy'), ('MissingET', 'MET_MuonBoy_Spectro'), ('MissingET', 'MET_MuonBoy_Track'), ('MissingET', 'MET_RefEle'), ('MissingET', 'MET_RefFinal'), ('MissingET', 'MET_RefGamma'), ('MissingET', 'MET_RefJet'), ('MissingET', 'MET_RefTau'), ('MissingEtCalo', 'MET_Topo'), ('MissingET', 'MET_TopoObj'), ('Trk::SegmentCollection', 'ConvertedMBoySegments'), ('Trk::SegmentCollection', 'MooreSegments'), ('Trk::SegmentCollection', 'MuGirlSegments'), ('TrackCollection', 'CombinedInDetTracks'), ('TrackCollection', 'CombinedInDetTracks_CTB'), ('TrackCollection', 'Combined_Tracks'), ('TrackCollection', 'ConvertedMBoyMuonSpectroOnlyTracks'), ('TrackCollection', 'ConvertedMBoyTracks'), ('TrackCollection', 'ConvertedMuIdCBTracks'), ('TrackCollection', 'ConvertedMuTagTracks'), ('TrackCollection', 'ConvertedStacoTracks'), ('TrackCollection', 'MooreExtrapolatedTracks'), ('TrackCollection', 'MooreTracks'), ('TrackCollection', 'MuGirlRefittedTracks'), ('TrackCollection', 'MuTagIMOTracks'), ('TrackCollection', 'MuidExtrapolatedTracks'), ('TrackCollection', 'ResolvedPixelTracks_CTB'), ('TrackCollection', 'ResolvedSCTTracks_CTB'), ('TrackCollection', 'TRTStandaloneTRTTracks_CTB'), ('InDet::PixelGangedClusterAmbiguities', 'PixelClusterAmbiguitiesMap'), ('LArFebErrorSummary', 'LArFebErrorSummary'), ('ComTime', 'TRT_Phase'), 
('Analysis::TauDetailsContainer', 'TauRecDetailsContainer'), ('Analysis::TauDetailsContainer', 'TauRecExtraDetailsContainer'), ('Muon::CscPrepDataContainer', 'CSC_Clusters'), ('Analysis::MuonContainer', 'CaloESDMuonCollection'), ('Analysis::MuonContainer', 'CaloMuonCollection'), ('Analysis::MuonContainer', 'MuGirlLowBetaCollection'), ('Analysis::MuonContainer', 'MuidESDMuonCollection'), ('Analysis::MuonContainer', 'MuidMuonCollection'), ('Analysis::MuonContainer', 'StacoESDMuonCollection'), ('Analysis::MuonContainer', 'StacoMuonCollection'), ('TRT_BSIdErrContainer', 'TRT_ByteStreamIdErrs'), ('InDet::TRT_DriftCircleContainer', 'TRT_DriftCircles'), ('Muon::MdtPrepDataContainer', 'MDT_DriftCircles'), ('JetCollection', 'Cone4H1TopoJets'), ('JetCollection', 'Cone4H1TowerJets'), ('JetCollection', 'Cone7H1TowerJets'), ('egDetailContainer', 'SofteDetailContainer'), ('egDetailContainer', 'egDetailAOD'), ('egDetailContainer', 'egDetailContainer'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollection'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollectionNextBC'), ('Muon::TgcCoinDataContainer', 'TrigT1CoinDataCollectionPriorBC'), ('Muon::RpcCoinDataContainer', 'RPC_triggerHits'), ('Muon::CscStripPrepDataContainer', 'CSC_Measurements'), ('Muon::RpcPrepDataContainer', 'RPC_Measurements'), ('CaloShowerContainer', 'CaloCalTopoCluster_Data'), ('CaloShowerContainer', 'CaloTopoCluster_Data'), ('CaloShowerContainer', 'EMTopoCluster430_Data'), ('CaloShowerContainer', 'LArClusterEM7_11Nocorr_Data'), ('CaloShowerContainer', 'LArClusterEMSofte_Data'), ('CaloShowerContainer', 'LArClusterEM_Data'), ('CaloShowerContainer', 'LArMuClusterCandidates_Data'), ('CaloShowerContainer', 'MuonClusterCollection_Data'), ('CaloShowerContainer', 'Tau1P3PCellCluster_Data'), ('CaloShowerContainer', 'Tau1P3PCellEM012ClusterContainer_Data'), ('CaloShowerContainer', 'Tau1P3PPi0ClusterContainer_Data'), ('CaloShowerContainer', 'egClusterCollection_Data'), ('InDetBSErrContainer', 'PixelByteStreamErrs'), ('InDetBSErrContainer', 'SCT_ByteStreamErrs'), ('TRT_BSErrContainer', 'TRT_ByteStreamErrs'), ('CaloCellLinkContainer', 'CaloCalTopoCluster_Link'), ('CaloCellLinkContainer', 'CaloTopoCluster_Link'), ('CaloCellLinkContainer', 'EMTopoCluster430_Link'), ('CaloCellLinkContainer', 'LArClusterEM7_11Nocorr_Link'), ('CaloCellLinkContainer', 'LArClusterEMSofte_Link'), ('CaloCellLinkContainer', 'LArClusterEM_Link'), ('CaloCellLinkContainer', 'LArMuClusterCandidates_Link'), ('CaloCellLinkContainer', 'MuonClusterCollection_Link'), ('CaloCellLinkContainer', 'Tau1P3PCellCluster_Link'), ('CaloCellLinkContainer', 'Tau1P3PCellEM012ClusterContainer_Link'), ('CaloCellLinkContainer', 'Tau1P3PPi0ClusterContainer_Link'), ('CaloCellLinkContainer', 'egClusterCollection_Link'), ('Rec::MuonSpShowerContainer', 'MuonSpShowers'), ('Rec::TrackParticleContainer', 'Combined_TrackParticles'), ('Rec::TrackParticleContainer', 'MooreTrackParticles'), ('Rec::TrackParticleContainer', 'MuGirlRefittedTrackParticles'), ('Rec::TrackParticleContainer', 'MuTagIMOTrackParticles'), ('Rec::TrackParticleContainer', 'MuTagTrackParticles'), ('Rec::TrackParticleContainer', 'MuidExtrTrackParticles'), ('Rec::TrackParticleContainer', 'MuonboyMuonSpectroOnlyTrackParticles'), ('Rec::TrackParticleContainer', 'MuonboyTrackParticles'), ('Rec::TrackParticleContainer', 'StacoTrackParticles'), ('Rec::TrackParticleContainer', 'TrackParticleCandidate'), ('Muon::TgcPrepDataContainer', 'TGC_Measurements'), ('Muon::TgcPrepDataContainer', 'TGC_MeasurementsNextBC'), ('Muon::TgcPrepDataContainer', 
'TGC_MeasurementsPriorBC'), ('MuonCaloEnergyContainer', 'MuonCaloEnergyCollection'), ('DataHeader', 'StreamESD')], 'run_number': [91900L], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-GEO-03-00-00', 'evt_number': [2244L], 'evt_type': ('IS_DATA', 'IS_ATLAS', 'IS_PHYSICS'), 'metadata': {'/GLOBAL/DETSTATUS/LBSUMM': [], '/TagInfo': {'/TRT/Cond/StatusPermanent': 'TrtStrawStatusPermanent-01', '/GLOBAL/BTagCalib/IP3D': 'BTagCalib-03-00', '/CALO/HadCalibration/CaloDMCorr2': 'CaloHadDMCorr-002-00', '/MUONALIGN/MDT/ENDCAP/SIDEC': 'MuonAlignMDTEndCapCAlign-REPRO-08', '/MUONALIGN/MDT/BARREL': 'MuonAlignMDTBarrelAlign-0100-SEC0109', '/CALO/H1Weights/H1WeightsCone4Topo': 'CaloH1WeightsCone4Topo-02-000', '/TILE/OFL01/CALIB/LAS/LIN': 'TileOfl01CalibLasLin-HLT-UPD1-00', 'GeoAtlas': 'ATLAS-GEO-03-00-00', '/CALO/EMTopoClusterCorrections/topophioff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topogap': 'EMTopoClusterCorrections-00-02-00-DC3-v2', 'AtlasRelease': 'AtlasOffline-rel_1', 'IOVDbGlobalTag': 'COMCOND-ES1C-001-01', '/MUONALIGN/TGC/SIDEA': 'MuonAlignTGCEndCapAAlign-REPRO-01', '/MUONALIGN/TGC/SIDEC': 'MuonAlignTGCEndCapCAlign-REPRO-01', '/CALO/CaloSwClusterCorrections/larupdate': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/clcon': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/etaoff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phimod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/CES': 'TileOfl01CalibCes-HLT-UPD1-01', '/CALO/CaloSwClusterCorrections/trcorr': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SV1': 'BTagCalib-03-00', '/MUONALIGN/MDT/ENDCAP/SIDEA': 'MuonAlignMDTEndCapAAlign-REPRO-08', '/CALO/HadCalibration/CaloOutOfClusterPi0': 'CaloHadOOCCorrPi0-CSC05-BERT', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/CALO/EMTopoClusterCorrections/topolw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/HadCalibration/H1ClusterCellWeights': 'CaloH1CellWeights-CSC05-BERT', '/CALO/HadCalibration/CaloEMFrac': 'CaloEMFrac-CSC05-BERT', '/GLOBAL/BTagCalib/JetProb': 'BTagCalib-03-00', '/CALO/EMTopoClusterCorrections/larupdate': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/GLOBAL/BTagCalib/SoftEl': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/lwc': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/TILE/OFL01/CALIB/EMS': 'TileOfl01CalibEms-HLT-UPD1-01', '/CALO/HadCalibration/CaloOutOfCluster': 'CaloHadOOCCorr-CSC05-BERT', '/TILE/OFL01/CALIB/CIS/FIT/LIN': 'TileOfl01CalibCisFitLin-HLT-UPD1-00', '/GLOBAL/BTagCalib/IP2D': 'BTagCalib-03-00', '/GLOBAL/BTagCalib/JetFitter': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/etamod': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/GLOBAL/BTagCalib/SoftMu': 'BTagCalib-03-00', '/CALO/CaloSwClusterCorrections/rfac': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/calhits': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/CaloSwClusterCorrections/phioff': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/H1Weights/H1WeightsConeTopo': 'CaloH1WeightsConeTopo-00-000', '/GLOBAL/TrackingGeo/LayerMaterial': 'TagInfo/AtlasLayerMat_v11_/GeoAtlas', '/CALO/EMTopoClusterCorrections/topoetaoffsw': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/EMTopoClusterCorrections/topoetaoff': 'EMTopoClusterCorrections-00-02-00-DC3-v2', '/CALO/CaloSwClusterCorrections/gap': 'CaloSwClusterCorrections-00-02-00-v6_calh', '/CALO/EMTopoClusterCorrections/topophimod': 
'EMTopoClusterCorrections-00-02-00-DC3-v2'}}}
+    
+        f7.fileinfos['tag_info']['AtlasRelease'] = 'any'
+        f7.fileinfos['det_descr_tags']['AtlasRelease'] = 'any'
+        _compare_fileinfos(f7,f7_ref)
+        assert f7.run_number==f7_ref['run_number']
+        assert f7.evt_number==f7_ref['evt_number']
+        assert f7.lumi_block==f7_ref['lumi_block']
+        assert f7.run_type  ==f7_ref['run_type']
+        assert f7.beam_type ==f7_ref['beam_type']
+        assert f7.beam_energy==f7_ref['beam_energy']
+        return
+
+    def test008(self):
+        """test RDO POOL file via xrootd"""
+        import PyUtils.AthFile as af
+
+        fname="root://eosatlas//eos/atlas/atlascerngroupdisk/trig-daq/validation/test_data/valid1.005640.CharybdisJimmy.digit.RDO.e322_s483/RDO.027377._00069.pool.root.3"
+
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('pool', 'root://eosatlas//eos/atlas/atlascerngroupdisk/trig-daq/validation/test_data/valid1.005640.CharybdisJimmy.digit.RDO.e322_s483/RDO.027377._00069.pool.root.3')
+        
+        f8 = af.fopen(fname)
+        if verbose:
+            print "::: f8.fileinfos:"
+            print f8.fileinfos
+
+        f8_ref = {'file_md5sum': '7f6798d2115b5c1cdad02eb98dec5d68', 'stream_tags': [], 'tag_info': {'IOVDbGlobalTag': 'OFLCOND-SIM-00-00-00','/TRT/Cond/Status': 'TrtStrawStatus-02', '/LAR/Identifier/FebRodAtlas': 'FebRodAtlas-005', '/LAR/ElecCalibMC': 'LARElecCalibMC-CSC02-J-QGSP_BERT', 'GeoAtlas': 'ATLAS-GEO-02-01-00', 'TGC_support': 'TGC Big Wheel', 'AtlasRelease': 'any', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/GLOBAL/BField/Map': 'BFieldMap-000', 'MDT_support': 'MDT Big Wheel', '/LAR/Identifier/OnOffIdAtlas': 'OnOffIdAtlas-012'}, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/atlascerngroupdisk/trig-daq/validation/test_data/valid1.005640.CharybdisJimmy.digit.RDO.e322_s483/RDO.027377._00069.pool.root.3', 'beam_type': ['N/A'], 'det_descr_tags': {'IOVDbGlobalTag': 'OFLCOND-SIM-00-00-00','/TRT/Cond/Status': 'TrtStrawStatus-02', '/LAR/Identifier/FebRodAtlas': 'FebRodAtlas-005', '/LAR/ElecCalibMC': 'LARElecCalibMC-CSC02-J-QGSP_BERT', 'GeoAtlas': 'ATLAS-GEO-02-01-00', 'TGC_support': 'TGC Big Wheel', 'AtlasRelease': 'any', '/LAR/Identifier/LArTTCellMapAtlas': 'LARIdentifierLArTTCellMapAtlas-DC3-05', '/GLOBAL/BField/Map': 'BFieldMap-000', 'MDT_support': 'MDT Big Wheel', '/LAR/Identifier/OnOffIdAtlas': 'OnOffIdAtlas-012'}, 'nentries': 25L, 'evt_number': [1814L], 'file_guid': '4E971C9E-A9A4-DD11-8A9A-00145E6D4F72', 'metadata': {'/Digitization/Parameters': {'physicsList': 'QGSP_BERT', 'N_beamGasInputFiles': 0, 'doBeamHalo': False, 'N_cavernInputFiles': 0, 'overrideMetadata': False, 'numberOfBeamHalo': 1.0, 'doCavern': False, 'IOVDbGlobalTag': 'default', 'N_beamHaloInputFiles': 0, 'initialBunchCrossing': -36, 'doCaloNoise': True, 'N_minBiasInputFiles': 0, 'numberOfCollisions': 2.2999999999999998, 'rndmSvc': 'AtRanluxGenSvc', 'rndmSeedList': ['PixelDigitization 10513308 492615173', 'SCT_Digitization 49261579 105132463', 'TRT_ElectronicsNoise 192 414', 'TRT_Noise 1303 3525', 'TRT_ThresholdFluctuations 12414 34636', 'TRT_ProcessStraw 123525 345747', 'TRT_SimDriftTime 1234636 3456858', 'TRT_PAI 12345747 34567959', 'TRT_FakeConditions 123456858 345678970', 'BCM_Digitization 49261579 105132463', 'LArDigitization 1303 5747', 'Tile_HitVecToCnt 4789968 989240581', 'Tile_DigitsMaker 4789968 989240581', 'CSC_Digitization 49261579 105132463', 'MDTResponse 49261579 105132463', 'MDT_Digitization 49261579 105132463', 'MDT_DigitizationTwin 393242630 857132450', 'TGC_Digitization 49261579 105132463', 'RPC_Digitization 49261579 105132463', 'CscDigitToCscRDOTool 49261579 105132463', 'Tile_HitToTTL1 4789968 989240581', 'CTPSimulation 1979283112 1924452258'], 'numberOfCavern': 2, 'doMuonNoise': True, 'doInDetNoise': True, 'numberOfBeamGas': 1.0, 'finalBunchCrossing': 32, 'doBeamGas': False, 'doMinimumBias': False, 'bunchSpacing': 25, 'DetDescrVersion': 'ATLAS-GEO-02-01-00', 'lvl1TriggerMenu': 'lumi1E31_no_Bphysics_no_prescale', 'rndmSeedOffset2': 69, 'rndmSeedOffset1': 69}, '/Simulation/Parameters': {'MagneticField': 'OracleDB', 'PhysicsList': 'QGSP_BERT', 'CalibrationRun': 'DeadLAr', 'SimLayout': 'ATLAS-GEO-02-01-00', 'DoLArBirk': False, 'LArParameterization': 0, 'VertexStatus': True, 'EtaPhiStatus': True, 'WorldRRange': 'default', 'RunType': 'atlas', 'WorldZRange': 'default', 'Seeds': 'default', 'G4Version': 'geant4.8.3.patch02.atlas04', 'NeutronTimeCut': 150.0, 'SeedsG4': 'default', 'IOVDbGlobalTag': 'default', 'VRangeStatus': True}}, 'metadata_items': [('DataHeader', ';00;MetaDataSvc'), ('IOVMetaDataContainer', '/Digitization/Parameters'), 
('IOVMetaDataContainer', '/Simulation/Parameters')], 'stream_names': ['Stream1'], 'run_type': ['N/A'], 'conditions_tag': 'OFLCOND-SIM-00-00-00', 'lumi_block': [1L], 'eventdata_items': [('EventInfo', 'McEventInfo'), ('PixelRDO_Container', 'PixelRDOs'), ('SCT_RDO_Container', 'SCT_RDOs'), ('TRT_RDO_Container', 'TRT_RDOs'), ('InDetSimDataCollection', 'BCM_SDO_Map'), ('InDetSimDataCollection', 'PixelSDO_Map'), ('InDetSimDataCollection', 'SCT_SDO_Map'), ('InDetSimDataCollection', 'TRT_SDO_Map'), ('BCM_RDO_Container', 'BCM_RDOs'), ('LArDigitContainer', 'LArDigitContainer_MC_Thinned'), ('LArRawChannelContainer', 'LArRawChannels'), ('LArTTL1Container', 'LArTTL1EM'), ('LArTTL1Container', 'LArTTL1HAD'), ('TileRawChannelContainer', 'TileRawChannelCnt'), ('TileTTL1Container', 'TileTTL1Cnt'), ('TileTTL1Container', 'TileTTL1MBTS'), ('TileHitVector', 'MBTSHits'), ('CscRawDataContainer', 'CSCRDO'), ('TgcRdoContainer', 'TGCRDO'), ('MdtCsmContainer', 'MDTCSM'), ('RpcPadContainer', 'RPCPAD'), ('ROIB::RoIBResult', 'RoIBResult'), ('CTP_RDO', 'CTP_RDO'), ('DataVector<LVL1::JetElement>', 'JetElements'), ('DataVector<LVL1::TriggerTower>', 'TriggerTowers'), ('MuCTPI_RDO', 'MUCTPI_RDO'), ('McEventCollection', 'TruthEvent'), ('DataVector<LVL1::JEMEtSums>', 'JEMEtSums'), ('MuonSimDataCollection', 'MDT_SDO'), ('MuonSimDataCollection', 'RPC_SDO'), ('MuonSimDataCollection', 'TGC_SDO'), ('DataVector<LVL1::CPMTower>', 'CPMTowers'), ('DataVector<LVL1::CPMHits>', 'CPMHits'), ('DataVector<LVL1::CMMEtSums>', 'CMMEtSums'), ('DataVector<LVL1::JEMRoI>', 'JEMRoIs'), ('LVL1::CMMRoI', 'CMMRoIs'), ('DataVector<LVL1::JEMHits>', 'JEMHits'), ('DataVector<LVL1::CPMRoI>', 'CPMRoIs'), ('DataVector<LVL1::CMMJetHits>', 'CMMJetHits'), ('DataVector<LVL1::CMMCPHits>', 'CMMCPHits'), ('CscSimDataCollection', 'CSC_SDO'), ('TrackRecordCollection', 'CaloEntryLayer'), ('TrackRecordCollection', 'MuonEntryLayer'), ('TrackRecordCollection', 'MuonExitLayer'), ('CaloCalibrationHitContainer', 'LArCalibrationHitActive'), ('CaloCalibrationHitContainer', 'LArCalibrationHitDeadMaterial'), ('CaloCalibrationHitContainer', 'LArCalibrationHitInactive'), ('DataHeader', 'Stream1')], 'run_number': [5640L], 'beam_energy': ['N/A'], 'geometry': 'ATLAS-GEO-02-01-00', 'evt_type': ('IS_SIMULATION', 'IS_ATLAS', 'IS_PHYSICS')}
+
+        f8.fileinfos['tag_info']['AtlasRelease'] = 'any'
+        f8.fileinfos['det_descr_tags']['AtlasRelease'] = 'any'
+        _compare_fileinfos(f8,f8_ref)
+        assert f8.run_number==f8_ref['run_number']
+        assert f8.evt_number==f8_ref['evt_number']
+        assert f8.lumi_block==f8_ref['lumi_block']
+        assert f8.run_type  ==f8_ref['run_type']
+        assert f8.beam_type ==f8_ref['beam_type']
+        assert f8.beam_energy==f8_ref['beam_energy']
+
+        return
+
+    def test009(self):
+        """test URI formation"""
+        import PyUtils.AthFile as af
+
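+        # each loop below spells the same file with different scheme prefixes
+        # and expects af.ftype to resolve them to one canonical
+        # (file_type, uri) pair; the rfio/castor checks are currently
+        # disabled (see the FIXME blocks)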
+        # test POOL files
+        for fname in [
+            'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root',
+            #'rfio://castor/cern.ch/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root',
+            'rfio:///castor/cern.ch/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root',
+            '/castor/cern.ch/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root',
+            ]:
+            """ ####FIXME####
+            assert af.exists(fname), "problem with filename [%s]" % fname
+            assert af.ftype(fname) == ('pool', 'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/esd.gcc34.15.1.x.pool.root'), "problem with filename [%s]" % fname
+            """
+        
+        # test bytestream files
+        for fname in [
+            'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data',
+            'rfio://castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data',
+            'rfio:///castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data',
+            ]:
+            """ ####FIXME####
+            assert af.exists(fname), "problem with filename [%s]" % fname
+            assert af.ftype(fname) == ('bs', 'rfio:/castor/cern.ch/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data')
+            """
+            
+        # test pool files served by xrootd
+        for fname in [
+            "root://eosatlas//eos/atlas/user/b/binet/utests/athfile/TAG.102229._000001.pool.root.1"
+            ]:
+            assert af.exists(fname), "problem with filename [%s]" % fname
+            assert af.ftype(fname) == (
+                'pool',
+                "root://eosatlas//eos/atlas/user/b/binet/utests/athfile/TAG.102229._000001.pool.root.1")
+            pass
+
+        # test AMI urls
+        for fname in [
+            "ami:data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW",
+            "ami:/data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW",
+            "ami://data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW",
+            ]:
+            assert af.ftype(fname) == (
+                'bs',
+                'ami://data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW'
+                )
+        return # test9
+
+    def test010(self):
+        """test TAG file"""
+        import PyUtils.AthFile as af
+        fname = "root://eosatlas//eos/atlas/user/b/binet/utests/athfile/TAG.102229._000001.pool.root.1"
+        assert af.exists(fname)
+        assert af.ftype(fname) == (
+            'pool',
+            'root://eosatlas//eos/atlas/user/b/binet/utests/athfile/TAG.102229._000001.pool.root.1')
+        
+        f10 = af.fopen(fname)
+        if verbose:
+            print "::: f10.fileinfos:"
+            print f10.fileinfos
+
+        f10_ref = {'metadata_items': None, 'stream_names': ['TAG'], 'run_type': [], 'stream_tags': [], 'evt_type': [], 'tag_info': None, 'file_type': 'pool', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/utests/athfile/TAG.102229._000001.pool.root.1', 'evt_number': [25], 'beam_energy': [], 'eventdata_items': None, 'run_number': [142391], 'geometry': None, 'beam_type': [], 'file_guid': '10A1A6D0-98EF-DE11-8D70-003048C6617E', 'file_md5sum': 'bce350a81aa253cc7eb8385a62775938', 'lumi_block': [], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 71L, 'metadata': None}
+
+        _compare_fileinfos(f10,f10_ref)
+        assert f10.run_number==f10_ref['run_number']
+        assert f10.evt_number==f10_ref['evt_number']
+        assert f10.lumi_block==f10_ref['lumi_block']
+        assert f10.run_type  ==f10_ref['run_type']
+        assert f10.beam_type ==f10_ref['beam_type']
+        assert f10.beam_energy==f10_ref['beam_energy']
+        
+        return # test10
+    
+    def test011(self):
+        """test AMI dataset query (RAW)"""
+        import PyUtils.AthFile as af
+        fname = "ami:data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW"
+
+        assert af.exists(fname)
+        assert af.ftype(fname) == (
+            'bs',
+            'ami://data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW'
+            )
+        
+        f11 = af.fopen(fname)
+        if verbose:
+            print "::: f11.fileinfos:"
+            print f11.fileinfos
+
+        f11_ref = {'metadata_items': None, 'stream_names': ['StreamRAW'], 'run_type': [], 'stream_tags': [{'obeys_lbk': None, 'stream_type': 'calibration', 'stream_name': 'LArElec-Delay-32s-High-Em'}], 'evt_type': [], 'tag_info': None, 'file_type': 'bs', 'file_name': 'ami://data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW', 'evt_number': [], 'beam_energy': [], 'eventdata_items': None, 'run_number': [150430], 'geometry': None, 'beam_type': [], 'file_guid': 'ami://data10_calib.00150430.calibration_LArElec-Delay-32s-High-Em.daq.RAW', 'file_md5sum': None, 'lumi_block': [], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 3072, 'metadata': None}
+
+        _compare_fileinfos(f11,f11_ref)
+        assert f11.run_number==f11_ref['run_number']
+        assert f11.evt_number==f11_ref['evt_number']
+        assert f11.lumi_block==f11_ref['lumi_block']
+        assert f11.run_type  ==f11_ref['run_type']
+        assert f11.beam_type ==f11_ref['beam_type']
+        assert f11.beam_energy==f11_ref['beam_energy']
+        
+        return # test11
+
+    def test012(self):
+        """test AMI dataset query (ESD)"""
+        import PyUtils.AthFile as af
+        fname = "ami:data09_900GeV.00142191.physics_BPTX.merge.ESD.r1093_p101"
+
+        # FIXME
+        assert af.exists(fname)
+        assert af.ftype(fname) == (
+            'pool',
+            'ami://data09_900GeV.00142191.physics_BPTX.merge.ESD.r1093_p101',
+            )
+        
+        f12 = af.fopen(fname)
+        if verbose:
+            print "::: f12.fileinfos:"
+            print f12.fileinfos
+
+        f12_ref = {'metadata_items': None, 'stream_names': ['StreamESD'], 'run_type': [], 'stream_tags': [{'obeys_lbk': None, 'stream_type': 'physics', 'stream_name': 'BPTX'}], 'evt_type': [], 'tag_info': None, 'file_type': 'pool', 'file_name': 'ami://data09_900GeV.00142191.physics_BPTX.merge.ESD.r1093_p101', 'evt_number': [], 'beam_energy': [], 'eventdata_items': None, 'run_number': [142191], 'geometry': None, 'beam_type': ['collisions'], 'file_guid': 'ami://data09_900GeV.00142191.physics_BPTX.merge.ESD.r1093_p101', 'file_md5sum': None, 'lumi_block': [], 'conditions_tag': 'COMCOND-REPPST-004-00', 'det_descr_tags': None, 'nentries': 1256124, 'metadata': None}
+
+        _compare_fileinfos(f12,f12_ref)
+        assert f12.run_number==f12_ref['run_number']
+        assert f12.evt_number==f12_ref['evt_number']
+        assert f12.lumi_block==f12_ref['lumi_block']
+        assert f12.run_type  ==f12_ref['run_type']
+        assert f12.beam_type ==f12_ref['beam_type']
+        assert f12.beam_energy==f12_ref['beam_energy']
+        
+        return # test12
+
+    def test013(self):
+        """test AMI dataset query (AOD)"""
+        import PyUtils.AthFile as af
+        fname = "ami:data09_idcomm.00111427.physics_L1TT-b6.merge.AOD.f97_m48"
+
+        # FIXME
+        assert af.exists(fname)
+        assert af.ftype(fname) == (
+            'pool',
+            'ami://data09_idcomm.00111427.physics_L1TT-b6.merge.AOD.f97_m48',
+            )
+        
+        f13 = af.fopen(fname)
+        if verbose:
+            print "::: f13.fileinfos:"
+            print f13.fileinfos
+
+        f13_ref = {'metadata_items': None, 'stream_names': ['StreamAOD'], 'run_type': [], 'stream_tags': [{'obeys_lbk': None, 'stream_type': 'physics', 'stream_name': 'L1TT-b6'}], 'evt_type': [], 'tag_info': None, 'file_type': 'pool', 'file_name': 'ami://data09_idcomm.00111427.physics_L1TT-b6.merge.AOD.f97_m48', 'evt_number': [], 'beam_energy': [], 'eventdata_items': None, 'run_number': [111427], 'geometry': None, 'beam_type': [], 'file_guid': 'ami://data09_idcomm.00111427.physics_L1TT-b6.merge.AOD.f97_m48', 'file_md5sum': None, 'lumi_block': [], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 27, 'metadata': None}
+
+        _compare_fileinfos(f13,f13_ref)
+        assert f13.run_number==f13_ref['run_number']
+        assert f13.evt_number==f13_ref['evt_number']
+        assert f13.lumi_block==f13_ref['lumi_block']
+        assert f13.run_type  ==f13_ref['run_type']
+        assert f13.beam_type ==f13_ref['beam_type']
+        assert f13.beam_energy==f13_ref['beam_energy']
+        
+        return # test13
+
+    def test014(self):
+        """test AMI dataset query (TAG)"""
+        import PyUtils.AthFile as af
+        fname = "ami:data10_1beam.00150419.express_express.merge.TAG_COMM.x2_m396"
+
+        # FIXME
+        assert af.exists(fname)
+        assert af.ftype(fname) == (
+            'pool',
+            'ami://data10_1beam.00150419.express_express.merge.TAG_COMM.x2_m396',
+            )
+        
+        f14 = af.fopen(fname)
+        if verbose:
+            print "::: f14.fileinfos:"
+            print f14.fileinfos
+
+        f14_ref = {'metadata_items': None, 'stream_names': ['StreamTAG_COMM'], 'run_type': [], 'stream_tags': [{'obeys_lbk': None, 'stream_type': 'express', 'stream_name': 'express'}], 'evt_type': [], 'tag_info': None, 'file_type': 'pool', 'file_name': 'ami://data10_1beam.00150419.express_express.merge.TAG_COMM.x2_m396', 'evt_number': [], 'beam_energy': [], 'eventdata_items': None, 'run_number': [150419], 'geometry': None, 'beam_type': [], 'file_guid': 'ami://data10_1beam.00150419.express_express.merge.TAG_COMM.x2_m396', 'file_md5sum': None, 'lumi_block': [], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 407, 'metadata': None}
+
+        _compare_fileinfos(f14,f14_ref)
+        assert f14.run_number==f14_ref['run_number']
+        assert f14.evt_number==f14_ref['evt_number']
+        assert f14.lumi_block==f14_ref['lumi_block']
+        assert f14.run_type  ==f14_ref['run_type']
+        assert f14.beam_type ==f14_ref['beam_type']
+        assert f14.beam_energy==f14_ref['beam_energy']
+        
+        return # test14
+
+    def test015(self):
+        """test bytestream file via XROOTD"""
+    
+        import PyUtils.AthFile as af
+        fname = 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data'
+        assert af.exists(fname)
+        assert af.ftype(fname) == ('bs', 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data')
+        
+        f15 = af.fopen(fname)
+        if verbose:
+            print "::: f15.fileinfos:"
+            print f15.fileinfos
+        f15_ref = {'file_md5sum':'e3e301bca63e4b5acb3b3cba43127ff9', 'metadata_items': None, 'stream_names': None, 'run_type': ['TEST'], 'stream_tags': [{'obeys_lbk': True, 'stream_type': 'physics', 'stream_name': 'IDCosmic'}, {'obeys_lbk': False, 'stream_type': 'calibration', 'stream_name': 'IDTracks'}], 'tag_info': None, 'file_type': 'bs', 'file_name': 'root://eosatlas//eos/atlas/user/b/binet/regr-tests/athfile/daq.ATLAS.0092226.physics.IDCosmic.LB0054.SFO-1._0001.data', 'file_guid': '72013664-ECA3-DD11-A90E-0015171A45AC', 'beam_type': [0], 'lumi_block': [54], 'conditions_tag': None, 'det_descr_tags': None, 'nentries': 417, 'eventdata_items': None, 'run_number': [92226], 'beam_energy': [0], 'geometry': None, 'evt_number': [8349492], 'evt_type': [], 'metadata': None}
+        _compare_fileinfos(f15,f15_ref)
+        assert f15.run_number==f15_ref['run_number']
+        assert f15.evt_number==f15_ref['evt_number']
+        assert f15.lumi_block==f15_ref['lumi_block']
+        assert f15.run_type  ==f15_ref['run_type']
+        assert f15.beam_type ==f15_ref['beam_type']
+        assert f15.beam_energy==f15_ref['beam_energy']
+
+        return # test15
+    
+        
+### tests ---------------------------------------------------------------------
+def main(verbose=False):
+    import PyUtils.AthFile as af
+    af.server
+    loader = unittest.TestLoader()
+    testSuite = loader.loadTestsFromModule( sys.modules[ __name__ ] )
+
+    runner = unittest.TextTestRunner( verbosity = 2 )
+    result = not runner.run( testSuite ).wasSuccessful()
+    return result
+
+if __name__ == "__main__":
+    import sys
+    print __file__
+    sys.exit(main())
+    
diff --git a/Tools/PyUtils/python/AthFile/timerdecorator.py b/Tools/PyUtils/python/AthFile/timerdecorator.py
new file mode 100644
index 00000000000..0ec2e9dd28f
--- /dev/null
+++ b/Tools/PyUtils/python/AthFile/timerdecorator.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file timerdecorator.py
+# @purpose decorate functions which will have a limited allotted time to finish execution
+# @date February 2011
+# ripped off from:
+#   http://code.activestate.com/recipes/483752/
+
+import sys
+import os
+import threading
+
+if 'linux' in sys.platform.lower():
+    def _run_from_valgrind():
+        """
+        helper function to detect if one runs under valgrind or not
+        """
+        for l in open('/proc/self/maps'):
+            if '/valgrind' in l:
+                return True
+        return False
+
+else: # mac-os
+    def _run_from_valgrind():
+        """
+        helper function to detect if one runs under valgrind or not
+        """
+        return 'VALGRIND_STARTUP_PWD' in os.environ
+    
+class TimeoutError(Exception):
+    pass
+
+def timelimit(timeout):
+    def internal(function):
+        def internal2(*args, **kw):
+            class Calculator(threading.Thread):
+                def __init__(self):
+                    threading.Thread.__init__(self)
+                    self.result = None
+                    self.error = None
+                
+                def run(self):
+                    try:
+                        self.result = function(*args, **kw)
+                    except BaseException:
+                        self.error = sys.exc_info()[0]
+            
+            c = Calculator()
+            c.start()
+            if _run_from_valgrind():
+                # don't set any timeout under valgrind...
+                c.join()
+            else:
+                c.join(timeout)
+            if c.isAlive():
+                raise TimeoutError
+            if c.error:
+                raise c.error
+            return c.result
+        return internal2
+    return internal
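+
+# A minimal usage sketch (assumption, not part of the original recipe):
+# decorate a callable with @timelimit(seconds); if it does not return within
+# the allotted time a TimeoutError is raised (except under valgrind, where no
+# timeout is applied).
+#
+#   @timelimit(5.0)
+#   def slow_io(fname):
+#       ...                    # potentially blocking work
+#
+#   try:
+#       result = slow_io('some.file')
+#   except TimeoutError:
+#       result = None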
diff --git a/Tools/PyUtils/python/Cmt.py b/Tools/PyUtils/python/Cmt.py
new file mode 100755
index 00000000000..e49319906ee
--- /dev/null
+++ b/Tools/PyUtils/python/Cmt.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+## @author: Sebastien Binet
+## @file :  Cmt.py
+## @purpose: a little wrapper around cmt.exe to provide fast(er) access to the
+##           list of clients of a given package. It also provides a means to
+##           retrieve all these clients...
+from __future__ import with_statement
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet"
+
+raise ImportError('PyUtils.Cmt is deprecated. Please use PyCmt.Cmt instead')
diff --git a/Tools/PyUtils/python/Decorators.py b/Tools/PyUtils/python/Decorators.py
new file mode 100644
index 00000000000..085743eb57b
--- /dev/null
+++ b/Tools/PyUtils/python/Decorators.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @author: Sebastien Binet <binet@cern.ch>
+# @date:   March 2008
+# @purpose: a set of decorators. Most of them (if not all) have been stolen
+#           from here:
+#           http://www.phyast.pitt.edu/~micheles/python/documentation.html
+#
+from __future__ import with_statement
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+__all__ = [
+    'memoize',
+    'forking',
+    'async',
+    ]
+
+from PyCmt.Decorators import *
diff --git a/Tools/PyUtils/python/Dso.py b/Tools/PyUtils/python/Dso.py
new file mode 100755
index 00000000000..e687339724c
--- /dev/null
+++ b/Tools/PyUtils/python/Dso.py
@@ -0,0 +1,650 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+## @author: Sebastien Binet
+## @file : Dso.py
+## @purpose: a set of classes to model so-called 'rootmap' files
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet"
+
+__all__ = [
+    'DsoDb',
+    'gen_typeregistry_dso',
+    'load_typeregistry_dso',
+
+    ]
+
+import os
+import re
+
+def _libName(lib):
+    import platform
+    if platform.system() == "Linux":
+        if lib[:3]  != "lib": lib = "lib"+lib
+        if lib[-3:] != ".so": lib = lib+".so"
+    return lib
+
+### data ----------------------------------------------------------------------
+_aliases = {
+    'ElementLinkInt_p1'           : 'ElementLink_p1<unsigned int>',
+    'basic_string<char>'          : 'string',
+    'std::basic_string<char>'     : 'string',
+    'vector<basic_string<char> >' : 'vector<string>',
+    ## FIXME: in waiting for a proper registration of typedefs in genreflex...
+    'INavigable4MomentumCollection' : 'DataVector<INavigable4Momentum>',
+    'IParticleContainer'            : 'DataVector<IParticle>',
+    }
+_typedefs = {
+    ## FIXME: in waiting for a proper registration of typedefs in genreflex...
+    'INavigable4MomentumCollection' : 'DataVector<INavigable4Momentum>',
+    'IParticleContainer'            : 'DataVector<IParticle>',
+    }
+
+_cpp_builtins = (
+    'char',      'unsigned char',      'signed char',
+    'signed', 
+    'short int',                       'short signed', 'short signed int',
+    'short',     'unsigned short',     'signed short',
+    'int',       'unsigned int',
+
+    'long int',
+    'long signed int',
+    'signed long int',
+
+    'long',
+    'long signed',  'signed long',
+    'unsigned long',
+    'unsigned long int',
+    'long unsigned int',
+    
+    'long long',
+    'long long int',
+    'unsigned long long',
+    'longlong',
+    
+    # no clue where this one comes from, who requests it, nor who
+    # came up with the alien naming scheme idea...
+    'ulonglong',
+    
+    'float',
+    'double',
+    'long double',
+    'bool',
+    )
+
+_is_stl_sequence = re.compile (r'std::(?P<ContType>.*?)'\
+                               r'<(?P<TemplateArg>.*?)'\
+                               r',\s*?std::allocator<\2> >')
+_is_stl_mapping = re.compile (r'std::map<'\
+                              r'(?P<TemplateArg1>.*?),\s*?'\
+                              r'(?P<TemplateArg2>.*?)'\
+                              r',\s*?std::allocator<\2> >')
+    
+
+### functions -----------------------------------------------------------------
+
+### helpers
+def _get_native_libname(libname):
+    """ return the OS-native name from an OS-indenpendent one """
+    import sys
+    plat = sys.platform
+    if plat.count('linux')>0:
+        lib_prefix,lib_suffix = 'lib', '.so'
+    elif plat == 'win32':
+        lib_prefix,lib_suffix = '', '.dll'
+    elif plat == 'darwin':
+        lib_prefix,lib_suffix = 'lib','.dylib'
+    else:
+        raise RuntimeError ("sorry platform [%s] is not (yet?) supported"%plat)
+    _sys_libname = libname
+    if not _sys_libname.startswith (lib_prefix):
+        _sys_libname = ''.join([lib_prefix,_sys_libname])
+    if not _sys_libname.endswith (lib_suffix):
+        _sys_libname = ''.join([_sys_libname, lib_suffix])
+    return _sys_libname
+
+def load_library (libname):
+    """
+    Helper method to load a library by its natural name, not the OS-native name.
+    But if the OS-native name is given, it is safely handled too.
+    usage:
+     >>> load_library ('AthenaServices')
+     >>> load_library ('AthenaServicesDict')
+    """
+    _sys_libname = _get_native_libname(libname)
+    import ctypes
+    return ctypes.cdll.LoadLibrary (_sys_libname)
+
+def find_library(libname):
+    """
+    Helper function to find the (full) path to a library given its natural name.
+     @return None on failure
+     
+    usage:
+     >>> find_library('AthenaServices')
+     '/afs/cern.ch/.../AtlasCore/[release]/InstallArea/.../libAthenaServices.so'
+    """
+    import os, sys
+    import ctypes.util as cu
+    ## # ctypes.util.find_library does not return the path
+    ## # to the library, just the basename of the so-name...
+    ## lib = cu._findLib_ldconfig(libname) or cu._findLib_gcc(libname)
+    ## if lib:
+    ##     return os.path.abspath(lib)
+    _sys_libname = _get_native_libname(libname)
+    # FIXME: REALLY not portable...
+    if os.name != 'posix':
+        raise RuntimeError('sorry OS [%s] is not supported' % os.name)
+    
+    if 'LD_LIBRARY_PATH' in os.environ:
+        for d in os.environ['LD_LIBRARY_PATH'].split(os.pathsep):
+            lib = os.path.join(d, _sys_libname)
+            if os.path.exists(lib):
+                return lib
+    return
+
+_dflt_typereg_fname = 'typereg_dso_db.csv'
+def gen_typeregistry_dso(oname=_dflt_typereg_fname):
+    '''Inspect all the accessible Reflex types and get their rootmap naming.
+    Also associate the CLID if available.
+    '''
+    import CLIDComps.clidGenerator as _c
+    cliddb = _c.clidGenerator(db=None)
+    del _c
+
+    import PyUtils.path as _p
+    oname = _p.path(oname)
+    del _p
+        
+    import PyUtils.Logging as _L
+    msg = _L.logging.getLogger('typereg-dso')
+    #msg.setLevel(_L.logging.INFO)
+    msg.setLevel(_L.logging.VERBOSE)
+    del _L
+    
+    msg.info("installing registry in [%s]...", oname)
+
+    # FIXME: should use the Cxx one...
+    #reg = DsoDb()
+    reg = PyDsoDb()
+    
+    cls_names = reg.db.keys()
+    msg.debug("::: loading reflex")
+    import PyCintex
+    PyCintex.Cintex.Enable()
+    PyCintex.loadDict('libReflexRflx.so')
+    rflx = PyCintex.makeNamespace('Reflex')
+    if not rflx:
+        rflx = PyCintex.makeNamespace('ROOT::Reflex')
+    rflx = rflx.Type
+    assert(rflx)
+
+    import PyCintex
+    _load_lib = PyCintex.loadDict
+    def _load_dict(libname,retry=10):
+        msg.debug("::: loading [%s]...", libname)
+        try:
+            return _load_lib(libname)
+        except (Exception,SystemError,), err:
+            msg.warning("**error** %s", err)
+        return
+
+    # we need to pre-load these guys as HepPDT is missing a linkopts
+    # against HepPID. see bug #46551
+    hep_pid = PyCintex.loadDict('libHepPID.so')
+    hep_pdt = PyCintex.loadDict('libHepPDT.so')
+
+    from PyUtils.Decorators import forking
+    
+    import os
+    dict_libs = reduce(set.union, [set(v) for v in reg.db.values()])
+    dict_libs = [os.path.basename(l) for l in dict_libs]
+
+    _veto_libs = [
+        'libG4EventGraphicsDict.so', # freaking statics !
+        ]
+    dict_libs = [l for l in dict_libs if l not in _veto_libs]
+    
+    msg.debug("::: loading dict-libraries...")
+    @forking
+    def inspect_dict_lib(lib):
+        _load_dict(lib)
+        try:
+            rflx_names = update_db(lib)
+            return rflx_names
+        except Exception, err:
+            msg.warning(err)
+        return {}
+
+    msg.debug(":"*80)
+    def update_db(libname):
+        rflx_names={}
+        for i in xrange(rflx.TypeSize()):
+            rflx_type = rflx.TypeAt(i)
+            rflx_name = rflx_type.Name(7)
+            root_name = _to_rootmap_name(rflx_name)
+##             # could also retro-fit typedefs, and allow their auto-loading...
+##             if rflx_type.IsTypedef():
+##                 import ROOT
+##                 print "[%s] ::: processing [%s -> %s]..." % (
+##                     ROOT.TClass.GetClass(rflx_name).GetSharedLibs(),
+##                     rflx_type.Name(6),
+##                     rflx_name)
+            if not(root_name in reg.db):
+##                 print "::ERR::",root_name
+                continue
+            ##rflx_names[rflx_name] = root_name
+            rflx_names[root_name] = rflx_name
+        return rflx_names
+
+    rflx_names = {}
+    for lib in dict_libs:
+        rflx_names.update(inspect_dict_lib(lib))
+                     
+    msg.debug("::: rflx types: %d %d",len(rflx_names),len(reg.db.keys()))
+    msg.info("::: saving informations in [%s]...", oname)
+    
+    import csv
+    db= csv.writer(open(oname,'w'), delimiter=';')
+    keys = sorted(rflx_names.keys())
+    for k in keys:
+        v = rflx_names[k]
+        clid = (cliddb.getClidFromName(k) or
+                cliddb.getClidFromName(v) or
+                cliddb.getClidFromTid(k)  or
+                cliddb.getClidFromTid(v))
+        if k != v:
+            db.writerow([k,v,clid or ''])
+        elif clid:
+            db.writerow([k,v,clid])
+
+    return rflx_names
+
+def load_typeregistry_dso(iname=None):
+    import os
+    import PyUtils.path as _p
+    if iname is None:
+        iname = _p.path(_dflt_typereg_fname)
+        if not iname.exists():
+            import os
+            projects = os.environ['CMTPATH'].split(os.pathsep)[:2]
+            for project_root in projects:
+                n = _p.path(project_root)/"InstallArea"/"share"/iname
+                if n.exists():
+                    iname = n
+                    break
+    else:
+        iname = _p.path(iname)
+
+    if not iname.exists():
+        raise OSError('no such file [%s]'%iname)
+    
+    import PyUtils.Logging as _L
+    msg = _L.logging.getLogger("typereg-dso")
+    msg.setLevel(_L.logging.INFO)
+    del _L
+    msg.info("::: loading typeregistry from [%s]...", iname)
+    
+    rflx_names = {}
+    f = iname.open(mode='r')
+    import csv
+    db = csv.reader(f, delimiter=';')
+    for row in db:
+        row = [i.strip() for i in row]
+        root_name = row[0]
+        rflx_name = row[1]
+        rflx_names[root_name] = rflx_name
+
+    del _p, csv
+    return rflx_names
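+
+# A minimal usage sketch (assumption): generate the CSV type registry and
+# read it back as a {rootmap-name: reflex-name} mapping, using the default
+# file name defined above.
+#
+#   gen_typeregistry_dso('typereg_dso_db.csv')
+#   rflx_names = load_typeregistry_dso('typereg_dso_db.csv')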
+
+
+import re
+def _is_rootcint_dict (libname):
+    """helper function to reject rootcint libraries entries from rootmap
+    files (which appeared w/ ROOT v21/22)
+    It seems all of them (and on all platforms) are named like:
+     vector<char>: vector.dll
+    """
+    if libname == ".dll": # pathological case...
+        return False
+    pat = re.compile(r'\w*?\.dll')
+    return not (libname.startswith("lib")) and \
+           not (pat.match (libname) is None)
+
+class CxxDsoDb(object):
+    """
+    The repository of 'rootmap' files (location, content,...)
+    """
+    def __init__(self):
+        # import cintex
+        import PyCintex; PyCintex.Cintex.Enable()
+        # import root
+        import PyUtils.RootUtils as ru
+        ROOT = ru.import_root()
+        self._cxx = ROOT.Ath.DsoDb.instance()
+        # load reflex
+        _load_dict = PyCintex.loadDict
+        _load_dict('ReflexRflx')
+        self._rflx = PyCintex.makeNamespace('Reflex')
+        if not self._rflx:
+            self._rflx = PyCintex.makeNamespace('ROOT::Reflex')
+        return
+
+    def _to_py(self, cxx):
+        dd = {}
+        kk = self._cxx.py_keys_from(cxx)
+        vv = self._cxx.py_vals_from(cxx)
+        for i in range(kk.size()):
+            dd[kk[i]] = list(vv[i])
+        return dd
+
+    @property
+    def db(self):
+        return self._to_py(self._cxx.db())
+    
+    @property
+    def pf(self):
+        return self._to_py(self._cxx.pf())
+    
+    def has_type(self, typename):
+        return self._cxx.has_type(typename)
+
+    def load_type(self, typename):
+        return self._cxx.load_type(typename)
+    
+    def capabilities(self, libname):
+        return list(self._cxx.capabilities(libname))
+    
+    def duplicates(self, libname, pedantic=False):
+        return self._to_py(self._cxx.duplicates(libname, pedantic))
+
+    def dict_duplicates(self, pedantic=False):
+        return self._to_py(self._cxx.dict_duplicates(pedantic))
+
+    dictDuplicates = dict_duplicates
+    
+    def pf_duplicates(self, pedantic=False):
+        return self._to_py(self._cxx.pf_duplicates(pedantic))
+
+    pfDuplicates = pf_duplicates
+    
+    def libs(self, detailed=False):
+        return list(self._cxx.libs(detailed))
+
+    def content(self, pedantic):
+        return self._to_py(self._cxx.content(pedantic))
+
+    @property
+    def dso_files(self):
+        return list(self._cxx.dso_files())
+
+    @property
+    def dsoFiles(self):
+        return self.dso_files
+
+def _to_rootmap_name(typename):
+    """
+    helper method to massage a typename into something understandable
+    by the rootmap files
+    """
+    global _aliases
+    typename = typename.replace(', ',',')
+    # first the easy case: builtins
+    if typename in _cpp_builtins:
+        return typename
+    # known missing aliases ?
+    if typename in _aliases.keys():
+        t = _aliases[typename]
+        return _to_rootmap_name(t)
+    # handle default template arguments of STL sequences
+    if _is_stl_sequence.match(typename):
+        # rootmap files do not contain the default template arguments
+        # for STL containers... consistency, again.
+        _m = _is_stl_sequence.match(typename)
+        _cont_type = _m.group('ContType')
+        _m_type = _m.group('TemplateArg')
+        # handle the dreaded 'std::Bla<Foo<d> >'
+        _m_type = _to_rootmap_name(_m_type.strip())
+        if _m_type.endswith('>'):
+            _m_type += ' '
+        typename = 'std::%s<%s>' % (_m.group('ContType'),
+                                    _m_type)
+    # need to massage a bit the typename to match ROOT naming convention
+    typename = typename.replace('std::basic_string<char> ',
+                                'string ')
+    typename = typename.replace('std::basic_string<char>',
+                                'string')
+    typename = typename.replace('std::', '')
+    typename = typename.replace('> >', '>->')
+    typename = typename.replace(' >', '>')
+    typename = typename.replace('>->', '> >')
+    return typename
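+
+# Illustrative examples (a sketch, not doctests from the original file) of the
+# massaging performed above: default std::allocator arguments are stripped and
+# the std:: prefix is dropped to match the rootmap naming convention, e.g.
+#   _to_rootmap_name('std::vector<int, std::allocator<int> >')  -> 'vector<int>'
+#   _to_rootmap_name('std::vector<std::string>')                -> 'vector<string>'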
+
+def _to_rflx_name (typename):
+    """helper method to massage a typename into something understandable
+    by reflex (which does not understand the same names as rootmaps).
+    """
+    global _aliases,_typedefs
+    typename = typename.replace(', ',',')
+    # first the easy case: builtins
+    if typename in _cpp_builtins:
+        return typename
+    # known missing typedefs ?
+    if typename in _typedefs.keys():
+        t = _typedefs[typename]
+        return _to_rflx_name(t)
+    # handle default template arguments of STL sequences
+    if _is_stl_sequence.match(typename):
+        # rootmap files do not contain the default template arguments
+        # for STL containers... consistency, again.
+        _m = _is_stl_sequence.match (typename)
+        _m_type = _m.group('TemplateArg')
+        # handle the dreaded 'std::Bla<Foo<d> >'
+        _m_type = _to_rflx_name (_m_type.strip())
+        if _m_type.endswith('>'):
+            _m_type += ' '
+        typename = 'std::%s<%s>' % (_m.group('ContType'), _m_type)
+    typename = typename.replace('std::string>',
+                                'std::basic_string<char> >')
+    typename = typename.replace('std::string',
+                                'std::basic_string<char>')
+    return typename
+
+class PyDsoDb( object ):
+    """
+    The repository of 'rootmap' files (location, content,...)
+    """
+    RootMap = "rootmap"
+    DsoMap  = "dsomap"
+    PluginNamespace = "__pf__"
+    
+    def __init__(self, name = "DsoDb"):
+        object.__init__(self)
+        self.name = name
+        self.db = { } # repository of components
+        self.pf = { } # repository of known components to the plugin svc
+
+        import PyUtils.Logging as _L
+        self.msg = _L.logging.getLogger('DsoDb')
+
+        self.dsoPath = os.environ['LD_LIBRARY_PATH']
+        self.__buildRepository()
+        return
+
+    def __buildRepository(self):
+        msg = self.msg
+        self.dsoFiles = set()
+        dsoPath = [p for p in self.dsoPath.split( os.pathsep )
+                   if not p.startswith(os.environ['ROOTSYS'])]
+        for path in dsoPath:
+            if not os.path.exists(path): continue
+            dir_content = None
+            try:
+                dir_content = os.listdir(path)
+            except Exception:
+                # try again...
+                try:
+                    dir_content = os.listdir(path)
+                except Exception,err:
+                    msg.warning("caught:\n%s", err)
+            if dir_content is None:
+                msg.warning("could not run os.listdir on [%s]" % path)
+                dir_content = []
+            dsoFiles = [ f for f in dir_content
+                         if f.endswith(self.RootMap) ]
+            for dsoFile in dsoFiles:
+                dsoFile = os.path.join( path, dsoFile )
+                if os.path.exists(dsoFile):
+                    line_nbr = -1
+                    self.dsoFiles.add(dsoFile)
+                    for line in open(dsoFile, 'r'):
+                        line_nbr += 1
+                        line = line.strip()
+                        if len(line) <= 0 or line[0] == "#":
+                            continue
+                        line = line.split()
+                        # Note that as of LCG-55, rootmaps have the following
+                        # format: 'symbol': libDict.so [listOfLinkedLibs.so...]
+                        # we are only interested in libDict.so...
+                        try:
+                            dsoKey, libName = line[0], line[1]
+                        except Exception,err:
+                            msg.warning(
+                                'could not parse %s:%i', dsoFile, line_nbr
+                                )
+                            msg.warning(
+                                '(some) reflex-dicts may fail to be auto-loaded'
+                                )
+                            msg.warning(err)
+                            continue
+                        dsoKey = dsoKey\
+                                .replace("Library.", "")\
+                                .replace( ":", ""  )\
+                                .replace( "@", ":" )\
+                                .replace( "-", " " )
+                        if dsoKey.startswith( self.PluginNamespace ):
+                            db = self.pf
+                        else:
+                            db = self.db
+                        if not db.has_key(dsoKey): db[dsoKey] = list()
+                        import re
+                        if _is_rootcint_dict (libName):
+                            #print "## discarding [%s]..." % libName
+                            continue
+                        libName = os.path.join(path, _libName(libName))
+                        db[dsoKey].append(libName)
+                        pass # loop over dso-lines
+                pass # loop over dsoFiles
+            pass # iter over dsoPath
+        return
+
+    def __str__(self):
+        s = os.linesep.join( [
+            "+--- %s ---" % self.name,
+            "|nbr of lib components: %i" % len(self.db.keys()),
+            "|nbr of pf  components: %i" % len(self.pf.keys()),
+            "|nbr of dso files:      %i" % len(self.dsoFiles),
+            "|nbr of known libs:     %i" % len(self.libs()),
+            "+-------------------------"
+            ] )
+        
+        return s
+
+    def __dups(self, db, pedantic):
+        dups = {}
+        for k in db.keys():
+            if len(db[k]) == 1: continue
+            if pedantic:        libs = db[k]
+            else:
+                baseLibs = set()
+                libs = []
+                for lib in db[k]:
+                    if os.path.basename(lib) not in baseLibs:
+                        libs.append(lib)
+                        baseLibs.add(os.path.basename(lib))
+                        pass
+                    pass
+            if len(libs) > 1:
+                dups[k] = [ lib for lib in libs ]
+        return dups
+
+    def duplicates(self, libName, pedantic = False):
+        caps = self.capabilities(libName)
+        dups = {}
+        for dupDb in [ self.dictDuplicates(pedantic),
+                       self.pfDuplicates(pedantic) ]:
+            for k in dupDb:
+                if k in caps:
+                    if not dups.has_key(k): dups[k] = []
+                    dups[k] += [ lib for lib in dupDb[k]
+                                 if not libName in os.path.basename(lib) ]
+        # note: sorting the list returned by dups.keys() would be a no-op, so
+        # only the per-key library lists are sorted here
+        for k in dups.keys():
+            dups[k].sort()
+        return dups
+    
+    def dictDuplicates(self, pedantic = False):
+        return self.__dups(self.db, pedantic)
+
+    def pfDuplicates(self, pedantic = False):
+        return self.__dups(self.pf, pedantic)
+
+    def capabilities(self, libName):
+        libName = _libName(libName)
+        caps = set()
+        for db in [self.db, self.pf]:
+            for k in db.keys():
+                if libName in [ os.path.basename(lib) for lib in db[k] ]:
+                    caps.add( k )
+        caps = [ cap for cap in caps ]
+        caps.sort()
+        if len(caps) == 0:
+            print "::: ERROR: No such library [%s] in dsoDb !!" % libName
+            raise ValueError, ""
+        return caps
+
+    def libs(self, detailedDump = False):
+        if detailedDump: fct = lambda x : x
+        else:            fct = os.path.basename
+        libs = set()
+        for db in [self.pf, self.db]:
+            for k in db.keys():
+                for lib in db[k]:
+                    libs.add(fct(lib))
+        libs = [ lib for lib in libs ]
+        libs.sort()
+        return libs
+    
+    def content(self, pedantic):
+        d = {}
+        for db in [self.pf, self.db]:
+            for k in db.keys():
+                if pedantic: libs = db[k]
+                else:
+                    baseLibs = set()
+                    libs = []
+                    for lib in db[k]:
+                        if os.path.basename(lib) not in baseLibs:
+                            libs.append(lib)
+                            baseLibs.add(os.path.basename(lib))
+                            pass
+                        pass
+                d[k] = [ lib for lib in libs ]
+        return d
+
+    def _to_rootmap_name(self, typename):
+        """
+        helper method to massage a typename into something understandable
+        by the rootmap files
+        """
+        return _to_rootmap_name(typename)
+
+    def _to_rflx_name (self, typename):
+        """helper method to massage a typename into something understandable
+        by reflex (which does not understand the same names as rootmaps).
+        """
+        return _to_rflx_name(typename)
+
+DsoDb = CxxDsoDb
+#DsoDb = PyDsoDb
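+
+# A minimal usage sketch (assumption): query the rootmap repository for the
+# libraries providing a given type and for duplicated dictionary entries.
+# The library name below is hypothetical, for illustration only.
+#
+#   dso = DsoDb()
+#   dso.has_type('DataVector<INavigable4Momentum>')
+#   dso.capabilities('libAthenaServicesDict.so')
+#   dups = dso.dict_duplicates()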
diff --git a/Tools/PyUtils/python/Helpers.py b/Tools/PyUtils/python/Helpers.py
new file mode 100755
index 00000000000..ef58d017e5c
--- /dev/null
+++ b/Tools/PyUtils/python/Helpers.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @author: Sebastien Binet <binet@cern.ch>
+# @date:   March 2007
+#
+#
+from __future__ import with_statement
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import sys
+import os
+
+import re
+from tempfile import NamedTemporaryFile
+class ShutUp(object):
+    """
+    A little helper class to keep ROOT silent...
+    """
+    DefaultFilter = [
+        re.compile("Warning in <TClass::TClass>: no dictionary for class."),
+        re.compile("Warning in <TEnvRec::ChangeValue>: duplicate entry."),
+        re.compile("Error in <TStreamerInfo::BuildOld>:."),
+        ]
+    def __init__(self, filters = DefaultFilter):
+        self._dummy = False # if dummy, we don't really shut-up ROOT...
+        if os.environ.get('PYUTILS_SHUTUP_DEBUG', '0') == '1':
+            self._dummy = True
+            
+        self.save_err  = open( '/dev/null', 'w' )
+        self.save_out  = open( '/dev/null', 'w' )
+        try:
+            self.quiet_err = NamedTemporaryFile( suffix = ".msg.log" )
+            self.quiet_out = NamedTemporaryFile( suffix = ".msg.log" )
+        except OSError:
+            # load problem ?
+            # retry a bit later... (in 2 seconds)
+            import time
+            time.sleep(2) #
+            try:
+                self.quiet_err = NamedTemporaryFile( suffix = ".msg.log" )
+                self.quiet_out = NamedTemporaryFile( suffix = ".msg.log" )
+            except OSError:
+                # then (implicitly) fallback on sys.stderr
+                self._dummy = True
+
+        self.filters = filters
+
+        if not self._dummy:
+            os.dup2( sys.stderr.fileno(), self.save_err.fileno() )
+            os.dup2( sys.stdout.fileno(), self.save_out.fileno() )
+        return
+    
+    def mute(self):
+        if not self._dummy:
+            os.dup2( self.quiet_err.fileno(), sys.stderr.fileno() )
+            os.dup2( self.quiet_out.fileno(), sys.stdout.fileno() )
+        return
+    
+    def unMute(self):
+        if not self._dummy:
+            os.dup2( self.save_err.fileno(), sys.stderr.fileno() )
+            os.dup2( self.save_out.fileno(), sys.stdout.fileno() )
+            self.__filterRootMessages(self.quiet_err)
+            self.quiet_err.seek(0)
+            self.__filterRootMessages(self.quiet_out)
+            self.quiet_out.seek(0)
+        return
+
+    def __filterRootMessages(self, fd):
+        fd.seek(0)
+        for l in fd.readlines():
+            printOut = True
+            for filter in self.filters:
+                if re.match(filter, l):
+                    printOut = False
+            if printOut:
+                print "PyRoot:",l.replace("\n","")
+            pass
+        return
+
+    # context-manager protocol
+    def __enter__(self):
+        return self.mute()
+    def __exit__(self,exc_type, exc_val, exc_tb):
+        return self.unMute()
+    
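+# A minimal usage sketch (assumption): ShutUp can be used explicitly or via
+# the context-manager protocol to filter ROOT's chatter while dictionaries
+# are being loaded:
+#
+#   silencer = ShutUp()
+#   silencer.mute()
+#   import ROOT            # noisy warnings are captured and filtered
+#   silencer.unMute()
+#
+#   # equivalently:
+#   with ShutUp():
+#       import ROOT
+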
+###
+from contextlib import contextmanager
+@contextmanager
+def restricted_ldenviron(projects=None, msg=None):
+    """
+    a context helper to limit ROOT automatic loading of dictionaries
+    to a given set of cmt-projects (LCGCMT, AtlasCore, ...)
+    """
+    if projects is None:
+        # nothing to do.
+        # execute user stuff
+        yield
+        # end of story
+        return
+
+    if isinstance(projects, str):
+        projects = [p.strip() for p in projects.split() if p.strip() != '']
+    if not isinstance(projects, (list, tuple)):
+        raise TypeError("projects has to be a list, tuple or space-separated"
+                        " string")
+
+    import os, sys
+    from PyCmt.Cmt import CmtWrapper
+    cmt = CmtWrapper()
+    def _get_projects_paths(project_names, cmt=cmt):
+        """return the list of paths of a project and its dependencies
+        """
+        if isinstance(project_names, str):
+            project_names = project_names.split()
+        projects = []
+        for proj_name in project_names:
+            projects.extend(cmt.project_deps(proj_name) + [proj_name])
+        projects = list(set(projects))
+        proj_paths = []
+        tree = cmt.project_tree()
+        for p in projects:
+            path = tree[p].path
+            proj_paths.append(path)
+        return proj_paths
+        
+    # select only projects user asked for (and their dependencies)
+    usr_projects = _get_projects_paths(projects)
+    # get the same thing for all the projects we are currently using
+    cur_projects = _get_projects_paths(cmt.projects_dag()[0].name)
+    # intersect:
+    blacklist = [p for p in cur_projects if p not in usr_projects]
+    
+    original_env = os.environ.copy()
+    orig_ld_path = os.environ.get('LD_LIBRARY_PATH', '')
+    orig_bin_path= os.environ.get('PATH', '')
+    
+    if 0:
+        print ":::cmt projects:",usr_projects
+        print ":::blacklist:",blacklist
+
+    def _slim_down(orig_path, blacklist=blacklist):
+        """helper method to slim down a path by removing every entry which
+        starts with some element of the blacklist
+        """
+        new_path = []
+        for d in orig_path.split(os.pathsep):
+            # removing every entry which is in the blacklist
+            burned = [p for p in blacklist if d.startswith(p)]
+            if len(burned) == 0:
+                new_path.append(d)
+        return os.pathsep.join(new_path)
+            
+    # slim-down LD_LIBRARY_PATH
+    new_ld_path = _slim_down(orig_ld_path)
+    # and the PATH (to keep everything consistent)
+    new_bin_path= _slim_down(orig_bin_path)
+    
+    # commit the new values
+    os.environ['LD_LIBRARY_PATH'] = new_ld_path
+    os.environ['PATH'] = new_bin_path
+
+    # execute user stuff...
+    try:
+        yield
+    finally:
+        # restore original environment
+        os.environ.update(original_env)
+    
+        
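+# A minimal usage sketch (assumption), mirroring how PoolFile.py uses this
+# helper: restrict dictionary auto-loading to a given project while running
+# some user code, then restore the original environment. The function called
+# inside the block is hypothetical.
+#
+#   import PyUtils.Helpers as H
+#   with H.restricted_ldenviron(projects='AtlasCore'):
+#       inspect_pool_file('ESD.pool.root')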
diff --git a/Tools/PyUtils/python/Logging.py b/Tools/PyUtils/python/Logging.py
new file mode 100644
index 00000000000..7e231771faf
--- /dev/null
+++ b/Tools/PyUtils/python/Logging.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+## @author: Sebastien Binet
+## @file :  Logging.py
+## @purpose: try to import Logging from AthenaCommon.
+##           falls back on stdlib's one
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet"
+
+__all__ = ['msg', 'logging']
+
+from PyCmt.Logging import msg, logging
diff --git a/Tools/PyUtils/python/MpUtils.py b/Tools/PyUtils/python/MpUtils.py
new file mode 100644
index 00000000000..94b91cb7d11
--- /dev/null
+++ b/Tools/PyUtils/python/MpUtils.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file: PyUtils/MpUtils.py
+# @purpose: a set of little tools for multiprocessing
+#           stolen from ClePy
+#            http://pypi.python.org/pypi/clepy/0.1
+#            MIT
+
+def chunkify(s, chunksize):
+
+    """
+    Yield sequence s in chunks of size chunksize.
+
+    >>> list(chunkify('abcdefg', 2))
+    ['ab', 'cd', 'ef', 'g']
+
+    >>> list(chunkify('abcdefg', 99))
+    ['abcdefg']
+
+    """
+
+    for i in range(0, len(s), chunksize):
+        yield s[i:i+chunksize]
+
+from multiprocessing import Pipe, Process
+
+class SubProcessIterator(object):
+    """Instances of this class process iterators in separate processes."""
+    def __init__(self, itertask, eoi='__eoi__'):
+        """Create a new subprocess iterator.
+
+        itertask : some iterable task to execute in a subprocess
+        eoi : an end-of-iteration marker - returned from the subprocess
+              to signal that iteration is complete.
+        """
+        self.client, self.master = Pipe()
+        self.end_of_input = eoi
+        pargs = [itertask, self.master, eoi]
+        self.process = Process(target=self.work, args=pargs)
+        self.started = False
+
+    def _start(self):
+        self.started = True
+        self.process.start()
+
+    @staticmethod
+    def work(iterator, master, eoi):
+        """The actual callable that is executed in the subprocess."""
+        for chunk in iterator:
+            master.send(chunk)
+        master.send(eoi)
+
+    def __iter__(self):
+        if not self.started:
+            self._start()
+        return self
+
+    def next(self):
+        item = self.client.recv()
+        if item != self.end_of_input:
+            return item
+        else:
+            self.next = self._empty
+            raise StopIteration
+
+    def _empty(self, *args, **params):
+        raise StopIteration
+
+def piter(iterable, eoi=None):
+    """Create a new subprocess iterator.
+
+    iterable : some iterable task to execute in a subprocess
+    eoi : an end-of-iteration marker - returned from the subprocess
+          to signal that iteration is complete.
+    """
+    return SubProcessIterator(iterable, eoi=eoi)
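+
+# A minimal usage sketch (assumption): consume an iterable in a separate
+# process, e.g. chunked input produced by chunkify above. Items flow back
+# through a Pipe until the end-of-iteration marker (here None) is received.
+#
+#   for chunk in piter(chunkify('abcdefgh', 3), eoi=None):
+#       print chunk        # -> 'abc', 'def', 'gh'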
+
diff --git a/Tools/PyUtils/python/PoolFile.py b/Tools/PyUtils/python/PoolFile.py
new file mode 100755
index 00000000000..3e8881554bd
--- /dev/null
+++ b/Tools/PyUtils/python/PoolFile.py
@@ -0,0 +1,1219 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @author: Sebastien Binet <binet@cern.ch>
+# @date:   March 2007
+#
+#
+from __future__ import with_statement
+
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+### --- data ------------------------------------------------------------------
+__all__ = [
+    'PoolFileCatalog',
+    'extract_stream_names',
+    'extract_streams_from_tag',
+    'PoolOpts',
+    'extract_items',
+    'PoolRecord',
+    'PoolFile',
+    'DiffFiles',
+    
+    'merge_pool_files',
+    ]
+
+### --- imports ---------------------------------------------------------------
+import sys
+import os
+import shelve
+import whichdb
+
+from Helpers import ShutUp
+from Decorators import forking
+
+### --- data ------------------------------------------------------------------
+class Units (object):
+    kb = 1024.
+    Mb = 1024.*1024.
+
+### --- implementations -------------------------------------------------------
+class PoolFileCatalog(object):
+    """ reverse-engineering of the POOL FileCatalog.
+        allows one to retrieve the physical filename from a logical one,
+        provided that the file-id is known to the (real) PoolFileCatalog
+    """
+    DefaultCatalog = "xmlcatalog_file:PoolFileCatalog.xml"
+    AllowedProtocols = (
+        # see: PoolSvc::createCatalog
+        # http://alxr.usatlas.bnl.gov/lxr/source/atlas/Database/AthenaPOOL/PoolSvc/src/PoolSvc.cxx?v=head#736
+        "xmlcatalog_file:",  # POOL default
+        "apcfile:",          # ATLAS_POOLCOND_PATH
+        "prfile:",           # file via PathResolver
+        "file:",             # simple file on local FS
+        )
+    
+    def __init__ (self, catalog=None):
+        super (PoolFileCatalog, self).__init__()
+        self.catalog = None
+
+        if catalog is None:
+            # chase poolfilecatalog location
+            catalog = os.environ.get("POOL_CATALOG", self.DefaultCatalog)
+
+        if isinstance(catalog, basestring):
+            catalog = [catalog]
+            
+        if not isinstance (catalog, (basestring, list)):
+            raise TypeError(
+                "catalog contact string should be a string or a list thereof! (got %r)"%
+                type(catalog))
+
+        osp = os.path
+        def osp_exp(x):
+            return osp.expanduser(osp.expandvars(x))
+
+        def _handle_apcfile_old(x):
+            """ return $ATLAS_POOLCOND_PATH/poolcond/x
+            """
+            if not 'ATLAS_POOLCOND_PATH' in os.environ:
+                return osp_exp(x)
+            pcp = os.environ["ATLAS_POOLCOND_PATH"]
+            if x.startswith("apcfile:"):
+                x = x[len("apcfile:"):]
+            return osp_exp(osp.join(pcp, 'poolcond', x))
+
+        def _handle_apcfile(x):
+            """ return $ATLAS_POOLCOND_PATH/x
+            """
+            if not 'ATLAS_POOLCOND_PATH' in os.environ:
+                return osp_exp(x)
+            pcp = os.environ["ATLAS_POOLCOND_PATH"]
+            if x.startswith("apcfile:"):
+                x = x[len("apcfile:"):]
+            return osp_exp(osp.join(pcp, x))
+
+        def _handle_xmlcatalog_file(x):
+            return osp_exp(x[len("xmlcatalog_file:"):])
+
+        def _handle_prfile(x):
+            x = x[len("prfile:"):]
+            x = osp_exp(x)
+            try:
+                import AthenaCommon.Utils.unixtools as u
+                return u.FindFile(x,
+                                  os.environ['DATAPATH'].split(os.pathsep),
+                                  os.R_OK)
+            except ImportError,err:
+                return x
+
+        def _handle_file(x):
+            x = x[len("file:"):]
+            x = osp_exp(x)
+            return x
+        
+        cat_dispatch = {
+            "xmlcatalog_file:": _handle_xmlcatalog_file,
+            "apcfile:": _handle_apcfile,
+            "prfile:":  _handle_prfile,
+            "file:":    _handle_file,
+            }
+        assert sorted(cat_dispatch.keys()) == sorted(self.AllowedProtocols), \
+               "catalog dispatch keys does not match AllowedProtocols:" \
+               "\n%s\n%s" % (sorted(cat_dispatch.keys()),
+                             sorted(self.AllowedProtocols))
+
+        from . import xmldict
+        def _build_catalog(catalog):
+            if not catalog.startswith(self.AllowedProtocols):
+                raise ValueError(
+                    "sorry PoolFile:PoolFileCatalog only supports %s"
+                    " as a protocol for the POOL file catalog (got: '%s')"
+                    % (self.AllowedProtocols, catalog)
+                    )
+            for protocol, handler in cat_dispatch.iteritems():
+                if catalog.startswith(protocol):
+                    catalog = handler(catalog)
+                    break
+            # make sure the catalog exists...
+            import os
+
+            if not os.path.exists (catalog):
+                return {}
+                # raise RuntimeError(
+                #     'could not find any PoolFileCatalog in [%s]' % catalog
+                #     )
+        
+       
+            root = xmldict.ElementTree.parse (catalog).getroot()
+            return dict(xmldict.xml2dict(root))
+
+        errors = []
+        cat = {'POOLFILECATALOG':{'File':[]}}
+        for c in catalog:
+            try:
+                bc = _build_catalog(c)
+                pc = bc.get('POOLFILECATALOG',{})
+                files = []
+                if pc:
+                    files = pc.get('File',[])
+                if isinstance(files, dict):
+                    files = [files]
+                cat['POOLFILECATALOG']['File'].extend(files)
+            except Exception, err:
+                errors.append(err)
+
+        if errors:
+            raise errors[0] # FIXME : should we customize this a bit ?
+
+        self.catalog = cat
+        pass
+
+    def pfn (self, url_or_fid):
+        """find the physical file name given a url or a file-id"""
+        import os.path as osp
+        url_or_fid = osp.expanduser(osp.expandvars(url_or_fid))
+        import types
+        if isinstance (url_or_fid, types.ListType):
+            return [self._pfn(f) for f in url_or_fid]
+        else:
+            return self._pfn(url_or_fid)
+        
+    def _pfn (self, url_or_fid):
+        """find the physical file name given a url or a file-id"""
+        if not ('POOLFILECATALOG' in self.catalog):
+            return None
+        if not ('File' in self.catalog['POOLFILECATALOG']):
+            return None
+
+        PFN_IDX = 0 # take this pfn when alternates exist
+        
+        files = self.catalog['POOLFILECATALOG']['File']
+        if isinstance(files, dict):
+            # in case there was only one entry in the catalog
+            files = [files]
+        import re
+        if url_or_fid.lower().startswith('fid:'):
+            url_or_fid = url_or_fid[len('fid:'):]
+        if re.compile (r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$').match (url_or_fid):
+            fid = url_or_fid.lower()
+            # better to check consistency of catalog over all entries
+            # than declare success on first match...
+            match = {}
+            for f in files:
+                if f.ID.lower() == fid:
+                    match[fid] = []
+                    pfn = f.physical.pfn
+                    if isinstance(pfn, (list,tuple)):
+                        match[fid].append([i.name for i in pfn])
+                    else:
+                        match[fid].append([pfn.name])
+            if len(match[fid])==1:
+                return match[fid][0][PFN_IDX]
+            if len(match[fid])>1:
+                raise LookupError (
+                    "more than one match for FID='%s'!\n%r"%(fid,match)
+                    )
+            raise KeyError ("no entry with FID='%s' in catalog" % fid)
+        else:
+            url = url_or_fid
+            if url.lower().startswith("lfn:"):
+                url = url[len("lfn:"):]
+                # better to check consistency of catalog over all entries
+                # than declare success on first match...
+                match = {}
+                for f in files:
+                    if (f.logical != '' # no LFN for this entry
+                        and f.logical.lfn.name == url):
+                        match[url] = []
+                        pfn = f.physical.pfn
+                        if isinstance(pfn, (list,tuple)):
+                            match[url].append([i.name for i in pfn])
+                        else:
+                            match[url].append([pfn.name])
+                if len(match[url])==1:
+                    return match[url][0][PFN_IDX]
+                if len(match[url])>1:
+                    raise LookupError (
+                        "more than one match for LFN='%s'!\n%r"%(url,match)
+                    )
+                raise KeyError ("no entry with LFN='%s' in catalog" % url)
+            # assume that if not LFN: then PFN:, no matter what...
+            if url.lower().startswith("pfn:"):
+                url = url[len("pfn:"):]
+            return url
+
+    def __call__ (self, url_or_fid):
+        return self.pfn (url_or_fid)
+    
+    pass
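+
+# A minimal usage sketch (assumption): resolve a physical file name from the
+# catalog(s), either by GUID (FID) or by logical file name (LFN), assuming
+# the entry is known to the catalog. The GUID below is the one used in the
+# POOL token example further down and serves only as an illustration.
+#
+#   catalog = PoolFileCatalog()      # uses $POOL_CATALOG or the default
+#   pfn = catalog('FID:7CCD8D32-BC37-DD11-967E-0030487CD916')
+#   pfn = catalog('LFN:my.logical.name')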
+
+def extract_stream_names(fname):
+    """find the stream names ('StreamESD', 'StreamAOD',...) contained in a
+       given POOL file
+    @params:
+     `fname`     the filename of the POOL file to inspect (can be LFN or PFN)
+
+    example:
+     >>> import PyUtils.PoolFile as pf
+     >>> streams = pf.extract_stream_names ('ESD.pool.root')
+     >>> print streams
+     ['StreamESD']
+    """
+    import PyUtils.AthFile as af
+    f = af.fopen(fname)
+    return f.fileinfos['stream_names']
+
+@forking
+def extract_streams_from_tag (fname,
+                              tree_name="POOLCollectionTree",
+                              nentries=-1,
+                              stream_refs=None):
+    """find the GUID(s) of a list of streams which are contained inside a TAG
+       file.
+       @params:
+       `fname`       the filename of the TAG file to inspect
+                     (can be a LFN or FID)
+       `tree_name`   the name of the TTree containing the stream-refs
+                     ('POOLCollectionTree' is the current default; older
+                      files use 'CollectionTree')
+       `nentries`    the number of entries to inspect, among the ttree entries
+                     (-1: all the entries)
+       `stream_refs` a list of stream names
+                     (ex: ['StreamAOD_ref', 'Stream1_ref'] or,
+                          None - to inspect all the stream refs in the TAG)
+
+    example:
+     >>> streams = extract_streams_from_tag ('tag.pool')
+     >>> from pprint import pprint
+     >>> pprint (streams)
+     {'Stream1_ref': ['BEE16671-B9F6-DA11-8219-00304871B611'],
+      'StreamAOD_ref': ['96F3018E-A0AC-DD11-8115-000423D59D52'],
+      'StreamESD_ref': ['384D0CFC-9FAC-DD11-A905-000423D59D52'],
+      'StreamRDO_ref': ['22C5BD99-3059-DB11-8D44-0030488365E6']}
+     >>> from PoolFile import PoolFileCatalog as pfc
+     >>> pprint (pfc().pfn(streams['StreamAOD_ref'][0]))
+     ['aod.pool']
+    """
+    
+    import sys
+    import PyUtils.RootUtils as ru
+    ROOT = ru.import_root()
+    import PyCintex; PyCintex.Cintex.Enable()
+
+    print "::: opening file [%s]..." % fname
+
+    # get the "final" file name (handles all kind of protocols)
+    import PyUtils.AthFile as af
+    try:
+        protocol, fname = af.server.fname(fname)
+    except Exception,err:
+        print "::: warning: problem extracting file name from PoolFileCatalog"
+        print "::: warning: will use [%s]" % fname
+    
+    f = ROOT.TFile.Open (fname, "read")
+    assert not f.IsZombie() and f.IsOpen(), \
+           "problem opening POOL file [%s]"%fname
+
+    # backward compat:
+    # in 15.2.0 TAG file got a new tree name 'POOLCollectionTree'
+    # it was named 'CollectionTree' before that...
+    keys = [k.GetName() for k in f.GetListOfKeys()]
+    if tree_name not in keys and "CollectionTree" not in keys:
+        err= "::: error: neither [%s] nor [CollectionTree] in file [%s]" % (
+            tree_name, fname)
+        print err
+        raise RuntimeError(err)
+    # try the backward compat. hack
+    if tree_name not in keys:
+        tree_name = "CollectionTree"
+        
+    t = f.Get(tree_name)
+    assert isinstance(t, ROOT.TTree), \
+           "could not retrieve tree [%s]" % tree_name
+    
+    # interesting branch names
+    branches = [str(b.GetName()) for b in t.GetListOfBranches()
+                if b.GetName().endswith ("_ref")]
+    if stream_refs is None:
+        stream_refs = branches
+    else:
+        _streams = stream_refs[:]
+        stream_refs = []
+        for ref in _streams:
+            if not ref in branches:
+                print "::: discarding [%s] from file chasing..."%ref
+            else:
+                stream_refs.append (ref)
+    if nentries <= 0: nentries = t.GetEntries()
+    else:             nentries = min (nentries, t.GetEntries())
+    print "::: chasing streams: %s" % stream_refs
+    print "::: ...over entries: %r" % nentries
+    
+    # disable everything...
+    t.SetBranchStatus ("*", 0)
+
+    streams = dict()
+    for ref in stream_refs:
+        streams[ref] = list() # list of FileIDs, according to POOL
+                              # ex: 'B2B485E1-BB37-DD11-984C-0030487A17BA'
+        # but (re-)enable the branches we are interested in
+        t.SetBranchStatus (ref, 1)
+    
+    import re
+    # Pool token are of the form:
+    # '[DB=7CCD8D32-BC37-DD11-967E-0030487CD916]\
+    #  [CNT=POOLContainer_DataHeader]\
+    #  [CLID=72FBBC6F-C8BE-4122-8790-DC627696C176]\
+    #  [TECH=00000202]\
+    #  [OID=0000008C-000002BA]'
+    token = re.compile (r'[[]DB=(?P<FID>.*?)[]]'\
+                        r'[[]CNT=(?P<CNT>.*?)[]]'\
+                        r'[[]CLID=(?P<CLID>.*?)[]]'\
+                        r'[[]TECH=(?P<TECH>.*?)[]]'\
+                        r'[[]OID=(?P<OID>.*?)[]]')
+    for i in xrange(nentries):
+        t.GetEntry (i)
+        for ref in stream_refs:
+            try:
+                token_str = getattr(t, ref)
+            except AttributeError,err:
+                # filthy work-around...
+                try:
+                    token_branch = t.GetBranch (ref)
+                    token_branch.GetEntry (0)
+                    token_str = token_branch.GetLeaf("Token").GetValueString()
+                except Exception,new_err:
+                    print "::: could not access stream-ref [%s] (entry #%i)"%(
+                        ref, i
+                        )
+                    continue
+            tok = token.match (token_str)
+            if not tok:
+                print "::: invalid POOL token: [%s]" % token_str
+                continue
+            streams[ref].append (tok.group('FID'))
+
+    for ref in stream_refs:
+        streams[ref] = list(set(streams[ref]))
+    return streams
+
+class PoolOpts(object):
+    FAST_MODE   = False
+    SUPER_DETAILED_BRANCH_SZ = False
+    READ_MODE   = "READ"
+    POOL_HEADER = "POOLContainer"
+    EVENT_DATA  = "CollectionTree"
+    META_DATA   = "MetaData"
+    HDR_FORMAT  = "  %11s     %11s     %11s      %11s  %5s  %s"
+    ROW_FORMAT  = "%12.3f kb %12.3f kb %12.3f kb %12.3f %8i  %s"
+
+    @classmethod
+    def isData(cls, name):
+        return ( len(name) >= len("##") and name[:2] != "##" ) and \
+               ( name != PoolOpts.POOL_HEADER )
+
+    @classmethod
+    def isDataHeader(cls, name):
+        return ( name == PoolOpts.POOL_HEADER ) or \
+               ( name == PoolOpts.POOL_HEADER+"_DataHeader" )
+
+    @classmethod
+    def isEventData(cls, name):
+        return len(name) >= len(PoolOpts.EVENT_DATA) and \
+               name[:len(PoolOpts.EVENT_DATA)] == PoolOpts.EVENT_DATA
+
+    pass # class PoolOpts
+
+def _get_total_size (branch):
+   if PoolOpts.FAST_MODE:
+       return -1.
+   if not PoolOpts.SUPER_DETAILED_BRANCH_SZ:
+       return branch.GetTotalSize()
+   brSize = 0
+   branch.LoadBaskets()
+   for bnum in range(0, branch.GetWriteBasket()):
+       basket = branch.GetBasket(bnum)
+       brSize += basket.GetObjlen() - 8
+   return brSize
+
+def retrieveBranchInfos( branch, poolRecord, ident = "" ):
+    fmt = "%s %3i %8.3f %8.3f %8.3f %s"
+    if 0:
+        out = fmt % ( ident,
+                      branch.GetListOfBranches().GetSize(),
+                      _get_total_size (branch), 
+                      branch.GetTotBytes(),
+                      branch.GetZipBytes(),
+                      branch.GetName() )
+        print out
+        
+    branches  = branch.GetListOfBranches()
+    for b in branches:
+        poolRecord.memSize  += _get_total_size (b) / Units.kb
+        if (b.GetZipBytes() < 0.001):
+            poolRecord.memSizeNoZip  += _get_total_size (b) / Units.kb
+        poolRecord.diskSize += b.GetZipBytes() / Units.kb
+        poolRecord = retrieveBranchInfos ( b, poolRecord, ident+"  " )
+        
+    return poolRecord
+
+def make_pool_record (branch, dirType):
+    memSize = _get_total_size (branch) / Units.kb
+    zipBytes = branch.GetZipBytes()
+    memSizeNoZip = memSize if zipBytes < 0.001 else 0.
+    diskSize     = branch.GetZipBytes() / Units.kb
+    return PoolRecord(branch.GetName(), memSize, diskSize, memSizeNoZip,
+                      branch.GetEntries(),
+                      dirType=dirType)
+
+def extract_items(pool_file, verbose=True, items_type='eventdata'):
+    """Helper function to read a POOL file and extract the item-list from the
+    DataHeader content.
+    @params
+      `pool_file`  the name of the pool file to inspect
+      `verbose`    self-explanatory
+      `items_type` what kind of items one is interested in
+                   allowed values: 'eventdata' 'metadata'
+    Note: this function is actually executed in a forked sub-process
+          if `fork` is True
+    """
+    _allowed_values = ('eventdata',
+                       'metadata',)
+    if not items_type in _allowed_values:
+        err = "".join([
+            "invalid argument for 'items_type'. ",
+            "got: [%s] " % items_type,
+            "(allowed values: %r)" % _allowed_values
+            ])
+        raise ValueError, err
+    import PyUtils.AthFile as af
+    f = af.fopen(pool_file)
+    key = '%s_items' % items_type
+    items = f.fileinfos[key]
+    if items is None:
+        items = []
+    return items
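+
+# A minimal usage sketch (assumption): retrieve the event-data item list of a
+# POOL file; the result is the 'eventdata_items' field of the AthFile infos.
+# The file name below is an example only.
+#
+#   items = extract_items('AOD.pool.root', items_type='eventdata')
+#   for item in items:
+#       print item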
+
+class PoolRecord(object):
+    """
+    """
+    class Sorter:
+        DiskSize      = "diskSize"
+        MemSize       = "memSize"
+        ContainerName = "name"
+
+        @staticmethod
+        def allowedValues():
+            return [ PoolRecord.Sorter.DiskSize,
+                     PoolRecord.Sorter.MemSize,
+                     PoolRecord.Sorter.ContainerName ]
+        pass
+    def __init__(self, name, memSize, diskSize, memSizeNoZip, nEntries, dirType,
+                 detailedInfos = ""):
+        object.__init__(self)
+        self.name          = name
+        self.memSize       = memSize
+        self.diskSize      = diskSize
+        self.memSizeNoZip  = memSizeNoZip
+        self.nEntries      = nEntries
+        self.dirType       = dirType
+        self.details       = detailedInfos
+        return
+
+class PoolFile(object):
+    """
+    A simple class to retrieve information about the content of a POOL file.
+    It should be abstracted from the underlying technology used to create this
+    POOL file (Db, ROOT,...).
+    Right now, we are using the easy and lousy solution: going straight to the
+    ROOT 'API'.
+    """
+    
+    def __init__(self, fileName, verbose=True):
+        object.__init__(self)
+
+        self._fileInfos = None
+        self.keys       = None
+        self.dataHeader = PoolRecord("DataHeader", 0, 0, 0,
+                                     nEntries = 0,
+                                     dirType = "T")
+        self.data       = []
+        self.verbose = verbose
+
+        # get the "final" file name (handles all kind of protocols)
+        try:
+            import PyUtils.AthFile as af
+            protocol, fileName = af.server.fname(fileName)
+        except Exception,err:
+            print "## warning: problem opening PoolFileCatalog:\n%s"%err
+            pass
+        
+        self.poolFile = None
+        dbFileName = whichdb.whichdb( fileName )
+        if not dbFileName in ( None, '' ):
+            if self.verbose==True:
+                print "## opening file [%s]..." % str(fileName)
+            db = shelve.open( fileName, 'r' )
+            if self.verbose==True:
+                print "## opening file [OK]"
+            report = db['report']
+            self._fileInfos = report['fileInfos']
+            self.dataHeader = report['dataHeader']
+            self.data       = report['data']
+        else:
+            import PyUtils.Helpers as _H
+            projects = 'AtlasCore' if PoolOpts.FAST_MODE else None
+            with _H.restricted_ldenviron (projects=projects):
+                if self.verbose==True:
+                    print "## opening file [%s]..." % str(fileName)
+                self.__openPoolFile( fileName )
+                if self.verbose==True:
+                    print "## opening file [OK]"
+                self.__processFile()
+            
+        return
+
+    def __openPoolFile(self, fileName):
+        # hack to prevent ROOT from loading graphics libraries and hence
+        # bothering our fellow Mac users
+        if self.verbose==True:
+            print "## importing ROOT..."
+        import PyUtils.RootUtils as ru
+        ROOT = ru.import_root()
+        if self.verbose==True:
+            print "## importing ROOT... [DONE]"
+        # prevent ROOT from being too verbose
+        rootMsg = ShutUp()
+        rootMsg.mute()
+        ROOT.gErrorIgnoreLevel = ROOT.kFatal
+        rootMsg.unMute()
+
+        import PyCintex
+        PyCintex.Cintex.Enable()
+
+        rootMsg.mute()
+        poolFile = None
+        try:
+            poolFile = ROOT.TFile.Open( fileName, PoolOpts.READ_MODE )
+        except Exception, e:
+            rootMsg.unMute()
+            print "## Failed to open file [%s] !!" % fileName
+            print "## Reason:"
+            print e
+            print "## Bailing out..."
+            raise IOError, "Could not open file [%s]" % fileName
+
+        rootMsg.unMute()
+
+        if poolFile == None:
+            print "## Failed to open file [%s] !!" % fileName
+            msg = "Could not open file [%s]" % fileName
+            raise IOError, msg
+
+        self.poolFile = poolFile
+        assert self.poolFile.IsOpen() and not self.poolFile.IsZombie(), \
+               "Invalid POOL file or a Zombie one" 
+        self._fileInfos = {
+            'name' : self.poolFile.GetName(),
+            'size' : self.poolFile.GetSize(),
+            }
+        return
+
+    def __processFile(self):
+        ## first we try to fetch the DataHeader
+        name  = PoolOpts.POOL_HEADER
+        dhKey = self.poolFile.FindKey( name )
+        if dhKey:
+            nEntries = dhKey.ReadObj().GetEntries()
+        else:
+            name  = PoolOpts.POOL_HEADER + "_DataHeader"
+            dhKey = self.poolFile.FindKey( name )
+            if dhKey:
+                nEntries = dhKey.ReadObj().GetEntries()
+            else:
+                nEntries = 0
+
+        keys = []
+        containers = []
+        for k in self.poolFile.GetListOfKeys():
+            containerName = k.ReadObj().GetName()
+            if containerName not in containers:
+                keys.append(k)
+                containers.append(containerName)
+                pass
+            pass
+        keys.sort()
+        self.keys = keys
+        del containers
+        
+        for k in keys:
+            tree = k.ReadObj()
+            name = tree.GetName()
+            
+            if not PoolOpts.isDataHeader(name) and \
+               not PoolOpts.isData(name) :
+                continue
+
+            if PoolOpts.isDataHeader(name):
+                if name == PoolOpts.POOL_HEADER:
+                    contName     = "DataHeader"
+                else:
+                    contName     = name.replace(PoolOpts.POOL_HEADER+"_", "" )
+                memSize      = tree.GetTotBytes() / Units.kb
+                diskSize     = tree.GetZipBytes() / Units.kb
+                memSizeNoZip = 0.0
+                if diskSize < 0.001:
+                    memSizeNoZip = memSize
+                nEntries     = tree.GetEntries()
+                ## try to also handle non-T/P separated DataHeaders
+                ## (from old files)...
+                dhBranchNames = [
+                    br.GetName() for br in tree.GetListOfBranches()
+                    if br.GetName().count("DataHeader_p") > 0
+                ]
+                if len(dhBranchNames) == 1:
+                    dhBranch = tree.GetBranch(dhBranchNames[0])
+                    poolRecord = retrieveBranchInfos(
+                        dhBranch,
+                        PoolRecord( contName, 0., 0., 0.,
+                                    nEntries,
+                                    dirType = "T" ),
+                        ident = "  "
+                        )
+                else:
+                    poolRecord = PoolRecord(contName, memSize, diskSize, memSizeNoZip,
+                                            nEntries,
+                                            dirType = "T")
+                    
+                if contName == "DataHeader":
+                    self.dataHeader = poolRecord
+                else:
+                    self.data += [ poolRecord ]
+            elif PoolOpts.isData(name):
+                if not hasattr(tree, 'GetListOfBranches'):
+                    continue
+                branches = tree.GetListOfBranches()
+                ## print "=-=->",name,type(tree).__name__
+                dirType = "T"
+                if name in (PoolOpts.EVENT_DATA, PoolOpts.META_DATA):
+                    dirType = "B"
+                for i,branch in enumerate(branches):
+                    poolRecord = retrieveBranchInfos(
+                        branch,
+                        make_pool_record(branch, dirType),
+                        ident = "  "
+                        )
+                    ## if dirType == "T":
+                    ##     poolRecord.name = name.replace( PoolOpts.EVENT_DATA,
+                    ##                                     "" )
+                    self.data += [ poolRecord ]
+            else:
+                print "WARNING: Don't know how to deal with branch [%s]" % \
+                      name
+
+            pass # loop over keys
+        
+        return
+    
+    def fileInfos(self):
+        return os.linesep.join( [
+            "File:" + self._fileInfos['name'],
+            "Size: %12.3f kb" % (self._fileInfos['size'] / Units.kb),
+            "Nbr Events: %i" % self.dataHeader.nEntries
+            ] )
+
+    
+    def checkFile(self, sorting = PoolRecord.Sorter.DiskSize):
+        if self.verbose==True:
+            print self.fileInfos()
+
+        ## sorting data
+        data = self.data
+        if sorting in PoolRecord.Sorter.allowedValues():
+            import operator
+            data.sort(key = operator.attrgetter(sorting) )
+
+        def _get_val(x, dflt=-999.):
+            if PoolOpts.FAST_MODE:
+                return dflt
+            return x
+
+        totMemSize  = _get_val(self.dataHeader.memSize, dflt=0.)
+        totDiskSize = self.dataHeader.diskSize
+        
+        def _safe_div(num,den):
+            if float(den) == 0.:
+                return 0.
+            return num/den   
+                 
+        if self.verbose==True:
+            print ""
+            print "="*80
+            print PoolOpts.HDR_FORMAT % ( "Mem Size", "Disk Size","Size/Evt",
+                                          "MissZip/Mem","items",
+                                          "(X) Container Name (X=Tree|Branch)" )
+            print "="*80
+            
+            print PoolOpts.ROW_FORMAT % (
+                _get_val (self.dataHeader.memSize),
+                self.dataHeader.diskSize,
+                _safe_div(self.dataHeader.diskSize,float(self.dataHeader.nEntries)),
+                _get_val (_safe_div(self.dataHeader.memSizeNoZip,
+                                    self.dataHeader.memSize)),
+                self.dataHeader.nEntries,
+                "("+self.dataHeader.dirType+") "+self.dataHeader.name
+                )
+            print "-"*80
+
+        for d in data:
+            totMemSize  += 0. if PoolOpts.FAST_MODE else d.memSize
+            totDiskSize += d.diskSize
+            memSizeNoZip = d.memSizeNoZip/d.memSize if d.memSize != 0. else 0.
+            if self.verbose==True:
+                print PoolOpts.ROW_FORMAT % (
+                    _get_val (d.memSize),
+                    d.diskSize,
+                    _safe_div(d.diskSize, float(self.dataHeader.nEntries)),
+                    _get_val (memSizeNoZip),
+                    d.nEntries,
+                    "("+d.dirType+") "+d.name
+                    )
+
+        if self.verbose==True:
+            print "="*80
+            print PoolOpts.ROW_FORMAT % (
+                totMemSize,
+                totDiskSize,
+                _safe_div(totDiskSize, float(self.dataHeader.nEntries)),
+                0.0,
+                self.dataHeader.nEntries,
+                "TOTAL (POOL containers)"
+                )
+            print "="*80
+            if PoolOpts.FAST_MODE:
+                print "::: warning: FAST_MODE was enabled: some columns' content ",
+                print "is meaningless..."
+        return
+
+    def detailedDump(self, bufferName = sys.stdout.name ):
+        if self.poolFile == None or \
+           self.keys     == None:
+            print "Can't perform a detailedDump with a shelve file as input !"
+            return
+                  
+        if bufferName == sys.stdout.name:
+            bufferName = "/dev/stdout"
+        stdout = open("/dev/stdout", "w")
+        out = open( bufferName, "w" )
+        os.dup2( sys.stdout.fileno(), stdout.fileno() )
+        os.dup2( out.fileno(),        sys.stdout.fileno() )
+
+        out.write( "#" * 80 + os.linesep )
+        out.write( "## detailed dump" + os.linesep )
+        out.flush()
+        
+        for key in self.keys:
+            tree = key.ReadObj()
+            name = tree.GetName()
+
+            if PoolOpts.isDataHeader(name) or \
+               PoolOpts.isData(name):
+                try:
+                    print >> sys.stderr, "=== [%s] ===" % name
+                    tree.Print()
+                except Exception, err:
+                    print >> sys.stderr, "Caught:",err
+                    print >> sys.stderr, sys.exc_info()[0]
+                    print >> sys.stderr, sys.exc_info()[1]
+                    pass
+                pass
+            pass
+        out.write( "#" * 80 + os.linesep )
+        out.flush()
+        out.write( "#" * 80 + os.linesep )
+##         out.write( "#### Map ####" + os.linesep )
+##         out.flush()
+##         self.poolFile.Map()
+##         out.write( "#" * 80 + os.linesep )
+        out.flush()
+        if bufferName != "<stdout>":
+            os.dup2( stdout.fileno(), sys.stdout.fileno() )
+            pass
+        return
+
+    def poolRecord(self, name):
+        """
+        Return the PoolRecord matching the given (branch) name.
+        Raise KeyError if no match is found.
+        """
+        for data in self.data:
+            if data.name == name:
+                return data
+        raise KeyError, "No PoolRecord with name [%s]" % name
+
+    def saveReport (self, fileName):
+        """
+        Save all the gathered information into a python shelve or a CSV file
+        (depending on the extension of `fileName`)
+        """
+        import os
+        if os.path.splitext(fileName)[-1] == '.csv':
+            return self._save_csv_report (fileName)
+        return self._save_shelve_report (fileName)
+    
+    def _save_shelve_report(self, fileName):
+        """
+        Save all the gathered information into a python shelve.
+        Data can then be read like so:
+         >>> import shelve
+         >>> db = shelve.open( 'myfile.dat', 'r' )
+         >>> report = db['report']
+         >>> print 'fileInfos:',report['fileInfos']
+         >>> print 'dataHeader/memSize:',report['dataHeader'].memSize
+         >>> for d in report['data']:
+         ...   print 'data:',d.name,d.nEntries,d.memSize
+        """
+        import shelve, os
+        if os.path.exists (fileName):
+            os.unlink (fileName)
+        db = shelve.open (fileName)
+        db['report'] = {
+            'fileInfos'  : self._fileInfos,
+            'nbrEvts'    : self.dataHeader.nEntries,
+            'dataHeader' : self.dataHeader,
+            'data'       : self.data
+            }
+        db.close()
+        return
+
+    def _save_csv_report(self, fileName):
+        """
+        Save all the gathered information into a CSV file
+        """
+        import csv, os
+        if os.path.exists (fileName):
+            os.unlink (fileName)
+        o = csv.writer (open (fileName, 'w'))
+        nentries = self.dataHeader.nEntries
+        map (o.writerow,
+             [ ['file name', self._fileInfos['name']],
+               ['file size', self._fileInfos['size']],
+               ['nbr evts',  self.dataHeader.nEntries],
+               ['mem size', 'disk size', 'mem size nozip', 'items',
+                'container name', 'branch type'],
+               ])
+        map (o.writerow,
+             [ [d.memSize, d.diskSize, d.memSizeNoZip,
+                d.nEntries, d.name, d.dirType]
+               for d in self.data ])
+        return
+
+    def printBreakDown(self, categories=None):
+        """
+        Print out the sizes of containers, broken down by category.
+        `categories` is expected to be a list of item-lists, an item-list
+        being a list of (class-name, storegate-key) pairs.
+        """
+
+        def parse( data, klass, key ):
+            import re
+            # pattern to match 'CppClassName_pX_MyKey'
+            # eg: EventInfo_p2_McEventInfo
+            #     TauDetailsContainer_tlp1
+            tp_pattern = re.compile(r'(?P<ClassName>.*?)_(?P<Vers>(tlp|p)[0-9])((?P<SgKey>_.*)|)')
+
+            hint_pattern = re.compile(
+                r'%s(_(tlp|p)[0-9]|)_%s' % ( klass, key )
+                )
+            return re.match( hint_pattern, data )
+
+            # NB: the code below is never reached because of the return
+            # statement just above
+            className = None
+            sgKey     = None
+            pat = re.match(tp_pattern, data)
+            if pat:
+                className = pat.group("ClassName")
+                sgKey     = pat.group("SgKey")
+                if sgKey == str(None): sgKey = '*'
+                if len(sgKey)>0 and sgKey[0] == '_': sgKey = sgKey[1:]
+                return (className, sgKey)
+            else:
+                return (data,'')
+
+        for cat in categories:
+            for d in self.data:
+                item = parse( d.name, cat.className, cat.key )
+                #print ": [%s/%s]" % (className, sgKey),
+            #for cat in categories:
+                
+        return
+
+    def __del__(self):
+        if self.poolFile and hasattr(self.poolFile, 'Close'):
+            try:
+                self.poolFile.Close()
+                self.poolFile = None
+            except Exception,err:
+                print "WARNING:",err
+                pass
+            
+    pass # class PoolFile
+
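+# A short usage sketch for PoolFile (file and container names below are
+# hypothetical):
+#
+#   from PyUtils.PoolFile import PoolFile, PoolRecord
+#   pf = PoolFile('my.pool.root')
+#   pf.checkFile(sorting=PoolRecord.Sorter.DiskSize)  # print the size table
+#   rec = pf.poolRecord('SomeContainer_p1')           # KeyError if not found
+#   pf.saveReport('my.report.dat')                    # shelve (or '.csv')
+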
+class DiffFiles(object):
+    """
+    A helper class to compare 2 POOL files and check that they match, both in
+    terms of containers' content and containers' sizes
+    """
+
+    def __init__(self, refFileName, chkFileName, verbose = False, ignoreList = None):
+        object.__init__(self)
+
+        self.verbose = verbose
+        refFileName = os.path.expandvars( os.path.expanduser( refFileName ) )
+        chkFileName = os.path.expandvars( os.path.expanduser( chkFileName ) )
+
+        if ignoreList is None:
+            ignoreList = []
+            
+        try:
+            self.refFile = PoolFile( refFileName )
+            self.chkFile = PoolFile( chkFileName )
+            self.ignList = sorted( ignoreList )
+        except Exception, err:
+            print "## Caught exception [%s] !!" % str(err.__class__)
+            print "## What:",err
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            raise err
+        except :
+            print "## Caught something !! (don't know what)"
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            err  = "Error while opening POOL files !"
+            err += " chk : %s%s" % ( chkFileName, os.linesep )
+            err += " ref : %s%s" % ( refFileName, os.linesep )
+            raise Exception, err
+        
+        self.allGood = True
+        self.summary = []
+        
+        self.__checkDiff()
+        return
+
+    def __checkDiff(self):
+
+        self.summary += [
+            "=" * 80,
+            "::: Comparing POOL files...",
+            " ref : %s" % self.refFile._fileInfos['name'],
+            " chk : %s" % self.chkFile._fileInfos['name'],
+            "-" * 80,
+            ]
+
+        if self.chkFile.dataHeader.nEntries != \
+           self.refFile.dataHeader.nEntries :
+            self.summary += [
+                "## WARNING: files don't have the same number of entries !!",
+                "   ref : %r" % self.refFile.dataHeader.nEntries,
+                "   chk : %r" % self.chkFile.dataHeader.nEntries,
+                ]
+        
+        refNames = sorted( [d.name for d in self.refFile.data] )
+        chkNames = sorted( [d.name for d in self.chkFile.data] )
+
+        if chkNames != refNames:
+            self.summary += [
+                "## ERROR: files don't have the same content !!",
+                ]
+            addNames = [ n for n in chkNames if n not in refNames ]
+            if len( addNames ) > 0:
+                self.summary += [ "## collections in 'chk' and not in 'ref'" ]
+                for n in addNames:
+                    self.summary += [ "  + %s" % n ]
+            subNames = [ n for n in refNames if n not in chkNames ]
+            if len( subNames ) > 0:
+                self.summary += [ "## collections in 'ref' and not in 'chk'" ]
+                for n in subNames:
+                    self.summary += [ "  - %s" % n ]
+            self.allGood = False
+            pass
+
+        if len(self.ignList) > 0:
+            self.summary += [ "## Ignoring the following:" ]
+            for n in self.ignList:
+                self.summary += [ "  %s" % n ]
+
+        commonContent = [ d for d in chkNames if (d in refNames and d not in self.ignList)]
+
+        if not self.allGood:
+            self.summary += [ "=" * 80 ]
+        self.summary += [ "::: comparing common content (mem-size)..." ]
+
+        for name in commonContent:
+            chkMemSize = self.chkFile.poolRecord(name).memSize
+            refMemSize = self.refFile.poolRecord(name).memSize
+            if chkMemSize != refMemSize:
+                self.summary += [
+                    "[ERR] %12.3f kb (ref) ==> %12.3f kb (chk) | %s" % \
+                    ( refMemSize, chkMemSize, name )
+                    ]
+                self.allGood = False
+            elif self.verbose:
+                self.summary += [
+                    " [OK] %12.3f kb                                 | %s" % \
+                    ( chkMemSize, name )
+                    ]
+
+        self.summary += [ "=" * 80 ]
+        
+        ## final decision
+        if self.allGood: self.summary += [ "## Comparison : [OK]"  ]
+        else:            self.summary += [ "## Comparison : [ERR]" ]
+
+        return self.allGood
+
+    def status(self):
+        if self.allGood: return 0
+        else:            return 1
+
+    def printSummary(self, out = sys.stdout):
+        for i in self.summary:
+            out.writelines( i + os.linesep )
+            pass
+        return
+    
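+# A short usage sketch for DiffFiles (file and container names below are
+# hypothetical):
+#
+#   from PyUtils.PoolFile import DiffFiles
+#   diff = DiffFiles(refFileName='ref.pool.root', chkFileName='chk.pool.root',
+#                    ignoreList=['SomeContainer_p1'])
+#   diff.printSummary()
+#   rc = diff.status()   # 0 if the two files match, 1 otherwise
+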
+class Counter(object):
+    """
+    A counter just contains an item list (pairs class-name/sg-key) and the size
+    """
+    size = 0
+    def __init__(self, name, itemList):
+        object.__init__(self)
+        self.name = name
+        self.itemList = itemList
+    pass # Counter
+
+
+### ---------------------------------------------------------------------------
+def merge_pool_files(input_files, output_file,
+                     nevts=-1,
+                     msg=None,
+                     logfile=None):
+    """take a bunch of input pool files and produce a single one.
+    autoconfiguration (through RecExCommon) is (attempted to be) performed.
+    """
+    if msg is None:
+        from .Logging import logging
+        msg = logging.getLogger('pool-merge')
+        msg.setLevel(logging.INFO)
+
+    import sys
+    if logfile is None:
+        logfile = sys.stdout
+    else:
+        logfile = open(logfile, 'w')
+
+    """
+    # XXX: should we get rid of duplicates ?
+    #input_files = list(set(input_files))
+    if len(input_files) <= 1:
+        msg.error('not enough input files: %s', input_files)
+        return 2
+        
+    import PyUtils.AthFile as af
+    try:
+        af.server
+    except (RuntimeError,), err:
+        # FIXME: we should not rely on such fragile error detection
+        if err.message == "AthFileServer already shutdown":
+            af.restart_server()
+            
+    # optimization...
+    try:
+        _af_cache_fname = 'recexcommon-afserver-cache.ascii'
+        af.server.load_cache(_af_cache_fname)
+    except (IOError,), err:
+        msg.info('could not load AthFile.server cache from [%s]:\n%s',
+                 _af_cache_fname, err)
+
+    # another optimization
+    _af_cache_fname = 'merge-poolfiles-afserver-cache.ascii'
+    fi = af.fopen(input_files[0])
+    af.server.save_cache(_af_cache_fname)
+
+    # make sure we do deal with POOL files
+    if fi.infos['file_type'] != 'pool':
+        msg.error('all input files are not POOL ones !')
+        return 3
+
+    # guess input file type...
+    input_type = af._guess_file_type(input_files[0], msg)
+    """
+    
+    import AthenaCommon.ChapPy as api
+    app = api.AthenaApp()
+
+    import textwrap
+    app << textwrap.dedent("""\
+    # automatically generated joboptions file
+
+    # input files configuration
+    from AthenaCommon.AthenaCommonFlags import athenaCommonFlags as acf
+    input_files = %(input-files)s
+
+    import AthenaCommon.Logging as _L
+    msg = _L.log
+
+    # events to process
+    acf.EvtMax = EvtMax = theApp.EvtMax = %(evts)s
+
+    # configure the copy job
+    import AthenaPython.ConfigLib as apcl
+    cfg = apcl.AutoCfg(name='merge-files',
+                       input_files=input_files,
+                       output_file='%(output-file)s')
+    cfg.configure_job()
+
+    if cfg.is_rdo() or cfg.is_esd() or cfg.is_aod() or cfg.is_tag():
+        # main jobos
+        include ('RecExCond/RecExCommon_flags.py')
+        include ('RecExCommon/RecExCommon_topOptions.py')
+    elif cfg.is_hits():
+        import AthenaCommon.DetFlags as acdf
+        acdf.DetFlags.detdescr.all_setOn()
+        import AtlasGeoModel.SetGeometryVersion
+        import AtlasGeoModel.GeoModelInit
+        import AtlasGeoModel.SetupRecoGeometry
+    else:
+        pass
+        
+    # adding the merged output-stream
+    
+    """) % {
+        #'input-type'   : input_type.upper(),
+        'input-files'  : input_files,
+        'output-file'  : output_file,
+        'evts'         : nevts,
+        #'af-cache-name': _af_cache_fname,
+        }
+    
+    msg.info(':'*40)
+    msg.info('input files: %s', input_files)
+    #msg.info('input type:  %s', input_type)
+    msg.info('events:      %s', nevts)
+    msg.info('output file: %s', output_file)
+    msg.info(':'*40)
+    msg.info('running merger...')
+    
+    import AthenaCommon.ExitCodes as ath_codes
+    sc = app.run(stdout=logfile)
+    
+    msg.info('running merger... [done]')
+    msg.info('athena status-code: sc=[%s] (%s)', sc, ath_codes.what(sc))
+
+    if logfile not in (sys.stdout, sys.stderr):
+        logfile.close()
+        pass
+
+    return sc
+
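+# A minimal usage sketch for merge_pool_files (file names are hypothetical);
+# since it drives a full Athena job under the hood, it only works from a
+# properly configured Athena runtime environment:
+#
+#   from PyUtils.PoolFile import merge_pool_files
+#   sc = merge_pool_files(['f1.pool.root', 'f2.pool.root'],
+#                         output_file='merged.pool.root',
+#                         nevts=-1, logfile='merge.log')
+#   assert sc == 0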
+
diff --git a/Tools/PyUtils/python/RootUtils.py b/Tools/PyUtils/python/RootUtils.py
new file mode 100644
index 00000000000..dd71fae35d9
--- /dev/null
+++ b/Tools/PyUtils/python/RootUtils.py
@@ -0,0 +1,312 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.RootUtils
+# @author Sebastien Binet
+# @purpose a few utils to ease the day-to-day work with ROOT
+# @date November 2009
+
+from __future__ import with_statement
+
+__doc__ = "a few utils to ease the day-to-day work with ROOT"
+__version__ = "$Revision: 543921 $"
+__author__ = "Sebastien Binet"
+
+__all__ = [
+    'import_root',
+    'root_compile',
+    ]
+
+### imports -------------------------------------------------------------------
+import os
+import sys
+import re
+from pprint import pprint
+
+from .Decorators import memoize
+
+### functions -----------------------------------------------------------------
+def import_root(batch=True):
+    """a helper method to wrap the 'import ROOT' statement to prevent ROOT
+    from screwing up the display or loading graphics libraries when in batch
+    mode (which is the default).
+
+    e.g.
+    >>> ROOT = import_root(batch=True)
+    >>> f = ROOT.TFile.Open(...)
+    """
+    import ROOT
+    ROOT.gROOT.SetBatch(batch)
+    if batch:
+        ROOT.PyConfig.IgnoreCommandLineOptions = True
+    import PyCintex
+    PyCintex.Cintex.Enable()
+    return ROOT
+
+def root_compile(src=None, fname=None, batch=True):
+    """a helper method to compile a set of C++ statements (via ``src``) or
+    a C++ file (via ``fname``) via ACLiC
+    """
+    if src is not None and fname is not None:
+        raise ValueError("exactly one of 'src' or 'fname' must be given, not both")
+
+    if src is None and fname is None:
+        raise ValueError("one of 'src' or 'fname' must be given, not neither")
+
+    import os
+    from .Helpers import ShutUp as root_shutup
+    
+    ROOT = import_root(batch=batch)
+    compile_options = "f"
+    if 'dbg' in os.environ.get('CMTCONFIG', 'opt'):
+        compile_options += 'g'
+    else:
+        compile_options += 'O'
+
+    src_file = None
+    if src:
+        import textwrap
+        import tempfile
+        src_file = tempfile.NamedTemporaryFile(prefix='root_aclic_',
+                                               suffix='.cxx')
+        src_file.write(textwrap.dedent(src))
+        src_file.flush()
+        src_file.seek(0)
+        fname = src_file.name
+        pass
+
+    elif fname:
+        import os.path as osp
+        fname = osp.expanduser(osp.expandvars(fname))
+        pass
+        
+    assert os.access(fname, os.R_OK), "could not read [%s]"%(fname,)
+    orig_root_lvl = ROOT.gErrorIgnoreLevel
+    ROOT.gErrorIgnoreLevel = ROOT.kWarning
+    try:
+        with root_shutup():
+            sc = ROOT.gSystem.CompileMacro(fname, compile_options)
+        if sc == ROOT.kFALSE:
+            raise RuntimeError(
+                'problem compiling ROOT macro (rc=%s)'%(sc,)
+                )
+    finally:
+        ROOT.gErrorIgnoreLevel = orig_root_lvl
+    return
+        
+@memoize
+def _pythonize_tfile():
+    import PyCintex; PyCintex.Cintex.Enable()
+    root = import_root()
+    import PyUtils.Helpers as H
+    with H.ShutUp(filters=[
+        re.compile(
+            'TClass::TClass:0: RuntimeWarning: no dictionary for.*'),
+        re.compile(
+            'Warning in <TEnvRec::ChangeValue>: duplicate entry.*'
+            ),
+        ]):
+        PyCintex.loadDict("RootUtilsPyROOTDict")
+        rootutils = getattr(root, "RootUtils")
+        pybytes = getattr(rootutils, "PyBytes")
+        read_root_file = getattr(rootutils, "_pythonize_read_root_file")
+        tell_root_file = getattr(rootutils, "_pythonize_tell_root_file")
+        pass
+    def read(self, size=-1):
+        """read([size]) -> read at most size bytes, returned as a string.
+
+        If the size argument is negative or omitted, read until EOF is reached.
+        Notice that when in non-blocking mode, less data than what was requested
+        may be returned, even if no size parameter was given.
+
+        FIXME: probably doesn't follow python file-like conventions...
+        """
+        SZ = 4096
+        
+        if size>=0:
+            #size = _adjust_sz(size)
+            #print "-->0",self.tell(),size
+            c_buf = read_root_file(self, size)
+            if c_buf and c_buf.sz:
+                #print "-->1",self.tell(),c_buf.sz
+                #self.seek(c_buf.sz+self.tell())
+                #print "-->2",self.tell()
+                buf = c_buf.buffer()
+                buf.SetSize(c_buf.sz)
+                return str(buf[:])
+            return ''
+        else:
+            size = SZ
+            out = []
+            while True:
+                #size = _adjust_sz(size)
+                c_buf = read_root_file(self, size)
+                if c_buf and c_buf.sz:
+                    buf = c_buf.buffer()
+                    buf.SetSize(c_buf.sz)
+                    out.append(str(buf[:]))
+                else:
+                    break
+            return ''.join(out)
+    root.TFile.read = read
+    del read
+    
+    root.TFile.seek = root.TFile.Seek
+    root.TFile.tell = lambda self: tell_root_file(self)
+    ## import os
+    ## def tell(self):
+    ##     fd = os.dup(self.GetFd())
+    ##     return os.fdopen(fd).tell()
+    ## root.TFile.tell = tell
+    ## del tell
+    return 
+
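+# A usage sketch for the TFile pythonization above (it assumes the
+# RootUtilsPyROOTDict dictionary is available at runtime and that 'some.root'
+# is a readable ROOT file):
+#
+#   _pythonize_tfile()
+#   ROOT = import_root()
+#   f = ROOT.TFile.Open('some.root')
+#   header = f.read(64)   # file-like read of the first 64 bytes
+#   pos = f.tell()
+#   f.seek(0)
+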
+
+class RootFileDumper(object):
+    """
+    A helper class to dump, in a more or less human-readable form, the content
+    of any TTree.
+    """
+    
+    def __init__(self, fname, tree_name="CollectionTree"):
+        object.__init__(self)
+
+        ROOT = import_root()
+
+        # remember if an error or problem occurred during the dump
+        self.allgood = True
+        
+        self.root_file = ROOT.TFile.Open(fname)
+        if (self.root_file is None or
+            not isinstance(self.root_file, ROOT.TFile) or
+            not self.root_file.IsOpen()):
+            raise IOError('could not open [%s]'% fname)
+
+        self.tree = self.root_file.Get(tree_name)
+        if self.tree is None or not isinstance(self.tree, ROOT.TTree):
+            raise AttributeError('no tree [%s] in file [%s]' % (tree_name, fname))
+
+        if 0:
+            self._trees = []
+            keys = [k.GetName() for k in self.root_file.GetListOfKeys()]
+            for k in keys:
+                o = self.root_file.Get(k)
+                if isinstance(o, ROOT.TTree):
+                    self._trees.append(k)
+                    pass
+
+        return
+
+    def dump(self, tree_name, itr_entries, leaves=None):
+
+        ROOT = import_root()
+        import AthenaPython.PyAthena as PyAthena
+        _pythonize = PyAthena.RootUtils.PyROOTInspector.pyroot_inspect2
+
+        self.tree = self.root_file.Get(tree_name)
+        if self.tree is None or not isinstance(self.tree, ROOT.TTree):
+            raise AttributeError('no tree [%s] in file [%s]'
+                                 % (tree_name, self.root_file.GetName()))
+
+        tree = self.tree
+        nentries = tree.GetEntries()
+        branches = sorted([b.GetName().rstrip('\0') for b in tree.GetListOfBranches()])
+        if leaves is None: leaves = branches
+        else:              leaves = [str(b).rstrip('\0') for b in leaves]
+        
+        # handle itr_entries
+        if isinstance(itr_entries, basestring):
+            if ':' in itr_entries:
+                def toint(s):
+                    if s == '':
+                        return None
+                    try:
+                        return int(s)
+                    except ValueError:
+                        return s
+                from itertools import islice
+                itr_entries = islice(xrange(nentries),
+                                     *map(toint, itr_entries.split(':')))
+            elif ('range' in itr_entries or
+                  ',' in itr_entries):
+                itr_entries = eval(itr_entries)
+            else:
+                try:
+                    _n = int(itr_entries)
+                    itr_entries = xrange(_n)
+                except ValueError:
+                    print "** err ** invalid 'itr_entries' argument. will iterate over all entries."
+                    itr_entries = xrange(nentries)
+        else:
+            itr_entries = xrange(itr_entries)
+                
+        for ientry in itr_entries:
+            hdr = ":: entry [%05i]..." % (ientry,)
+            #print hdr
+            #print >> self.fout, hdr
+            err = tree.LoadTree(ientry)
+            if err < 0:
+                print "**err** loading tree for entry",ientry
+                self.allgood = False
+                break
+
+            nbytes = tree.GetEntry(ientry)
+            if nbytes <= 0:
+                print "**err** reading entry [%s] of tree [%s]" % (ientry, tree_name)
+                hdr = ":: entry [%05i]... [ERR]" % (ientry,)
+                print hdr
+                self.allgood = False
+                continue
+
+            for br_name in leaves:
+                hdr = "::  branch [%s]..." % (br_name,)
+                #print hdr
+                #tree.GetBranch(br_name).GetEntry(ientry)
+                py_name = [br_name]
+                
+                val = getattr(tree, br_name)
+                if not (val is None):
+                    #print "-->",val,br_name
+                    try:
+                        vals = _pythonize(val, py_name)
+                    except Exception, err:
+                        print "**err** for branch [%s] val=%s (type=%s)" % (
+                            br_name, val, type(val),
+                            )
+                        self.allgood = False
+                        print err
+                        continue
+                    for o in vals:
+                        n = map(str, o[0])
+                        v = o[1]
+                        yield tree_name, ientry, n, v
+
+                pass # loop over branch names
+            pass # loop over entries
+    pass # class RootFileDumper
+
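+# A short usage sketch for RootFileDumper (the file name is hypothetical;
+# 'CollectionTree' is the default tree name):
+#
+#   dumper = RootFileDumper('some.pool.root')
+#   for tree_name, ientry, name, value in dumper.dump('CollectionTree',
+#                                                     itr_entries=5):
+#       print tree_name, ientry, name, value
+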
+### test support --------------------------------------------------------------
+def _test_main():
+    root = import_root()
+    def no_raise(msg, fct, *args, **kwds):
+        caught = False
+        try:
+            fct(*args, **kwds)
+        except Exception, err:
+            caught = True
+        assert not caught, "%s:\n%s\nERROR" % (msg, err,)
+
+    no_raise("problem pythonizing TFile", fct=_pythonize_tfile)
+    no_raise("problem compiling dummy one-liner",
+             root_compile, "void foo1() { return ; }")
+    no_raise("problem compiling dummy one-liner w/ kwds",
+             fct=root_compile, src="void foo1() { return ; }")
+    import tempfile
+    with tempfile.NamedTemporaryFile(prefix="foo_",suffix=".cxx") as tmp:
+        print >> tmp, "void foo2() { return ; }"
+        tmp.flush()
+        no_raise("problem compiling a file",
+                 fct=root_compile, fname=tmp.name)
+
+    print "OK"
+
+if __name__ == "__main__":
+    _test_main()
+    
diff --git a/Tools/PyUtils/python/WorkAreaLib.py b/Tools/PyUtils/python/WorkAreaLib.py
new file mode 100644
index 00000000000..36822f892eb
--- /dev/null
+++ b/Tools/PyUtils/python/WorkAreaLib.py
@@ -0,0 +1,416 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.WorkAreaLib
+# @purpose factor some useful methods out of setupWorkArea for clients
+# @author Sebastien Binet
+
+import os, sys
+from PyCmt.Logging import logging
+from PyCmt.Cmt import CmtPkg, CmtStrings
+
+__version__ = '$Revision$'
+__author__  = 'Sebastien Binet'
+
+WORKAREA_VERSION = "WorkArea-00-00-00"
+
+# a list of directory names one should not bother inspecting
+_ignore_dir_list = [
+    "i686-",
+    "x86_64-",
+    "CVS",
+    ".svn",
+    "o..pacman..o",
+    "InstallArea",
+    ]
+
+def _get_currentpath():
+    from PyCmt.Cmt import CmtWrapper
+    cmt = CmtWrapper()
+    installarea = cmt.show(macro_value='CMTINSTALLAREA')
+    prefix = cmt.show(macro_value='cmt_installarea_prefix')
+    # NB: str.rstrip strips a *set* of characters, not a suffix, so remove the
+    # prefix explicitly before trimming any trailing path separator
+    if prefix and installarea.endswith(prefix):
+        installarea = installarea[:-len(prefix)]
+    return installarea.rstrip(os.sep)
+
+def _is_in_ignore_dir_list(pathname):
+    return any(map(pathname.count, _ignore_dir_list))
+
+def listCmtDirs( path ):
+    """Return the list of paths pointing at 'cmt' directories, accessible
+    from the `path` path.
+    """
+
+    msg = logging.getLogger( "WorkAreaMgr" )
+    
+    cmtDirs = []
+    
+    # fill list of CMT directories
+    import os
+    import os.path as osp
+    for root, dirs, files in os.walk(path):
+        for d in dirs[:]:
+            if _is_in_ignore_dir_list(d):
+                dirs.remove(d)
+        for d in dirs:
+            if d == CmtStrings.CMTDIR:
+                full_name = osp.join(root, d)
+                msg.debug("\t==> found %s" % full_name)
+                cmtDirs.append(full_name)
+    return cmtDirs
+
+def scan( scanDir = os.curdir, suppressList = ["WorkArea"] ):
+    """Search for CMT packages in the given directory and walk down the
+    directory tree.
+    Return the list of found CMT packages.
+    """
+    msg = logging.getLogger( "WorkAreaMgr" )
+    msg.info( "Scanning [%s]" % scanDir )
+    
+    # return value
+    cmtPackages = []
+    
+    # retrieve all cmt-ised directories in the scan directory
+    scanDir = os.path.abspath( scanDir )
+
+    cmtDirs = []
+    try:
+        cmtDirs = listCmtDirs(scanDir)
+    except KeyboardInterrupt:
+        msg.warning( "Scanning has been STOPPED ! (by you)" )
+        pass
+    
+    for cmtDir in cmtDirs:
+        cmtPkg = createCmtPkg(cmtDir)
+        if cmtPkg != None and \
+           cmtPkg.name not in suppressList:
+            cmtPackages.append( cmtPkg )
+        pass
+    
+    return cmtPackages
+
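+# A small usage sketch for scan (the path is hypothetical); each returned
+# object is a CmtPkg with name, version and path attributes:
+#
+#   pkgs = scan('/path/to/my/WorkArea')
+#   for pkg in pkgs:
+#       print pkg.name, pkg.version, pkg.path
+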
+def createCmtPkg( cmtDir ):
+    """
+    the cmtDir is assumed to be of the form Xyz/cmt.
+    One also has to handle the case with or without a version directory.
+    """
+    msg = logging.getLogger("WorkAreaMgr")
+    
+    pkgName = None
+    # the CMTREQFILE should provide the name of the package
+    # so we extract it from that file
+    try:
+        reqFile = open( os.path.join( cmtDir, CmtStrings.CMTREQFILE ), 'r' )
+        for line in reqFile.readlines():
+            line = line.strip()
+            if len(line) > 0  and \
+               line[0] != "#" and \
+               line.count("package ") > 0:
+                pkgName = line.splitlines()[0]\
+                          .split("package ")[1]\
+                          .replace("\r","")\
+                          .split("#")[0]\
+                          .strip()
+                break
+            pass
+        reqFile.close()
+        del reqFile
+    except IOError:
+        ## No CMTREQFILE in this directory
+        ## ==> not a CMT package then ?
+        ## check if there is any CMT project file instead
+        if not os.path.exists( os.path.join(cmtDir, CmtStrings.CMTPROJFILE) ):
+            msg.warning( "[%s] does NOT contain any '%s' nor '%s' file !!" % \
+                         ( cmtDir,
+                           CmtStrings.CMTREQFILE,
+                           CmtStrings.CMTPROJFILE ) )
+        return None
+
+    if pkgName == None:
+        msg.warning( "No 'package Foo' stmt in %s of %s" % \
+                     ( CmtStrings.CMTREQFILE, cmtDir ) )
+        return None
+    
+    msg.debug( "\t\t==> Analysing [%s]" % cmtDir )
+    
+    # first we try the no-version-directory case as it is the ATLAS
+    # default now.
+    if CmtStrings.CMTVERSIONFILE in os.listdir(cmtDir):
+        version = open( os.path.join( cmtDir, CmtStrings.CMTVERSIONFILE ),
+                        'r' )\
+                        .readline()
+        version = version.splitlines()[0].strip()
+        pkgDir = os.path.split(cmtDir)[0].strip()
+        pkgPath = os.path.split(pkgDir)[0].strip()
+        pass
+
+    # Now we *MAY* be in the case where:
+    # /somePath/MyPkg/MyPkg-00-00-00/cmt
+    # or
+    # /somePath/MyPkg/v1r2p3/cmt
+    # however this is not supported anymore: warn and fall back to the
+    # previous case anyway (as the user might have screwed up)
+    else:
+        msg.warning("No [%s] file in [%s] directory",
+                    CmtStrings.CMTVERSIONFILE,
+                    cmtDir)
+        msg.warning("Can't reliably infer package version/dir!")
+        version = '*'
+        pkgDir  = os.path.split(cmtDir)[0].strip()
+        pkgPath = os.path.split(pkgDir)[0].strip()
+        msg.warning("Will use:")
+        msg.warning( "\t\t\t- name    = %s" % pkgName )
+        msg.warning( "\t\t\t- version = %s" % version )
+        msg.warning( "\t\t\t- path    = %s" % pkgPath )
+        pass
+
+    msg.debug( "\t\t\t- name    = %s" % pkgName )
+    msg.debug( "\t\t\t- version = %s" % version )
+    msg.debug( "\t\t\t- path    = %s" % pkgPath )
+
+    if pkgName.count(os.sep) > 0 :
+       msg.warning( "About to create a funny CMT package !" )
+       msg.warning( "'PkgName' contains '%s'. Please fix it!" % os.sep )
+       msg.warning( "\t- name    = %s" % pkgName )
+       msg.warning( "\t- version = %s" % version )
+       msg.warning( "\t- path    = %s" % pkgPath )
+       # Ok, so, I fix it - but user is warned...
+       pkgName = os.path.basename(pkgName)
+       pass
+
+    return CmtPkg( pkgName, version, pkgPath )
+
+def createUseList(workAreas, suppressList = ["WorkArea"]):
+
+   msg = logging.getLogger( "WorkAreaMgr" )
+   cmtPackages = []
+   uses        = []
+   
+   for workArea in workAreas:
+      cmtPackages.extend( scan( workArea, suppressList ) )
+      pass
+
+   # Handle duplicate CMT packages:
+   pkgs = {}
+   duplicates = {}
+   for cmtPkg in cmtPackages:
+      if not pkgs.has_key(cmtPkg.name):
+         pkgs[cmtPkg.name] = cmtPkg
+         pass
+      else:
+         # we found a duplicate...
+         # check that the new one has a more recent version
+         if pkgs[cmtPkg.name].version < cmtPkg.version:
+            pkgs[cmtPkg.name] = cmtPkg
+            pass
+         duplicates[cmtPkg.name] = pkgs[cmtPkg.name]
+         pass
+      pass
+   if len(duplicates) > 0:
+      msg.warning( "Found duplicate(s): (listing the ones we kept)" )
+      for k in duplicates.keys():
+         msg.warning( "--" )
+         msg.warning( " Package: %s" % duplicates[k].name )
+         msg.warning( " Version: %s" % duplicates[k].version )
+         msg.warning( " Path:    %s" % duplicates[k].path )
+         pass
+      pass
+
+   del duplicates
+   cmtPackages = [ pkg for pkg in pkgs.values() ]
+   del pkgs
+                      
+   msg.info( "Found %i packages in WorkArea" % len(cmtPackages) )
+   if len(suppressList) >= 1:
+      # -1 because WorkArea is removed by default
+      msg.info( "=> %i package(s) in suppression list" % \
+                int(len(suppressList) - 1) ) 
+
+   for cmtPkg in cmtPackages:
+      # swallow the WorkArea path so we have a "cmt path" to put
+      # in the req file
+      for workArea in workAreas:
+         cmtPkg.path = cmtPkg.path.replace( workArea+os.sep, '' )
+         cmtPkg.path = cmtPkg.path.replace( workArea,        '' )
+         pass
+
+      if cmtPkg.path.endswith( os.sep ):
+         cmtPkg.path = os.path.split(cmtPkg.path)
+         pass
+
+      use = "use %s \t%s \t%s -no_auto_imports" % \
+            ( cmtPkg.name,
+              "*", #cmtPkg.version,
+              cmtPkg.path )
+      msg.debug( "\t%s" % use )
+
+      uses.append( use )
+      pass
+
+   return uses
+
+def _translate_runtimepkg_name(n):
+    db = {
+        'hlt': 'AtlasHLT',
+        'manacore': 'ManaCore',
+        'detcommon': 'DetCommon',
+        }
+    if n in db:
+        return db[n]
+    else:
+        o = 'Atlas'+n[0].upper() + n[1:]
+        if o.startswith('AtlasAtlas'):
+            o = o[len('Atlas'):]
+        return o
+    
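+# For illustration, the translation above maps for instance:
+#   'hlt'     -> 'AtlasHLT'       (explicit entry in the db)
+#   'offline' -> 'AtlasOffline'   (capitalized and prefixed with 'Atlas')
+#   'AtlasTrigger' stays 'AtlasTrigger' (a double 'AtlasAtlas' prefix is trimmed)
+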
+def createWorkArea(workAreas = None, installDir = None,
+                   runTimePkg = None, suppressList = None):
+
+    msg = logging.getLogger("WorkAreaMgr")
+    if workAreas is None:
+        workAreas = []
+    if suppressList is None:
+       suppressList = [ "WorkArea" ]
+    else:
+       suppressList.append( "WorkArea" )
+       pass
+
+    if runTimePkg is None:
+        runTimePkg = os.getenv('AtlasProject', 'AtlasOffline')
+        pass
+    atlasRunTime = _translate_runtimepkg_name(runTimePkg)
+    
+    defaultWorkArea = _get_currentpath()
+    if len(workAreas) <= 0:
+        workAreas = [ defaultWorkArea ]
+    if installDir == None:
+        installDir = defaultWorkArea
+        pass
+
+    msg.info( 80*"#" )
+    msg.info( "Creating a WorkArea CMT package under: [%s] " % installDir )
+
+    try:
+        installWorkArea( installDir,
+                         CmtPkg( "WorkArea", WORKAREA_VERSION, "" ) )
+    except Exception,err:
+        msg.error( "Could NOT create WorkArea package !!" )
+        msg.error( "%r", err)
+        msg.info( 80*"#" )
+        sys.exit(3)
+        
+    except :
+        msg.error( "Could NOT create WorkArea package !!" )
+        msg.info( 80*"#" )
+        sys.exit(4)
+        
+    reqLines = [
+        "package WorkArea",
+        "",
+        "author Sebastien Binet <binet@cern.ch>",
+        "",
+        "######################################",
+        "## Don't edit this file !!          ##",
+        "## It is automatically generated... ##",
+        "######################################",
+        "",
+        "## Generic part...",
+        "use AtlasPolicy 	 \tAtlasPolicy-*",
+        "use %sRunTime \t%sRunTime-*" % (atlasRunTime, atlasRunTime),
+        "",
+        "branches run python",
+        "",
+        "## Install the python classes into InstallArea",
+        "apply_pattern declare_python_modules files=\"*.py\"",
+        "",
+        "",
+        "## execute the post-install targets...",
+        "private ",
+        " macro_append all_dependencies \"\\",
+        "  post_merge_rootmap \\", 
+        "  post_merge_genconfdb \\",
+        "  post_build_tpcnvdb \\",
+        "\"",
+        "end_private",
+        "",
+        "## Automatically generated part...",
+        "" ]
+
+    uses = createUseList(workAreas, suppressList)
+
+    reqLines.extend( uses )
+    reqLines.append( "" )
+    reqLines.append( "## End of generation." )
+    reqLines.append( "## EOF ##" )
+    
+    reqFile = open( os.path.join( installDir,
+                                  "WorkArea",
+                                  CmtStrings.CMTDIR,
+                                  CmtStrings.CMTREQFILE ),
+                    "w" )
+    for reqLine in reqLines:
+        reqFile.writelines( reqLine + os.linesep )
+        pass
+
+    msg.info( "Generation of %s done [OK]" % \
+              os.path.join( "WorkArea",
+                            CmtStrings.CMTDIR,
+                            CmtStrings.CMTREQFILE ) )
+    
+    reqFile.close()
+
+    msg.info( 80*"#" )
+    return
+
+def installWorkArea( installDir, cmtWorkAreaPkg ):
+    msg = logging.getLogger("WorkAreaMgr")
+
+    workAreaDir = os.path.join( installDir,  cmtWorkAreaPkg.name )
+    cmtDir      = os.path.join( workAreaDir, CmtStrings.CMTDIR   )
+
+    if os.path.exists(installDir):
+        if os.access(installDir, os.W_OK):
+            if os.path.exists( workAreaDir ):
+                if not os.path.exists( cmtDir ):
+                    os.mkdir( cmtDir )
+                    pass
+                pass
+            else:
+                os.makedirs( os.path.join( workAreaDir, cmtDir ) )
+            pass
+        else:
+            msg.error( "Can't write under [%s] !!" % installDir )
+            raise OSError
+        pass
+    else:
+        try:
+            os.makedirs(installDir)
+            installWorkArea( installDir, cmtWorkAreaPkg )
+        except OSError, what:
+            msg.error( "Install dir for WorkArea does NOT exist and can't create it !!" )
+            raise OSError, what
+        pass
+
+    msg.debug( "Creating a consistent version file for the WorkArea pkg..." )
+    cmtVersFile = open( os.path.join(cmtDir, CmtStrings.CMTVERSIONFILE), 'w' )
+    cmtVersFile.writelines( cmtWorkAreaPkg.version + os.linesep )
+    cmtVersFile.close()
+    msg.debug( "Create a dummy %s file for the WorkArea pkg..." % CmtStrings.CMTREQFILE ) 
+    cmtReqFile = open( os.path.join(cmtDir, CmtStrings.CMTREQFILE), 'w' )
+    cmtReqFile.writelines( "package %s %s" % (cmtWorkAreaPkg.name, os.linesep ) )
+    cmtReqFile.close()
+
+    msg.debug("creating python directories to workaround CMT bugs...")
+    install_area = os.path.join(installDir, 'InstallArea')
+    pydirs = [os.path.join(install_area, 'python'),
+              os.path.join(install_area, '${CMTCONFIG}', 'lib', 'python')]
+    pydirs = [os.path.expandvars(p) for p in pydirs]
+    
+    for p in pydirs:
+        if not os.path.exists(p):
+            try:
+                os.makedirs(p)
+            except OSError, what:
+                msg.error('could not create directory [%s]',p)
+                msg.error(what)
+    msg.debug("creating python directories to workaround CMT bugs... [ok]")
+                
+    return
+
diff --git a/Tools/PyUtils/python/__init__.py b/Tools/PyUtils/python/__init__.py
new file mode 100755
index 00000000000..b133f4f5d9d
--- /dev/null
+++ b/Tools/PyUtils/python/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# Hook for the PyUtils module
diff --git a/Tools/PyUtils/python/_rfio.py b/Tools/PyUtils/python/_rfio.py
new file mode 100644
index 00000000000..39abdcc1c9f
--- /dev/null
+++ b/Tools/PyUtils/python/_rfio.py
@@ -0,0 +1,468 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+from ctypes import *
+
+def _load_library (libname):
+    """
+    Helper method to load a library by its natural name, not the OS-native name.
+    But if the OS-native name is given, it is safely handled too.
+    usage:
+     >>> _load_library ('AthenaServices')
+     >>> _load_library ('AthenaServicesDict')
+    """
+    import ctypes, sys
+    plat = sys.platform
+    if plat.count('linux')>0:
+        lib_prefix,lib_suffix = 'lib', '.so'
+    elif plat == 'win32':
+        lib_prefix,lib_suffix = '', '.dll'
+    elif plat == 'darwin':
+        lib_prefix,lib_suffix = 'lib','.dylib'
+    else:
+        raise RuntimeError ("sorry platform [%s] is not (yet?) supported"%plat)
+    _sys_libname = libname
+    if not _sys_libname.startswith (lib_prefix):
+        _sys_libname = ''.join([lib_prefix,_sys_libname])
+    if not _sys_libname.endswith (lib_suffix):
+        _sys_libname = ''.join([_sys_libname, lib_suffix])
+    return ctypes.cdll.LoadLibrary (_sys_libname)
+
+STRING = c_char_p
+_libraries = {}
+_libraries['libshift.so'] = _load_library('libshift.so')
+
+
+rfio_access = _libraries['libshift.so'].rfio_access
+rfio_access.restype = c_int
+rfio_access.argtypes = [STRING, c_int]
+rfio_chdir = _libraries['libshift.so'].rfio_chdir
+rfio_chdir.restype = c_int
+rfio_chdir.argtypes = [STRING]
+rfio_chmod = _libraries['libshift.so'].rfio_chmod
+rfio_chmod.restype = c_int
+rfio_chmod.argtypes = [STRING, c_int]
+rfio_chown = _libraries['libshift.so'].rfio_chown
+rfio_chown.restype = c_int
+rfio_chown.argtypes = [STRING, c_int, c_int]
+rfio_close = _libraries['libshift.so'].rfio_close
+rfio_close.restype = c_int
+rfio_close.argtypes = [c_int]
+rfio_close_v3 = _libraries['libshift.so'].rfio_close_v3
+rfio_close_v3.restype = c_int
+rfio_close_v3.argtypes = [c_int]
+rfio_end = _libraries['libshift.so'].rfio_end
+rfio_end.restype = c_int
+rfio_end.argtypes = []
+rfio_symend = _libraries['libshift.so'].rfio_symend
+rfio_symend.restype = c_int
+rfio_symend.argtypes = []
+rfio_unend = _libraries['libshift.so'].rfio_unend
+rfio_unend.restype = c_int
+rfio_unend.argtypes = []
+rfio_fchmod = _libraries['libshift.so'].rfio_fchmod
+rfio_fchmod.restype = c_int
+rfio_fchmod.argtypes = [c_int, c_int]
+rfio_fchown = _libraries['libshift.so'].rfio_fchown
+rfio_fchown.restype = c_int
+rfio_fchown.argtypes = [c_int, c_int, c_int]
+class stat(Structure):
+    pass
+rfio_fstat = _libraries['libshift.so'].rfio_fstat
+rfio_fstat.restype = c_int
+rfio_fstat.argtypes = [c_int, POINTER(stat)]
+rfio_getcwd = _libraries['libshift.so'].rfio_getcwd
+rfio_getcwd.restype = STRING
+rfio_getcwd.argtypes = [STRING, c_int]
+rfio_lockf = _libraries['libshift.so'].rfio_lockf
+rfio_lockf.restype = c_int
+rfio_lockf.argtypes = [c_int, c_int, c_long]
+__off_t = c_long
+off_t = __off_t
+rfio_lseek = _libraries['libshift.so'].rfio_lseek
+rfio_lseek.restype = off_t
+rfio_lseek.argtypes = [c_int, off_t, c_int]
+rfio_lstat = _libraries['libshift.so'].rfio_lstat
+rfio_lstat.restype = c_int
+rfio_lstat.argtypes = [STRING, POINTER(stat)]
+rfio_mkdir = _libraries['libshift.so'].rfio_mkdir
+rfio_mkdir.restype = c_int
+rfio_mkdir.argtypes = [STRING, c_int]
+rfio_mstat = _libraries['libshift.so'].rfio_mstat
+rfio_mstat.restype = c_int
+rfio_mstat.argtypes = [STRING, POINTER(stat)]
+rfio_munlink = _libraries['libshift.so'].rfio_munlink
+rfio_munlink.restype = c_int
+rfio_munlink.argtypes = [STRING]
+rfio_msymlink = _libraries['libshift.so'].rfio_msymlink
+rfio_msymlink.restype = c_int
+rfio_msymlink.argtypes = [STRING, STRING]
+rfio_mstat_reset = _libraries['libshift.so'].rfio_mstat_reset
+rfio_mstat_reset.restype = c_int
+rfio_mstat_reset.argtypes = []
+rfio_munlink_reset = _libraries['libshift.so'].rfio_munlink_reset
+rfio_munlink_reset.restype = c_int
+rfio_munlink_reset.argtypes = []
+rfio_msymlink_reset = _libraries['libshift.so'].rfio_msymlink_reset
+rfio_msymlink_reset.restype = c_int
+rfio_msymlink_reset.argtypes = []
+rfio_open = _libraries['libshift.so'].rfio_open
+rfio_open.restype = c_int
+rfio_open.argtypes = [STRING, c_int]
+rfio_open_v3 = _libraries['libshift.so'].rfio_open_v3
+rfio_open_v3.restype = c_int
+rfio_open_v3.argtypes = [STRING, c_int, c_int]
+rfio_perror = _libraries['libshift.so'].rfio_perror
+rfio_perror.restype = None
+rfio_perror.argtypes = [STRING]
+class iovec(Structure):
+    pass
+rfio_preseek = _libraries['libshift.so'].rfio_preseek
+rfio_preseek.restype = c_int
+rfio_preseek.argtypes = [c_int, POINTER(iovec), c_int]
+rfio_read = _libraries['libshift.so'].rfio_read
+rfio_read.restype = c_int
+rfio_read.argtypes = [c_int, c_void_p, c_int]
+rfio_read_v3 = _libraries['libshift.so'].rfio_read_v3
+rfio_read_v3.restype = c_int
+rfio_read_v3.argtypes = [c_int, STRING, c_int]
+rfio_readlink = _libraries['libshift.so'].rfio_readlink
+rfio_readlink.restype = c_int
+rfio_readlink.argtypes = [STRING, STRING, c_int]
+rfio_rename = _libraries['libshift.so'].rfio_rename
+rfio_rename.restype = c_int
+rfio_rename.argtypes = [STRING, STRING]
+rfio_rmdir = _libraries['libshift.so'].rfio_rmdir
+rfio_rmdir.restype = c_int
+rfio_rmdir.argtypes = [STRING]
+rfio_serrno = _libraries['libshift.so'].rfio_serrno
+rfio_serrno.restype = c_int
+rfio_serrno.argtypes = []
+rfio_serror = _libraries['libshift.so'].rfio_serror
+rfio_serror.restype = STRING
+rfio_serror.argtypes = []
+rfio_stat = _libraries['libshift.so'].rfio_stat
+rfio_stat.restype = c_int
+rfio_stat.argtypes = [STRING, POINTER(stat)]
+class rfstatfs(Structure):
+    pass
+rfio_statfs = _libraries['libshift.so'].rfio_statfs
+rfio_statfs.restype = c_int
+rfio_statfs.argtypes = [STRING, POINTER(rfstatfs)]
+rfio_symlink = _libraries['libshift.so'].rfio_symlink
+rfio_symlink.restype = c_int
+rfio_symlink.argtypes = [STRING, STRING]
+rfio_unlink = _libraries['libshift.so'].rfio_unlink
+rfio_unlink.restype = c_int
+rfio_unlink.argtypes = [STRING]
+rfio_write = _libraries['libshift.so'].rfio_write
+rfio_write.restype = c_int
+rfio_write.argtypes = [c_int, c_void_p, c_int]
+rfio_write_v3 = _libraries['libshift.so'].rfio_write_v3
+rfio_write_v3.restype = c_int
+rfio_write_v3.argtypes = [c_int, STRING, c_int]
+rfio_smstat = _libraries['libshift.so'].rfio_smstat
+rfio_smstat.restype = c_int
+rfio_smstat.argtypes = [c_int, STRING, POINTER(stat), c_int]
+rfio_lseek_v3 = _libraries['libshift.so'].rfio_lseek_v3
+rfio_lseek_v3.restype = c_int
+rfio_lseek_v3.argtypes = [c_int, c_int, c_int]
+rfio_close64_v3 = _libraries['libshift.so'].rfio_close64_v3
+rfio_close64_v3.restype = c_int
+rfio_close64_v3.argtypes = [c_int]
+class stat64(Structure):
+    pass
+rfio_fstat64 = _libraries['libshift.so'].rfio_fstat64
+rfio_fstat64.restype = c_int
+rfio_fstat64.argtypes = [c_int, POINTER(stat64)]
+__quad_t = c_longlong
+__off64_t = __quad_t
+off64_t = __off64_t
+rfio_lockf64 = _libraries['libshift.so'].rfio_lockf64
+rfio_lockf64.restype = c_int
+rfio_lockf64.argtypes = [c_int, c_int, off64_t]
+rfio_lseek64 = _libraries['libshift.so'].rfio_lseek64
+rfio_lseek64.restype = off64_t
+rfio_lseek64.argtypes = [c_int, off64_t, c_int]
+rfio_lseek64_v3 = _libraries['libshift.so'].rfio_lseek64_v3
+rfio_lseek64_v3.restype = off64_t
+rfio_lseek64_v3.argtypes = [c_int, off64_t, c_int]
+rfio_lstat64 = _libraries['libshift.so'].rfio_lstat64
+rfio_lstat64.restype = c_int
+rfio_lstat64.argtypes = [STRING, POINTER(stat64)]
+rfio_mstat64 = _libraries['libshift.so'].rfio_mstat64
+rfio_mstat64.restype = c_int
+rfio_mstat64.argtypes = [STRING, POINTER(stat64)]
+rfio_open64 = _libraries['libshift.so'].rfio_open64
+rfio_open64.restype = c_int
+rfio_open64.argtypes = [STRING, c_int]
+rfio_open64_v3 = _libraries['libshift.so'].rfio_open64_v3
+rfio_open64_v3.restype = c_int
+rfio_open64_v3.argtypes = [STRING, c_int, c_int]
+class iovec64(Structure):
+    pass
+rfio_preseek64 = _libraries['libshift.so'].rfio_preseek64
+rfio_preseek64.restype = c_int
+rfio_preseek64.argtypes = [c_int, POINTER(iovec64), c_int]
+rfio_read64_v3 = _libraries['libshift.so'].rfio_read64_v3
+rfio_read64_v3.restype = c_int
+rfio_read64_v3.argtypes = [c_int, STRING, c_int]
+rfio_stat64 = _libraries['libshift.so'].rfio_stat64
+rfio_stat64.restype = c_int
+rfio_stat64.argtypes = [STRING, POINTER(stat64)]
+rfio_write64_v3 = _libraries['libshift.so'].rfio_write64_v3
+rfio_write64_v3.restype = c_int
+rfio_write64_v3.argtypes = [c_int, STRING, c_int]
+rfio_smstat64 = _libraries['libshift.so'].rfio_smstat64
+rfio_smstat64.restype = c_int
+rfio_smstat64.argtypes = [c_int, STRING, POINTER(stat64), c_int]
+class __dirstream(Structure):
+    pass
+DIR = __dirstream
+rfio_closedir = _libraries['libshift.so'].rfio_closedir
+rfio_closedir.restype = c_int
+rfio_closedir.argtypes = [POINTER(DIR)]
+class _IO_FILE(Structure):
+    pass
+FILE = _IO_FILE
+rfio_fclose = _libraries['libshift.so'].rfio_fclose
+rfio_fclose.restype = c_int
+rfio_fclose.argtypes = [POINTER(FILE)]
+rfio_feof = _libraries['libshift.so'].rfio_feof
+rfio_feof.restype = c_int
+rfio_feof.argtypes = [POINTER(FILE)]
+rfio_ferror = _libraries['libshift.so'].rfio_ferror
+rfio_ferror.restype = c_int
+rfio_ferror.argtypes = [POINTER(FILE)]
+rfio_fflush = _libraries['libshift.so'].rfio_fflush
+rfio_fflush.restype = c_int
+rfio_fflush.argtypes = [POINTER(FILE)]
+rfio_fileno = _libraries['libshift.so'].rfio_fileno
+rfio_fileno.restype = c_int
+rfio_fileno.argtypes = [POINTER(FILE)]
+rfio_fopen = _libraries['libshift.so'].rfio_fopen
+rfio_fopen.restype = POINTER(FILE)
+rfio_fopen.argtypes = [STRING, STRING]
+rfio_fread = _libraries['libshift.so'].rfio_fread
+rfio_fread.restype = c_int
+rfio_fread.argtypes = [c_void_p, c_int, c_int, POINTER(FILE)]
+rfio_fseek = _libraries['libshift.so'].rfio_fseek
+rfio_fseek.restype = c_int
+rfio_fseek.argtypes = [POINTER(FILE), c_long, c_int]
+rfio_ftell = _libraries['libshift.so'].rfio_ftell
+rfio_ftell.restype = c_long
+rfio_ftell.argtypes = [POINTER(FILE)]
+rfio_fwrite = _libraries['libshift.so'].rfio_fwrite
+rfio_fwrite.restype = c_int
+rfio_fwrite.argtypes = [c_void_p, c_int, c_int, POINTER(FILE)]
+rfio_getc = _libraries['libshift.so'].rfio_getc
+rfio_getc.restype = c_int
+rfio_getc.argtypes = [POINTER(FILE)]
+rfio_pclose = _libraries['libshift.so'].rfio_pclose
+rfio_pclose.restype = c_int
+rfio_pclose.argtypes = [POINTER(FILE)]
+rfio_popen = _libraries['libshift.so'].rfio_popen
+rfio_popen.restype = POINTER(FILE)
+rfio_popen.argtypes = [STRING, STRING]
+rfio_pread = _libraries['libshift.so'].rfio_pread
+rfio_pread.restype = c_int
+rfio_pread.argtypes = [STRING, c_int, c_int, POINTER(FILE)]
+rfio_pwrite = _libraries['libshift.so'].rfio_pwrite
+rfio_pwrite.restype = c_int
+rfio_pwrite.argtypes = [STRING, c_int, c_int, POINTER(FILE)]
+rfio_opendir = _libraries['libshift.so'].rfio_opendir
+rfio_opendir.restype = POINTER(DIR)
+rfio_opendir.argtypes = [STRING]
+class dirent(Structure):
+    pass
+rfio_readdir = _libraries['libshift.so'].rfio_readdir
+rfio_readdir.restype = POINTER(dirent)
+rfio_readdir.argtypes = [POINTER(DIR)]
+rfio_rewinddir = _libraries['libshift.so'].rfio_rewinddir
+rfio_rewinddir.restype = c_int
+rfio_rewinddir.argtypes = [POINTER(DIR)]
+rfio_fopen64 = _libraries['libshift.so'].rfio_fopen64
+rfio_fopen64.restype = POINTER(FILE)
+rfio_fopen64.argtypes = [STRING, STRING]
+rfio_fseeko64 = _libraries['libshift.so'].rfio_fseeko64
+rfio_fseeko64.restype = c_int
+rfio_fseeko64.argtypes = [POINTER(FILE), off64_t, c_int]
+rfio_ftello64 = _libraries['libshift.so'].rfio_ftello64
+rfio_ftello64.restype = off64_t
+rfio_ftello64.argtypes = [POINTER(FILE)]
+class dirent64(Structure):
+    pass
+rfio_readdir64 = _libraries['libshift.so'].rfio_readdir64
+rfio_readdir64.restype = POINTER(dirent64)
+rfio_readdir64.argtypes = [POINTER(DIR)]
+size_t = c_uint
+rfio_errmsg_r = _libraries['libshift.so'].rfio_errmsg_r
+rfio_errmsg_r.restype = STRING
+rfio_errmsg_r.argtypes = [c_int, c_int, STRING, size_t]
+rfio_errmsg = _libraries['libshift.so'].rfio_errmsg
+rfio_errmsg.restype = STRING
+rfio_errmsg.argtypes = [c_int, c_int]
+rfio_serror_r = _libraries['libshift.so'].rfio_serror_r
+rfio_serror_r.restype = STRING
+rfio_serror_r.argtypes = [STRING, size_t]
+__ino_t = c_ulong
+dirent._fields_ = [
+    ('d_ino', __ino_t),
+    ('d_off', __off_t),
+    ('d_reclen', c_ushort),
+    ('d_type', c_ubyte),
+    ('d_name', c_char * 256),
+]
+__u_quad_t = c_ulonglong
+__ino64_t = __u_quad_t
+dirent64._pack_ = 4
+dirent64._fields_ = [
+    ('d_ino', __ino64_t),
+    ('d_off', __off64_t),
+    ('d_reclen', c_ushort),
+    ('d_type', c_ubyte),
+    ('d_name', c_char * 256),
+]
+__dev_t = __u_quad_t
+__mode_t = c_uint
+__nlink_t = c_uint
+__uid_t = c_uint
+__gid_t = c_uint
+__blksize_t = c_long
+__blkcnt_t = c_long
+class timespec(Structure):
+    pass
+__time_t = c_long
+timespec._fields_ = [
+    ('tv_sec', __time_t),
+    ('tv_nsec', c_long),
+]
+stat._pack_ = 4
+stat._fields_ = [
+    ('st_dev', __dev_t),
+    ('__pad1', c_ushort),
+    ('st_ino', __ino_t),
+    ('st_mode', __mode_t),
+    ('st_nlink', __nlink_t),
+    ('st_uid', __uid_t),
+    ('st_gid', __gid_t),
+    ('st_rdev', __dev_t),
+    ('__pad2', c_ushort),
+    ('st_size', __off_t),
+    ('st_blksize', __blksize_t),
+    ('st_blocks', __blkcnt_t),
+    ('st_atim', timespec),
+    ('st_mtim', timespec),
+    ('st_ctim', timespec),
+    ('__unused4', c_ulong),
+    ('__unused5', c_ulong),
+]
+__blkcnt64_t = __quad_t
+stat64._pack_ = 4
+stat64._fields_ = [
+    ('st_dev', __dev_t),
+    ('__pad1', c_uint),
+    ('__st_ino', __ino_t),
+    ('st_mode', __mode_t),
+    ('st_nlink', __nlink_t),
+    ('st_uid', __uid_t),
+    ('st_gid', __gid_t),
+    ('st_rdev', __dev_t),
+    ('__pad2', c_uint),
+    ('st_size', __off64_t),
+    ('st_blksize', __blksize_t),
+    ('st_blocks', __blkcnt64_t),
+    ('st_atim', timespec),
+    ('st_mtim', timespec),
+    ('st_ctim', timespec),
+    ('st_ino', __ino64_t),
+]
+iovec._fields_ = [
+    ('iov_base', c_void_p),
+    ('iov_len', size_t),
+]
+__dirstream._fields_ = [
+]
+class _IO_marker(Structure):
+    pass
+_IO_lock_t = None
+_IO_FILE._pack_ = 4
+_IO_FILE._fields_ = [
+    ('_flags', c_int),
+    ('_IO_read_ptr', STRING),
+    ('_IO_read_end', STRING),
+    ('_IO_read_base', STRING),
+    ('_IO_write_base', STRING),
+    ('_IO_write_ptr', STRING),
+    ('_IO_write_end', STRING),
+    ('_IO_buf_base', STRING),
+    ('_IO_buf_end', STRING),
+    ('_IO_save_base', STRING),
+    ('_IO_backup_base', STRING),
+    ('_IO_save_end', STRING),
+    ('_markers', POINTER(_IO_marker)),
+    ('_chain', POINTER(_IO_FILE)),
+    ('_fileno', c_int),
+    ('_flags2', c_int),
+    ('_old_offset', __off_t),
+    ('_cur_column', c_ushort),
+    ('_vtable_offset', c_byte),
+    ('_shortbuf', c_char * 1),
+    ('_lock', POINTER(_IO_lock_t)),
+    ('_offset', __off64_t),
+    ('__pad1', c_void_p),
+    ('__pad2', c_void_p),
+    ('__pad3', c_void_p),
+    ('__pad4', c_void_p),
+    ('__pad5', size_t),
+    ('_mode', c_int),
+    ('_unused2', c_char * 40),
+]
+rfstatfs._fields_ = [
+    ('totblks', c_long),
+    ('freeblks', c_long),
+    ('bsize', c_long),
+    ('totnods', c_long),
+    ('freenods', c_long),
+]
+iovec64._pack_ = 4
+iovec64._fields_ = [
+    ('iov_base', off64_t),
+    ('iov_len', c_int),
+]
+_IO_marker._fields_ = [
+    ('_next', POINTER(_IO_marker)),
+    ('_sbuf', POINTER(_IO_FILE)),
+    ('_pos', c_int),
+]
+__all__ = ['rfio_lstat', 'rfio_msymlink_reset', 'FILE', '__off64_t',
+           'size_t', 'rfio_preseek', 'rfio_fstat', 'rfio_fflush',
+           'rfio_closedir', '__ino64_t', 'rfio_serrno', 'rfio_feof',
+           'rfio_lseek_v3', 'rfio_ftell', 'rfio_fchown', 'rfio_lseek',
+           'rfio_close64_v3', 'rfio_fchmod', '__time_t',
+           'rfio_serror', 'rfio_fseeko64', 'rfio_read64_v3',
+           '__nlink_t', 'rfio_open', 'rfio_smstat', '_IO_lock_t',
+           'timespec', '__off_t', 'rfio_ftello64', 'rfio_close',
+           'rfio_write_v3', 'rfio_munlink_reset', 'rfio_mkdir',
+           'rfio_lockf64', 'rfio_statfs', 'rfio_errmsg',
+           'rfio_readlink', 'rfio_close_v3', 'dirent64',
+           'rfio_lseek64_v3', 'rfio_end', 'rfio_popen', 'rfio_symend',
+           'rfio_fopen64', 'rfio_pread', 'rfio_fopen', '__dirstream',
+           'rfio_preseek64', '__blkcnt_t', 'rfio_msymlink',
+           'rfio_fclose', 'rfio_readdir', 'rfio_readdir64',
+           'rfio_rename', '__mode_t', 'off64_t', 'rfio_pwrite',
+           '__blksize_t', 'rfio_fwrite', 'rfio_fseek', 'rfio_lockf',
+           'rfio_stat', 'rfio_ferror', 'rfio_rewinddir', 'rfio_getc',
+           'rfio_getcwd', 'iovec64', 'rfio_chmod', 'rfio_chdir',
+           'rfio_serror_r', 'rfio_smstat64', 'rfio_mstat64',
+           'rfio_rmdir', 'rfio_symlink', '__blkcnt64_t', '__dev_t',
+           'rfio_unlink', 'rfio_lseek64', 'rfio_munlink',
+           'rfio_pclose', 'rfio_open64_v3', 'rfio_write', '_IO_FILE',
+           'rfio_fileno', 'rfio_mstat', 'rfio_perror', 'off_t',
+           'iovec', 'rfio_chown', 'stat64', 'rfio_fread',
+           '_IO_marker', 'rfio_fstat64', '__u_quad_t', 'DIR',
+           'rfstatfs', 'dirent', '__gid_t', 'rfio_opendir', 'stat',
+           'rfio_read_v3', 'rfio_open64', '__ino_t', 'rfio_access',
+           'rfio_stat64', 'rfio_mstat_reset', 'rfio_lstat64',
+           'rfio_read', '__quad_t', 'rfio_unend', 'rfio_errmsg_r',
+           'rfio_open_v3', '__uid_t', 'rfio_write64_v3']
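+
+# --- usage sketch (not part of the generated bindings) ------------------------
+# A hedged illustration of how these ctypes bindings could be exercised; the
+# CASTOR path is purely illustrative and assumes libshift.so is resolvable at
+# runtime:
+#
+#   from ctypes import byref
+#   st = stat()
+#   rc = rfio_stat('/castor/cern.ch/some/file.root', byref(st))
+#   if rc != 0:
+#       rfio_perror('rfio_stat')
+#   else:
+#       print 'size (bytes):', st.st_size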
+
diff --git a/Tools/PyUtils/python/acmdlib.py b/Tools/PyUtils/python/acmdlib.py
new file mode 100644
index 00000000000..3101c75d1cd
--- /dev/null
+++ b/Tools/PyUtils/python/acmdlib.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.acmdlib
+# @purpose a library to ease the writing of sub-command scripts
+# @author Sebastien Binet
+# @date January 2010
+
+from __future__ import with_statement
+
+__version__ = "$Revision: 464077 $"
+__doc__ = "a library to ease the writing of sub-command scripts"
+__author__ = "Sebastien Binet"
+
+__all__ = [
+    'Command',
+    'command',
+    'argument',
+    'register',
+    'register_file',
+    ]
+
+### imports -------------------------------------------------------------------
+import sys
+import extensions as ext_plugins
+import argparse
+import textwrap
+
+from PyUtils.decorator import decorator
+
+### globals -------------------------------------------------------------------
+ACMD_GROUPNAME = 'acmdlib.commands'
+"""The name under which all commands are grouped
+"""
+
+ACMD_PARSER = argparse.ArgumentParser(
+    prog="acmd",
+    description="a general script interface with sub-commands",
+    )
+ACMD_PARSER.add_argument(
+    '--version',
+    action='version',
+    version=__version__,
+    help="show program's version number and exit")
+
+ACMD_SUBPARSERS = ACMD_PARSER.add_subparsers(
+    dest='command',
+    title='commands',
+    metavar='COMMAND',
+    )
+
+### classes -------------------------------------------------------------------
+class Command(object):
+    """A wrapper class to manage the creation of commands and their arguments
+
+    this is very heavily inspired by:
+    http://pypi.python.org/pypi/django-boss (MIT licence)
+    """
+
+    def __init__(self, fct, **kwargs):
+        object.__init__(self)
+        self.fct = fct
+        self.parser = self._make_parser(**kwargs)
+        self._init_arguments()
+        plugin_name = kwargs.get('name') or self.name
+        register(
+            plugin_name,
+            '%s:%s' % (self.fct.__module__, self.fct.__name__)
+            )
+        return
+
+    @property
+    def name(self):
+        return self.fct.__name__.replace('_','-')
+
+    @property
+    def help(self):
+        if getattr(self.fct, '__doc__', None):
+            # just the first line of the doc string
+            return self.fct.__doc__.splitlines()[0]
+
+    @property
+    def description(self):
+        if getattr(self.fct, '__doc__', None):
+            return textwrap.dedent(self.fct.__doc__)
+
+    @property
+    def add_argument(self):
+        return self.parser.add_argument
+
+    def __call__(self, *args, **kwargs):
+        return self.fct(*args, **kwargs)
+    
+    def _make_parser(self, **kwargs):
+        """Create and register a subparser for this command."""
+
+        kwargs.setdefault('help', self.help)
+        kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)
+        kwargs.setdefault('description', self.description)
+        kwargs.setdefault('name', self.name)
+        names = (kwargs.get('name') or self.name).split('.')
+        
+        def _get_subparser(a):
+            if a._subparsers:
+                for action in a._subparsers._actions:
+                    if isinstance(action, argparse._SubParsersAction):
+                        return action
+                raise RuntimeError('could not find adequate subparser')
+            return a.add_subparsers(dest='command',
+                                    title='commands',
+                                    metavar='COMMAND')
+        def _get_parser(node, idx, names):
+            name = names[idx]
+            if name in node.choices:
+                return node.choices[name]
+            full_name = ' '.join(names[:idx+1])
+            args = {
+                'name' : name,
+                'help' : 'a group of sub-commands',
+                }
+            return node.add_parser(**args)
+        
+        parser = ACMD_PARSER
+        node   = _get_subparser(parser)
+
+        for i,n in enumerate(names[:-1]):
+            node = _get_subparser(parser)
+            parser = _get_parser(node, i, names)
+                
+        node = _get_subparser(parser)
+        kwargs['name'] = names[-1]
+        parser = node.add_parser(**kwargs)
+        return parser
+
+    def _init_arguments(self):
+        if hasattr(self.fct, '_acmdlib_arguments'):
+            while self.fct._acmdlib_arguments:
+                args, kwargs = self.fct._acmdlib_arguments.pop()
+                self.add_argument(*args, **kwargs)
+        
+    pass # Command
+
+### functions -----------------------------------------------------------------
+
+def command(*args, **kwargs):
+    """Decorator to declare that a function is a command.
+    """
+    def deco(fct):
+        return Command(fct, **kwargs)
+    if args:
+        return deco(*args)
+    return deco
+
+def argument(*args, **kwargs):
+    """Decorator to add an argument to a command.
+    """
+    def deco(fct):
+        if isinstance(fct, Command):
+            cmd = fct
+            cmd.add_argument(*args, **kwargs)
+        else:
+            if not hasattr(fct, '_acmdlib_arguments'):
+                fct._acmdlib_arguments = []
+            fct._acmdlib_arguments.append((args, kwargs))
+        #print "===",args,kwargs,type(fct),fct
+        return fct
+    return deco
+    
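+# --- usage sketch -------------------------------------------------------------
+# A hedged illustration of how a sub-command is typically declared with the
+# decorators above; the command and option names are made up for the example:
+#
+#   @command(name='hello')
+#   @argument('-n', '--name', default='world', help='who to greet')
+#   def hello(args):
+#       """print a friendly greeting"""
+#       print "hello,", args.name
+#       return 0
+#
+# @argument also works when applied on top of an existing Command instance,
+# since the decorator then calls cmd.add_argument directly.
+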
+def register(name, value):
+    """Registers a plugin, given a name and value.
+
+    ex: register('check-file', 'PyUtils.CheckFileLib:fct')
+    """
+    group = ACMD_GROUPNAME
+    return ext_plugins.register(group, name, value)
+
+def register_file(path):
+    """Registers a config-like file"""
+    from ConfigParser import ConfigParser
+    parser = ConfigParser()
+    parser.read([path])
+    for group in parser.sections():
+        for name in parser.options(group):
+            value = parser.get(group, name)
+            ext_plugins.register(group, name, value)
+
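+# --- usage sketch -------------------------------------------------------------
+# register_file expects an INI-style file; a hedged example of its layout
+# (section = plugin group, option = command name, value = 'module:function'):
+#
+#   [acmdlib.commands]
+#   check-file = PyUtils.CheckFileLib:fct
+#
+# which is equivalent to calling register('check-file', 'PyUtils.CheckFileLib:fct')
+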
+# add a special command to list all registered commands
+@command
+@argument('-d','--detailed', action='store_true', default=False,
+          help='print full help of each command')
+def list_commands(args):
+    """a special command to list all the commands 'acmd' can run
+    """
+    cmds = list(ext_plugins.get(group=ACMD_GROUPNAME))
+    if len(cmds) == 0:
+        print "::: no command found in registry"
+        return 1
+    print "::: found [%i] command%s" % (len(cmds),
+                                        "s" if len(cmds)>1 else "")
+    cmds.sort(cmp=lambda x,y: cmp(x.name, y.name))
+    cmds = [cmd for cmd in cmds if cmd.name != 'list-commands']
+    for i, cmd in enumerate(cmds):
+        if args.detailed:
+            print "="*80
+        print " - %s" % (' '.join(cmd.name.split('.')),)
+        if args.detailed:
+            try:
+                cmd.load().parser.print_help()
+            except Exception,err:
+                print "** could not inspect command [%s]:\n%s" % (
+                    cmd.name,
+                    err)
+            print "="*80
+            print ""
+    return 0
+#acmdlib.register('list-commands', 'PyUtils.acmdlib:list_commands')
+
+# an argument to force the loading of all available commands
+ACMD_PARSER.add_argument(
+    '--load-commands',
+    action='store_true',
+    default=False,
+    help='force the loading of all available commands')
+def _load_commands():
+    cmds = list(ext_plugins.get(group=ACMD_GROUPNAME))
+    for cmd in cmds:
+        try:
+            cmd.load()
+        except Exception,err:
+            print "** could not load command [%s]:\n%s" % (
+                cmd.name,
+                err)
+
diff --git a/Tools/PyUtils/python/bwdcompat.py b/Tools/PyUtils/python/bwdcompat.py
new file mode 100644
index 00000000000..c6c7d9843ca
--- /dev/null
+++ b/Tools/PyUtils/python/bwdcompat.py
@@ -0,0 +1,7 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+import subprocess
+
+### monkey-patch subprocess (fwd compat w/ py-3.x) ----------------------------
+import PyCmt.bwdcompat
+### ---------------------------------------------------------------------------
diff --git a/Tools/PyUtils/python/castor.py b/Tools/PyUtils/python/castor.py
new file mode 100644
index 00000000000..8861a423406
--- /dev/null
+++ b/Tools/PyUtils/python/castor.py
@@ -0,0 +1,414 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file castor.py
+# @brief A simple helper to handle simple tasks with CASTOR
+#
+#   - nsls     : lists CASTOR name server directory. Handles wildcards only
+#                in the filename (no wildcard in path to file allowed)
+#   - stager_get: takes a path-to-file pattern and stages the matching files
+#                in batches of N (default=10) files
+#   - rfcat    : todo
+#   - rfcp     : done in a very naive way
+#   - rfiod    : todo
+#   - rfrename : todo
+#   - rfstat   : done
+#   - rfchmod  : todo
+#   - rfdir    : done
+#   - rfmkdir  : todo
+#   - rfrm     : todo
+#   - rftp     : todo
+#
+# @date May 2006
+# @author: Sebastien Binet <binet@cern.ch>
+
+import commands
+import os
+import fnmatch
+import re
+
+def group(iterator, count):
+    """
+    This function extracts items from a sequence or iterator 'count' at a time:
+    >>> list(group([0, 1, 2, 3, 4, 5, 6], 2))
+    [(0, 1), (2, 3), (4, 5)]
+    Stolen from :
+    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/439095
+    """
+    itr = iter(iterator)
+    while True:
+        yield tuple([itr.next() for i in xrange(count)])
+
+__author__  = "Sebastien Binet <binet@cern.ch>"
+__version__ = "$Revision$"
+__doc__ = """A set of simple helper methods to handle simple tasks with CASTOR.
+"""
+
+def hasWildcard(name) :
+    """Return true if the name has a UNIX wildcard (*,?,[,])
+    """
+    if ( name.count('*') > 0 or name.count('?') > 0 or
+         name.count('[') > 0 or name.count(']') > 0 ) :
+        return True
+    else :
+        return False
+
+def nsls(files, prefix=None):
+    """
+    lists CASTOR name server directory/file entries.
+    If path is a directory, nsls lists the entries in the directory;
+    they are sorted alphabetically.
+
+    `files` specifies the CASTOR pathname.
+    `prefix` specifies the prefix one wants to prepend to the path found.
+             (e.g. prefix='root://castoratlas/' or 'rfio:' or 'castor:')
+
+    ex:
+    >>> nsls('/castor/cern.ch/atlas/*')
+    >>> nsls('/castor/cern.ch/atl*/foo?[bar]/*.pool.root.?')
+    >>> nsls('/castor/cern.ch/atlas/*', prefix='root://castoratlas/')
+    """
+    _prefix = 'root://castoratlas/'
+    path, fname = os.path.split(files)
+    for p in (_prefix, 'rfio:', 'castor:'):
+        if path.startswith(p):
+            path = path[len(p):]
+            if path.startswith('//'):
+                path = path[1:]
+            if not path.startswith('/'):
+                path = '/'+path
+            break
+    if hasWildcard(path):
+        paths = nsls(path)
+        return sum([nsls(os.path.join(p,fname))
+                    for p in paths], [])
+    sc, flist = commands.getstatusoutput('nsls %s' % (path,))
+    if sc: # command failed
+        print flist
+        return []
+
+    flist = flist.split()
+    if not (os.path.basename(files) in ['', '*']): # no need to filter
+        pattern = fnmatch.translate(os.path.basename(files))
+        flist = filter(lambda x: re.search(pattern, x), flist)
+    if prefix and isinstance(prefix, basestring):
+        return [os.path.join(prefix+path, p) for p in flist]
+    else:
+        return [os.path.join(path, p) for p in flist]
+
+def _old_nsls(path) :
+    """
+    lists CASTOR name server directory/file entries.
+    If path is a directory, nsls lists the entries in the directory;
+    they are sorted alphabetically.
+
+    path specifies the CASTOR pathname. If path does not start  with  /,
+    it  is  prefixed  by  the content of the CASTOR_HOME environment
+    variable.
+
+    ex:
+    >>> nsls( '/castor/cern.ch/atlas/*' )
+    >>> nsls( 'mydata' )
+    """
+
+    wildcards = False
+    tail = "*"
+    path = os.path.expandvars(path)
+    
+    if path.endswith('/') :
+        path = path[0:len(path)-1]
+    # Do we detect a wildcard in the path we are given ?
+    # if so then we have to parse it to remove them because
+    # nsls does not understand them.
+    # The output of the command will be filtered afterwards
+    if hasWildcard(path) :
+        wildcards = True
+
+        wholepath = path.split(os.sep)
+
+        # Here we assume the wildcards are located *only* in the filename !!
+        tail      = wholepath[len(wholepath)-1]
+        if tail == '' :
+            if len(wholepath) >= 2 :
+                tail = wholepath[len(wholepath)-2]
+            else :
+                raise Exception, \
+                      "Malformed path to files: <"+path+">"
+            
+        # Check that the wildcard is not in the path to files
+        if tail.count('/') > 0 :
+            if tail.endswith('/') :
+                # the / is sitting in last position. Can safely remove it
+                tail = tail[0:len(tail)-1]
+            else :
+                raise Exception, \
+                      "No wildcard allowed in the path to files: <"+path+">"
+               
+            
+        path      = path.split(tail)[0]
+        if hasWildcard(path) :
+            raise ValueError("No wildcard allowed in the path to files: <"+path+">")
+        #print path
+        
+    status,output = commands.getstatusoutput('nsls '+path)
+
+    if status != 0 :
+        print output
+        return []
+
+    flist = output.splitlines()
+
+    if wildcards :
+        flist = fnmatch.filter(flist, tail)
+
+    for i in xrange(0,len(flist)) :
+        if flist[i].count(path) < 1:
+            flist[i] = path+"/"+flist[i]
+        flist[i] = flist[i].replace('//','/')
+    return flist
+
+def pool_nsls( path ) :
+    """
+    lists CASTOR name server directory/file entries.
+    Prepend the 'rfio:' prefix so the output list can be used as an input
+    for an xmlfile_catalog file.
+    """
+    _prefix = 'root://castoratlas/'
+    files = nsls(path)
+    for i in xrange(len(files)) :
+        files[i] = _prefix+files[i]
+        pass
+
+    return files
+
+def getFileSize( pathToFile = None ) :
+    """
+    Use the 'nsls -l' command to list the file and decipher the output string
+    to extract the size of the file.
+    Returns the size in MB.
+    """
+    if hasWildcard(pathToFile) :
+        raise Exception, \
+              "No wildcard allowed in the path to files: <"+pathToFile+">"
+    
+    status,output = commands.getstatusoutput( 'nsls -l '+pathToFile )
+    #'nsls -l $CASTOR_DIR/$FILE | awk -F ' ' '{print $5}'
+
+    if status != 0 :
+        print "** PyCastor ERROR **"
+        print output
+        return []
+
+    output = output.splitlines()
+
+    #print "output:",output
+    #print "output size= ",len(output)
+
+    if len(output) != 1 :
+        raise Exception, \
+              "Wrong status (didn't find only *1* file!!)"
+
+    output = output[0]
+    output = output.split( " " )
+    
+    result = []
+    # Removes whitespaces
+    for i in output :
+        if i != '' :
+            result.append( i )
+            pass
+        pass
+
+    size = int(result[4])/(1024.*1024.) # size in Mb
+    #print "size = ",size," Mb"
+    
+    return size
+
+def stagein( fileListPattern = None, nSlices = 10, verbose = True ) :
+    """
+    Take a path-to-file pattern and stage in all the files matching this
+    pattern, in batches of N (default=10) files.
+    """
+    files = nsls( fileListPattern )
+    if ( type(files) != type([]) or len(files) < 1 ) :
+        raise Exception, \
+              "Error, no file to stagein !!"
+
+    slices = list(group(files,nSlices))
+
+    for slice in slices :
+        stageList = ' -M '.join( [''] + [ s for s in slice ] )
+        cmd = 'stager_get %s' % stageList
+        if verbose :
+            print ">>> cmd= ",cmd
+        status,output = commands.getstatusoutput(cmd)
+        
+        if status != 0 :
+            print "** PyCastor ERROR **"
+            print output
+            pass
+        else :
+            if verbose :
+                # print output
+                pass
+            pass
+        pass
+    
+    return 0
+
+def stager_qry(inFiles):
+    """
+    Find out the stage status of the inFiles
+    returns dictionary of outStatus(files:status) status = 0|1
+    """
+    outStatus = dict()
+    for inFile in inFiles:
+        cmd = "stager_qry -M %s " % ( inFile, )
+        sc,out = commands.getstatusoutput( cmd )
+        if sc != 0:
+            print "** PyCastor ERROR **"
+            print "## Could not check status of this file [%s] !!" % inFile
+            print "## status sc=", sc
+            print "## output out=", out
+        
+        #for str in out.split():
+        #   print "out_str=", str
+        
+        if out.split()[-1] == "STAGED":
+            outStatus[inFile] = 1
+        else:
+            outStatus[inFile] = 0   
+        #print "-"*77
+
+    return outStatus
+
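+# --- usage sketch -------------------------------------------------------------
+# A hedged example of driving the staging helpers above; the pattern is purely
+# illustrative and assumes the CASTOR client tools (nsls, stager_get,
+# stager_qry) are available in the PATH:
+#
+#   files  = nsls('/castor/cern.ch/atlas/somedir/*.pool.root.*')
+#   stagein('/castor/cern.ch/atlas/somedir/*.pool.root.*', nSlices=5)
+#   status = stager_qry(files)
+#   staged = [f for f in status if status[f] == 1]
+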
+def extract_rfio(inFile, outDir):
+    """
+    Extract the list of rfio:/castor/.. files from given input file_name 
+    - Finds out STAGED status of files using stager_qry -M ...
+    - if STAGED: rfcp them into outDir 
+    - if NOT: stage them in using stager_get -M ...
+    - returns status dictionary returned by stager_qry() above
+    """
+    allGood = True
+    f = open(inFile, 'r')
+    file_text = f.read()
+    f.close()
+    
+    import re
+    import urlparse
+    def grep_path(schema, text):
+        expr = "\"" + schema + "\:.+?\""
+        lines = re.findall(expr, text)
+        lines = [line.strip('"') for line in lines ]
+        import urlparse
+        paths = [urlparse.urlparse(line)[2] for line in lines]
+        #results = [str[len(schema)+1:] for str in results if str.startswith(schema+':')]
+        return paths
+
+    path_list = grep_path("rfio", file_text)
+    print "rfio_file list extracted from input file =", inFile
+    print "-"*77; print path_list; print "-"*77
+    
+    def _print(str):
+        print str
+    
+    status_dict = stager_qry(path_list)
+    ready_files_list = [file for file in status_dict if status_dict[file] == 1]
+    print "---STAGED (ready to be copied):";  
+    p = map(_print, ready_files_list); print "-"*77
+    
+    noready_files_list = [file for file in status_dict if status_dict[file] == 0]
+    print "---NOT STAGED (not ready to be copied):";  
+    p = map(_print, noready_files_list); print "-"*77
+    
+    def _rfcp(file): #aux func. just for reporting purpose
+        print "rfcp ", file
+        return file    
+    rfcp( map(_rfcp, ready_files_list), #[file for file in ready_files_list],  
+          outDir  )
+    
+    def _stager_get(file): #aux func. just for reporting purpose
+        print "stager_get -M ", file
+        stager_get(file)       
+    map(_stager_get, noready_files_list) #[stager_get(file) for file in noready_files_list if 1 print "stager_get -M ", file]
+    
+    return status_dict #returned from stager_qry(),
+    #not completely true since the outcome of rfcp is not checked here
+        
+def stager_get(inFile):
+    """
+    STAGE IN the inFile on castor
+    """
+    allGood = True
+    cmd = "stager_get -M %s" % (inFile)
+    sc,out = commands.getstatusoutput( cmd )
+    if sc != 0:
+        print "** PyCastor ERROR **"
+        print "## Could not stager_get this file [%s] !!" % inFile
+        allGood = False
+        pass
+    if allGood:
+        return 0
+    return 1
+
+def rfcp( inFiles, outDir ):
+    """
+    Copy the inFiles into the outDir
+    """
+    allGood = True
+    for inFile in inFiles:
+        cmd = "rfcp %s %s" % ( inFile,
+                               os.path.join( outDir,
+                                             os.path.basename(inFile) ) )
+        sc,out = commands.getstatusoutput( cmd )
+        if sc != 0:
+            print "** PyCastor ERROR **"
+            print "## Could not copy this file [%s] !!" % inFile
+            allGood = False
+            pass
+        pass
+    if allGood:
+        return 0
+    return 1
+    
+
+def rfstat (pathname):
+    """rfstat <file_path>
+    Perform a stat system call on the given path
+       @param `pathname` to a file or directory on a castor node
+       @return a dictionary of entries built from rfstat's summary
+    """
+    cmd = 'rfstat %s' % pathname
+    sc, out = commands.getstatusoutput (cmd)
+    if sc != 0:
+        print "** PyCastor ERROR **"
+        print ":: command: [%s]" % cmd
+        print ":: status:  [%s]" % sc
+        print out
+        raise RuntimeError (sc)
+
+    stat = dict()
+    for l in out.splitlines():
+        l = l.strip()
+        o = l.split(':')
+        hdr = o[0].strip()
+        tail= ''.join(l.split(o[0]+':')[1:]).strip()
+        stat[hdr] = tail
+    return stat
+
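+# --- usage sketch -------------------------------------------------------------
+# rfstat returns a dict keyed by the header of each line of 'rfstat' output;
+# the path below is purely illustrative:
+#
+#   info = rfstat('/castor/cern.ch/atlas/somedir/somefile.pool.root')
+#   for field, value in info.items():
+#       print field, ':', value
+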
+def rfdir (paths, recursive=False):
+    """ rfdir file|directory
+    """
+    if isinstance(paths, str):
+        paths = [paths]
+        
+    cmd = "rfdir %s %s" % ('-R' if recursive else '',
+                           ' '.join(paths))
+    sc, out = commands.getstatusoutput (cmd)
+    return sc, out
+        
diff --git a/Tools/PyUtils/python/coverage.py b/Tools/PyUtils/python/coverage.py
new file mode 100644
index 00000000000..8d3b01f57eb
--- /dev/null
+++ b/Tools/PyUtils/python/coverage.py
@@ -0,0 +1,338 @@
+#
+# $Id: coverage.py,v 1.1 2008-12-16 05:46:38 ssnyder Exp $
+#
+# File: coverage.py
+# Purpose: A coverage utility for component tests.
+# Created: Dec 2008, sss from D0 code circa Jan, 2000.
+#
+# Derived from trace.py by Andrew Dalke and Skip Montanaro, mainly by
+# cutting out a whole bunch of stuff.
+#
+# To do coverage testing for some component 'Foo', add the following
+# to the start of Foo's test module:
+#
+#   import python_util.coverage
+#   python_util.coverage.Coverage ('Foo')
+#
+# If all executable lines in Foo are covered by the test, there will be
+# no output.  Otherwise, a summary of the number of uncovered lines
+# will be printed to stdout, and a file Foo.cover will be created
+# with an annotated source listing for Foo, showing coverage counts
+# and uncovered lines.
+#
+# You can also run coverage testing in conjunction with doctest-based
+# regression tests.  In general, doctests can be both in the source file
+# itself, or in a separate test file.  If you have a separate test file,
+# you can run all the tests for a module DIR.MOD like this:
+#
+#   from PyUtils import coverage
+#   c = coverage.Coverage ('DIR.MOD')
+#   c.doctest_cover ()
+#
+# This will run any doctests in the DIR.MOD source file, plus any
+# additional doctests in the test file.  If any lines in the DIR.MOD
+# source file are uncovered, a coverage report will be generated.
+#
+# The coverage testing may be inhibited by setting the environment
+# variable NOCOVER.
+#
+# Coverage testing for an individual line may be suppressed by
+# adding '#pragma: NO COVER' to the end of the line.
+#
+# Original permission notice:
+# Copyright 1999, Bioreason, Inc., all rights reserved.
+# Author: Andrew Dalke
+#
+# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
+# Author: Skip Montanaro
+#
+# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
+#
+# Permission to use, copy, modify, and distribute this Python software
+# and its associated documentation for any purpose without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# neither Automatrix nor Bioreason be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+
+import sys
+import string
+
+## The completely brain-damaged fnorb setup overrides the builtin
+## parser module, which we need!  Cudgel it out of the way.
+#sys.path = [x for x in sys.path if string.find (x, '/fnorb/') < 0]
+
+import re
+import os
+import __builtin__
+import dis
+import sys
+
+running_coverage = 0
+
+
+# Given a code string, return the SET_LINENO information
+# This works up to python 2.2
+def _find_LINENO_from_code_22(code):
+    """return all of the SET_LINENO information from a code block"""
+    co_code = code.co_code
+    linenos = {}
+
+    # This code was filched from the `dis' module then modified
+    n = len(co_code)
+    i = 0
+    prev_op = None
+    prev_lineno = 0
+    while i < n:
+        c = co_code[i]
+        op = ord(c)
+        if op == dis.SET_LINENO:
+            if prev_op == op:
+                # two SET_LINENO in a row, so the previous didn't
+                # indicate anything.  This occurs with triple
+                # quoted strings (?).  Remove the old one.
+                del linenos[prev_lineno]
+            prev_lineno = ord(co_code[i+1]) + ord(co_code[i+2])*256
+            linenos[prev_lineno] = 1
+        if op >= dis.HAVE_ARGUMENT:
+            i = i + 3
+        else:
+            i = i + 1
+        prev_op = op
+    return linenos
+
+
+# This works from python 2.3 on.
+def _find_LINENO_from_code_23 (code):
+    linenos = {}
+    for (o, l) in dis.findlinestarts (code):
+        linenos[l] = 1
+    return linenos
+
+# Choose the appropriate version.
+if dis.__dict__.has_key ('SET_LINENO'):
+    _find_LINENO_from_code = _find_LINENO_from_code_22
+else:
+    _find_LINENO_from_code = _find_LINENO_from_code_23
+
+def _find_LINENO(code):
+    """return all of the SET_LINENO information from a code object"""
+    import types
+
+    # get all of the lineno information from the code of this scope level
+    #linenos = _find_LINENO_from_string(code.co_code)
+    linenos = _find_LINENO_from_code(code)
+
+    # and check the constants for references to other code objects
+    for c in code.co_consts:
+        if type(c) == types.CodeType:
+            # find another code object, so recurse into it
+            linenos.update(_find_LINENO(c))
+    return linenos
+
+def find_executable_linenos(filename):
+    """return a dict of the line numbers from executable statements in a file
+
+    Works by finding all of the code-like objects in the module then searching
+    the byte code for 'SET_LINENO' terms (so this won't work on -O files).
+
+    """
+    import parser
+
+    prog = open(filename).read()
+    ast = parser.suite(prog)
+    code = parser.compileast(ast, filename)
+
+    # The only way I know to find line numbers is to look for the
+    # SET_LINENO instructions.  Isn't there some way to get it from
+    # the AST?
+
+    return _find_LINENO(code)
+
+class Coverage:
+    def __init__(self, modname, toplev_name = 'main'):
+        global running_coverage
+        running_coverage = 1
+        self.modname = modname
+        self.toplev_name = toplev_name
+        self.counts = {}   # keys are linenumber
+        self.mutex = None
+        if not os.environ.has_key ('NOCOVER'):
+            #self.save_import = __builtin__.__import__
+            #__builtin__.__import__ = self.import_hook
+            sys.settrace (self.trace)
+        return
+
+
+    #def import_hook (self, name, globals={}, locals={}, fromlist=[]):
+    #    loaded = sys.modules.has_key (name)
+    #    mod = self.save_import (name, globals, locals, fromlist)
+    #    if not loaded:
+    #        if name == 'thread_util':
+    #            self.set_thread_hook (mod)
+    #        elif name == 'itc' or name == 'd0me':
+    #            self.set_itc_hook (mod)
+    #    return mod
+
+
+    #def set_thread_hook (self, thread_util):
+    #    if not self.mutex:
+    #        self.mutex = thread_util.ACE_Thread_Mutex ()
+    #    thread_util.Pythread._save_run = thread_util.Pythread._run
+    #    def _run (self, coverage=self):
+    #        sys.settrace (coverage.trace)
+    #        return self._save_run ()
+    #    thread_util.Pythread._run = _run
+    #    return
+
+    #def set_itc_hook (self, itc):
+    #    if not self.mutex:
+    #        import thread_util
+    #        assert self.mutex != None
+    #    self.old_itc_hook = itc._callback_hook
+    #    def itc_hook (self=self):
+    #        sys.settrace (self.trace)
+    #        if self.old_itc_hook: self.old_itc_hook ()
+    #        return
+    #    itc._callback_hook = itc_hook
+    #    return
+
+
+    def trace(self, frame, why, arg):
+        # something is fishy about getting the file name
+        modulename = frame.f_globals.get ("__name__")
+        if why == 'call':
+            # Don't bother tracing if we're not in one of these modules.
+            if not (modulename == self.modname or
+                    (modulename == '__main__' and
+                     frame.f_code.co_name == self.toplev_name)):
+                return None
+        if why == 'line':
+            if modulename == self.modname:
+                lineno = frame.f_lineno
+
+                # record the file name and line number of every trace
+                if self.mutex: self.mutex.acquire ()
+                self.counts[lineno] = self.counts.get(lineno, 0) + 1
+                if self.mutex: self.mutex.release ()
+
+        elif why == 'return':
+            if frame.f_code.co_name == self.toplev_name:
+                sys.settrace (None)
+                if self.mutex: self.mutex.acquire ()
+                self.analyze ()
+                m = self.mutex
+                self.mutex = None
+                if m: m.release ()
+
+        return self.trace
+
+    def analyze (self):
+        filename = sys.modules[self.modname].__file__
+        if filename[-4:] == ".pyc" or filename[-4:] == ".pyo":
+            orig_filename = filename[:-4] + '.py'
+        else:
+            orig_filename = filename
+
+        # Get the original lines from the .py file
+        try:
+            lines = open(orig_filename, 'r').readlines()
+        except IOError, err:
+            sys.stderr.write(
+                "%s: Could not open %s for reading because: %s - skipping\n" %
+\
+                ("trace", `filename`, err.strerror))
+            return
+
+        # there are many places where this is insufficient, like a blank
+        # line embedded in a multiline string.
+        blank = re.compile(r'^\s*(#.*)?$')
+
+        executable_linenos = find_executable_linenos(orig_filename)
+
+        lines_hit = self.counts
+        uncovered = 0
+        outlines = []
+        for i in range(len(lines)):
+            line = lines[i]
+
+            # do the blank/comment match to try to mark more lines
+            # (help the reader find stuff that hasn't been covered)
+            if lines_hit.has_key(i+1):
+                # count precedes the lines that we captured
+                prefix = '%5d: ' % lines_hit[i+1]
+            elif blank.match(line):
+                # blank lines and comments are preceded by dots
+                prefix = '    . '
+            else:
+                # lines preceded by no marks weren't hit
+                # Highlight them if so indicated, unless the line contains
+                # '#pragma: NO COVER' (it is possible to embed this into
+                # the text as a non-comment; no easy fix)
+                if executable_linenos.has_key(i+1) and \
+                   string.find(lines[i], '#pragma: NO COVER') == -1:
+                    prefix = '>>>>>> '
+                    uncovered = uncovered + 1
+                else:
+                    prefix = ' '*7
+            outlines.append (prefix + string.expandtabs(line, 8))
+
+        if uncovered:
+            print "*** There were %d uncovered lines." % uncovered
+        else:
+            return
+
+        # build list file name by appending a ".cover" to the module name
+        # and sticking it into the specified directory
+        listfilename = self.modname + ".cover"
+        try:
+            outfile = open(listfilename, 'w')
+        except IOError, err:
+            sys.stderr.write(
+                '%s: Could not open %s for writing because: %s - skipping\n' %
+                ("trace", `listfilename`, err.strerror))
+            return
+
+        for l in outlines:
+            outfile.write (l)
+
+        outfile.close()
+        
+        return
+
+
+    def doctest_cover (self, *args, **kw):
+        import doctest
+        m = __import__ (self.modname)
+
+        # Note a peculiarity of __import__: if modname is like A.B,
+        # then it returns A not B...
+        mm = self.modname.split ('.')
+        if len(mm) > 1:
+            m = getattr (m, mm[-1])
+
+        oldrun = doctest.DocTestRunner.run
+        def xrun (xself, *args, **kw):
+            sys.settrace (self.trace)
+            return oldrun (xself, *args, **kw)
+        doctest.DocTestRunner.run = xrun
+
+        import bdb
+        old_set_continue = bdb.Bdb.set_continue
+        def xcontinue (xself):
+            old_set_continue (xself)
+            sys.settrace (self.trace)
+            return
+        bdb.Bdb.set_continue = xcontinue
+
+        doctest.testmod (m, *args, **kw)
+
+        main = sys.modules['__main__']
+        if m != main:
+            doctest.testmod (main, *args, **kw)
+
+        self.analyze()
+        return
diff --git a/Tools/PyUtils/python/dbsqlite.py b/Tools/PyUtils/python/dbsqlite.py
new file mode 100644
index 00000000000..932eceeadf3
--- /dev/null
+++ b/Tools/PyUtils/python/dbsqlite.py
@@ -0,0 +1,240 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/python/dbsqlite.py
+# adapted from: http://svn.python.org/view/sandbox/trunk/dbm_sqlite
+"""Dbm based on sqlite -- Needed to support shelves
+
+Issues:
+
+    # ??? how to coordinate with whichdb
+    # ??? Any difference between blobs and text
+    # ??? does default encoding affect str-->bytes or PySqlite3 always use UTF-8
+    # ??? what is the correct isolation mode
+
+"""
+
+__version__ = "$Revision: 225332 $"
+__all__ = ['error', 'open']
+
+import sqlite3
+import cPickle as pickle
+from UserDict import DictMixin
+import collections
+from operator import itemgetter
+import shelve
+
+error = sqlite3.DatabaseError
+
+class SQLhash(object, DictMixin):
+    def __init__(self, filename=':memory:', flags='r', mode=None):
+        # XXX add flag/mode handling
+        #   c -- create if it doesn't exist
+        #   n -- new empty
+        #   w -- open existing
+        #   r -- readonly
+        if 'n' in flags:
+            import os
+            if os.path.exists(filename):
+                os.remove(filename)
+
+        MAKE_SHELF = 'CREATE TABLE IF NOT EXISTS shelf (key TEXT PRIMARY KEY NOT NULL, value BLOB)'
+        self.conn = sqlite3.connect(filename)
+        self.conn.text_factory = str
+        if 'r' not in flags or filename==':memory:':
+            self.conn.execute(MAKE_SHELF)
+        self.conn.commit()
+
+    def __len__(self):
+        GET_LEN = 'SELECT COUNT(*) FROM shelf'
+        return self.conn.execute(GET_LEN).fetchone()[0]
+
+    def __bool__(self):
+        # returns None if count is zero
+        GET_BOOL = 'SELECT MAX(ROWID) FROM shelf'
+        return self.conn.execute(GET_BOOL).fetchone()[0] is not None
+
+    def keys(self):
+        return list(self.iterkeys())
+
+    def values(self):
+        return list(self.itervalues())
+
+    def items(self):
+        return list(self.iteritems())
+
+    def __iter__(self):
+        return self.iterkeys()
+
+    def iterkeys(self):
+        GET_KEYS = 'SELECT key FROM shelf ORDER BY ROWID'
+        return iter(SQLHashKeyIterator(self.conn, GET_KEYS, (0,)))
+
+    def itervalues(self):
+        GET_VALUES = 'SELECT value FROM shelf ORDER BY ROWID'
+        return iter(SQLHashValueIterator(self.conn, GET_VALUES, (0,)))
+
+    def iteritems(self):
+        GET_ITEMS = 'SELECT key, value FROM shelf ORDER BY ROWID'
+        return iter(SQLHashItemIterator(self.conn, GET_ITEMS, (0, 1)))
+
+    def __contains__(self, key):
+        HAS_ITEM = 'SELECT 1 FROM shelf WHERE key = ?'
+        return self.conn.execute(HAS_ITEM, (key,)).fetchone() is not None
+
+    def __getitem__(self, key):
+        GET_ITEM = 'SELECT value FROM shelf WHERE key = ?'
+        item = self.conn.execute(GET_ITEM, (key,)).fetchone()
+        if item is None:
+            raise KeyError(key)
+
+        return pickle.loads(item[0])
+
+    def __setitem__(self, key, value):       
+        ADD_ITEM = 'REPLACE INTO shelf (key, value) VALUES (?,?)'
+        value = pickle.dumps(value)
+        self.conn.execute(ADD_ITEM, (key, value))
+        #self.conn.commit()
+
+    def __delitem__(self, key):
+        if key not in self:
+            raise KeyError(key)
+        DEL_ITEM = 'DELETE FROM shelf WHERE key = ?'
+        self.conn.execute(DEL_ITEM, (key,))
+        #self.conn.commit()
+
+    def update(self, items=(), **kwds):
+        try:
+            items = items.items()
+            items = [(k,pickle.dumps(v)) for k,v in items]
+        except AttributeError:
+            pass
+
+        UPDATE_ITEMS = 'REPLACE INTO shelf (key, value) VALUES (?, ?)'
+        self.conn.executemany(UPDATE_ITEMS, items)
+        self.conn.commit()
+        if kwds:
+            self.update(kwds)
+
+    def clear(self):        
+        CLEAR_ALL = 'DELETE FROM shelf; VACUUM;'
+        self.conn.executescript(CLEAR_ALL)
+        self.conn.commit()
+
+    def sync(self):
+        if self.conn is not None:    
+            self.conn.commit()
+
+    def close(self):
+        if self.conn is not None:
+            self.conn.commit()
+            self.conn.close()
+            self.conn = None
+
+    def __del__(self):
+        self.close()
+
+def open(file=None, *args, **kw):
+    if file is not None:
+        return SQLhash(file, *args, **kw)
+    return SQLhash()
+
+def open_shelf(file=None, *args, **kw):
+    _db = open(file, *args, **kw)
+    return shelve.Shelf(_db)
+
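+# --- usage sketch -------------------------------------------------------------
+# A hedged example of the shelve-like interface; 'example.db' is an arbitrary
+# file name chosen for the illustration:
+#
+#   db = open_shelf('example.db', flags='c')
+#   db['run_numbers'] = [1234, 1235]
+#   db.sync()
+#   print db['run_numbers']
+#   db.close()
+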
+class SQLHashKeyIterator(object):
+    def __init__(self, conn, stmt, indices):
+        c = conn.cursor()
+        c.execute(stmt)
+        
+        self.iter = iter(c)
+        self.getter = itemgetter(*indices)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        return self.getter(self.iter.next())
+
+class SQLHashValueIterator(object):
+    def __init__(self, conn, stmt, indices):
+        c = conn.cursor()
+        c.execute(stmt)
+        
+        self.iter = iter(c)
+        self.getter = itemgetter(*indices)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        o = self.getter(self.iter.next())
+        return pickle.loads(o)
+
+class SQLHashItemIterator(object):
+    def __init__(self, conn, stmt, indices):
+        c = conn.cursor()
+        c.execute(stmt)
+        
+        self.iter = iter(c)
+        self.getter = itemgetter(*indices)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        o = self.getter(self.iter.next())
+        k = o[0]
+        v = pickle.loads(o[1])
+        return (k,v)
+
+if __name__ == '__main__':
+    for d in SQLhash(flags='n'), SQLhash('example',flags='n'):
+        list(d)
+        print(list(d), "start")
+        d['abc'] = 'lmno'
+        print(d['abc'])    
+        d['abc'] = 'rsvp'
+        d['xyz'] = 'pdq'
+        print(d.items())
+        print(d.values())
+        print('***', d.keys())
+        print(list(d), 'list')
+        d.update(p='x', q='y', r='z')
+        print(d.items())
+        
+        del d['abc']
+        try:
+            print(d['abc'])
+        except KeyError:
+            pass
+        else:
+            raise Exception('oh noooo!')
+        
+        try:
+            del d['abc']
+        except KeyError:
+            pass
+        else:
+            raise Exception('drat!')
+
+        print(list(d))
+        print(bool(d), True)        
+        d.clear()
+        print(bool(d), False)
+        print(list(d))
+        d.update(p='x', q='y', r='z')
+        print(list(d))
+        d['xyz'] = 'pdq'
+
+        d['a_list'] = range(5)
+        print(d['a_list'])
+
+        d['a_dict'] = {1:'one',2:'two'}
+        print(d['a_dict'])
+
+        d['a_tuple'] = (1,2,3,4)
+        print(d['a_tuple'])
+        
+        print()
+        d.close()
diff --git a/Tools/PyUtils/python/decorator.py b/Tools/PyUtils/python/decorator.py
new file mode 100644
index 00000000000..0294c91346f
--- /dev/null
+++ b/Tools/PyUtils/python/decorator.py
@@ -0,0 +1,4 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# 
+from PyCmt.decorator import *
diff --git a/Tools/PyUtils/python/fileutils.py b/Tools/PyUtils/python/fileutils.py
new file mode 100644
index 00000000000..bdd78283e5d
--- /dev/null
+++ b/Tools/PyUtils/python/fileutils.py
@@ -0,0 +1,297 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+"""
+A collection of UNIX inspired functions for working with the filesystem.
+
+This module is inspired by Ruby's FileUtils library and of course UNIX.
+
+see: http://github.com/clutchski/fileutils
+"""
+
+from __future__ import with_statement
+
+__author__ = 'clutchski@gmail.com'
+
+# FIXME: still to implement ls, ln, du, df
+
+import itertools
+import grp
+import os
+import pwd as password_db
+import shutil
+import stat
+
+#
+# statics
+#
+
+NL = os.linesep
+
+# chmod modes by permission type, ordered by user, group, other
+
+READ_MODES  = (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH)
+WRITE_MODES = (stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH)
+EXEC_MODES  = (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
+MODES = (READ_MODES, WRITE_MODES, EXEC_MODES)
+
+# chmod modes by target, ordered by read, write, execute
+
+USER_MODES  = [m[0] for m in MODES]
+GROUP_MODES = [m[1] for m in MODES]
+OTHER_MODES = [m[2] for m in MODES]
+
+# chmod permission bits
+
+READ_BIT       = 4  # e.g. chmod XYZ is user readable if X >= 4
+WRITE_BIT      = 2
+EXECUTABLE_BIT = 1
+
+# error messages
+
+ERROR_FILE_EXISTS = '[Errno 17] File exists'
+
+def _is_str(obj):
+    return isinstance(obj, basestring)
+
+def _list(paths):
+    return [paths] if _is_str(paths) else paths
+
+def cd(path):
+    """ Change the working directory to the given path. """
+    os.chdir(path)
+
+def pwd():
+    """ Return the current working directory. """
+    return os.getcwd()
+
+def mkdir(dirs):
+    """ Create the given directory or list of directories. """
+    dirs = _list(dirs)
+    map(os.mkdir, dirs)
+
+def mkdir_p(dirs):
+    """ Create the given directory or list of directories, along with any
+        missing parent directories. This function is idempotent, so no 
+        errors will be raised if a directory already exists.
+    """
+    dirs = _list(dirs)
+    for dir_ in dirs:
+        try:
+            os.makedirs(dir_)
+        except OSError, err:
+            #FIXME: possible race condition in the isdir check. is there a
+            #way to avoid it?
+            if ERROR_FILE_EXISTS in str(err) and os.path.isdir(dir_):
+                # mkdir_p is idempotent in UNIX, thus here as well
+                pass
+            else:
+                raise
+
+def cp(paths, dest):
+    """ Copy the given file or list of files to the destination. When copying 
+        more than one file, the destination must be a directory.
+    """
+    paths = _list(paths)
+    if len(paths) > 1:
+        if not os.path.exists(dest) or not os.path.isdir(dest):
+            raise OSError("target '%s' is not a directory" % dest)
+    # use imap because it terminates at the end of the shortest iterable
+    for _ in itertools.imap(shutil.copy, paths, itertools.repeat(dest)):
+        pass
+
+def _rm_path(path, force=False, recursive=False):
+    if not os.path.exists(path):
+        if force:
+            # rm -f ignores missing paths
+            return
+        raise OSError('no such file or directory: %s' % path)
+    elif not is_writeable(path) and not force:
+        msg = 'cannot rm write-protected file or directory: %s' % path
+        raise OSError(msg)
+    if os.path.isdir(path):
+        if not recursive:
+            raise OSError("cannot remove directory: %s" % path)
+        for child_path in os.listdir(path):
+            _rm(os.path.join(path, child_path), force, recursive)
+        os.rmdir(path)
+    else:
+        os.remove(path)
+
+def _rm(paths, force=False, recursive=False):
+    paths = _list(paths)
+    for path in paths:
+        _rm_path(path, force, recursive)
+
+def rm(files):
+    """ Remove the given file or list of files. """
+    _rm(files)
+
+def rm_f(files):
+    """ Remove the given file or list of files, ignoring non-existant 
+        and write-protected files.
+    """
+    _rm(files, force=True)
+
+def rm_r(paths):
+    """ Recursively remove the given paths or list of paths. """
+    _rm(paths, recursive=True)
+
+def rm_rf(paths):
+    """ Recursively remove the given paths or list of paths, ignoring
+        non-existent and write-protected files.
+    """
+    _rm(paths, force=True, recursive=True)
+
+def rmdir(paths):
+    """ Alias for "rm_r" """
+    rm_r(paths)
+
+def _is_valid_mode(mode):
+    # mode must be a string because literal ints cannot start with zero
+    return _is_str(mode)            \
+       and len(mode) == 4           \
+       and mode.isdigit()           \
+       and mode[0] in ('0', '1')    \
+       and not any((d in mode for d in ['8','9'])) 
+
+def chmod(mode, paths):
+    """ Apply the given permissions to the path or list of paths. The 
+        permissions mode must be specified in octal notation, for example,
+        "0755". 
+    """
+    paths = _list(paths)
+    if not _is_valid_mode(mode):
+        raise OSError('invalid chmod mode: %s' % mode)
+    sticky_bit, user_bit, group_bit, other_bit = [int(c) for c in mode]
+    bit_to_modes = ( (user_bit, USER_MODES)
+                   , (group_bit, GROUP_MODES)
+                   , (other_bit, OTHER_MODES)
+                   )
+    new_mode = 0
+    for bit, (read_mode, write_mode, exec_mode) in bit_to_modes:
+        if bit >= READ_BIT:
+            new_mode = new_mode | read_mode
+            bit = bit - READ_BIT
+        if bit >= WRITE_BIT:
+            new_mode = new_mode | write_mode
+            bit = bit - WRITE_BIT
+        if bit >= EXECUTABLE_BIT:
+            new_mode = new_mode | exec_mode
+    #FIXME: handle sticky bit
+    for path in paths:
+        os.chmod(path, new_mode)
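+
+# A minimal usage sketch (hypothetical files); the mode is a four-character
+# octal string, as for the chmod(1) command:
+#   chmod('0755', 'run.sh')            # rwxr-xr-x
+#   chmod('0644', ['a.cfg', 'b.cfg'])  # rw-r--r-- on both files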
+
+def chmod_R(mode, paths):
+    """ Apply the given permissions recursively to the given paths. The
+        "chmod" function documentation describes the mode argument.
+    """
+    for path in _list(paths):
+        if not os.path.exists(path):
+            raise OSError("no such file or directory: '%s'" % path)
+        chmod(mode, path)
+        if os.path.isdir(path):
+            child_paths = (os.path.join(path, c) for c in os.listdir(path))
+            for child_path in child_paths:
+                chmod_R(mode, child_path)
+
+def mv(paths, dest):
+    """ Move the given files or directories to the destination path. If more
+        than one element is being moved, the destination must be a directory.
+    """
+    paths = _list(paths)
+    if len(paths) > 1:
+        if not os.path.exists(dest):
+            raise OSError("no such file or directory: '%s'" % dest)
+        if not os.path.isdir(dest):
+            raise OSError("target '%s' is not a directory" % dest)
+    for path in paths:
+        if not os.path.exists(path):
+            raise OSError('no such file or directory: %s' % path)
+        shutil.move(path, dest)
+
+def touch(paths):
+    """ Update the access and modification times of the given path or list of
+        paths. Any non-existent files will be created.
+    """
+    for path in _list(paths):
+        if os.path.exists(path) and not is_writeable(path):
+            raise OSError("can't touch write-protected path: %s" % path)
+        with open(path, 'a'):
+            os.utime(path, None)
+
+def chown(user, group, paths):
+    """ Set the user and group ownership of the given path or list 
+        of paths. If the user or group is None, that attribute is unchanged.
+    """
+    paths = _list(paths)
+    user_id = group_id = -1 # defaults which leave ownership unchanged
+    if user is not None:
+        try:
+            user_id = password_db.getpwnam(user)[2]
+        except KeyError:
+            raise OSError("no such user: %s" % user)
+    if group is not None:
+        try:
+            group_id = grp.getgrnam(group)[2]
+        except KeyError:
+            raise OSError("no such group: %s" % group)
+
+    for path in paths:
+        os.chown(path, user_id, group_id)
+
+def chown_R(user, group, paths):
+    """ Recursively set the user and group ownership of the given path or
+        list of paths.
+    """
+    for path in _list(paths):
+        if not os.path.exists(path):
+            raise OSError("no such file or directory: '%s'" % path)
+        chown(user, group, path)
+        if os.path.isdir(path):
+            child_paths = (os.path.join(path, c) for c in os.listdir(path))
+            for child_path in child_paths:
+                chown_R(user, group, child_path)
+        
+def _path_has_permissions(path, modes):
+    """ Return True if the given path has each of the permissions
+        corresponding to the given stat modes (e.g. stat.S_IXOTH).
+    """
+    if not os.path.exists(path):
+        msg = "no such file or directory: %s" % path
+        raise OSError(msg)
+    if not modes:
+        raise OSError("must specify permissions to check")
+    return all((os.stat(path).st_mode & m for m in modes))
+
+def _get_modes_for_target(target, u_mode, g_mode, o_mode):
+    modes = []
+    target = target.lower()
+    all_ = 'a' in target
+    if all_ or 'u' in target:
+        modes.append(u_mode)
+    if all_ or 'g' in target:
+        modes.append(g_mode)
+    if all_ or 'o' in target:
+        modes.append(o_mode)
+    return modes
+
+def is_readable(path, by='u'):
+    """ Return True if the path is readable by all of the populations
+    specified, False otherwise.
+    """
+    modes = _get_modes_for_target(by, *READ_MODES)
+    return _path_has_permissions(path, modes)
+
+def is_writeable(path, by='u'):
+    """ Return True if the path is writeable by all of the populations
+    specified, False otherwise.
+    """
+    modes = _get_modes_for_target(by, *WRITE_MODES)
+    return _path_has_permissions(path, modes)
+
+def is_executable(path, by='u'):
+    """ Return True if the path is executable by all of the populations
+    specified, False otherwise.
+    """
+    modes = _get_modes_for_target(by, *EXEC_MODES)
+    return _path_has_permissions(path, modes)
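+
+# A minimal usage sketch (hypothetical paths) for the permission queries above:
+#   is_readable('setup.cfg')            # readable by the owning user?
+#   is_writeable('setup.cfg', by='go')  # writeable by both group and other?
+#   is_executable('run.sh', by='a')     # executable by user, group and other?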
diff --git a/Tools/PyUtils/python/merge_join.py b/Tools/PyUtils/python/merge_join.py
new file mode 100644
index 00000000000..0e8387a150d
--- /dev/null
+++ b/Tools/PyUtils/python/merge_join.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+"""For data stored by a sorted key, perform a merge join over parallel
+iteration through multiple data sources.
+
+Author: Joel Nothman, me@joelnothman.com
+        http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/576371
+        
+Some sequences to join:
+>>> fathers = [
+...     ('Abe', 'Jim'),
+...     ('Benny', 'John'),
+...     ('David', 'Jacob'),
+...     ('Evan', 'Jonas'),
+... ]
+>>> mothers = [
+...     ('Abe', 'Michelle'),
+...     ('Benny', 'Mary'),
+...     ('Caleb', 'Madeline'),
+...     ('Evan', 'Marna'),
+... ]
+>>> phones = [
+...     ('Benny', '000-000-0002'),
+...     ('David', '000-000-0004'),
+... ]
+
+We wish to retrieve row data combining each of these columns:
+
+>>> for t in merge_join(fathers, mothers, phones):
+...     print t
+('Abe', 'Jim', 'Michelle', None)
+('Benny', 'John', 'Mary', '000-000-0002')
+('Caleb', None, 'Madeline', None)
+('David', 'Jacob', None, '000-000-0004')
+('Evan', 'Jonas', 'Marna', None)
+
+"""
+
+def _first_iter_vals(iters):
+    """Generate the first values of each iterator."""
+    for it in iters:
+        try:
+            yield it.next()
+        except StopIteration:
+            yield None
+
+class OutOfDataError(Exception): pass
+
+def _merge_join_next(iters, cur_pairs):
+    """Generate the values of the next tuple (key, v1, ..., vn) by finding the
+    minimum key available, and returning the corresponding values where
+    available while advancing the respective iterators."""
+
+    # Find the next key, or quit if all keys are None
+    try:
+        min_key = min(p[0] for p in cur_pairs if p)
+    except ValueError:
+        raise OutOfDataError
+
+    # Yield the key as the first tuple element
+    yield min_key
+
+    for i, (it, p) in enumerate(zip(iters, cur_pairs)):
+        try:
+            k, v = p
+        except TypeError:
+            # p is None => the iterator has stopped
+            yield None
+            continue
+
+        if k != min_key:
+            # No data for this key
+            yield None
+            continue
+
+        # Yes data for this key: yield it
+        yield v
+
+        # Update cur_pairs for this iterator
+        try:
+            cur_pairs[i] = it.next()
+        except StopIteration:
+            cur_pairs[i] = None
+
+
+def merge_join(*iters):
+    """Given a series of n iterators whose data are of form ``(key, value)``,
+    where the keys are sorted and unique for each iterator, generates tuples
+    ``(key, val_1, val_2, ..., val_n)`` for all keys, where ``val_i`` is the
+    value corresponding to ``key`` in the ``i``th iterator, or None if no such
+    pair exists for the ``i``th iterator."""
+
+    iters = [iter(it) for it in iters]
+    cur_pairs = list(_first_iter_vals(iters))
+    try:
+        while True:
+            yield tuple(_merge_join_next(iters, cur_pairs))
+    except OutOfDataError:
+        pass
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/Tools/PyUtils/python/path.py b/Tools/PyUtils/python/path.py
new file mode 100644
index 00000000000..2286a8a28e6
--- /dev/null
+++ b/Tools/PyUtils/python/path.py
@@ -0,0 +1,1007 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+""" path.py - An object representing a path to a file or directory.
+
+Example:
+
+from path import path
+d = path('/home/guido/bin')
+for f in d.files('*.py'):
+    f.chmod(0755)
+
+This module requires Python 2.2 or later.
+
+
+URL:     http://www.jorendorff.com/articles/python/path
+Author:  Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
+Date:    9 Mar 2007
+"""
+
+
+# TODO
+#   - Tree-walking functions don't avoid symlink loops.  Matt Harrison
+#     sent me a patch for this.
+#   - Bug in write_text().  It doesn't support Universal newline mode.
+#   - Better error message in listdir() when self isn't a
+#     directory. (On Windows, the error message really sucks.)
+#   - Make sure everything has a good docstring.
+#   - Add methods for regex find and replace.
+#   - guess_content_type() method?
+#   - Perhaps support arguments to touch().
+
+from __future__ import generators
+
+import sys, warnings, os, fnmatch, glob, shutil, codecs
+try:
+    from hashlib import md5
+except ImportError:
+    md5 = None
+
+__version__ = '2.2'
+__all__ = ['path']
+
+# Platform-specific support for path.owner
+if os.name == 'nt':
+    try:
+        import win32security
+    except ImportError:
+        win32security = None
+else:
+    try:
+        import pwd
+    except ImportError:
+        pwd = None
+
+# Pre-2.3 support.  Are unicode filenames supported?
+_base = str
+_getcwd = os.getcwd
+try:
+    if os.path.supports_unicode_filenames:
+        _base = unicode
+        _getcwd = os.getcwdu
+except AttributeError:
+    pass
+
+# Pre-2.3 workaround for booleans
+try:
+    True, False
+except NameError:
+    True, False = 1, 0
+
+# Pre-2.3 workaround for basestring.
+try:
+    basestring
+except NameError:
+    basestring = (str, unicode)
+
+# Universal newline support
+_textmode = 'r'
+if hasattr(file, 'newlines'):
+    _textmode = 'U'
+
+
+class TreeWalkWarning(Warning):
+    pass
+
+class path(_base):
+    """ Represents a filesystem path.
+
+    For documentation on individual methods, consult their
+    counterparts in os.path.
+    """
+
+    # --- Special Python methods.
+
+    def __new__(typ, *args):
+        """
+        Creates a new path object concatenating the *args.  *args
+        may only contain Path objects or strings.  If *args is
+        empty, Path(os.curdir) is created.
+        """
+        if not args:
+            return typ(os.curdir)
+        for arg in args:
+            if not isinstance(arg, basestring):
+                raise ValueError("%s() arguments must be Path, str or "
+                                 "unicode" % typ.__name__)
+        if len(args) == 1:
+            return _base.__new__(typ, *args)
+        return typ(os.path.join(*args))
+
+    def __repr__(self):
+        return 'path(%s)' % _base.__repr__(self)
+
+    # Adding a path and a string yields a path.
+    def __add__(self, more):
+        try:
+            resultStr = _base.__add__(self, more)
+        except TypeError:  #Python bug
+            resultStr = NotImplemented
+        if resultStr is NotImplemented:
+            return resultStr
+        return self.__class__(resultStr)
+
+    def __radd__(self, other):
+        if isinstance(other, basestring):
+            return self.__class__(other.__add__(self))
+        else:
+            return NotImplemented
+
+    @classmethod
+    def cwd(cls):
+        """ Return the current working directory as a path object. """
+        return cls(_getcwd())
+    getcwd = cwd
+
+    # The / operator joins paths.
+    def __div__(self, rel):
+        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
+
+        Join two path components, adding a separator character if
+        needed.
+        """
+        return self.__class__(os.path.join(self, rel))
+
+    # Make the / operator work even when true division is enabled.
+    __truediv__ = __div__
+
+    # --- Operations on path strings.
+
+    isabs = os.path.isabs
+    def abspath(self):       return self.__class__(os.path.abspath(self))
+    def normcase(self):      return self.__class__(os.path.normcase(self))
+    def normpath(self):      return self.__class__(os.path.normpath(self))
+    def realpath(self):      return self.__class__(os.path.realpath(self))
+    def expanduser(self):    return self.__class__(os.path.expanduser(self))
+    def expandvars(self):    return self.__class__(os.path.expandvars(self))
+    def dirname(self):       return self.__class__(os.path.dirname(self))
+    basename = os.path.basename
+
+    def expand(self):
+        """ Clean up a filename by calling expandvars(),
+        expanduser(), and normpath() on it.
+
+        This is commonly everything needed to clean up a filename
+        read from a configuration file, for example.
+        """
+        return self.expandvars().expanduser().normpath()
+
+    def _get_namebase(self):
+        base, ext = os.path.splitext(self.name)
+        return base
+
+    def _get_ext(self):
+        f, ext = os.path.splitext(_base(self))
+        return ext
+
+    def _get_drive(self):
+        drive, r = os.path.splitdrive(self)
+        return self.__class__(drive)
+
+    parent = property(
+        dirname, None, None,
+        """ This path's parent directory, as a new path object.
+
+        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
+        """)
+
+    name = property(
+        basename, None, None,
+        """ The name of this file or directory without the full path.
+
+        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
+        """)
+
+    namebase = property(
+        _get_namebase, None, None,
+        """ The same as path.name, but with one file extension stripped off.
+
+        For example, path('/home/guido/python.tar.gz').name     == 'python.tar.gz',
+        but          path('/home/guido/python.tar.gz').namebase == 'python.tar'
+        """)
+
+    ext = property(
+        _get_ext, None, None,
+        """ The file extension, for example '.py'. """)
+
+    drive = property(
+        _get_drive, None, None,
+        """ The drive specifier, for example 'C:'.
+        This is always empty on systems that don't use drive specifiers.
+        """)
+
+    def splitpath(self):
+        """ p.splitpath() -> Return (p.parent, p.name). """
+        parent, child = os.path.split(self)
+        return self.__class__(parent), child
+
+    def splitdrive(self):
+        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
+
+        Split the drive specifier from this path.  If there is
+        no drive specifier, p.drive is empty, so the return value
+        is simply (path(''), p).  This is always the case on Unix.
+        """
+        drive, rel = os.path.splitdrive(self)
+        return self.__class__(drive), rel
+
+    def splitext(self):
+        """ p.splitext() -> Return (p.stripext(), p.ext).
+
+        Split the filename extension from this path and return
+        the two parts.  Either part may be empty.
+
+        The extension is everything from '.' to the end of the
+        last path segment.  This has the property that if
+        (a, b) == p.splitext(), then a + b == p.
+        """
+        filename, ext = os.path.splitext(self)
+        return self.__class__(filename), ext
+
+    def stripext(self):
+        """ p.stripext() -> Remove one file extension from the path.
+
+        For example, path('/home/guido/python.tar.gz').stripext()
+        returns path('/home/guido/python.tar').
+        """
+        return self.splitext()[0]
+
+    if hasattr(os.path, 'splitunc'):
+        def splitunc(self):
+            unc, rest = os.path.splitunc(self)
+            return self.__class__(unc), rest
+
+        def _get_uncshare(self):
+            unc, r = os.path.splitunc(self)
+            return self.__class__(unc)
+
+        uncshare = property(
+            _get_uncshare, None, None,
+            """ The UNC mount point for this path.
+            This is empty for paths on local drives. """)
+
+    def joinpath(self, *args):
+        """ Join two or more path components, adding a separator
+        character (os.sep) if needed.  Returns a new path
+        object.
+        """
+        return self.__class__(os.path.join(self, *args))
+
+    def splitall(self):
+        r""" Return a list of the path components in this path.
+
+        The first item in the list will be a path.  Its value will be
+        either os.curdir, os.pardir, empty, or the root directory of
+        this path (for example, '/' or 'C:\\').  The other items in
+        the list will be strings.
+
+        path.path.joinpath(*result) will yield the original path.
+        """
+        parts = []
+        loc = self
+        while loc != os.curdir and loc != os.pardir:
+            prev = loc
+            loc, child = prev.splitpath()
+            if loc == prev:
+                break
+            parts.append(child)
+        parts.append(loc)
+        parts.reverse()
+        return parts
+
+    def relpath(self):
+        """ Return this path as a relative path,
+        based from the current working directory.
+        """
+        cwd = self.__class__(os.getcwd())
+        return cwd.relpathto(self)
+
+    def relpathto(self, dest):
+        """ Return a relative path from self to dest.
+
+        If there is no relative path from self to dest, for example if
+        they reside on different drives in Windows, then this returns
+        dest.abspath().
+        """
+        origin = self.abspath()
+        dest = self.__class__(dest).abspath()
+
+        orig_list = origin.normcase().splitall()
+        # Don't normcase dest!  We want to preserve the case.
+        dest_list = dest.splitall()
+
+        if orig_list[0] != os.path.normcase(dest_list[0]):
+            # Can't get here from there.
+            return dest
+
+        # Find the location where the two paths start to differ.
+        i = 0
+        for start_seg, dest_seg in zip(orig_list, dest_list):
+            if start_seg != os.path.normcase(dest_seg):
+                break
+            i += 1
+
+        # Now i is the point where the two paths diverge.
+        # Need a certain number of "os.pardir"s to work up
+        # from the origin to the point of divergence.
+        segments = [os.pardir] * (len(orig_list) - i)
+        # Need to add the diverging part of dest_list.
+        segments += dest_list[i:]
+        if len(segments) == 0:
+            # If they happen to be identical, use os.curdir.
+            relpath = os.curdir
+        else:
+            relpath = os.path.join(*segments)
+        return self.__class__(relpath)
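+
+    # A minimal sketch of the two methods above (hypothetical directories):
+    #   path('/home/guido').relpathto('/home/guido/bin/x.py') == path('bin/x.py')
+    #   path('/home/guido/bin').relpath()  # relative to the current directory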
+
+    # --- Listing, searching, walking, and matching
+
+    def listdir(self, pattern=None):
+        """ D.listdir() -> List of items in this directory.
+
+        Use D.files() or D.dirs() instead if you want a listing
+        of just files or just subdirectories.
+
+        The elements of the list are path objects.
+
+        With the optional 'pattern' argument, this only lists
+        items whose names match the given pattern.
+        """
+        names = os.listdir(self)
+        if pattern is not None:
+            names = fnmatch.filter(names, pattern)
+        return [self / child for child in names]
+
+    def dirs(self, pattern=None):
+        """ D.dirs() -> List of this directory's subdirectories.
+
+        The elements of the list are path objects.
+        This does not walk recursively into subdirectories
+        (but see path.walkdirs).
+
+        With the optional 'pattern' argument, this only lists
+        directories whose names match the given pattern.  For
+        example, d.dirs('build-*').
+        """
+        return [p for p in self.listdir(pattern) if p.isdir()]
+
+    def files(self, pattern=None):
+        """ D.files() -> List of the files in this directory.
+
+        The elements of the list are path objects.
+        This does not walk into subdirectories (see path.walkfiles).
+
+        With the optional 'pattern' argument, this only lists files
+        whose names match the given pattern.  For example,
+        d.files('*.pyc').
+        """
+        
+        return [p for p in self.listdir(pattern) if p.isfile()]
+
+    def walk(self, pattern=None, errors='strict'):
+        """ D.walk() -> iterator over files and subdirs, recursively.
+
+        The iterator yields path objects naming each child item of
+        this directory and its descendants.  This requires that
+        D.isdir().
+
+        This performs a depth-first traversal of the directory tree.
+        Each directory is returned just before all its children.
+
+        The errors= keyword argument controls behavior when an
+        error occurs.  The default is 'strict', which causes an
+        exception.  The other allowed values are 'warn', which
+        reports the error via warnings.warn(), and 'ignore'.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in childList:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            try:
+                isdir = child.isdir()
+            except Exception:
+                if errors == 'ignore':
+                    isdir = False
+                elif errors == 'warn':
+                    warnings.warn(
+                        "Unable to access '%s': %s"
+                        % (child, sys.exc_info()[1]),
+                        TreeWalkWarning)
+                    isdir = False
+                else:
+                    raise
+
+            if isdir:
+                for item in child.walk(pattern, errors):
+                    yield item
+
+    def walkdirs(self, pattern=None, errors='strict'):
+        """ D.walkdirs() -> iterator over subdirs, recursively.
+
+        With the optional 'pattern' argument, this yields only
+        directories whose names match the given pattern.  For
+        example, mydir.walkdirs('*test') yields only directories
+        with names ending in 'test'.
+
+        The errors= keyword argument controls behavior when an
+        error occurs.  The default is 'strict', which causes an
+        exception.  The other allowed values are 'warn', which
+        reports the error via warnings.warn(), and 'ignore'.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            dirs = self.dirs()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in dirs:
+            if pattern is None or child.fnmatch(pattern):
+                yield child
+            for subsubdir in child.walkdirs(pattern, errors):
+                yield subsubdir
+
+    def walkfiles(self, pattern=None, errors='strict'):
+        """ D.walkfiles() -> iterator over files in D, recursively.
+
+        The optional argument, pattern, limits the results to files
+        with names that match the pattern.  For example,
+        mydir.walkfiles('*.tmp') yields only files with the .tmp
+        extension.
+        """
+        if errors not in ('strict', 'warn', 'ignore'):
+            raise ValueError("invalid errors parameter")
+
+        try:
+            childList = self.listdir()
+        except Exception:
+            if errors == 'ignore':
+                return
+            elif errors == 'warn':
+                warnings.warn(
+                    "Unable to list directory '%s': %s"
+                    % (self, sys.exc_info()[1]),
+                    TreeWalkWarning)
+                return
+            else:
+                raise
+
+        for child in childList:
+            try:
+                isfile = child.isfile()
+                isdir = not isfile and child.isdir()
+            except Exception:
+                if errors == 'ignore':
+                    continue
+                elif errors == 'warn':
+                    warnings.warn(
+                        "Unable to access '%s': %s"
+                        % (self, sys.exc_info()[1]),
+                        TreeWalkWarning)
+                    continue
+                else:
+                    raise
+
+            if isfile:
+                if pattern is None or child.fnmatch(pattern):
+                    yield child
+            elif isdir:
+                for f in child.walkfiles(pattern, errors):
+                    yield f
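+
+    # A minimal usage sketch (hypothetical tree): remove every *.pyc file
+    # below a directory, using the recursive iterator above.
+    #   d = path('/some/project')
+    #   for f in d.walkfiles('*.pyc'):
+    #       f.remove()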
+
+    def fnmatch(self, pattern):
+        """ Return True if self.name matches the given pattern.
+
+        pattern - A filename pattern with wildcards,
+            for example '*.py'.
+        """
+        return fnmatch.fnmatch(self.name, pattern)
+    match = fnmatch  # SB
+    
+    def matchcase(self, pattern):
+        """ Test whether the path matches pattern, returning true or
+        false; the comparison is always case-sensitive.
+        """
+        return fnmatch.fnmatchcase(self.name, pattern)
+
+    def glob(self, pattern):
+        """ Return a list of path objects that match the pattern.
+
+        pattern - a path relative to this directory, with wildcards.
+
+        For example, path('/users').glob('*/bin/*') returns a list
+        of all the files users have in their bin directories.
+        """
+        cls = self.__class__
+        return [cls(s) for s in glob.glob(_base(self / pattern))]
+
+
+    # --- Reading or writing an entire file at once.
+
+    def open(self, mode='r'):
+        """ Open this file.  Return a file object. """
+        return file(self, mode)
+
+    def bytes(self):
+        """ Open this file, read all bytes, return them as a string. """
+        f = self.open('rb')
+        try:
+            return f.read()
+        finally:
+            f.close()
+
+    def write_bytes(self, bytes, append=False):
+        """ Open this file and write the given bytes to it.
+
+        Default behavior is to overwrite any existing file.
+        Call p.write_bytes(bytes, append=True) to append instead.
+        """
+        if append:
+            mode = 'ab'
+        else:
+            mode = 'wb'
+        f = self.open(mode)
+        try:
+            f.write(bytes)
+        finally:
+            f.close()
+
+    def text(self, encoding=None, errors='strict'):
+        r""" Open this file, read it in, return the content as a string.
+
+        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
+        are automatically translated to '\n'.
+
+        Optional arguments:
+
+        encoding - The Unicode encoding (or character set) of
+            the file.  If present, the content of the file is
+            decoded and returned as a unicode object; otherwise
+            it is returned as an 8-bit str.
+        errors - How to handle Unicode errors; see help(str.decode)
+            for the options.  Default is 'strict'.
+        """
+        if encoding is None:
+            # 8-bit
+            f = self.open(_textmode)
+            try:
+                return f.read()
+            finally:
+                f.close()
+        else:
+            # Unicode
+            f = codecs.open(self, 'r', encoding, errors)
+            # (Note - Can't use 'U' mode here, since codecs.open
+            # doesn't support 'U' mode, even in Python 2.3.)
+            try:
+                t = f.read()
+            finally:
+                f.close()
+            return (t.replace(u'\r\n', u'\n')
+                     .replace(u'\r\x85', u'\n')
+                     .replace(u'\r', u'\n')
+                     .replace(u'\x85', u'\n')
+                     .replace(u'\u2028', u'\n'))
+
+    def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
+        r""" Write the given text to this file.
+
+        The default behavior is to overwrite any existing file;
+        to append instead, use the 'append=True' keyword argument.
+
+        There are two differences between path.write_text() and
+        path.write_bytes(): newline handling and Unicode handling.
+        See below.
+
+        Parameters:
+
+          - text - str/unicode - The text to be written.
+
+          - encoding - str - The Unicode encoding that will be used.
+            This is ignored if 'text' isn't a Unicode string.
+
+          - errors - str - How to handle Unicode encoding errors.
+            Default is 'strict'.  See help(unicode.encode) for the
+            options.  This is ignored if 'text' isn't a Unicode
+            string.
+
+          - linesep - keyword argument - str/unicode - The sequence of
+            characters to be used to mark end-of-line.  The default is
+            os.linesep.  You can also specify None; this means to
+            leave all newlines as they are in 'text'.
+
+          - append - keyword argument - bool - Specifies what to do if
+            the file already exists (True: append to the end of it;
+            False: overwrite it.)  The default is False.
+
+
+        --- Newline handling.
+
+        write_text() converts all standard end-of-line sequences
+        ('\n', '\r', and '\r\n') to your platform's default end-of-line
+        sequence (see os.linesep; on Windows, for example, the
+        end-of-line marker is '\r\n').
+
+        If you don't like your platform's default, you can override it
+        using the 'linesep=' keyword argument.  If you specifically want
+        write_text() to preserve the newlines as-is, use 'linesep=None'.
+
+        This applies to Unicode text the same as to 8-bit text, except
+        there are three additional standard Unicode end-of-line sequences:
+        u'\x85', u'\r\x85', and u'\u2028'.
+
+        (This is slightly different from when you open a file for
+        writing with fopen(filename, "w") in C or file(filename, 'w')
+        in Python.)
+
+
+        --- Unicode
+
+        If 'text' isn't Unicode, then apart from newline handling, the
+        bytes are written verbatim to the file.  The 'encoding' and
+        'errors' arguments are not used and must be omitted.
+
+        If 'text' is Unicode, it is first converted to bytes using the
+        specified 'encoding' (or the default encoding if 'encoding'
+        isn't specified).  The 'errors' argument applies only to this
+        conversion.
+
+        """
+        if isinstance(text, unicode):
+            if linesep is not None:
+                # Convert all standard end-of-line sequences to
+                # ordinary newline characters.
+                text = (text.replace(u'\r\n', u'\n')
+                            .replace(u'\r\x85', u'\n')
+                            .replace(u'\r', u'\n')
+                            .replace(u'\x85', u'\n')
+                            .replace(u'\u2028', u'\n'))
+                text = text.replace(u'\n', linesep)
+            if encoding is None:
+                encoding = sys.getdefaultencoding()
+            bytes = text.encode(encoding, errors)
+        else:
+            # It is an error to specify an encoding if 'text' is
+            # an 8-bit string.
+            assert encoding is None
+
+            if linesep is not None:
+                text = (text.replace('\r\n', '\n')
+                            .replace('\r', '\n'))
+                bytes = text.replace('\n', linesep)
+            else:
+                # Preserve the text as-is when linesep is None.
+                bytes = text
+
+        self.write_bytes(bytes, append)
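+
+    # A minimal usage sketch (hypothetical file): overwrite, then append.
+    #   p = path('notes.txt')
+    #   p.write_text('first line\n')
+    #   p.write_text('second line\n', append=True)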
+
+    def lines(self, encoding=None, errors='strict', retain=True):
+        r""" Open this file, read all lines, return them in a list.
+
+        Optional arguments:
+            encoding - The Unicode encoding (or character set) of
+                the file.  The default is None, meaning the content
+                of the file is read as 8-bit characters and returned
+                as a list of (non-Unicode) str objects.
+            errors - How to handle Unicode errors; see help(str.decode)
+                for the options.  Default is 'strict'
+            retain - If true, retain newline characters; but all newline
+                character combinations ('\r', '\n', '\r\n') are
+                translated to '\n'.  If false, newline characters are
+                stripped off.  Default is True.
+
+        This uses 'U' mode in Python 2.3 and later.
+        """
+        if encoding is None and retain:
+            f = self.open(_textmode)
+            try:
+                return f.readlines()
+            finally:
+                f.close()
+        else:
+            return self.text(encoding, errors).splitlines(retain)
+
+    def write_lines(self, lines, encoding=None, errors='strict',
+                    linesep=os.linesep, append=False):
+        r""" Write the given lines of text to this file.
+
+        By default this overwrites any existing file at this path.
+
+        This puts a platform-specific newline sequence on every line.
+        See 'linesep' below.
+
+        lines - A list of strings.
+
+        encoding - A Unicode encoding to use.  This applies only if
+            'lines' contains any Unicode strings.
+
+        errors - How to handle errors in Unicode encoding.  This
+            also applies only to Unicode strings.
+
+        linesep - The desired line-ending.  This line-ending is
+            applied to every line.  If a line already has any
+            standard line ending ('\r', '\n', '\r\n', u'\x85',
+            u'\r\x85', u'\u2028'), that will be stripped off and
+            this will be used instead.  The default is os.linesep,
+            which is platform-dependent ('\r\n' on Windows, '\n' on
+            Unix, etc.)  Specify None to write the lines as-is,
+            like file.writelines().
+
+        Use the keyword argument append=True to append lines to the
+        file.  The default is to overwrite the file.  Warning:
+        When you use this with Unicode data, if the encoding of the
+        existing data in the file is different from the encoding
+        you specify with the encoding= parameter, the result is
+        mixed-encoding data, which can really confuse someone trying
+        to read the file later.
+        """
+        if append:
+            mode = 'ab'
+        else:
+            mode = 'wb'
+        f = self.open(mode)
+        try:
+            for line in lines:
+                isUnicode = isinstance(line, unicode)
+                if linesep is not None:
+                    # Strip off any existing line-end and add the
+                    # specified linesep string.
+                    if isUnicode:
+                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
+                            line = line[:-2]
+                        elif line[-1:] in (u'\r', u'\n',
+                                           u'\x85', u'\u2028'):
+                            line = line[:-1]
+                    else:
+                        if line[-2:] == '\r\n':
+                            line = line[:-2]
+                        elif line[-1:] in ('\r', '\n'):
+                            line = line[:-1]
+                    line += linesep
+                if isUnicode:
+                    if encoding is None:
+                        encoding = sys.getdefaultencoding()
+                    line = line.encode(encoding, errors)
+                f.write(line)
+        finally:
+            f.close()
+
+    if md5 is None:
+        def read_md5(self):
+            """ Calculate the md5 hash for this file.
+            
+            This reads through the entire file.
+            """
+            raise NotImplementedError('could not import md5')
+    else:
+        def read_md5(self):
+            """ Calculate the md5 hash for this file.
+
+            This reads through the entire file.
+            """
+            f = self.open('rb')
+            try:
+                m = md5()  # hashlib.md5 is a constructor, not the old md5 module
+                while True:
+                    d = f.read(8192)
+                    if not d:
+                        break
+                    m.update(d)
+            finally:
+                f.close()
+            return m.digest()
+
+    # --- Methods for querying the filesystem.
+
+    exists = os.path.exists
+    isdir = os.path.isdir
+    isfile = os.path.isfile
+    islink = os.path.islink
+    ismount = os.path.ismount
+
+    if hasattr(os.path, 'samefile'):
+        samefile = os.path.samefile
+
+    getatime = os.path.getatime
+    atime = property(
+        getatime, None, None,
+        """ Last access time of the file. """)
+
+    getmtime = os.path.getmtime
+    mtime = property(
+        getmtime, None, None,
+        """ Last-modified time of the file. """)
+
+    if hasattr(os.path, 'getctime'):
+        getctime = os.path.getctime
+        ctime = property(
+            getctime, None, None,
+            """ Creation time of the file. """)
+
+    getsize = os.path.getsize
+    size = property(
+        getsize, None, None,
+        """ Size of the file, in bytes. """)
+
+    if hasattr(os, 'access'):
+        def access(self, mode):
+            """ Return true if current user has access to this path.
+
+            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
+            """
+            return os.access(self, mode)
+
+    def stat(self):
+        """ Perform a stat() system call on this path. """
+        return os.stat(self)
+
+    def lstat(self):
+        """ Like path.stat(), but do not follow symbolic links. """
+        return os.lstat(self)
+
+    def get_owner(self):
+        r""" Return the name of the owner of this file or directory.
+
+        This follows symbolic links.
+
+        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
+        On Windows, a group can own a file or directory.
+        """
+        if os.name == 'nt':
+            if win32security is None:
+                raise Exception("path.owner requires win32all to be installed")
+            desc = win32security.GetFileSecurity(
+                self, win32security.OWNER_SECURITY_INFORMATION)
+            sid = desc.GetSecurityDescriptorOwner()
+            account, domain, typecode = win32security.LookupAccountSid(None, sid)
+            return domain + u'\\' + account
+        else:
+            if pwd is None:
+                raise NotImplementedError("path.owner is not implemented on this platform.")
+            st = self.stat()
+            return pwd.getpwuid(st.st_uid).pw_name
+
+    owner = property(
+        get_owner, None, None,
+        """ Name of the owner of this file or directory. """)
+
+    if hasattr(os, 'statvfs'):
+        def statvfs(self):
+            """ Perform a statvfs() system call on this path. """
+            return os.statvfs(self)
+
+    if hasattr(os, 'pathconf'):
+        def pathconf(self, name):
+            return os.pathconf(self, name)
+
+
+    # --- Modifying operations on files and directories
+
+    def utime(self, times):
+        """ Set the access and modified times of this file. """
+        os.utime(self, times)
+
+    def chmod(self, mode):
+        os.chmod(self, mode)
+
+    if hasattr(os, 'chown'):
+        def chown(self, uid, gid):
+            os.chown(self, uid, gid)
+
+    def rename(self, new):
+        os.rename(self, new)
+
+    def renames(self, new):
+        os.renames(self, new)
+
+
+    # --- Create/delete operations on directories
+
+    def mkdir(self, mode=0777):
+        os.mkdir(self, mode)
+
+    def makedirs(self, mode=0777):
+        os.makedirs(self, mode)
+
+    def rmdir(self):
+        os.rmdir(self)
+
+    def removedirs(self):
+        os.removedirs(self)
+
+
+    # --- Modifying operations on files
+
+    def touch(self):
+        """ Set the access/modified times of this file to the current time.
+        Create the file if it does not exist.
+        """
+        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
+        os.close(fd)
+        os.utime(self, None)
+
+    def remove(self):
+        os.remove(self)
+
+    def unlink(self):
+        os.unlink(self)
+
+
+    # --- Links
+
+    if hasattr(os, 'link'):
+        def link(self, newpath):
+            """ Create a hard link at 'newpath', pointing to this file. """
+            os.link(self, newpath)
+
+    if hasattr(os, 'symlink'):
+        def symlink(self, newlink):
+            """ Create a symbolic link at 'newlink', pointing here. """
+            os.symlink(self, newlink)
+
+    if hasattr(os, 'readlink'):
+        def readlink(self):
+            """ Return the path to which this symbolic link points.
+
+            The result may be an absolute or a relative path.
+            """
+            return self.__class__(os.readlink(self))
+
+        def readlinkabs(self):
+            """ Return the path to which this symbolic link points.
+
+            The result is always an absolute path.
+            """
+            p = self.readlink()
+            if p.isabs():
+                return p
+            else:
+                return (self.parent / p).abspath()
+
+
+    # --- High-level functions from shutil
+
+    copyfile = shutil.copyfile
+    copymode = shutil.copymode
+    copystat = shutil.copystat
+    copy = shutil.copy
+    copy2 = shutil.copy2
+    copytree = shutil.copytree
+    if hasattr(shutil, 'move'):
+        move = shutil.move
+    rmtree = shutil.rmtree
+
+
+    # --- Special stuff from os
+
+    if hasattr(os, 'chroot'):
+        def chroot(self):
+            os.chroot(self)
+
+    if hasattr(os, 'startfile'):
+        def startfile(self):
+            os.startfile(self)
+
diff --git a/Tools/PyUtils/python/pshell.py b/Tools/PyUtils/python/pshell.py
new file mode 100644
index 00000000000..7f46a4e0d31
--- /dev/null
+++ b/Tools/PyUtils/python/pshell.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+"""
+    pshell
+    ~~~~~~
+
+    Helpers for dumping a shell session into a file and loading it back from
+    there.  Based on the Python cookbook `recipe 572213`_ by Oren Tirosh.
+
+    Dumping::
+
+        >>> import pshell
+        >>> def foo(a, b):
+        ...     return a + b
+        ...
+        >>> pshell.dump('shell.dump')
+
+    And loading::
+
+        >>> import pshell
+        >>> pshell.load()
+        >>> foo(1, 2)
+        3
+
+    .. _recipe 572213: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/572213
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+import __builtin__
+import __main__ as _main_module
+import sys
+import marshal
+import ctypes
+from pickle import Pickler, Unpickler
+from types import CodeType, FunctionType, ClassType, MethodType, \
+     ModuleType, GetSetDescriptorType, BuiltinMethodType
+
+
+CellType = type((lambda x: lambda y: x)(0).func_closure[0])
+WrapperDescriptorType = type(type.__repr__)
+
+
+def dump(filename='/tmp/console.sess', main_module=_main_module):
+    """Dump the main module into a session file."""
+    f = file(filename, 'wb')
+    try:
+        pickler = ShellPickler(f, 2)
+        pickler._main_module = main_module
+        pickler.dump(main_module)
+    finally:
+        f.close()
+
+
+def load(filename='/tmp/console.sess', main_module=_main_module):
+    """Update the main module with the state from the session file."""
+    f = file(filename, 'rb')
+    try:
+        unpickler = ShellUnpickler(f)
+        unpickler._main_module = main_module
+        module = unpickler.load()
+        main_module.__dict__.update(module.__dict__)
+    finally:
+        f.close()
+
+
+class ShellPickler(Pickler):
+    dispatch = Pickler.dispatch.copy()
+    _main_module = None
+
+
+class ShellUnpickler(Unpickler):
+    _main_module = None
+
+    def find_class(self, module, name):
+        if (module, name) == ('__builtin__', '__main__'):
+            return self._main_module.__dict__
+        return Unpickler.find_class(self, module, name)
+
+
+def register(t):
+    def proxy(func):
+        ShellPickler.dispatch[t] = func
+        return func
+    return proxy
+
+
+def _create_typemap():
+    import types
+    for key, value in types.__dict__.iteritems():
+        if getattr(value, '__module__', None) == '__builtin__' and \
+           type(value) is type:
+            yield value, key
+_typemap = dict(_create_typemap(), **{
+    CellType:                   'CellType',
+    WrapperDescriptorType:      'WrapperDescriptorType'
+})
+_reverse_typemap = dict((v, k) for k, v in _typemap.iteritems())
+
+
+def _unmarshal(string):
+    return marshal.loads(string)
+
+
+def _load_type(name):
+    return _reverse_typemap[name]
+
+
+def _create_type(type, *args):
+    return type(*args)
+
+
+def _create_cell(obj):
+    d = {}
+    p = ctypes.pythonapi.PyCell_New(ctypes.py_object(obj))
+    ctypes.pythonapi.PyDict_SetItemString(ctypes.py_object(d), 'x', p)
+    return d['x']
+
+
+def _import_module(import_name):
+    if '.' in import_name:
+        items = import_name.split('.')
+        module = '.'.join(items[:-1])
+        obj = items[-1]
+    else:
+        return __import__(import_name)
+    return getattr(__import__(module, None, None, [obj]), obj)
+
+
+def _locate_function(obj):
+    if obj.__module__ == '__main__':
+        return False
+    try:
+        found = _import_module(obj.__module__ + '.' + obj.__name__)
+    except:
+        return False
+    return found is obj
+
+
+@register(CodeType)
+def save_code(pickler, obj):
+    pickler.save_reduce(_unmarshal, (marshal.dumps(obj),), obj=obj)
+
+
+@register(FunctionType)
+def save_function(pickler, obj):
+    if not _locate_function(obj):
+        pickler.save_reduce(FunctionType, (obj.func_code, obj.func_globals,
+                                           obj.func_name, obj.func_defaults,
+                                           obj.func_closure), obj=obj)
+    else:
+        Pickler.save_global(pickler, obj)
+
+
+@register(dict)
+def save_module_dict(pickler, obj):
+    if obj is pickler._main_module.__dict__:
+        pickler.write('c__builtin__\n__main__\n', obj=obj)
+    else:
+        Pickler.save_dict(pickler, obj)
+
+
+@register(ClassType)
+def save_classobj(pickler, obj):
+    if obj.__module__ == '__main__':
+        pickler.save_reduce(ClassType, (obj.__name__, obj.__bases__,
+                                        obj.__dict__), obj=obj)
+    else:
+        Pickler.save_global(pickler, obj)
+
+
+@register(MethodType)
+def save_instancemethod(pickler, obj):
+    pickler.save_reduce(MethodType, (obj.im_func, obj.im_self,
+                                     obj.im_class), obj=obj)
+
+
+@register(BuiltinMethodType)
+def save_builtin_method(pickler, obj):
+    if obj.__self__ is not None:
+        pickler.save_reduce(getattr, (obj.__self__, obj.__name__), obj=obj)
+    else:
+        Pickler.save_global(pickler, obj)
+
+
+@register(GetSetDescriptorType)
+@register(WrapperDescriptorType)
+def save_wrapper_descriptor(pickler, obj):
+    pickler.save_reduce(getattr, (obj.__objclass__, obj.__name__), obj=obj)
+
+
+@register(CellType)
+def save_cell(pickler, obj):
+    pickler.save_reduce(_create_cell, (obj.cell_contents,), obj=obj)
+
+
+@register(ModuleType)
+def save_module(pickler, obj):
+    if obj is pickler._main_module:
+        pickler.save_reduce(__import__, (obj.__name__,), obj=obj,
+                            state=obj.__dict__.copy())
+    else:
+        pickler.save_reduce(_import_module, (obj.__name__,), obj=obj)
+
+
+@register(type)
+def save_type(pickler, obj):
+    if obj in _typemap:
+        pickler.save_reduce(_load_type, (_typemap[obj],), obj=obj)
+    elif obj.__module__ == '__main__':
+        pickler.save_reduce(_create_type, (type(obj), obj.__name__,
+                                           obj.__bases__, obj.__dict__),
+                                           obj=obj)
+    else:
+        Pickler.save_global(pickler, obj)
diff --git a/Tools/PyUtils/python/reimport.py b/Tools/PyUtils/python/reimport.py
new file mode 100644
index 00000000000..19ed1c87538
--- /dev/null
+++ b/Tools/PyUtils/python/reimport.py
@@ -0,0 +1,559 @@
+# MIT Licensed
+# Copyright (c) 2009 Peter Shinners <pete@shinners.org> 
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+This module intends to be a full featured replacement for Python's reload
+function. It is targeted towards making a reload that works for Python
+plugins and extensions used by longer running applications.
+
+Reimport currently supports Python 2.4 through 2.6.
+
+By its very nature, this is not a completely solvable problem. The goal of
+this module is to make the most common sorts of updates work well. It also
+allows individual modules and packages to assist in the process. A more
+detailed description of what happens is at
+http://code.google.com/p/reimport .
+"""
+
+
+__all__ = ["reimport", "modified"]
+
+
+import sys
+import os
+import gc
+import inspect
+import weakref
+import traceback
+import time
+
+
+
+__version__ = "1.0"
+__author__ = "Peter Shinners <pete@shinners.org>"
+__license__ = "MIT"
+__url__ = "http://code.google.com/p/reimport"
+
+
+
+_previous_scan_time = time.time() - 1.0
+_module_timestamps = {}
+
+
+# find the 'instance' old style type
+class _OldClass: pass
+_InstanceType = type(_OldClass())
+del _OldClass
+
+
+
+def reimport(*modules):
+    """Reimport python modules. Multiple modules can be passed either by
+        name or by reference. Only pure python modules can be reimported.
+        
+        For advanced control, global variables can be placed in modules
+        that allows finer control of the reimport process.
+        
+        If a package module has a true value for "__package_reimport__"
+        then that entire package will be reimported when any of its child
+        packages or modules are reimported.
+        
+        If a package module defines __reimported__ it must be a callable
+        function that accepts one argument and returns a bool. The argument
+        is the reference to the old version of that module before any
+        cleanup has happened. The function should normally return True to
+        allow the standard reimport cleanup. If the function returns false
+        then cleanup will be disabled for only that module. Any exceptions
+        raised during the callback will be handled by traceback.print_exc,
+        similar to what happens with tracebacks in the __del__ method.
+        """
+    __internal_swaprefs_ignore__ = "reimport"
+    reloadSet = set()
+
+    if not modules:
+        return
+
+    # Get names of all modules being reloaded
+    for module in modules:
+        name, target = _find_exact_target(module)
+        if not target:
+            raise ValueError("Module %r not found" % module)
+        if not _is_code_module(target):
+            raise ValueError("Cannot reimport extension, %r" % name)
+
+        reloadSet.update(_find_reloading_modules(name))
+
+    # Sort module names 
+    reloadNames = _package_depth_sort(reloadSet, False)
+
+    # Check for SyntaxErrors ahead of time. This won't catch all
+    # possible SyntaxErrors or any other ImportErrors. But these
+    # should be the most common problems, and now is the cleanest
+    # time to abort.
+    # I know this gets compiled again anyways. It could be
+    # avoided with py_compile, but I will not be the creator
+    # of messy .pyc files!
+    for name in reloadNames:
+        filename = getattr(sys.modules[name], "__file__", None)
+        if not filename:
+            continue
+        pyname = os.path.splitext(filename)[0] + ".py"
+        try:
+            data = open(pyname, "rU").read() + "\n"
+        except (IOError, OSError):
+            continue
+        
+        compile(data, pyname, "exec", 0, False)  # Let this raise exceptions
+
+    # Move modules out of sys
+    oldModules = {}
+    for name in reloadNames:
+        oldModules[name] = sys.modules.pop(name)
+    ignores = (id(oldModules),)
+    prevNames = set(sys.modules)
+
+    # Reimport modules, trying to rollback on exceptions
+    try:
+        for name in reloadNames:
+            if name not in sys.modules:
+                __import__(name)
+
+    except StandardError:
+        # Try to dissolve any newly import modules and revive the old ones
+        newNames = set(sys.modules) - prevNames
+        newNames = _package_depth_sort(newNames, True)
+        for name in newNames:
+            _unimport_module(sys.modules[name], ignores)
+            assert name not in sys.modules
+
+        sys.modules.update(oldModules)
+        raise
+
+    newNames = set(sys.modules) - prevNames
+    newNames = _package_depth_sort(newNames, True)
+
+    # Update timestamps for loaded time
+    now = time.time() - 1.0
+    for name in newNames:
+        _module_timestamps[name] = (now, True)
+
+    # Rejigger the universe
+    for name in newNames:
+        old = oldModules.get(name)
+        if not old:
+            continue
+        new = sys.modules[name]
+        rejigger = True
+        reimported = getattr(new, "__reimported__", None)
+        if reimported:
+            try:
+                rejigger = reimported(old)
+            except StandardError:
+                # What else can we do? the callbacks must go on
+                # Note, this is same as __del__ behaviour. /shrug
+                traceback.print_exc()
+
+        if rejigger:
+            _rejigger_module(old, new, ignores)
+        else:
+            _unimport_module(new, ignores)
+
+
+
+def modified(path=None):
+    """Find loaded modules that have changed on disk under the given path.
+        If no path is given then all modules are searched.
+        """
+    global _previous_scan_time
+    modules = []
+    
+    if path:
+        path = os.path.normpath(path) + os.sep
+        
+    defaultTime = (_previous_scan_time, False)
+    pycExt = __debug__ and ".pyc" or ".pyo"
+    
+    for name, module in sys.modules.items():
+        filename = _is_code_module(module)
+        if not filename:
+            continue
+
+        filename = os.path.normpath(filename)
+        prevTime, prevScan = _module_timestamps.setdefault(name, defaultTime)
+        if path and not filename.startswith(path):
+            continue
+
+        # Get timestamp of .pyc if this is first time checking this module
+        if not prevScan:
+            pycName = os.path.splitext(filename)[0] + pycExt
+            if pycName != filename:
+                try:
+                    prevTime = os.path.getmtime(pycName)
+                except OSError:
+                    pass
+            _module_timestamps[name] = (prevTime, True)
+
+        # Get timestamp of source file
+        try:
+            diskTime = os.path.getmtime(filename)
+        except OSError:
+            diskTime = None
+                
+        if diskTime is not None and prevTime < diskTime:
+            modules.append(name)
+
+    _previous_scan_time = time.time()
+    return modules
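+
+# A minimal usage sketch (assuming a long-running process polling for edits):
+#   for name in modified():  # loaded modules whose source changed on disk
+#       reimport(name)       # swap the new code in for the old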
+
+
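+# A minimal usage sketch (an assumption, not part of this module: the import
+# name and the reimport() entry point below are only illustrative -- adapt
+# them to however this module is actually exposed):
+#
+#   import reimport
+#   for name in reimport.modified():   # modules whose source changed on disk
+#       reimport.reimport(name)        # re-execute and rejigger them in place
+#
+# A module may define __reimported__(old_module) and return a false value to
+# veto the in-place rejiggering of its old objects.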
+
+def _is_code_module(module):
+    """Determine if a module comes from python code"""
+    # getsourcefile will not return "bare" pyc modules. Can we reload those?
+    try:
+        return inspect.getsourcefile(module) or ""
+    except TypeError:
+        return ""
+
+
+
+def _find_exact_target(module):
+    """Given a module name or object, find the
+            base module where reimport will happen."""
+    # Given a name or a module, find both the name and the module
+    actualModule = sys.modules.get(module)
+    if actualModule is not None:
+        name = module
+    else:
+        for name, mod in sys.modules.iteritems():
+            if mod is module:
+                actualModule = module
+                break
+        else:
+            return "", None
+
+    # Find highest level parent package that has package_reimport magic
+    parentName = name
+    while True:
+        splitName = parentName.rsplit(".", 1)
+        if len(splitName) <= 1:
+            return name, actualModule
+        parentName = splitName[0]
+        
+        parentModule = sys.modules.get(parentName)
+        if getattr(parentModule, "__package_reimport__", None):
+            name = parentName
+            actualModule = parentModule
+
+
+
+def _find_reloading_modules(name):
+    """Find all modules that will be reloaded from given name"""
+    modules = [name]
+    childNames = name + "."
+    for name in sys.modules.keys():
+        if name.startswith(childNames) and _is_code_module(sys.modules[name]):
+            modules.append(name)
+    return modules
+
+
+
+def _package_depth_sort(names, reverse):
+    """Sort a list of module names by their package depth"""
+    def packageDepth(name):
+        return name.count(".")
+    return sorted(names, key=packageDepth, reverse=reverse)
+
+
+
+# To rejigger is to copy internal values from new to old
+# and then to swap external references from old to new
+
+
+def _rejigger_module(old, new, ignores):
+    """Mighty morphin power modules"""
+    __internal_swaprefs_ignore__ = "rejigger_module"
+    oldVars = vars(old)
+    newVars = vars(new)
+    ignores += (id(oldVars),)
+    old.__doc__ = new.__doc__
+
+    # Get filename used by python code
+    filename = new.__file__
+    fileext = os.path.splitext(filename)[1]
+    if fileext in (".pyo", ".pyc", ".pyw"):
+        filename = filename[:-1]
+
+    for name, value in newVars.iteritems():
+        try: objfile = inspect.getsourcefile(value)
+        except TypeError: objfile = ""
+        
+        if name in oldVars:
+            oldValue = oldVars[name]
+            if oldValue is value:
+                continue
+
+            if objfile == filename:
+                if inspect.isclass(value):
+                    _rejigger_class(oldValue, value, ignores)
+                    
+                elif inspect.isfunction(value):
+                    _rejigger_func(oldValue, value, ignores)
+        
+        setattr(old, name, value)
+
+    for name in oldVars.keys():
+        if name not in newVars:
+            value = getattr(old, name)
+            delattr(old, name)
+            _remove_refs(value, ignores)
+    
+    _swap_refs(old, new, ignores)
+
+
+
+def _rejigger_class(old, new, ignores):
+    """Mighty morphin power classes"""
+    __internal_swaprefs_ignore__ = "rejigger_class"    
+    oldVars = vars(old)
+    newVars = vars(new)
+    ignores += (id(oldVars),)    
+
+    for name, value in newVars.iteritems():
+        if name in ("__dict__", "__doc__", "__weakref__"):
+            continue
+
+        if name in oldVars:
+            oldValue = oldVars[name]
+            if oldValue is value:
+                continue
+
+            if inspect.isclass(value) and value.__module__ == new.__module__:
+                _rejigger_class(oldValue, value, ignores)
+            
+            elif inspect.isfunction(value):
+                _rejigger_func(oldValue, value, ignores)
+
+        setattr(old, name, value)
+    
+    for name in oldVars.keys():
+        if name not in newVars:
+            value = getattr(old, name)
+            delattr(old, name)
+            _remove_refs(value, ignores)
+
+    _swap_refs(old, new, ignores)
+
+
+
+def _rejigger_func(old, new, ignores):
+    """Mighty morphin power functions"""
+    __internal_swaprefs_ignore__ = "rejigger_func"    
+    old.func_code = new.func_code
+    old.func_doc = new.func_doc
+    old.func_defaults = new.func_defaults
+    old.func_dict = new.func_dict
+    _swap_refs(old, new, ignores)
+
+
+
+def _unimport_module(old, ignores):
+    """Remove traces of a module"""
+    __internal_swaprefs_ignore__ = "unimport_module"
+    oldValues = vars(old).values()
+    ignores += (id(oldValues),)    
+
+    # Get filename used by python code
+    filename = old.__file__
+    fileext = os.path.splitext(filename)[1]
+    if fileext in (".pyo", ".pyc", ".pyw"):
+        filename = filename[:-1]
+
+    for value in oldValues:
+        try: objfile = inspect.getsourcefile(value)
+        except TypeError: objfile = ""
+        
+        if objfile == filename:
+            if inspect.isclass(value):
+                _unimport_class(value, ignores)
+                
+            elif inspect.isfunction(value):
+                _remove_refs(value, ignores)
+
+    _remove_refs(old, ignores)
+
+
+
+def _unimport_class(old, ignores):
+    """Remove traces of a class"""
+    __internal_swaprefs_ignore__ = "unimport_class"    
+    oldItems = vars(old).items()
+    ignores += (id(oldItems),)    
+
+    for name, value in oldItems:
+        if name in ("__dict__", "__doc__", "__weakref__"):
+            continue
+
+        if inspect.isclass(value) and value.__module__ == old.__module__:
+            _unimport_class(value, ignores)
+            
+        elif inspect.isfunction(value):
+            _remove_refs(value, ignores)
+
+    _remove_refs(old, ignores)
+
+
+
+
+
+_recursive_tuple_swap = set()
+
+
+
+def _swap_refs(old, new, ignores):
+    """Swap references from one object to another"""
+    __internal_swaprefs_ignore__ = "swap_refs"    
+    # Swap weak references
+    refs = weakref.getweakrefs(old)
+    if refs:
+        try:
+            newRef = weakref.ref(new)
+        except ValueError:
+            pass
+        else:
+            for oldRef in refs:
+                _swap_refs(oldRef, newRef, ignores + (id(refs),))
+    del refs
+
+    # Swap through garbage collector
+    referrers = gc.get_referrers(old)
+    for container in referrers:
+        if id(container) in ignores:
+            continue
+        containerType = type(container)
+        
+        if containerType is list:
+            while True:
+                try:
+                    index = container.index(old)
+                except ValueError:
+                    break
+                container[index] = new
+        
+        elif containerType is tuple:
+            # protect from recursive tuples
+            orig = container
+            if id(orig) in _recursive_tuple_swap:
+                continue
+            _recursive_tuple_swap.add(id(orig))
+            try:
+                container = list(container)
+                while True:
+                    try:
+                        index = container.index(old)
+                    except ValueError:
+                        break
+                    container[index] = new
+                container = tuple(container)
+                _swap_refs(orig, container, ignores + (id(referrers),))
+            finally:
+                _recursive_tuple_swap.remove(id(orig))
+        
+        elif containerType is dict:
+            if "__internal_swaprefs_ignore__" not in container:
+                try:
+                    if old in container:
+                        container[new] = container.pop(old)
+                except TypeError:  # Unhashable old value
+                    pass
+                for k,v in container.iteritems():
+                    if v is old:
+                        container[k] = new
+
+        elif containerType is set:
+            container.remove(old)
+            container.add(new)
+            
+        elif containerType == type:
+            if old in container.__bases__:
+                bases = list(container.__bases__)
+                bases[bases.index(old)] = new
+                container.__bases__ = tuple(bases)
+        
+        elif type(container) is old:
+            container.__class__ = new
+        
+        elif containerType is _InstanceType:
+            if container.__class__ is old:
+                container.__class__ = new
+
+       
+
+def _remove_refs(old, ignores):
+    """Remove references to a discontinued object"""
+    __internal_swaprefs_ignore__ = "remove_refs"
+    
+    # Ignore builtin immutables that keep no other references
+    _isinst = isinstance
+    if (old is None or _isinst(old, int) or _isinst(old, basestring)
+                or _isinst(old, float) or _isinst(old, complex)):
+        return
+    
+    # Remove through garbage collector
+    for container in gc.get_referrers(old):
+        if id(container) in ignores:
+            continue
+        containerType = type(container)
+
+        if containerType == list:
+            while True:
+                try:
+                    container.remove(old)
+                except ValueError:
+                    break
+        
+        elif containerType == tuple:
+            orig = container
+            container = list(container)
+            while True:
+                try:
+                    container.remove(old)
+                except ValueError:
+                    break
+            container = tuple(container)
+            _swap_refs(orig, container, ignores)
+        
+        elif containerType == dict:
+            if "__internal_swaprefs_ignore__" not in container:
+                try:
+                    if old in container:
+                        container.pop(old)
+                except TypeError:  # Unhashable old value
+                    pass
+                for k,v in container.items():
+                    if v is old:
+                        del container[k]
+
+        elif containerType == set:
+            container.remove(old)
diff --git a/Tools/PyUtils/python/rfio.py b/Tools/PyUtils/python/rfio.py
new file mode 100644
index 00000000000..92682a5076b
--- /dev/null
+++ b/Tools/PyUtils/python/rfio.py
@@ -0,0 +1,233 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file: PyUtils/python/rfio.py
+# @purpose: simple ctypes-based python wrapper around libshift (from CASTOR2)
+# @author: Sebastien Binet <binet@cern.ch>
+# @date:   October 2008
+
+__doc__ = "simple ctypes-based python wrapper around librfio (from CASTOR2)"
+__version__ = "$Revision$"
+__author__  = "Sebastien Binet <binet@cern.ch>"
+
+import ctypes, os
+from . import _rfio as c_api
+
+_lib = c_api._libraries['libshift.so']
+
+STRING = ctypes.c_char_p
+
+### -- exports ----------------------------------------------------------------
+__all__ = [
+    'c_api', # the raw ctypes-wrapped library
+    'access',
+    'stat',
+    'lstat',
+    'open',
+    ]
+
+
+### -- helper methods ---------------------------------------------------------
+def _raise_ioerror():
+    global c_api
+    raise IOError(c_api.rfio_serrno(),c_api.rfio_serror())
+
+### -- data -------------------------------------------------------------------
+DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes
+
+### -- classes ----------------------------------------------------------------
+def __str__(self):
+    return str(tuple([self.tv_sec,self.tv_nsec]))
+c_api.timespec.__str__ = __str__
+c_api.timespec.__repr__= __str__
+del __str__
+
+def __str__(self):
+    return str(tuple([
+        self.st_mode,  # protection bits
+        self.st_ino,   # inode number
+        self.st_dev,   # device
+        self.st_nlink, # number of hard links
+        self.st_uid,   # user ID of owner
+        self.st_gid,   # group ID of owner
+        self.st_size,  # size of file, in bytes
+        self.st_atim,  # time of most recent access
+        self.st_mtim,  # time of most recent content modification
+        self.st_ctim,  # time of most recent metadata change
+        ]))
+c_api.stat.__str__ = __str__
+c_api.stat.__repr__= __str__
+del __str__
+
+##
+class File(object):
+    """file-like object for files on CASTOR"""
+
+    def __init__ (self, fname, mode='r'):
+        super (File, self).__init__ ()
+        self.name = fname
+        self.mode = mode
+        if not mode in ('r','w','a'):
+            raise ValueError, "mode should be 'r', 'w' or 'a'"
+        global c_api
+        self._f = c_api.rfio_fopen (fname, mode)
+        if c_api.rfio_serrno():
+            _raise_ioerror()
+        self.fd = c_api.rfio_fileno (self._f)
+
+    def close (self):
+        fd = self.fd
+        if fd is not None:
+            self.fd = None
+            global c_api
+            if c_api.rfio_close (fd):
+                _raise_ioerror()
+        return
+
+    def __del__ (self):
+        try:
+            self.close()
+        except:
+            pass
+        
+    def fileno (self):
+        if self.fd is not None:
+            return self.fd
+        raise ValueError ('I/O operation on closed file')
+
+    def flush (self):
+        global c_api
+        if c_api.rfio_fflush (self._f):
+            _raise_ioerror()
+        return
+    
+    def next (self):
+        line = self.readline()
+        if not line:
+            raise StopIteration
+        return line
+
+    def tell (self):
+        bytes = c_api.rfio_ftell (self._f)
+        if bytes < 0:
+            _raise_ioerror()
+        return bytes
+
+    def read (self, n=-1):
+        if n < 0:
+            return self.readall()
+        buf = c_api.STRING('\0'*n)
+        rc = c_api.rfio_fread (buf, 1, n, self._f)
+        if rc < 0:
+            _raise_ioerror()
+        return str(buf.value)
+
+    def readall (self):
+        res = str()
+        while True:
+            data = self.read (DEFAULT_BUFFER_SIZE)
+            if not data:
+                break
+            res += data
+        return res
+
+    def readline (self, limit = -1):
+        r"""Read and return a line from the stream.
+
+        If limit is specified, at most limit bytes will be read.
+        """
+        if limit is None:
+            limit = -1
+        if not isinstance (limit, (int, long)):
+            raise TypeError ("limit must be an integer")
+        res = str()
+        while limit < 0 or len(res) < limit:
+            data = self.read (DEFAULT_BUFFER_SIZE)
+            if not data:
+                break
+            res += data
+            if res.endswith('\n'):
+                break
+        return res
+
+    def readinto (self, b):
+        nb = c_api.rfio_fread (b, 1, len(b), self._f)
+        if nb < 0:
+            _raise_ioerror()
+        return nb
+    
+    def __iter__ (self):
+        return self
+
+    def readlines (self, hint=None):
+        """Return a list of lines from the stream.
+
+        hint can be specified to control the number of lines read: no more
+        lines will be read if the total size (in bytes/characters) of all
+        lines so far exceeds hint.
+        """
+        if hint is None:
+            hint = -1
+        if not isinstance (hint, (int, long)):
+            raise TypeError ("hint must be an integer")
+        if hint <= 0:
+            return list(self)
+        n = 0
+        lines = []; _append = lines.append
+        for line in self:
+            _append (line)
+            n += len(line)
+            if n >= hint:
+                break
+        return lines
+
+    def seek (self, offset, whence=0):
+        rc = c_api.rfio_fseek (self._f, offset, whence)
+        if rc < 0:
+            _raise_ioerror()
+        return
+
+    def write (self, b):
+        # FIXME: use buffer protocol...
+        buf = c_api.STRING(b)
+        nb = c_api.rfio_fwrite (buf, 1, len(buf.value), self._f)
+        if nb < 0:
+            _raise_ioerror()
+        return nb
+
+    def writelines (self, lines):
+        for line in lines:
+            self.write (line)
+
+    ### Context manager ###
+    def __enter__ (self):
+        """Context management protocol. Returns self."""
+        return self
+
+    def __exit__ (self, *args):
+        self.close()
+        
+    pass # class File
+
+### -- module methods ---------------------------------------------------------
+def open (fname, mode='r'):
+    return File (fname, mode)
+
+def stat (fname):
+    st = c_api.stat()
+    if c_api.rfio_stat (fname, st):
+        _raise_ioerror()
+    return st
+
+def lstat (fname):
+    st = c_api.stat()
+    if c_api.rfio_lstat (fname, st):
+        _raise_ioerror()
+    return st
+
+def access (fname, mode):
+    """access(path, mode) -> 1 if granted, 0 otherwise"""
+    rc = c_api.rfio_access (fname, mode)
+    if rc < 0:
+        return 0
+    return 1
+
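+# A minimal usage sketch (hypothetical file paths; assumes libshift.so and the
+# generated PyUtils._rfio ctypes module are available at runtime):
+#
+#   import PyUtils.rfio as rfio
+#   st = rfio.stat('/castor/cern.ch/user/s/somebody/data.pool.root')
+#   with rfio.open('/castor/cern.ch/user/s/somebody/data.pool.root') as f:
+#       first_kb = f.read(1024)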
diff --git a/Tools/PyUtils/python/scripts/__init__.py b/Tools/PyUtils/python/scripts/__init__.py
new file mode 100644
index 00000000000..fff0beffcf7
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/__init__.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# hook for PyUtils.scripts package
+
+# FIXME: waiting for a proper declarative file
+import PyUtils.acmdlib as acmdlib
+acmdlib.register('chk-file', 'PyUtils.scripts.check_file:main')
+acmdlib.register('diff-pool', 'PyUtils.scripts.diff_pool_files:main')
+acmdlib.register('diff-root', 'PyUtils.scripts.diff_root_files:main')
+acmdlib.register('dump-root', 'PyUtils.scripts.dump_root_file:main')
+acmdlib.register('chk-sg', 'PyUtils.scripts.check_sg:main')
+acmdlib.register('ath-dump', 'PyUtils.scripts.ath_dump:main')
+acmdlib.register('chk-rflx', 'PyUtils.scripts.check_reflex:main')
+acmdlib.register('gen-klass', 'PyUtils.scripts.gen_klass:main')
+#acmdlib.register('tc.submit', 'PyUtils.AmiLib:tc_submit')
+#acmdlib.register('tc.pkg-tree', 'PyUtils.AmiLib:tc_pkg_tree')
+#acmdlib.register('ami-dset', 'PyUtils.AmiLib:ami_dset')
+
+acmdlib.register('tc.find-pkg', 'PyUtils.scripts.tc_find_pkg:main')
+acmdlib.register('tc.find-tag', 'PyUtils.scripts.tc_find_tag:main')
+acmdlib.register('tc.submit-tag', 'PyUtils.scripts.tc_submit_tag:main')
+acmdlib.register('tc.show-clients', 'PyUtils.scripts.tc_show_clients:main')
+
+acmdlib.register('get-tag-diff', 'PyUtils.scripts.get_tag_diff:main')
+
+acmdlib.register('merge-files', 'PyUtils.scripts.merge_files:main')
+acmdlib.register('filter-files', 'PyUtils.scripts.filter_files:main')
+
+acmdlib.register('cmt.new-pkg', 'PyUtils.scripts.cmt_newpkg:main')
+##
+
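+# Each name registered above becomes a sub-command of the 'acmd' driver,
+# e.g. (file names below are only illustrative):
+#   $ acmd chk-file aod.pool.root
+#   $ acmd diff-root ref.root chk.root --mode summary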
diff --git a/Tools/PyUtils/python/scripts/ath_dump.py b/Tools/PyUtils/python/scripts/ath_dump.py
new file mode 100644
index 00000000000..94f85e99e6a
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/ath_dump.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.ath_dump
+# @purpose entry point for ath-dump command, the dump-athfile cousin
+# @author Sebastien Binet
+# @date January 2010
+
+__version__ = "$Revision: 279982 $"
+__author__ = "Sebastien Binet"
+__doc__ = "entry point for ath-dump command, the dump-athfile cousin"
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='ath-dump')
+@acmdlib.argument('files', nargs='+',
+                  help='path to POOL or BS file(s) to analyse')
+@acmdlib.argument('-o', '--output',
+                  default='athfile-infos.ascii',
+                  help="""Name of the output file which will contain the
+                  information gathered during AthFile processing.
+                  This information will be stored in a python-shelve file.
+                  """)
+@acmdlib.argument('--evtmax',
+                  default=1,
+                  type=int,
+                  help="""Maximum number of events to process in each file""")
+def main(args):
+    """simple command-line utility wrapping PyUtils.AthFile.fopen
+    """
+    exitcode = 0
+    fnames = args.files
+    if isinstance(fnames, basestring):
+        fnames = [fnames]
+
+    import sys
+    import os
+    import os.path as osp
+    for i,f in enumerate(fnames):
+        fnames[i] = osp.expandvars(osp.expanduser(f))
+
+    import PyUtils.AthFile as af
+    msg = af.msg
+    for fname in fnames:
+        try:
+            f = af.fopen(fname, evtmax=args.evtmax)
+            msg.info(':'*80)
+            msg.info('::::: summary :::::')
+            fmt = ' - %-15s: %s'
+            print fmt % ('file md5',       f.infos['file_md5sum'])
+            print fmt % ('file name',      f.infos['file_name'])
+            print fmt % ('file type',      f.infos['file_type'])
+            print fmt % ('file guid',      f.infos['file_guid'])
+            print fmt % ('nentries',       f.infos['nentries'])
+            print fmt % ('run number',     f.infos['run_number'])
+            print fmt % ('run type',       f.infos['run_type'])
+            print fmt % ('evt number',     f.infos['evt_number'])
+            print fmt % ('evt type',       f.infos['evt_type'])
+            print fmt % ('lumi block',     f.infos['lumi_block'])
+            print fmt % ('beam energy',    f.infos['beam_energy'])
+            print fmt % ('beam type',      f.infos['beam_type'])
+            print fmt % ('stream tags',    f.infos['stream_tags'])
+            print fmt % ('stream names',   f.infos['stream_names'])
+            print fmt % ('geometry',       f.infos['geometry'])
+            print fmt % ('conditions tag', f.infos['conditions_tag'])
+            _metadata = f.infos['metadata']
+            _metadata = _metadata.keys() if isinstance(_metadata,dict) else None
+            print fmt % ('meta data',      _metadata)
+
+            msg.info(':'*80)
+        except Exception, e:
+            msg.error("Caught exception [%s] !!", str(e.__class__))
+            msg.error("What:\n%s\n%s\n%s",e,
+                      sys.exc_info()[0],
+                      sys.exc_info()[1])
+            exitcode = 1
+            pass
+
+        except :
+            msg.error("Caught something !! (don't know what)")
+            msg.error("\n%s\n%s",sys.exc_info()[0], sys.exc_info()[1])
+            exitcode = 10
+            pass
+        if len(fnames) > 1:
+            print ""
+        pass # loop over fileNames
+    
+    if args.output:
+        oname = args.output
+        msg.info("saving report into [%s]..." % oname)
+        if osp.exists(oname):
+            os.rename(oname, oname+'.bak')
+        af.server.save_cache(oname)
+
+    msg.info("Bye.")
+    return exitcode
+
+
diff --git a/Tools/PyUtils/python/scripts/check_file.py b/Tools/PyUtils/python/scripts/check_file.py
new file mode 100644
index 00000000000..dd856c223c4
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/check_file.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.check_file
+# @purpose read a POOL file and dump its content.
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 276362 $"
+__doc__ = "read a POOL file and dump its content."
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='chk-file')
+@acmdlib.argument('files', nargs='+',
+                  help='path to the POOL file(s) to analyze')
+@acmdlib.argument('-d', '--detailed-dump',
+                  action='store_true',
+                  default=False,
+                  help="""Switch to activate or not a detailed dump
+                  of each TTree in the POOL file""")
+@acmdlib.argument('--sort-fct',
+                  choices=('diskSize','memSize','name'),
+                  default='diskSize',
+                  help="Sorting function used to list containers")
+@acmdlib.argument('--fast',
+                  action='store_true',
+                  default=False,
+                  help="""Enable fast mode.
+                  Memory size will not be accurate AT ALL""")
+@acmdlib.argument('-o', '--output',
+                  default=None,
+                  help="""name of the output file which will contain the
+                  information gathered during processing.
+                  This information will be stored in a python-shelve or
+                  an ASCII/py file (depending on the extension:
+                  .pkl,.dat -> shelve; everything else -> ASCII/py)
+                  """)
+def main(args):
+    """read a POOL file and dump its content.
+    """
+    files = args.files
+    if isinstance(files, basestring):
+        files=[files]
+
+    import sys
+    import os
+    import os.path as osp
+
+    for i,f in enumerate(files):
+        files[i] = osp.expandvars(osp.expanduser(f))
+
+    exitcode = 0
+    for fname in files:
+        try:
+            import PyUtils.PoolFile as PF
+            PF.PoolOpts.FAST_MODE = args.fast
+            pool_file = PF.PoolFile(fname)
+            pool_file.checkFile(sorting=args.sort_fct)
+            if args.detailed_dump:
+                dump_file = osp.basename(fname) + '.txt'
+                print "## dumping details into [%s]" % (dump_file,)
+                pool_file.detailedDump(dump_file)
+            if args.output:
+                oname = args.output
+                print "## saving report into [%s]..." % (oname,)
+                pool_file.saveReport(oname)
+        except Exception, e:
+            print "## Caught exception [%s] !!" % str(e.__class__)
+            print "## What:",e
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            exitcode = 1
+            pass
+
+        except :
+            print "## Caught something !! (don't know what)"
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            exitcode = 10
+            pass
+        if len(files) > 1:
+            print ""
+        pass # loop over fileNames
+    
+    print "## Bye."
+    return exitcode
+
diff --git a/Tools/PyUtils/python/scripts/check_reflex.py b/Tools/PyUtils/python/scripts/check_reflex.py
new file mode 100644
index 00000000000..33ca3089fcb
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/check_reflex.py
@@ -0,0 +1,282 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.check_reflex
+# @purpose a script to check the definitions of (reflex) plugins
+#          across multiple so-called 'rootmap' files
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 276362 $"
+__doc__ = """
+a script to check the definitions of (reflex) plugins across multiple so-called 'rootmap' files
+"""
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='chk-rflx')
+@acmdlib.argument(
+    '--capabilities',
+    nargs='?',
+    default=None,
+    help="Dump the capabilities of a given library (ex: libAthenaServices.so)")
+@acmdlib.argument(
+    '--dups',
+    dest='chk_dups',
+    default=None,
+    help="Check if there is any duplicates among dictionaries for a given library")
+@acmdlib.argument(
+    '--dump-content',
+    action='store_true',
+    default=False,
+    help="Dump the content of all the known plugins (dicts. and components)")
+@acmdlib.argument(
+    "--dso",
+    dest = "dump_dso",
+    action = "store_true",
+    default = False,
+    help = "Dump all the dsomap/rootmap files known to the Dso repository")
+@acmdlib.argument(
+    "--libs",
+    dest = "dump_libs",
+    action = "store_true",
+    default = False,
+    help = "Dump all the libraries known to the Dso repository")
+@acmdlib.argument(
+    "--check-dict-dups",
+    action = "store_true",
+    default = False,
+    help = "Check if there is any duplicates among dictionaries")
+@acmdlib.argument(
+    "--check-pf-dups",
+    action = "store_true",
+    default = False,
+    help = "Check if there is any duplicates among components declared to the PluginSvc")
+@acmdlib.argument(
+    "--check-all-dups",
+    action = "store_true",
+    default = False,
+    help = "Check dictionaries *and* components")
+@acmdlib.argument(
+    "--detailed-dump",
+    action = "store_true",
+    default = False,
+    help = "Performs a detailed dump if duplicates are found")
+@acmdlib.argument(
+    "--pedantic",
+    action = "store_true",
+    default = False,
+    help = """Pedantic mode: if a component is found in 2 libraries which have
+    the same name (usual case of a developer working on a (set of) package(s)),
+    it is still reported as duplicated""")
+@acmdlib.argument(
+    "-l",
+    "--level",
+    default = "INFO",
+    help = "Logging level (aka verbosity)")
+def main(args):
+    """a script to check the definitions of (reflex) plugins
+    across multiple so-called 'rootmap' files
+    """
+    exitcode = 0
+
+    print ":"*80
+    print "::: chk-rflx :::"
+
+    import os
+    import PyUtils.Dso as Dso
+
+    _suppression_dct = {
+        'TMath' : ('libCore.so', 'libMathCore.so'),
+        'string': ('libGaudiKernelDict.so',
+                   'libCore.so',
+                   'liblcg_PyCoolDict.so',
+                   'libSTLAddRflx.so'),
+        '__pf__::CNV_71_9631': ('libDataModelTestDataReadCnvPoolCnv.so',
+                                 'libDataModelTestDataWriteCnvPoolCnv.so',),
+        '__pf__::CNV_71_9632': ('libDataModelTestDataReadCnvPoolCnv.so',
+                                 'libDataModelTestDataWriteCnvPoolCnv.so',),
+        '__pf__::CNV_71_9633': ('libDataModelTestDataReadCnvPoolCnv.so',
+                                'libDataModelTestDataWriteCnvPoolCnv.so',),
+        '__pf__::CNV_71_9634': ('libDataModelTestDataReadCnvPoolCnv.so',
+                                'libDataModelTestDataWriteCnvPoolCnv.so',),
+        '__pf__::CNV_71_9639': ('libDataModelTestDataReadCnvPoolCnv.so',
+                                'libDataModelTestDataWriteCnvPoolCnv.so',),
+        ## FIXME !! this shouldn't be suppressed !!
+        '__pf__::RootCollection': ('liblcg_RootCollection.so',
+                                   'libAthAnalysisTools.so',),
+        ## !!
+        
+        }
+
+    
+    def print_db( db, detailedDump = False ):
+        if detailedDump : fct = lambda x: x
+        else:             fct = os.path.basename
+        keys = db.keys()
+        keys.sort()
+        for k in keys:
+            print "%s:" % k
+            libs = db[k]
+            libs.sort()
+            for lib in libs:
+                print "  ",fct(lib)
+        return
+
+    dsodb = Dso.DsoDb()
+
+    if args.capabilities:
+        libname = args.capabilities
+        try:
+            capabilities = dsodb.capabilities(libname)
+            print "::: capabilities of [%s]" % (libname,)
+            print os.linesep.join([" %s"%c for c in capabilities])
+        except ValueError,err:
+            exitcode = 1
+            pass
+
+    if args.chk_dups:
+        libname = args.chk_dups
+        try:
+            print "::: checking duplicates for [%s]..." % (libname,)
+            dups = dsodb.duplicates(libname, pedantic=args.pedantic)
+            for k in dups:
+                print " -",k
+                print os.linesep.join([" %s"%v for v in dups[k]])
+            if len(dups.keys())>0:
+                exitcode = 1
+        except ValueError,err:
+            exitcode = 1
+            pass
+
+    if args.dump_content:
+        print "::: dumping content of all known plugins..."
+        entries = dsodb.content(pedantic=args.pedantic)
+        print_db(entries, args.detailed_dump)
+        print "::: known entries:",len(entries.keys())
+
+    if args.dump_libs:
+        print "::: dumping all known libraries..."
+        libs = dsodb.libs(detailedDump=args.detailed_dump)
+        for lib in libs:
+            print " -",lib
+        print "::: known libs:",len(libs)
+
+    if args.dump_dso:
+        print "::: dumping all known dso/rootmap files..."
+        dso_files = [dso for dso in dsodb.dsoFiles]
+        dso_files.sort()
+        for dso_file in dso_files:
+            if not args.detailed_dump:
+                dso_file = os.path.basename(dso_file)
+            print " -",dso_file
+        print "::: known dsos:",len(dso_files)
+
+    if args.check_dict_dups:
+        print ":: checking dict. duplicates..."
+        dups = dsodb.dictDuplicates(pedantic=args.pedantic)
+        suppression_log = []
+        for k in dups:
+            v = dups[k]
+            # mark as error only if it isn't a known dup'
+            if k in _suppression_dct:
+                suppressed = [os.path.basename(ii) in _suppression_dct[k]
+                              for ii in v]
+                if all(suppressed):
+                    msg = "---> ignoring [%s]" % (k,)
+                    suppression_log.append(k[:])
+                    #print msg
+                    pass
+                else:
+                    # that's a new one !!
+                    exitcode = 1
+            else:
+                # that's a new one !!
+                exitcode = 1
+                # print "---> NOT ignoring [%s]" % (k,)
+        print_db(dups, args.detailed_dump)
+        if len(suppression_log):
+            print "-"*40
+            print "## ignoring the following dups':"
+            for k in suppression_log:
+                print " -",k
+            print "-"*40
+        print "## all dups:",len(dups.keys())
+        print "##     dups:",len(dups.keys())-len(suppression_log)
+
+    if args.check_pf_dups:
+        print "::: checking (plugin factories) components dups..."
+        dups = dsodb.pfDuplicates(pedantic=args.pedantic)
+        suppression_log = []
+        for k in dups:
+            v = dups[k]
+            # mark as error only if it isn't a known dup'
+            if k in _suppression_dct:
+                suppressed = [os.path.basename(ii) in _suppression_dct[k]
+                              for ii in v]
+                if all(suppressed):
+                    msg = "---> ignoring [%s]" % (k,)
+                    suppression_log.append(k[:])
+                    #print msg
+                    pass
+                else:
+                    # that's a new one !!
+                    exitcode = 1
+            else:
+                # that's a new one !!
+                exitcode = 1
+                # print "---> NOT ignoring [%s]" % (k,)
+        print_db(dups, args.detailed_dump)
+        if len(suppression_log):
+            print "-"*40
+            print "## ignoring the following dups':"
+            for k in suppression_log:
+                print " -",k
+            print "-"*40
+        print "## all dups:",len(dups.keys())
+        print "##     dups:",len(dups.keys())-len(suppression_log)
+
+    if args.check_all_dups:
+        print "::: checking all components dups..."
+        dups = dsodb.pfDuplicates(pedantic=args.pedantic)
+        dups.update(dsodb.dictDuplicates(pedantic=args.pedantic))
+        
+        suppression_log = []
+        for k in dups:
+            v = dups[k]
+            # mark as error only if it isn't a known dup'
+            if k in _suppression_dct:
+                suppressed = [os.path.basename(ii) in _suppression_dct[k]
+                              for ii in v]
+                if all(suppressed):
+                    msg = "---> ignoring [%s]" % (k,)
+                    suppression_log.append(k[:])
+                    #print msg
+                    pass
+                else:
+                    # that's a new one !!
+                    exitcode = 1
+            else:
+                # that's a new one !!
+                exitcode = 1
+                # print "---> NOT ignoring [%s]" % (k,)
+        print_db(dups, args.detailed_dump)
+        if len(suppression_log):
+            print "-"*40
+            print "## ignoring the following dups':"
+            for k in suppression_log:
+                print " -",k
+            print "-"*40
+        print "## all dups:",len(dups.keys())
+        print "##     dups:",len(dups.keys())-len(suppression_log)
+
+    if exitcode:
+        print "::: ERROR !!"
+    else:
+        print "::: All good."
+
+    print ":"*80
+    return exitcode
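+
+# typical invocations (via the acmd driver; the library name is only an example):
+#   $ acmd chk-rflx --capabilities libAthenaServices.so
+#   $ acmd chk-rflx --check-all-dups --detailed-dump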
diff --git a/Tools/PyUtils/python/scripts/check_sg.py b/Tools/PyUtils/python/scripts/check_sg.py
new file mode 100644
index 00000000000..d60a8f05bf8
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/check_sg.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.check_sg
+# @purpose read a POOL file and dump the DataHeader's content
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 276362 $"
+__doc__ = "read a POOL file and dump the DataHeader's content"
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='chk-sg')
+@acmdlib.argument('files', nargs='+',
+                  help='path to the POOL file(s) to analyze')
+@acmdlib.argument('-o', '--output',
+                  default=None,
+                  help="""name of the output file which will contain the
+                  information gathered during checkSG processing.
+                  This information will be stored in a python-shelve or
+                  an ASCII/py file (depending on the extension:
+                  .pkl,.dat -> shelve; everything else -> ASCII/py)
+                  """)
+def main(args):
+    """read a POOL file and dump the DataHeader's content
+
+    ex:
+     $ check-sg aod.pool.root
+     $ check-sg /castor/foo.pool
+     $ check-sg root://castoratlas//castor/foo.pool
+     $ check-sg LFN:ttbar.pool
+    """
+    files = args.files
+    if isinstance(files, basestring):
+        files = [files]
+
+    import sys
+    import os
+    import os.path as osp
+    for i,f in enumerate(files):
+        files[i] = osp.expandvars(osp.expanduser(f))
+
+    exitcode = 0
+    for fname in files:
+        try:
+            import AthenaCommon.KeyStore as acks
+            print "## checking [%s]..." % (fname,)
+            ks = acks.loadKeyStoreFromPoolFile(
+                keyStore=osp.basename(fname),
+                pool_file=fname,
+                label='inputFile')
+
+            print "="*80
+            print "%40s%s%-40s" % ("Container type", " | ","StoreGate keys")
+            print "%40s%s%-40s" % ("-"*40, "-+-", "-"*(40-3))
+            for name,sgkeys in ks.inputFile.dict().items():
+                print "%40s%s%-40s" % (name, " | ", ', '.join(sgkeys))
+            print "="*80
+            if args.output:
+                outFileName = args.output
+                outFileName = osp.expanduser(outFileName)
+                outFileName = osp.expandvars(outFileName)
+                print "## saving report into [%s]..." % (outFileName,)
+                if osp.splitext(outFileName)[1] in ('.pkl', '.dat'):
+                    # we explicitly import 'bsddb' to try to always
+                    # get that particular backend for the shelve...
+                    import bsddb
+                    import shelve
+                    if os.path.exists(outFileName):
+                        os.remove(outFileName)
+                    db = shelve.open(outFileName)
+                    db['eventdata_items'] = ks.inputFile.dict()
+                    db.close()
+                else:
+                    ks.write(outFileName, label='inputFile')
+        except Exception, e:
+            print "## Caught exception [%s] !!" % str(e.__class__)
+            print "## What:",e
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            exitcode = 1
+            pass
+
+        except :
+            print "## Caught something !! (don't know what)"
+            print sys.exc_info()[0]
+            print sys.exc_info()[1]
+            exitcode = 10
+            pass
+        if len(files) > 1:
+            print ""
+        pass # loop over fileNames
+    
+    print "## Bye."
+    return exitcode
+
diff --git a/Tools/PyUtils/python/scripts/cmt_newpkg.py b/Tools/PyUtils/python/scripts/cmt_newpkg.py
new file mode 100644
index 00000000000..9711a9060cc
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/cmt_newpkg.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.cmt_newpkg
+# @purpose streamline and ease the creation of new cmt packages
+# @author Sebastien Binet
+# @date February 2010
+
+from __future__ import with_statement
+
+__version__ = "$Revision: 279982 $"
+__author__ = "Sebastien Binet"
+__doc__ = "streamline and ease the creation of new cmt packages"
+
+### imports -------------------------------------------------------------------
+import os
+import textwrap
+import commands
+import PyUtils.acmdlib as acmdlib
+
+### functions -----------------------------------------------------------------
+@acmdlib.command(
+    name='cmt.new-pkg'
+    )
+@acmdlib.argument(
+    'pkgname',
+    help="(fully qualified) name of the new package"
+    )
+@acmdlib.argument(
+    '--author',
+    default='${USER}',
+    help='name of the author of this new package'
+    )
+@acmdlib.argument(
+    '-r', '--runtime',
+    action='store_true',
+    default=False,
+    help='switch to make a package a runtime package',
+    )
+def main(args):
+    """create a new cmt package with sensible atlas-oriented defaults
+
+    ex:
+     $ acmd cmt new-pkg Control/MyContainer/NewPackage
+    """
+    sc = 0
+    
+    full_pkg_name = args.pkgname
+    if full_pkg_name[0] == '/':
+        full_pkg_name = full_pkg_name[1:]
+        
+    pkg_path = os.path.dirname(full_pkg_name)
+    pkg_name = os.path.basename(full_pkg_name)
+    pkg_vers = '%s-00-00-00' % pkg_name
+    author = os.path.expanduser(os.path.expandvars(args.author))
+
+    if os.path.exists(full_pkg_name):
+        import shutil
+        shutil.rmtree(full_pkg_name)
+        
+    print textwrap.dedent("""\
+    ::: creating package [%(full_pkg_name)s]...
+    :::   - pkg name:    %(pkg_name)s
+    :::   - pkg version: %(pkg_vers)s
+    :::   - pkg path:    %(pkg_path)s
+    :::   - author:      %(author)s""" % locals())
+    cmd = 'cmt create %(pkg_name)s %(pkg_vers)s %(pkg_path)s' % locals()
+    
+    sc, out = commands.getstatusoutput(cmd)
+    if sc != 0:
+        print "ERROR while running [%s]" % (cmd,)
+        print out
+        return sc
+
+    with open(os.path.join(full_pkg_name,'cmt','requirements'), 'w') as req:
+        print >> req, textwrap.dedent("""\
+        ## automatically generated CMT requirements file
+        package %(pkg_name)s
+        author  %(author)s
+
+        ## for athena policies: this has to be the first use statement
+        use AtlasPolicy \tAtlasPolicy-*
+
+        ## for gaudi tools, services and objects
+        use GaudiInterface \tGaudiInterface-* \tExternal
+
+        ## put here your package dependencies...
+
+        ##
+
+        branches src src/components doc python share
+
+        private
+        ## default is to make component library
+        library %(pkg_name)s *.cxx components/*.cxx
+
+        apply_pattern component_library
+        apply_pattern declare_joboptions files="*.py"
+        apply_pattern declare_python_modules files="*.py"
+
+        end_private
+        
+        """%locals())
+    print "::: creating package [%(full_pkg_name)s]... [done]" % locals()
+
+    cwd = os.getcwd()
+    try:
+        os.chdir(os.path.join(full_pkg_name,'cmt'))
+        _ = commands.getstatusoutput('cmt config')
+    finally:
+        os.chdir(cwd)
+
+    ## if sc != 0:
+    ##     print ":"*80
+    ##     print out
+    ##     return sc
+    
+    return sc
+
diff --git a/Tools/PyUtils/python/scripts/diff_pool_files.py b/Tools/PyUtils/python/scripts/diff_pool_files.py
new file mode 100644
index 00000000000..5f9de645352
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/diff_pool_files.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.diff_pool_files
+# @purpose check that 2 POOL files have same content (containers and sizes).
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 276362 $"
+__doc__ = "check that 2 POOL files have same content (containers and sizes)."
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='diff-pool')
+@acmdlib.argument('old',
+                  help='path to the reference POOL file to analyze')
+@acmdlib.argument('new',
+                  help='path to the POOL file to compare to the reference')
+@acmdlib.argument('-v', '--verbose',
+                  action='store_true',
+                  default=False,
+                  help="""Enable verbose printout""")
+def main(args):
+    """check that 2 POOL files have same content (containers and sizes)
+    """
+
+    import os.path as osp
+    old = osp.expandvars(osp.expanduser(args.old))
+    new = osp.expandvars(osp.expanduser(args.new))
+
+    import PyUtils.PoolFile as PF
+    diff = PF.DiffFiles(refFileName = old,
+                        chkFileName = new,
+                        verbose = args.verbose)
+    diff.printSummary()
+    return diff.status()
diff --git a/Tools/PyUtils/python/scripts/diff_root_files.py b/Tools/PyUtils/python/scripts/diff_root_files.py
new file mode 100644
index 00000000000..fa181777864
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/diff_root_files.py
@@ -0,0 +1,236 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.diff_root_files
+# @purpose check that 2 ROOT files have same content (containers and sizes).
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 543921 $"
+__doc__ = "check that 2 ROOT files have same content (containers and sizes)."
+__author__ = "Sebastien Binet"
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+import PyUtils.RootUtils as ru
+ROOT = ru.import_root()
+
+### globals -------------------------------------------------------------------
+g_ALLOWED_MODES = ('summary', 'detailed')
+g_ALLOWED_ERROR_MODES = ('bailout', 'resilient')
+g_args = None
+
+### classes -------------------------------------------------------------------
+
+### functions -----------------------------------------------------------------
+def _is_summary():
+    global g_args
+    return g_args.mode == 'summary'
+
+def _is_exit_early():
+    global g_args
+    return g_args.error_mode == 'bailout'
+
+@acmdlib.command(name='diff-root')
+@acmdlib.argument('old',
+                  help='path to the reference ROOT file to analyze')
+@acmdlib.argument('new',
+                  help='path to the ROOT file to compare to the reference')
+@acmdlib.argument('-t', '--tree-name',
+                  default='CollectionTree',
+                  help='name of the TTree to compare')
+@acmdlib.argument('--ignore-leaves',
+                  nargs='*',
+                  default=('Token',),
+                  help='set of leaves names to ignore from comparison')
+@acmdlib.argument('--enforce-leaves',
+                  nargs='*',
+                  default=('BCID',),
+                  help='set of leaves names we make sure to compare')
+@acmdlib.argument('--known-hacks',
+                  nargs='*',
+                  default=('m_athenabarcode', 'm_token',),
+                  help='set of leaves which are known to fail (but should be fixed at some point) [default: %(default)s]')
+@acmdlib.argument('--entries',
+                  default='',
+                  help='a list of entries (indices, not event numbers) or an expression (like range(3) or 0,2,1 or 0:3) leading to such a list, to compare.')
+@acmdlib.argument('-v', '--verbose',
+                  action='store_true',
+                  default=False,
+                  help="""Enable verbose printout""")
+@acmdlib.argument('--mode',
+                  choices=g_ALLOWED_MODES,
+                  default='detailed',
+                  help="""\
+Enable a particular mode.
+  'summary': only report the number of differences.
+  'detailed': display everything.
+default='%(default)s'.
+allowed: %(choices)s
+"""
+                  )
+@acmdlib.argument('--error-mode',
+                  choices=g_ALLOWED_ERROR_MODES,
+                  default='bailout',
+                  help="""\
+Enable a particular error mode.
+  'bailout':   bail out on first error.
+  'resilient': keep running.
+default='%(default)s'.
+allowed: %(choices)s
+"""
+                  )
+def main(args):
+    """check that 2 ROOT files have same content (containers and sizes)
+    """
+    global g_args
+    g_args = args
+    
+    import PyUtils.RootUtils as ru
+    root = ru.import_root()
+
+    import PyUtils.Logging as L
+    msg = L.logging.getLogger('diff-root')
+    msg.setLevel(L.logging.INFO)
+
+    if args.entries == '':
+        args.entries = -1
+        
+    msg.info('comparing tree [%s] in files:', args.tree_name)
+    msg.info(' old: [%s]', args.old)
+    msg.info(' new: [%s]', args.new)
+    msg.info('ignore  leaves: %s', args.ignore_leaves)
+    msg.info('enforce leaves: %s', args.enforce_leaves)
+    msg.info('hacks:          %s', args.known_hacks)
+    msg.info('entries:        %s', args.entries)
+    msg.info('mode:           %s', args.mode)
+    msg.info('error mode:     %s', args.error_mode)
+
+    import PyUtils.Helpers as H
+    with H.ShutUp() :
+        fold = ru.RootFileDumper(args.old, args.tree_name)
+        fnew = ru.RootFileDumper(args.new, args.tree_name)
+        pass
+    
+    def tree_infos(tree, args):
+        nentries = tree.GetEntriesFast()
+        leaves = [l.GetName() for l in tree.GetListOfLeaves()
+                  if l.GetName() not in args.ignore_leaves]
+        return {
+            'entries' : nentries,
+            'leaves': set(leaves),
+            }
+    
+    def diff_tree(fold, fnew, args):
+        infos = {
+            'old' : tree_infos(fold.tree, args),
+            'new' : tree_infos(fnew.tree, args),
+            }
+
+        nentries = min(infos['old']['entries'],
+                       infos['new']['entries'])
+        itr_entries = nentries
+        if args.entries in (-1,'','-1'):
+            #msg.info('comparing over [%s] entries...', nentries)
+            itr_entries = nentries
+            if infos['old']['entries'] != infos['new']['entries']:
+                msg.info('different numbers of entries:')
+                msg.info(' old: [%s]', infos['old']['entries'])
+                msg.info(' new: [%s]', infos['new']['entries'])
+                msg.info('=> comparing [%s] first entries...', nentries)
+        else:
+            itr_entries = args.entries
+            pass
+        msg.info('comparing over [%s] entries...', itr_entries)
+        
+        leaves = infos['old']['leaves'] & infos['new']['leaves']
+        diff_leaves = infos['old']['leaves'] - infos['new']['leaves']
+        if diff_leaves:
+            msg.info('the following variables exist in only one tree !')
+            for l in diff_leaves:
+                msg.info(' - [%s]', l)
+        leaves = leaves - set(args.ignore_leaves)
+        
+        msg.info('comparing [%s] leaves over entries...', len(leaves))
+        all_good = True
+        n_good = 0
+        n_bad = 0
+        import collections
+        from itertools import izip
+        summary = collections.defaultdict(int)
+        for d in izip(fold.dump(args.tree_name, itr_entries),
+                      fnew.dump(args.tree_name, itr_entries)):
+            tree_name, ientry, name, iold = d[0]
+            _,              _,    _, inew = d[1]
+            name[0] = name[0].rstrip('\0')
+            if ((not (name[0] in leaves)) or
+                # FIXME: that's a plain (temporary?) hack
+                name[-1] in args.known_hacks):
+                continue
+            
+            if d[0] == d[1]:
+                diff = False
+                n_good += 1
+                continue
+            n_bad += 1
+            diff = True
+
+            in_synch = d[0][:-1] == d[1][:-1]
+            if not in_synch:
+                if not _is_summary():
+                    print '::sync-old %s' % \
+                          '.'.join(["%03i"%ientry]+map(str, d[0][2]))
+                    print '::sync-new %s' % \
+                          '.'.join(["%03i"%ientry]+map(str, d[1][2]))
+                    pass
+                summary[name[0]] += 1
+                # remember for later
+                fold.allgood = False
+                fnew.allgood = False
+
+                if _is_exit_early():
+                    print "*** exit on first error ***"
+                    break
+                continue
+            
+            n = '.'.join(map(str, ["%03i"%ientry]+name))
+            diff_value = 'N/A'
+            try:
+                diff_value = 50.*(iold-inew)/(iold+inew)
+                diff_value = '%.8f%%' % (diff_value,)
+            except Exception:
+                pass
+            if not _is_summary():
+                print '%s %r -> %r => diff= [%s]' %(n, iold, inew, diff_value)
+                pass
+            summary[name[0]] += 1
+
+            if name[0] in args.enforce_leaves:
+                msg.info("don't compare further")
+                all_good = False
+                break
+            pass # loop over events/branches
+        
+        msg.info('Found [%s] identical leaves', n_good)
+        msg.info('Found [%s] different leaves', n_bad)
+
+        if not _is_summary():
+            keys = sorted(summary.keys())
+            for n in keys:
+                v = summary[n]
+                msg.info(' [%s]: %i leaves differ', n, v)
+                pass
+            pass
+        
+        if (not fold.allgood) or (not fnew.allgood):
+            msg.info('NOTE: there were errors during the dump')
+            msg.info('fold.allgood: %s' % fold.allgood)
+            msg.info('fnew.allgood: %s' % fnew.allgood)
+            n_bad += 0.5
+        return n_bad
+    
+    ndiff = diff_tree(fold, fnew, args)
+    if ndiff != 0:
+        msg.info('files differ!')
+        return 2
+    msg.info('all good.')
+    return 0
diff --git a/Tools/PyUtils/python/scripts/dump_root_file.py b/Tools/PyUtils/python/scripts/dump_root_file.py
new file mode 100644
index 00000000000..2d4d7990085
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/dump_root_file.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.dump_root_file
+# @purpose ascii-fy a ROOT file
+# @author Sebastien Binet
+# @date December 2010
+
+__version__ = "$Revision: 438720 $"
+__doc__ = "ASCII-fy a ROOT file"
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='dump-root')
+@acmdlib.argument('fname',
+                  help='path to the ROOT file to dump')
+@acmdlib.argument('-t', '--tree-name',
+                  default=None,
+                  help='name of the TTree to dump (default:all)')
+@acmdlib.argument('--entries',
+                  default=None,
+                  help="""a list of entries (indices, not event numbers) or an expression leading to such a list, to compare (default:all).
+                  ex: --entries='0:10' to get the first 10 events
+                      --entries='10:20:2' to get the even events between 10 and 20
+                      --entries='range(10)' to get the first 10 events
+                      --entries=10 to get the first 10 events
+                      --entries=0,2,1 to get the entry 0, then 2 then 1
+                  """)
+@acmdlib.argument('-v', '--verbose',
+                  action='store_true',
+                  default=False,
+                  help="""Enable verbose printout""")
+def main(args):
+    """dump the content of a ROOT file into an ASCII format.
+    """
+
+    import PyUtils.RootUtils as ru
+    root = ru.import_root()
+
+    _inspect = root.RootUtils.PyROOTInspector.pyroot_inspect2
+
+    import PyUtils.Logging as L
+    msg = L.logging.getLogger('dump-root')
+    msg.setLevel(L.logging.INFO)
+
+    msg.info('fname: [%s]', args.fname)
+    root_file = root.TFile.Open(args.fname)
+    if (root_file is None or
+        not isinstance(root_file, root.TFile) or
+        not root_file.IsOpen()):
+        msg.error('could not open [%s]', args.fname)
+        return 1
+
+    tree_names = []
+    if args.tree_name:
+        tree_names = args.tree_name.split(',')
+    else:
+        tree_names = []
+        keys = [k.GetName() for k in root_file.GetListOfKeys()]
+        for k in keys:
+            o = root_file.Get(k)
+            if isinstance(o, root.TTree):
+                tree_names.append(k)
+                
+    msg.info('dumping trees:  %s', tree_names)
+
+    rc = 0
+    for tree_name in tree_names:
+        f = ru.RootFileDumper(args.fname, tree_name)
+        nentries = f.tree.GetEntries()
+        if args.entries:
+            nentries = args.entries
+        for d in f.dump(tree_name, nentries):
+            tree_name, ientry, name, data = d
+            n = '.'.join(map(str, [tree_name,"%03i"%ientry]+name))
+            print '%s %r' %(n, data)
+    return 0
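+
+# example invocation (assuming the command is exposed through the acmd driver
+# shipped with this package), dumping the first 10 entries of one tree:
+#   acmd dump-root myfile.root -t CollectionTree --entries='0:10'
+# several trees can be given as a comma-separated list to -t; each value is
+# printed as '<tree>.<entry>.<branch path> <value>', as built in the loop above.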
diff --git a/Tools/PyUtils/python/scripts/filter_files.py b/Tools/PyUtils/python/scripts/filter_files.py
new file mode 100644
index 00000000000..a295b87f0f4
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/filter_files.py
@@ -0,0 +1,197 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.filter_files
+# @purpose take a bunch of input (pool/bs) files and produce a filtered one
+#          autoconfiguration is attempted
+# @author Sebastien Binet
+# @date March 2010
+from __future__ import with_statement
+
+__version__ = "$Revision: 523884 $"
+__doc__ = "take a bunch of input (pool/bs) files and produce a filtered one"
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(
+    name='filter-files'
+    )
+@acmdlib.argument(
+    '-o', '--output',
+    required=True,
+    help="Name of the filtered output file"
+    )
+@acmdlib.argument(
+    'files',
+    nargs='+',
+    help='path to the input (pool/bs) files'
+    )
+@acmdlib.argument(
+    '-s', '--selection',
+    required=True,
+    help='comma-separated list of (run,event) number pairs to select, or an ASCII file containing a list of such run/event numbers'
+    )
+def main(args):
+    """take a bunch of input (pool/bs) files and produce a filtered one
+    """
+    exitcode = 0
+
+    import PyUtils.Logging as L
+    msg = L.logging.getLogger('filter-files')
+    msg.setLevel(L.logging.INFO)
+
+    msg.info(':'*40)
+    msg.info('welcome to filter-files version %s', __version__)
+
+    import os.path as osp
+    args.files = [ osp.expandvars(osp.expanduser(fname))
+                   for fname in args.files ]
+
+    args.selection = osp.expandvars(osp.expanduser(args.selection))
+    
+    msg.info('input files: %s', args.files)
+    msg.info('output file: %s', args.output)
+    msg.info('selection:   %s', args.selection)
+
+    import os
+    if os.path.exists(args.selection):
+        selection = []
+        with open(args.selection, 'r') as s:
+            for line in s:
+                if line.strip().startswith('#'):
+                    continue
+                l = line.strip().split()
+                if len(l)==1: # assume this is only the event number
+                    runnbr, evtnbr = None, long(l[0])
+                elif len(l)==2: # a pair (run,evt) number
+                    runnbr, evtnbr = long(l[0]), long(l[1])
+                else:
+                    raise RuntimeError(
+                        'file [%s] has invalid format at line:\n%r' %
+                        (args.selection, line)
+                        )
+                selection.append((runnbr, evtnbr))
+    else:
+        try:
+            args.selection = eval(args.selection)
+        except Exception,err:
+            msg.error('caught:\n%s', err)
+            msg.error('.. while trying to parse selection-string')
+            return 1
+        
+        selection = []
+        for item in args.selection:
+            if not isinstance(item, (tuple, list, int, long)):
+                raise TypeError('type: %r' % type(item))
+
+            if isinstance(item, (tuple, list)):
+                if len(item) == 1:
+                    runnbr, evtnbr = None, long(item[0])
+                elif len(item) == 2:
+                    runnbr, evtnbr = long(item[0]), long(item[1])
+                else:
+                    raise RuntimeError(
+                        'item [%s] has invalid arity (%s)' %
+                        (item, len(item))
+                        )
+            else:
+                runnbr, evtnbr = None, long(item)
+            selection.append((runnbr, evtnbr))
+
+    # put back the massaged selection into our workspace
+    args.selection = selection[:]
+    
+    import PyUtils.AthFile as af
+    fi = af.fopen(args.files[0]).infos
+    af.save_cache()
+    
+    if fi['file_type'] == 'bs':
+        # optimization: run 'AtlCopyBSEvent.exe' directly
+        import subprocess
+        cmd = ' '.join([
+            'AtlCopyBSEvent.exe',
+            '-e %(evt-list)s',
+            '%(run-list)s',
+            '--out %(output)s',
+            '%(files)s',
+            ])
+        evt_list = [str(i) for _,i in args.selection]
+        run_list = [str(i) for i,_ in args.selection if i is not None]
+        cmd = cmd % {
+            'evt-list': ','.join(evt_list),
+            'run-list': '' if len(run_list)==0 else '-r '+','.join(run_list),
+            'output': args.output,
+            'files':  ' '.join(args.files),
+            }
+        return subprocess.call(cmd.split())
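+
+    # for a hypothetical selection of (run 167776, event 4411) plus event 1234,
+    # the command assembled above would look roughly like (file names are placeholders):
+    #   AtlCopyBSEvent.exe -e 4411,1234 -r 167776 --out filtered.data in1.data in2.data
+    # the '-r' option is dropped entirely when no run numbers were given.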
+    
+    import AthenaCommon.ChapPy as api
+    app = api.AthenaApp()
+    app << """
+    import AthenaCommon.Constants as Lvl
+    from AthenaCommon.AthenaCommonFlags import jobproperties as jp
+    acf = jp.AthenaCommonFlags
+    acf.FilesInput = %(files)s
+
+    # events to process
+    acf.EvtMax = EvtMax = theApp.EvtMax = -1
+
+    # configuration
+    import AthenaPython.ConfigLib as apcl
+    cfg = apcl.AutoCfg(
+        name='filter-files',
+        input_files=acf.FilesInput(),
+        output_file='%(output)s')
+
+    for type_name in ('evgen',
+                      'hits',
+                      'rdo',
+                      'esd',
+                      'aod',
+                      'tag',
+                      'usr',):
+        res = getattr(cfg, 'is_' + type_name)()
+        if res:
+            cfg.msg.info('input file type is ['+type_name+']')
+            break
+    else:
+        cfg.msg.info('input file stream is of unknown type')
+        cfg.msg.info('autoconfiguration might not work...')
+        pass
+        
+    # add the filtering algorithm
+    # get a handle on the job main sequence
+    import AthenaCommon.AlgSequence as acas
+    job = acas.AlgSequence()
+    
+    ## filter configuration ##
+    ##  -> we use the special sequence 'AthFilterSeq' which
+    ##      is run before any other algorithms (which usually live in the
+    ##      'TopAlg' sequence)
+    seq = acas.AthSequencer('AthFilterSeq')
+    
+    import GaudiSequencer.PyComps as gspc
+    seq += gspc.PyEvtFilter(
+       'filter_pyalg',
+       # the store-gate key. leave as an empty string to take any eventinfo instance
+       evt_info=None,
+       OutputLevel=Lvl.INFO)
+    seq.filter_pyalg.evt_list = %(selection)s   
+
+
+    cfg.configure_job()
+
+    if (cfg.is_rdo() or
+        cfg.is_esd() or
+        cfg.is_aod()):
+        # main jobos
+        include ('RecExCond/RecExCommon_flags.py')
+        include ('RecExCommon/RecExCommon_topOptions.py')
+
+    """ % args.__dict__
+    
+    stdout = None
+    exitcode = app.run(stdout=stdout)
+    return exitcode
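+
+# the two accepted --selection formats, illustrated with hypothetical numbers:
+#  * an ascii file, one event per line, optionally preceded by a run number:
+#        # run   event
+#        167776  4411
+#        1234
+#    which is normalized to [(167776L, 4411L), (None, 1234L)]
+#  * a python expression, e.g. --selection='[(167776, 4411), 1234]', which is
+#    normalized to the same list of (run, event) pairs.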
diff --git a/Tools/PyUtils/python/scripts/gen_klass.py b/Tools/PyUtils/python/scripts/gen_klass.py
new file mode 100644
index 00000000000..22500e6bb5b
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/gen_klass.py
@@ -0,0 +1,1163 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.gen_klass.py
+# @purpose helper script to generate header and cxx files of various
+#           athena components (svc/tool/alg/isvc/itool/object)
+# @author Sebastien Binet
+# @date   April 2008
+
+__version__ = "$Revision: 615061 $"
+__author__ = "Sebastien Binet"
+__doc__ = """\
+helper script to generate header and cxx files of various athena
+components (svc/tool/alg/isvc/itool/object)
+"""
+
+import sys
+import os
+
+import PyUtils.acmdlib as acmdlib
+
+class GenTypes:
+    values = ('object',
+              'isvc', 'svc',
+              'itool', 'tool',
+              'alg',
+              ## the python ones
+              'pyalg', 'pysvc', 'pytool', 'pyaud'
+              )
+    needing_iface = ('svc', 'tool')
+    pass
+
+class Templates:
+    isvc_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s 
+#define %(guard)s 1 
+
+/** @class %(klass)s
+ */
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "GaudiKernel/IService.h"
+
+// %(pkg)s includes
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::IService
+{ 
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /** Destructor: 
+   */
+  virtual ~%(klass)s();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  static const InterfaceID& interfaceID();
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+  static const InterfaceID IID_%(klass)s("%(klass)s", 1, 0);
+  return IID_%(klass)s; 
+}
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    isvc_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    itool_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+
+// HepMC / CLHEP includes
+
+// FrameWork includes
+#include "GaudiKernel/IAlgTool.h"
+
+// Forward declaration
+
+%(namespace_begin)s
+
+static const InterfaceID IID_%(klass)s("%(klass)s", 1, 0);
+
+class %(klass)s
+  : virtual public ::IAlgTool
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /** Destructor: 
+   */
+  virtual ~%(klass)s();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+  static const InterfaceID& interfaceID();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Protected data: 
+  /////////////////////////////////////////////////////////////////// 
+ protected: 
+
+}; 
+
+/// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+/// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+   return IID_%(klass)s; 
+}
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    itool_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// Framework includes
+//#include "GaudiKernel/MsgStream.h"
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+///////////////////////////////////////////////////////////////////
+
+%(namespace_end)s
+"""
+    object_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <iosfwd>
+
+// Gaudi includes
+
+// Forward declaration
+
+%(namespace_begin)s
+
+class %(klass)s
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Copy constructor: 
+  %(klass)s( const %(klass)s& rhs );
+
+  /// Assignment operator: 
+  %(klass)s& operator=( const %(klass)s& rhs ); 
+
+  /// Constructor with parameters: 
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+}; 
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+//std::ostream& operator<<( std::ostream& out, const %(klass)s& o );
+
+%(namespace_end)s
+
+#endif //> !%(guard)s
+"""
+
+    object_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(pkg)s/%(klass)s.h"
+
+// STL includes
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+
+// Destructor
+///////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    svc_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthService.h"
+
+// %(ipkg)s
+#include "%(ipkg)s/%(iklass)s.h"
+
+// Forward declaration
+class ISvcLocator;
+template <class TYPE> class SvcFactory;
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::%(iklass)s,
+            public ::AthService
+{ 
+  friend class SvcFactory<%(klass)s>;
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& name, ISvcLocator* pSvcLocator );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Assignment operator: 
+  //%(klass)s &operator=(const %(klass)s &alg); 
+
+  /// Gaudi Service Implementation
+  //@{
+  virtual StatusCode initialize();
+  virtual StatusCode finalize();
+  virtual StatusCode queryInterface( const InterfaceID& riid, 
+                                     void** ppvInterface );
+  //@}
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  static const InterfaceID& interfaceID();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+inline const InterfaceID& %(klass)s::interfaceID() 
+{ 
+  return %(iklass)s::interfaceID(); 
+}
+
+%(namespace_end)s
+
+#endif //> !%(guard)s
+"""
+
+    svc_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/Property.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& name, 
+		      ISvcLocator* pSvcLocator ) : 
+  ::AthService( name, pSvcLocator )
+{
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena Service's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+// Query the interfaces.
+//   Input: riid, Requested interface ID
+//          ppvInterface, Pointer to requested interface
+//   Return: StatusCode indicating SUCCESS or FAILURE.
+// N.B. Don't forget to release the interface after use!!!
+StatusCode 
+%(klass)s::queryInterface(const InterfaceID& riid, void** ppvInterface) 
+{
+  if ( %(iklass)s::interfaceID().versionMatch(riid) ) {
+    *ppvInterface = dynamic_cast<%(iklass)s*>(this);
+  } else {
+    // Interface is not directly available : try out a base class
+    return ::AthService::queryInterface(riid, ppvInterface);
+  }
+  addRef();
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    alg_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthAlgorithm.h"
+
+%(namespace_begin)s
+
+class %(klass)s
+  : public ::AthAlgorithm
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& name, ISvcLocator* pSvcLocator );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Assignment operator: 
+  //%(klass)s &operator=(const %(klass)s &alg); 
+
+  // Athena algorithm's Hooks
+  virtual StatusCode  initialize();
+  virtual StatusCode  execute();
+  virtual StatusCode  finalize();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  /// Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    alg_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/Property.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& name, 
+			  ISvcLocator* pSvcLocator ) : 
+  ::AthAlgorithm( name, pSvcLocator )
+{
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena Algorithm's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::execute()
+{  
+  ATH_MSG_DEBUG ("Executing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    tool_hdr_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.h 
+// Header file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+#ifndef %(guard)s
+#define %(guard)s 1
+
+// STL includes
+#include <string>
+
+// FrameWork includes
+#include "AthenaBaseComps/AthAlgTool.h"
+#include "GaudiKernel/ServiceHandle.h"
+
+// %(ipkg)s includes
+#include "%(ipkg)s/%(iklass)s.h"
+
+// Forward declaration
+
+%(namespace_begin)s
+
+class %(klass)s
+  : virtual public ::%(iklass)s,
+            public ::AthAlgTool
+{ 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Public methods: 
+  /////////////////////////////////////////////////////////////////// 
+ public: 
+
+  // Copy constructor: 
+
+  /// Constructor with parameters: 
+  %(klass)s( const std::string& type,
+	     const std::string& name, 
+	     const IInterface* parent );
+
+  /// Destructor: 
+  virtual ~%(klass)s(); 
+
+  // Athena algtool's Hooks
+  virtual StatusCode  initialize();
+  virtual StatusCode  finalize();
+
+  /////////////////////////////////////////////////////////////////// 
+  // Const methods: 
+  ///////////////////////////////////////////////////////////////////
+
+  /////////////////////////////////////////////////////////////////// 
+  // Non-const methods: 
+  /////////////////////////////////////////////////////////////////// 
+
+  /////////////////////////////////////////////////////////////////// 
+  // Private data: 
+  /////////////////////////////////////////////////////////////////// 
+ private: 
+
+  /// Default constructor: 
+  %(klass)s();
+
+  // Containers
+  
+
+}; 
+
+// I/O operators
+//////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Inline methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+#endif //> !%(guard)s
+"""
+
+    tool_cxx_template = """\
+///////////////////////// -*- C++ -*- /////////////////////////////
+// %(klass)s.cxx 
+// Implementation file for class %(klass)s
+// Author: S.Binet<binet@cern.ch>
+/////////////////////////////////////////////////////////////////// 
+
+// %(pkg)s includes
+#include "%(klass)s.h"
+
+// STL includes
+
+// FrameWork includes
+#include "GaudiKernel/IToolSvc.h"
+
+%(namespace_begin)s
+
+/////////////////////////////////////////////////////////////////// 
+// Public methods: 
+/////////////////////////////////////////////////////////////////// 
+
+// Constructors
+////////////////
+%(klass)s::%(klass)s( const std::string& type, 
+		      const std::string& name, 
+		      const IInterface* parent ) : 
+  ::AthAlgTool  ( type, name, parent   )
+{
+  declareInterface< %(iklass)s >(this);
+  //
+  // Property declaration
+  // 
+  //declareProperty( "Property", m_nProperty );
+
+}
+
+// Destructor
+///////////////
+%(klass)s::~%(klass)s()
+{}
+
+// Athena algtool's Hooks
+////////////////////////////
+StatusCode %(klass)s::initialize()
+{
+  ATH_MSG_INFO ("Initializing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+StatusCode %(klass)s::finalize()
+{
+  ATH_MSG_INFO ("Finalizing " << name() << "...");
+
+  return StatusCode::SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Protected methods: 
+/////////////////////////////////////////////////////////////////// 
+
+/////////////////////////////////////////////////////////////////// 
+// Const methods: 
+///////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////////////////////////// 
+// Non-const methods: 
+/////////////////////////////////////////////////////////////////// 
+
+%(namespace_end)s
+"""
+
+    pyalg_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 615061 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Alg):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def execute(self):
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pysvc_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 615061 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Svc):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pytool_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 615061 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.AlgTool):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+    pyaud_template = """\
+# @file:    %(pkg)s/python/%(fname)s
+# @purpose: <put some purpose here>
+# @author:  Sebastien Binet <binet@cern.ch>
+
+__doc__     = 'some documentation here'
+__version__ = '$Revision: 615061 $'
+__author__  = 'Sebastien Binet <binet@cern.ch>'
+
+import AthenaCommon.SystemOfUnits as Units
+import AthenaPython.PyAthena as PyAthena
+from AthenaPython.PyAthena import StatusCode
+
+class %(klass)s (PyAthena.Aud):
+    'put some documentation here'
+    def __init__(self, name='%(klass)s', **kw):
+        ## init base class
+        kw['name'] = name
+        super(%(klass)s, self).__init__(**kw)
+
+        ## properties and data members
+        #self.foo = kw.get('foo', 10) # default value
+        return
+
+    def initialize(self):
+        self.msg.info('==> initialize...')
+        return StatusCode.Success
+
+    def finalize(self):
+        self.msg.info('==> finalize...')
+        return StatusCode.Success
+
+    # class %(klass)s
+"""
+
+def gen_files(pkg="", klass="", klass_type='object', fname='foo',
+              ipkg="", iklass=""):
+    """Simple helper function to generate files based off some informations
+     @param pkg the name of the package holding the class we want to generate
+     @param klass the (fully qualified) name of the C++ class to generate
+     @param klass_type the type of class to generate (svc/tool/alg/object)
+     @param fname the filename to generate
+     @param ipkg the name of the package holding the interface of the class
+     @param iklass the name of the interface of the class we generate
+    """
+    try:
+        hdr = getattr(Templates, '%s_hdr_template'%klass_type)
+        cxx = getattr(Templates, '%s_cxx_template'%klass_type)
+    except AttributeError,err:
+        print "::: UNKNOWN klass_type [%s] !" % klass_type
+        raise err
+
+    namespace_klass = klass.replace('::','_')
+    namespace_begin,namespace_end = "",""
+    if klass.count("::")>0:
+        nm    = klass.split("::")[0]
+        klass = klass.split("::")[1]
+        namespace_begin = "namespace %s {" % nm
+        namespace_end   = "} //> end namespace %s" % nm
+        pass
+
+    guard = "%s_%s_H" % (pkg.upper(), namespace_klass.upper())
+
+    d = dict( pkg=pkg,
+              klass=klass,
+              ipkg=ipkg,
+              iklass=iklass,
+              guard=guard,
+              namespace_begin=namespace_begin,
+              namespace_end=namespace_end
+              )
+    fname = os.path.splitext(fname)[0]
+    
+    o_hdr = open(fname+'.h', 'w')
+    o_hdr.writelines(hdr%d)
+    o_hdr.flush()
+    o_hdr.close()
+
+    o_cxx = open(fname+'.cxx', 'w')
+    o_cxx.writelines(cxx%d)
+    o_cxx.flush()
+    o_cxx.close()
+
+    return 0
+
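+# example of the substitutions performed above for a namespaced class, e.g.
+# pkg='JetEvent', klass='Analysis::Electron', fname='Electron':
+#   guard           -> 'JETEVENT_ANALYSIS_ELECTRON_H'
+#   namespace_begin -> 'namespace Analysis {'
+#   namespace_end   -> '} //> end namespace Analysis'
+#   output files    -> 'Electron.h' and 'Electron.cxx'
+# note that only a single namespace level is split off the class name.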
+   
+def gen_pyfiles(pkg="", klass="", klass_type='pyalg', fname='foo'):
+    """Simple helper function to generate (python) files based off some
+       user informations.
+     @param pkg the name of the package holding the class we want to generate
+     @param klass the name of the python class to generate
+     @param klass_type the type of class to generate (pysvc/pytool/pyalg/pyaud)
+     @param fname the filename to generate
+    """
+    try:
+        py_template = getattr(Templates, '%s_template'%klass_type)
+    except AttributeError,err:
+        print "::: UNKNOWN klass_type [%s] !" % klass_type
+        raise err
+
+    invalid_py_chars = ( ':', '.', '>', '<', ' ' )
+    
+    if any(c in klass for c in invalid_py_chars):
+        err = "::: INVALID class name ! (%s) !\n"%klass
+        err += "::: python class names can *NOT* contain any character of %s"%\
+               repr(invalid_py_chars)
+        print err
+        raise RuntimeError(err)
+
+    fname=''.join([fname,'.py'])
+    d = dict( pkg=pkg,
+              klass=klass,
+              fname=fname
+              )
+    o = open(fname, 'w')
+    o.writelines(py_template%d)
+    o.flush()
+    o.close()
+    return 0
+
+@acmdlib.command(name='gen-klass')
+@acmdlib.argument(
+    "--klass",
+    required=True,
+    help = "The (fully qualified) name of the python or C++ class to create (ex: ElectronContainer, Analysis::Electron, MyAlgTool, PyTestAlg)")
+@acmdlib.argument(
+    "--pkg",
+    required=True,
+    help = "The name of the package holding the C++ class to create (ex: MyAnalysis, JetEvent)")
+@acmdlib.argument(
+    "--type",
+    dest = "klass_type",
+    required=True,
+    choices = GenTypes.values,
+    help = "The type of class to create")
+@acmdlib.argument(
+    "--ipkg",
+    default = None,
+    help = "The name of the package holding the interface of the C++ class (mandatory for 'svc' and 'tool' types)")
+@acmdlib.argument(
+    "--iklass",
+    default = None,
+    help = "The name of the interface the C++ class is implementing (mandatory for 'svc' and 'tool' types)")
+@acmdlib.argument(
+    "-o",
+    "--output-file",
+    required=True,
+    dest = "fname",
+    help = "The name of the file(s) which will hold header and implementation of the class (ex: 'Foo' --> ('Foo.h','Foo.cxx'))")
+def main(args):
+    """helper script to generate header and cxx files
+    of various athena components (svc/tool/alg/isvc/itool/object)
+    """
+    
+    exitcode = 0
+    
+    if args.klass_type in GenTypes.needing_iface and \
+       ( args.ipkg is None or args.iklass is None ) :
+        print ":: You have to give 'ipkg' and 'iklass' options to properly ",
+        print "generate an implementation for '%s'"%args.klass_type
+        return 3
+        
+
+    if args.ipkg is None:
+        args.ipkg = ""
+
+    if args.iklass is None:
+        args.iklass = ""
+
+    if args.klass_type.startswith('py'):
+        exitcode = gen_pyfiles(
+            klass=args.klass,
+            klass_type=args.klass_type,
+            pkg=args.pkg,
+            fname=args.fname
+            )
+    else:
+        exitcode = gen_files(
+            klass=args.klass,
+            klass_type=args.klass_type,
+            pkg=args.pkg,
+            iklass=args.iklass,
+            ipkg=args.ipkg,
+            fname=args.fname
+            )
+    return exitcode
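+
+# example invocation (assuming the command is exposed through the acmd driver
+# shipped with this package), generating a skeleton Gaudi algorithm:
+#   acmd gen-klass --pkg MyAnalysis --type alg --klass MyAlg -o MyAlg
+# which writes 'MyAlg.h' and 'MyAlg.cxx' from the alg_* templates above; for
+# --type svc or tool the --ipkg and --iklass options are mandatory as well.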
diff --git a/Tools/PyUtils/python/scripts/get_tag_diff.py b/Tools/PyUtils/python/scripts/get_tag_diff.py
new file mode 100644
index 00000000000..558bc0dcfb7
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/get_tag_diff.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.get_tag_diff
+# @purpose Get the list of tag differences between 2 releases (CERN centric)
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 276362 $"
+__doc__ = "Get the list of tag differences between 2 releases (CERN centric)."
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(
+    name='get-tag-diff'
+    )
+@acmdlib.argument(
+    'old',
+    help="The description string of the reference release (eg: 12.0.X,rel_3,AtlasOffline)"
+    )
+@acmdlib.argument(
+    'new',
+    help="The description string of the to-be-compared release (eg: 12.0.X,rel_3 or 12.0.3)"
+    )
+def main(args):
+    """Get the list of tag differences between 2 releases (CERN centric)
+    """
+
+    import PyCmt.Cmt as Cmt
+    diffs = Cmt.get_tag_diff(ref=args.old,
+                             chk=args.new,
+                             verbose=True)
+
+    if len(diffs) > 0:
+        return 1
+    return 0
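+
+# example invocation (assuming the usual acmd driver), comparing two releases:
+#   acmd get-tag-diff 17.0.X,rel_1,AtlasOffline 17.0.X,rel_2
+# the exit status is non-zero as soon as at least one tag differs, so the
+# command can be used directly as a pass/fail check in scripts.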
diff --git a/Tools/PyUtils/python/scripts/merge_files.py b/Tools/PyUtils/python/scripts/merge_files.py
new file mode 100644
index 00000000000..b87db3b1ac5
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/merge_files.py
@@ -0,0 +1,117 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.merge_files
+# @purpose take a bunch of input (pool/bs) files and produce a single one
+#          autoconfiguration is attempted
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 310812 $"
+__doc__ = "take a bunch of input (pool/bs) files and produce a single one"
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(
+    name='merge-files'
+    )
+@acmdlib.argument(
+    '-o', '--output',
+    required=True,
+    help="Name of the merged output file"
+    )
+@acmdlib.argument(
+    'files',
+    nargs='+',
+    help='path to the input (pool/bs) files'
+    )
+@acmdlib.argument(
+    '--evts',
+    type=int,
+    default=-1,
+    help="number of events to process"
+    )
+@acmdlib.argument(
+    '--logfile',
+    default='<stdout>',
+    help = "Path to a file where to put athena job's logfile"
+    )
+def main(args):
+    """take a bunch of input (pool/bs) files and produce a single one
+    """
+    exitcode = 0
+
+    import PyUtils.Logging as L
+    msg = L.logging.getLogger('merge-files')
+    msg.setLevel(L.logging.INFO)
+
+    msg.info(':'*40)
+    msg.info('welcome to merge-files version %s', __version__)
+
+    import os.path as osp
+    args.files = [ osp.expandvars(osp.expanduser(fname))
+                   for fname in args.files ]
+
+    msg.info('input files: %s', args.files)
+    msg.info('output file: %s', args.output)
+    msg.info('evts to process: %s', args.evts)
+    msg.info('log-files: %s', args.logfile)
+
+    import AthenaCommon.ChapPy as api
+    app = api.AthenaApp()
+    app << """
+    from AthenaCommon.AthenaCommonFlags import jobproperties as jp
+    acf = jp.AthenaCommonFlags
+    acf.FilesInput = %(files)s
+
+    # events to process
+    acf.EvtMax = EvtMax = theApp.EvtMax = %(evts)s
+
+    # configuration
+    import AthenaPython.ConfigLib as apcl
+    cfg = apcl.AutoCfg(
+        name='merge-files',
+        input_files=acf.FilesInput(),
+        output_file='%(output)s')
+
+    for type_name in ('evgen',
+                      'hits',
+                      'rdo',
+                      'esd',
+                      'aod',
+                      'tag',
+                      'usr',):
+        res = getattr(cfg, 'is_' + type_name)()
+        if res:
+            cfg.msg.info('input file type is ['+type_name+']')
+            break
+    else:
+        cfg.msg.info('input file stream is of unknown type')
+        cfg.msg.info('autoconfiguration might not work...')
+        pass
+        
+    cfg.configure_job()
+
+    if (cfg.is_rdo() or
+        cfg.is_esd() or
+        cfg.is_aod()):
+        # main jobos
+        include ('RecExCond/RecExCommon_flags.py')
+        # FIXME: work-around for bug #56185
+        from AthenaCommon.DetFlags import DetFlags
+        DetFlags.makeRIO.all_setOff()
+        # FIXME -- end
+        include ('RecExCommon/RecExCommon_topOptions.py')
+        
+    """ % args.__dict__
+
+    stdout = args.logfile
+    if stdout.lower() in ('<stdout>', 'stdout',):
+        stdout = None
+    else:
+        stdout = open(stdout, 'w')
+
+    exitcode = app.run(stdout=stdout)
+    return exitcode
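+
+# example invocation (assuming the usual acmd driver), merging two AOD files
+# and sending the athena log to a file instead of the terminal:
+#   acmd merge-files -o merged.aod.pool.root --evts=-1 --logfile merge.log aod1.pool.root aod2.pool.root
+# with the default --logfile '<stdout>' the athena output stays on the terminal.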
diff --git a/Tools/PyUtils/python/scripts/tc_find_pkg.py b/Tools/PyUtils/python/scripts/tc_find_pkg.py
new file mode 100644
index 00000000000..42783c403c9
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/tc_find_pkg.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.tc_find_pkg
+# @purpose find a package using TC-2
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 279982 $"
+__doc__ = "find a package using TC-2."
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='tc.find-pkg')
+@acmdlib.argument('pkg',
+                  nargs='+',
+                  help='(list of) package(s) to find in TagCollector')
+def main(args):
+    """find a package using TagCollector-2"""
+
+    import PyUtils.AmiLib as amilib
+    client = amilib.Client()
+
+    pkgs = args.pkg
+    if isinstance(pkgs, basestring):
+        pkgs = [pkgs]
+
+    for pkg in pkgs:
+        client.msg.info('looking for [%s]...', pkg)
+        pkg = client.find_pkg(pkg, check_tag=False)
+        client.msg.info(' found: pkg= [%s]', pkg['packagePath']+pkg['packageName'])
+        #client.msg.info('        tag= [%s]', tag)
+
+    return 0
+
diff --git a/Tools/PyUtils/python/scripts/tc_find_tag.py b/Tools/PyUtils/python/scripts/tc_find_tag.py
new file mode 100644
index 00000000000..db1e5ec31e2
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/tc_find_tag.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.tc_find_tag
+# @purpose Find package version taking into account project dependencies
+# @author Frank Winklmeier
+# @date November 2011
+
+__version__ = "$Revision:$"
+__doc__ = "Find package version taking into account project dependencies."
+__author__ = "Frank Winklmeier"
+
+
+### imports -------------------------------------------------------------------
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='tc.find-tag')
+@acmdlib.argument(
+    'pkg',
+    nargs='+',
+    help='(list of) package(s) to find in TagCollector')
+@acmdlib.argument(
+    '-p', '--project',
+    action='store',
+    required=True,
+    help='Project')
+@acmdlib.argument(
+    '-r', '--release',
+    action='store',
+    help='Release [default: latest]')
+
+def main(args):
+    """find package version taking into account project dependencies"""
+
+    import PyUtils.AmiLib as amilib
+    client = amilib.Client()
+
+    pkgs = args.pkg
+    if isinstance(pkgs, basestring):
+        pkgs = [pkgs]
+
+    if not args.release:
+        rel = client.get_releases(args.project)
+        if len(rel)==0:
+            raise RuntimeError('No release for project %s' % args.project)
+        args.release = rel[-1]
+
+    client.msg.info('searching package tags for [%s] in release [%s]' % (','.join(pkgs),args.release))
+    pkg_list = [client.get_version_of_pkg_with_deps(pkg, args.project, args.release) for pkg in pkgs]
+    pkg_list = sum(pkg_list,[])   # Flatten list in case more than one version per package
+    client.msg.info('Found %d package(s)' % len(pkg_list))
+    for p in pkg_list:
+        print(' '.join(p))
+
+    return 0
+
diff --git a/Tools/PyUtils/python/scripts/tc_show_clients.py b/Tools/PyUtils/python/scripts/tc_show_clients.py
new file mode 100644
index 00000000000..18b1ec4c583
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/tc_show_clients.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.tc_show_clients
+# @purpose show the clients of a package using TC-2
+# @author Sebastien Binet
+# @date May 2011
+
+__version__ = "$Revision: 538932 $"
+__doc__ = "show the clients of a package using TC-2"
+__author__ = "Sebastien Binet"
+
+
+### imports -------------------------------------------------------------------
+import os
+import PyUtils.acmdlib as acmdlib
+
+@acmdlib.command(name='tc.show-clients')
+@acmdlib.argument('pkg',
+                  nargs='+',
+                  help='(list of) package(s) to show clients of')
+@acmdlib.argument('-r', '--release',
+                  required=True,
+                  help='the release in which to show the clients (e.g: 17.0.1)')
+@acmdlib.argument('--co',
+                  action='store_true',
+                  default=False,
+                  help='enable the checkout of these clients')
+def main(args):
+    """show the clients of a package using TC-2"""
+
+    import PyUtils.AmiLib as amilib
+    client = amilib.Client()
+
+    pkgs = args.pkg
+    if isinstance(pkgs, basestring):
+        pkgs = [pkgs]
+
+    from collections import defaultdict
+    all_clients = defaultdict(list)
+    
+    for pkg in pkgs:
+        print
+        client.msg.info('showing clients of [%s]...', pkg)
+        # find the project for this pkg
+        projects = client.get_project_of_pkg(pkg, args.release)
+        pkg = client.find_pkg(pkg, check_tag=False)
+        fpkg = pkg['packagePath']+pkg['packageName']
+        if len(projects) > 1:
+            client.msg.info('pkg [%s] exists in more than 1 project: %s ==> will use last one' % (pkg['packageName'], projects))
+        elif len(projects) < 1:
+            continue
+        project = projects[-1]
+        clients = client.get_clients(project, args.release, fpkg)
+        for full_name,v,rel,grp in clients:
+            if full_name.startswith('Projects/'):
+                # remove "meta" packages
+                continue
+            v = v.strip()
+            if os.path.basename(full_name) == v:
+                # filter out container packages
+                continue
+            if '-%s-'%project not in v:
+                all_clients[full_name].append(v)
+        #client.msg.info('        tag= [%s]', tag)
+
+    _all_clients = dict(all_clients)
+    all_clients = []
+    for full_name in sorted(_all_clients.keys()):
+        v = _all_clients[full_name]
+        if len(v) > 1:
+            versions = client.get_version_of_pkg(full_name, args.release)
+            if len(versions) != 1:
+                client.msg.info('found multiple versions for package [%s]: %r',
+                                full_name, versions)
+                v = versions
+        print '%-40s' % v[0], full_name
+        all_clients.append(v[0])
+        
+    rc = 0
+    if args.co:
+        print
+        client.msg.info(":"*40)
+        client.msg.info(":: list of package to checkout:")
+        for c in all_clients:
+            print c
+        cmd = ['pkgco.py',]+all_clients
+        import subprocess
+        rc = subprocess.call(cmd)
+    return rc
+
diff --git a/Tools/PyUtils/python/scripts/tc_submit_tag.py b/Tools/PyUtils/python/scripts/tc_submit_tag.py
new file mode 100644
index 00000000000..d38a4339100
--- /dev/null
+++ b/Tools/PyUtils/python/scripts/tc_submit_tag.py
@@ -0,0 +1,314 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils.scripts.tc_submit_tag
+# @purpose Submit one or more TAGs to TagCollector.
+# @author Sebastien Binet
+# @date February 2010
+
+__version__ = "$Revision: 611656 $"
+__doc__ = "Submit one or more TAGs to TagCollector."
+__author__ = "Sebastien Binet, Frank Winklmeier"
+
+
+### imports -------------------------------------------------------------------
+import readline
+import getpass
+import os
+import os.path as osp
+
+import PyUtils.acmdlib as acmdlib
+import PyUtils.AmiLib as amilib
+import PyCmt.Cmt as cmt
+
+### functions -----------------------------------------------------------------
+
+def query_option(opt_name):
+    """query option from user and set proper history files"""
+    history_file = None
+    _allowed_values = ('project', 'release', 'justification', 'bug',)
+    if opt_name not in _allowed_values:
+        raise ValueError(
+            "'opt_name' must be in %s (got %s)" %
+            (_allowed_values, opt_name)
+            )
+    history_file = osp.expanduser('~/.tc_submit_tag.%s.history' % opt_name)
+    if osp.exists(history_file):
+        readline.read_history_file(history_file)
+
+    value = raw_input('%s: ' % opt_name)
+    if history_file:
+        readline.write_history_file(history_file)
+    readline.clear_history()
+
+    if value == '':
+        return None
+    return value
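+
+# each queried option keeps its own readline history file, e.g. the 'release'
+# prompt reads and writes '~/.tc_submit_tag.release.history', so values from
+# previous submissions can be recalled with the arrow keys.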
+
+def _get_projects(client, release, pkg):
+    """retrieve the list of projects from AMI for a given release and package
+    """
+    projects = []
+    full_pkg_name = pkg['packagePath']+pkg['packageName'] # pkg['packageTag']
+    try:
+        res = client.exec_cmd(cmd='TCGetPackageVersionHistory',
+                              fullPackageName=full_pkg_name,
+                              releaseName=release)
+        rows = res.rows()
+        if isinstance(rows, dict):
+            rows = [rows]
+        ## print "---"
+        ## print list(rows)
+        ## print "---"
+        for row in rows:
+            if row.get('releaseName')!=release: continue  # skip irrelevant releases
+            v = row.get('groupName')
+            if v not in projects:
+                projects.append(v)
+            
+        if not projects:
+            print "::: no project found for package [%s] and release [%s]" % (
+                full_pkg_name,
+                release)
+    except amilib.PyAmi.AMI_Error, err:
+        pass
+    return projects
+    
+def query_project(projects, release, pkg):
+    """query the project(s) to submit to"""
+    if len(projects)==1:
+        return projects[0]
+
+    print "::: Available projects for package [%s] and release [%s]" % (
+        pkg,
+        release)
+    for p in projects:
+        print "   %s" % (p,)
+
+    readline.clear_history()
+    for p in reversed(projects):
+        readline.add_history(p)
+
+    choice = raw_input("Select (comma separated or '*' for all): ")
+
+    if choice=='*':
+        return ','.join(projects)
+    return choice
+
+def query_release(releases, project):
+    """query the release(s) to submit to"""
+
+    if len(releases)==1:
+        return releases[0]
+
+    print "::: Available releases for %s:" % (project,)
+    for r in releases:
+        print "  %s" % (r,)
+
+    readline.clear_history()
+    for r in reversed(releases):
+        readline.add_history(r)
+
+    choice = raw_input("Select (comma separated or '*' for all): ")
+
+    if choice=='*':
+        return ','.join(releases)
+    return choice
+   
+def submit_tag(client, args, pkg, tag):
+    """Submit tag"""
+
+    cmd_args = {}
+    cmd_args['action'] = 'update'
+    cmd_args['fullPackageName'] = pkg
+    cmd_args['packageTag'] = tag
+    cmd_args['autoDetectChanges'] = 'yes'
+
+    if args.justification: cmd_args['justification'] = args.justification
+    if args.bug: cmd_args['bugReport'] = args.bug
+    if args.bundle: cmd_args['bundleName'] = args.bundle
+    if args.no_mail: cmd_args['noMail'] = ''
+
+    for i,p in enumerate(args.project):
+        cmd_args['groupName'] = p
+        cmd_args['releaseName'] = args.release[i]
+        ok = client.exec_cmd(cmd='TCSubmitTagApproval', args=cmd_args)
+        if ok:
+            print "%s %s submitted to %s %s" % (pkg,tag,p,args.release[i])
+
+@acmdlib.command(name='tc.submit-tag')
+@acmdlib.argument(
+    '-p', '--project',
+    action='store',
+    help='(comma separated list of) project(s) to submit tags to')
+@acmdlib.argument(
+    '-r', '--release',
+    action='store',
+    help='(comma separated list of) release(s) to submit tags to')
+@acmdlib.argument(
+    '-j', '-m', '--justification',
+    action='store',
+    help='justification for tag request')
+@acmdlib.argument(
+    '-s', '--savannah', '--bug',
+    dest='bug',
+    action='store',
+    metavar='BUG',
+    help='bug report number')
+@acmdlib.argument(
+    '-b','--bundle',
+    action='store',
+    help="bundle name (stays incomplete)")
+@acmdlib.argument(
+    '-n', '--no-mail',
+    action='store_true',
+    default=False,
+    help="do not send confirmation email")
+@acmdlib.argument(
+    '--dry-run',
+    action='store_true',
+    default=False,
+    help='switch to simulate the commands but not actually send the requests'
+    )
+@acmdlib.argument(
+    'pkgs',
+    nargs='+',
+    metavar='TAG',
+    help="""\
+    (list of package) tags to submit or a file containing that list""")
+def main(args):
+    """submit one or more package tags to TagCollector
+
+    TAG can be one of the following formats:
+      Container/Package-00-01-02
+      Package-00-01-02
+      Package --> will use latest package tag
+
+    All submitted tags need approval via the TagCollector web interface.
+    If several TAGs are given they will be submitted to the same release(s)
+    with the same justification, etc. Optionally a bundle name can be specified.
+    If no release number is given a list of available releases is presented.
+
+    For any required argument that is not specified on the command line,
+    an interactive query is presented. Some text fields support history (arrow keys).
+
+    Authentication is handled via pyAMI (see https://atlas-ami.cern.ch/AMI/pyAMI)
+    """
+
+    import PyUtils.AmiLib as amilib
+    client = amilib.Client()
+
+    def select_tag():
+        value = raw_input('Please select (q to quit): ')
+        if value.lower() == 'q':
+            raise StopIteration
+        return int(value)
+    
+    # create a list of (pkg,tag) with full package path
+    pkgs = []
+
+    for pkg in args.pkgs:
+        # a file ?
+        if osp.exists(pkg):
+            fname = pkg
+            print "::: taking tags from file [%s]..." % (fname,)
+            for l in open(fname, 'r'):
+                l = l.strip()
+                if l:
+                    print " - [%s]" % (l,)
+                    pkgs.append(l)
+        else:
+            pkgs.append(pkg)
+
+    pkg_list = [client.find_pkg(pkg, cbk_fct=select_tag, check_tag=False) for pkg in pkgs]
+
+    # setup history
+    readline.set_history_length(10)
+
+    # query release if project is known
+    if args.project and not args.release:
+        for p in args.project.split(','):
+            rel = client.get_open_releases(p)
+            if len(rel)==0:
+                continue
+            if not args.release:
+                args.release = query_release(rel, p)
+            else:
+                args.release += (',%s' % query_release(rel, p))
+    if args.release and len(args.release.split(',')) == 1:
+        _release = args.release.split(',')[0]
+        args.release = ','.join([_release]*len(pkg_list))
+        # adjust the project list too
+        if args.project and len(args.project.split(',')) == 1:
+            args.project = ','.join([args.project.split(',')[0]]*len(pkg_list))
+            
+    # query project if release is known
+    if args.release and not args.project:
+        _releases = args.release.split(',')
+        _projects = []
+        rel = _releases[0]
+        for pkg in pkg_list:
+            proj = _get_projects(client, rel, pkg)
+            if len(proj)==0:
+                _projects.append(None)
+                continue
+            v = query_project(proj, rel, pkg)
+            _projects.append(v)
+            pass # pkgs
+        if not args.project:
+            args.project = ','.join(_projects)
+        else:
+            args.project += ','+','.join(_projects)
+        pass
+
+    # Find latest tag if needed
+    print '-'*80
+    for p in pkg_list:
+        if not 'packageTag' in p:
+            pkg = (p['packagePath']+p['packageName']).strip('/') # CMTise path
+            p['packageTag'] = cmt.CmtWrapper().get_latest_pkg_tag(pkg)
+            print 'Using latest tag %s' % (p['packageTag'])
+
+    # query for missing options    
+    for o in ('project', 'release', 'justification', 'bug',):
+        value = getattr(args, o)
+        if value:
+            print '%s : %s' % (o, value)
+        else:
+            setattr(args, o, query_option(o))
+    print '-'*80
+
+    args.project = args.project.split(',')
+    args.release = args.release.split(',')
+    if len(args.project) != len(args.release):
+        raise RuntimeError(
+            'Number of projects %s and releases %s do not match' %
+            (args.project, args.release)
+            )
+
+    # If only one tag given, submit this tag to all releases
+    if len(pkg_list)==1: pkg_list = pkg_list*len(args.release)
+                    
+    choice = raw_input("Submit tag? [Y/n] ")      
+    ok = len(choice)==0 or choice.upper()=="Y"
+
+    if args.dry_run:
+        client.dry_run = args.dry_run
+
+    releases = args.release[:]
+    projects = args.project[:]
+
+    exitcode = 0
+    if ok:
+        # Submit tag request
+        for p,rel,proj in zip(pkg_list, releases, projects):
+            args.release = [rel]
+            args.project = [proj]
+            submit_tag(client, args,
+                       p['packagePath']+p['packageName'],p['packageTag'])
+    else:
+        print "Tag submission aborted"
+        exitcode = 1
+        
+    return exitcode
+
diff --git a/Tools/PyUtils/python/smem.py b/Tools/PyUtils/python/smem.py
new file mode 100644
index 00000000000..dc68ce8d8f9
--- /dev/null
+++ b/Tools/PyUtils/python/smem.py
@@ -0,0 +1,641 @@
+#!/usr/bin/env python
+#
+# smem - a tool for meaningful memory reporting
+#
+# Copyright 2008-2009 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of
+# the GNU General Public License version 2 or later, incorporated
+# herein by reference.
+
+import re, os, sys, pwd, grp, optparse, errno, tarfile
+
+class procdata(object):
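+    """read per-process memory data from a /proc tree; `source` is an
+    optional path prefix prepended to '/proc' (empty for the live system)."""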
+    def __init__(self, source):
+        self._ucache = {}
+        self._gcache = {}
+        self.source = source and source or ""
+    def _list(self):
+        return os.listdir(self.source + "/proc")
+    def _read(self, f):
+        return file(self.source + '/proc/' + f).read()
+    def _readlines(self, f):
+        return self._read(f).splitlines(True)
+    def _stat(self, f):
+        return os.stat(self.source + "/proc/" + f)
+
+    def pids(self):
+        '''get a list of processes'''
+        return [int(e) for e in self._list()
+                if e.isdigit() and not iskernel(e)]
+    def mapdata(self, pid):
+        return self._readlines('%s/smaps' % pid)
+    def memdata(self):
+        return self._readlines('meminfo')
+    def version(self):
+        return self._readlines('version')[0]
+    def pidname(self, pid):
+        l = self._read('%d/stat' % pid)
+        return l[l.find('(') + 1: l.find(')')]
+    def pidcmd(self, pid):
+        c = self._read('%s/cmdline' % pid)[:-1]
+        return c.replace('\0', ' ')
+    def piduser(self, pid):
+        return self._stat('%d/cmdline' % pid).st_uid
+    def pidgroup(self, pid):
+        return self._stat('%d/cmdline' % pid).st_gid
+    def username(self, uid):
+        if uid not in self._ucache:
+            self._ucache[uid] = pwd.getpwuid(uid)[0]
+        return self._ucache[uid]
+    def groupname(self, gid):
+        if gid not in self._gcache:
+            self._gcache[gid] = grp.getgrgid(gid)[0]
+        return self._gcache[gid]
+
+class tardata(procdata):
+    def __init__(self, source):
+        procdata.__init__(self, source)
+        self.tar = tarfile.open(source)
+    def _list(self):
+        for ti in self.tar:
+            if ti.name.endswith('/smaps'):
+                d,f = ti.name.split('/')
+                yield d
+    def _read(self, f):
+        return self.tar.extractfile(f).read()
+    def _readlines(self, f):
+        return self.tar.extractfile(f).readlines()
+    def piduser(self, p):
+        t = self.tar.getmember("%s/cmdline" % p)
+        if t.uname:
+            self._ucache[t.uid] = t.uname
+        return t.uid
+    def pidgroup(self, p):
+        t = self.tar.getmember("%s/cmdline" % p)
+        if t.gname:
+            self._gcache[t.gid] = t.gname
+        return t.gid
+    def username(self, u):
+        return self._ucache.get(u, str(u))
+    def groupname(self, g):
+        return self._gcache.get(g, str(g))
+
+_totalmem = 0
+def totalmem():
+    global _totalmem
+    if not _totalmem:
+        if options.realmem:
+            _totalmem = fromunits(options.realmem) / 1024
+        else:
+            _totalmem = memory()['memtotal']
+    return _totalmem
+
+_kernelsize = 0
+def kernelsize():
+    global _kernelsize
+    if not _kernelsize and options.kernel:
+        try:
+            d = os.popen("size %s" % options.kernel).readlines()[1]
+            _kernelsize = int(d.split()[3]) / 1024
+        except:
+            pass
+    return _kernelsize
+
+def pidmaps(pid):
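+    # Parse /proc/<pid>/smaps: each mapping starts with an address-range line
+    # ("start-end perms offset dev inode [name]") followed by "Key: N kB"
+    # detail lines; returns {start_address: {details...}}.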
+    maps = {}
+    start = None
+    for l in src.mapdata(pid):
+        f = l.split()
+        if f[-1] == 'kB':
+            maps[start][f[0][:-1].lower()] = int(f[1])
+        else:
+            start, end = f[0].split('-')
+            start = int(start, 16)
+            name = "<anonymous>"
+            if len(f) > 5:
+                name = f[5]
+            maps[start] = dict(end=int(end, 16), mode=f[1],
+                               offset=int(f[2], 16),
+                               device=f[3], inode=f[4], name=name)
+
+    if options.mapfilter:
+        f = {}
+        for m in maps:
+            if not filter(options.mapfilter, m, lambda x: maps[x]['name']):
+                f[m] = maps[m]
+        return f
+
+    return maps
+
+def sortmaps(totals, key):
+    l = []
+    for pid in totals:
+        l.append((totals[pid][key], pid))
+    l.sort()
+    return [pid for pid,key in l]
+
+def iskernel(pid):
+    return src.pidcmd(pid) == ""
+
+def memory():
+    t = {}
+    f = re.compile('(\\S+):\\s+(\\d+) kB')
+    for l in src.memdata():
+        m = f.match(l)
+        if m:
+            t[m.group(1).lower()] = int(m.group(2))
+    return t
+
+def units(x):
+    s = ''
+    if x == 0:
+        return '0'
+    for s in ('', 'K', 'M', 'G'):
+        if x < 1024:
+            break
+        x /= 1024.0
+    return "%.1f%s" % (x, s)
+
+def fromunits(x):
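+    # parse a human-readable size such as '512M' or '2GB' into bytes;
+    # returns None if the suffix is not recognised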
+    s = dict(k=2**10, K=2**10, kB=2**10, KB=2**10,
+             M=2**20, MB=2**20, G=2**30, GB=2**30)
+    for k,v in s.items():
+        if x.endswith(k):
+            return int(float(x[:-len(k)])*v)
+
+def pidusername(pid):
+    return src.username(src.piduser(pid))
+
+def showamount(a):
+    if options.abbreviate:
+        return units(a * 1024)
+    elif options.percent:
+        return "%.2f%%" % (100.0 * a / totalmem())
+    return a
+
+def filter(opt, arg, *sources):
+    if not opt:
+        return False
+
+    for f in sources:
+        if re.search(opt, f(arg)):
+            return False
+    return True
+
+def pidtotals(pid):
+    maps = pidmaps(pid)
+    t = dict(size=0, rss=0, pss=0, shared_clean=0, shared_dirty=0,
+             private_clean=0, private_dirty=0, referenced=0, swap=0)
+    for m in maps.iterkeys():
+        for k in t:
+            t[k] += maps[m].get(k, 0)
+
+    t['uss'] = t['private_clean'] + t['private_dirty']
+    t['maps'] = len(maps)
+
+    return t
+
+def processtotals(pids):
+    totals = {}
+    for pid in pids:
+        if (filter(options.processfilter, pid, src.pidname, src.pidcmd) or
+                filter(options.userfilter, pid, pidusername)):
+            continue
+        try:
+            p = pidtotals(pid)
+            if p['maps'] != 0:
+                totals[pid] = p
+        except:
+            continue
+    return totals
+
+def showpids():
+    p = src.pids()
+    pt = processtotals(p)
+
+    def showuser(p):
+        if options.numeric:
+            return src.piduser(p)
+        return pidusername(p)
+
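+    # each entry: (header, value accessor, printf format, totals function,
+    # description); see showtable() for how the 'a' format marker is handled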
+    fields = dict(
+        pid=('PID', lambda n: n, '% 5s', lambda x: len(p),
+             'process ID'),
+        user=('User', showuser, '%-8s', lambda x: len(dict.fromkeys(x)),
+              'owner of process'),
+        name=('Name', src.pidname, '%-24.24s', None,
+              'name of process'),
+        command=('Command', src.pidcmd, '%-27.27s', None,
+                 'process command line'),
+        maps=('Maps',lambda n: pt[n]['maps'], '% 5s', sum,
+              'total number of mappings'),
+        swap=('Swap',lambda n: pt[n]['swap'], '% 8a', sum,
+              'amount of swap space consumed (ignoring sharing)'),
+        uss=('USS', lambda n: pt[n]['uss'], '% 8a', sum,
+             'unique set size'),
+        rss=('RSS', lambda n: pt[n]['rss'], '% 8a', sum,
+             'resident set size (ignoring sharing)'),
+        pss=('PSS', lambda n: pt[n]['pss'], '% 8a', sum,
+             'proportional set size (including sharing)'),
+        vss=('VSS', lambda n: pt[n]['size'], '% 8a', sum,
+             'virtual set size (total virtual memory mapped)'),
+        )
+    columns = options.columns or 'pid user command swap uss pss rss'
+
+    showtable(pt.keys(), fields, columns.split(), options.sort or 'pss')
+
+def maptotals(pids):
+    totals = {}
+    for pid in pids:
+        if (filter(options.processfilter, pid, src.pidname, src.pidcmd) or
+                filter(options.userfilter, pid, pidusername)):
+            continue
+        try:
+            maps = pidmaps(pid)
+            seen = {}
+            for m in maps.iterkeys():
+                name = maps[m]['name']
+                if name not in totals:
+                    t = dict(size=0, rss=0, pss=0, shared_clean=0,
+                             shared_dirty=0, private_clean=0, count=0,
+                             private_dirty=0, referenced=0, swap=0, pids=0)
+                else:
+                    t = totals[name]
+
+                for k in t:
+                    t[k] += maps[m].get(k, 0)
+                t['count'] += 1
+                if name not in seen:
+                    t['pids'] += 1
+                    seen[name] = 1
+                totals[name] = t
+        except:
+            raise
+    return totals
+
+def showmaps():
+    p = src.pids()
+    pt = maptotals(p)
+
+    fields = dict(
+        map=('Map', lambda n: n, '%-40.40s', len,
+             'mapping name'),
+        count=('Count', lambda n: pt[n]['count'], '% 5s', sum,
+               'number of mappings found'),
+        pids=('PIDs', lambda n: pt[n]['pids'], '% 5s', sum,
+              'number of PIDs using mapping'),
+        swap=('Swap',lambda n: pt[n]['swap'], '% 8a', sum,
+              'amount of swap space consumed (ignoring sharing)'),
+        uss=('USS', lambda n: pt[n]['private_clean']
+             + pt[n]['private_dirty'], '% 8a', sum,
+             'unique set size'),
+        rss=('RSS', lambda n: pt[n]['rss'], '% 8a', sum,
+             'resident set size (ignoring sharing)'),
+        pss=('PSS', lambda n: pt[n]['pss'], '% 8a', sum,
+             'proportional set size (including sharing)'),
+        vss=('VSS', lambda n: pt[n]['size'], '% 8a', sum,
+             'virtual set size (total virtual address space mapped)'),
+        avgpss=('AVGPSS', lambda n: int(1.0 * pt[n]['pss']/pt[n]['pids']),
+                '% 8a', sum,
+                'average PSS per PID'),
+        avguss=('AVGUSS', lambda n: int(1.0 * (pt[n]['private_clean'] +
+                pt[n]['private_dirty'])/pt[n]['pids']),
+                '% 8a', sum,
+                'average USS per PID'),
+        avgrss=('AVGRSS', lambda n: int(1.0 * pt[n]['rss']/pt[n]['pids']),
+                '% 8a', sum,
+                'average RSS per PID'),
+        )
+    columns = options.columns or 'map pids avgpss pss'
+
+    showtable(pt.keys(), fields, columns.split(), options.sort or 'pss')
+
+def usertotals(pids):
+    totals = {}
+    for pid in pids:
+        if (filter(options.processfilter, pid, src.pidname, src.pidcmd) or
+                filter(options.userfilter, pid, pidusername)):
+            continue
+        try:
+            maps = pidmaps(pid)
+            if len(maps) == 0:
+                continue
+        except:
+            raise
+        user = src.piduser(pid)
+        if user not in totals:
+            t = dict(size=0, rss=0, pss=0, shared_clean=0,
+                     shared_dirty=0, private_clean=0, count=0,
+                     private_dirty=0, referenced=0, swap=0)
+        else:
+            t = totals[user]
+
+        for m in maps.iterkeys():
+            for k in t:
+                t[k] += maps[m].get(k, 0)
+
+        t['count'] += 1
+        totals[user] = t
+    return totals
+
+def showusers():
+    p = src.pids()
+    pt = usertotals(p)
+
+    def showuser(u):
+        if options.numeric:
+            return u
+        return src.username(u)
+
+    fields = dict(
+        user=('User', showuser, '%-8s', None,
+              'user name or ID'),
+        count=('Count', lambda n: pt[n]['count'], '% 5s', sum,
+               'number of processes'),
+        swap=('Swap',lambda n: pt[n]['swap'], '% 8a', sum,
+              'amount of swap space consumed (ignoring sharing)'),
+        uss=('USS', lambda n: pt[n]['private_clean']
+             + pt[n]['private_dirty'], '% 8a', sum,
+             'unique set size'),
+        rss=('RSS', lambda n: pt[n]['rss'], '% 8a', sum,
+             'resident set size (ignoring sharing)'),
+        pss=('PSS', lambda n: pt[n]['pss'], '% 8a', sum,
+             'proportional set size (including sharing)'),
+        vss=('VSS', lambda n: pt[n]['size'], '% 8a', sum,
+             'virtual set size (total virtual memory mapped)'),
+        )
+    columns = options.columns or 'user count swap uss pss rss'
+
+    showtable(pt.keys(), fields, columns.split(), options.sort or 'pss')
+
+def showsystem():
+    t = totalmem()
+    ki = kernelsize()
+    m = memory()
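+    # all quantities below are in kB (totalmem(), kernelsize() and the
+    # /proc/meminfo fields parsed by memory() all report kB)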
+
+    mt = m['memtotal']
+    f = m['memfree']
+
+    # total amount used by hardware
+    fh = max(t - mt - ki, 0)
+
+    # total amount mapped into userspace (i.e. anonymous plus file-mapped pages)
+    u = m['anonpages'] + m['mapped']
+
+    # total amount allocated by kernel not for userspace
+    kd = mt - f - u
+
+    # total amount in kernel caches
+    kdc = m['buffers'] + m['sreclaimable'] + (m['cached'] - m['mapped'])
+
+    l = [("firmware/hardware", fh, 0),
+         ("kernel image", ki, 0),
+         ("kernel dynamic memory", kd, kdc),
+         ("userspace memory", u, m['mapped']),
+         ("free memory", f, f)]
+
+    fields = dict(
+        order=('Order', lambda n: n, '% 1s', lambda x: '',
+             'hierarchical order'),
+        area=('Area', lambda n: l[n][0], '%-24s', lambda x: '',
+             'memory area'),
+        used=('Used', lambda n: l[n][1], '%10a', sum,
+              'area in use'),
+        cache=('Cache', lambda n: l[n][2], '%10a', sum,
+              'area used as reclaimable cache'),
+        noncache=('Noncache', lambda n: l[n][1] - l[n][2], '%10a', sum,
+              'area not reclaimable'))
+
+    columns = options.columns or 'area used cache noncache'
+    showtable(range(len(l)), fields, columns.split(), options.sort or 'order')
+
+def showfields(fields, f):
+    if f != list:
+        print "unknown field", f
+    print "known fields:"
+    for l in sorted(fields.keys()):
+        print "%-8s %s" % (l, fields[l][-1])
+
+def showtable(rows, fields, columns, sort):
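+    # print `rows` using the column definitions in `fields`, sorted on `sort`;
+    # an 'a' in a column's format string means the value is an amount and is
+    # rendered through showamount(); --pie/--bar divert to chart output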
+    header = ""
+    format = ""
+    formatter = []
+
+    if sort not in fields:
+            showfields(fields, sort)
+            sys.exit(-1)
+
+    if options.pie:
+        columns.append(options.pie)
+    if options.bar:
+        columns.append(options.bar)
+
+    for n in columns:
+        if n not in fields:
+            showfields(fields, n)
+            sys.exit(-1)
+
+        f = fields[n][2]
+        if 'a' in f:
+            formatter.append(showamount)
+            f = f.replace('a', 's')
+        else:
+            formatter.append(lambda x: x)
+        format += f + " "
+        header += f % fields[n][0] + " "
+
+    l = []
+    for n in rows:
+        r = [fields[c][1](n) for c in columns]
+        l.append((fields[sort][1](n), r))
+
+    l.sort(reverse=bool(options.reverse))
+
+    if options.pie:
+        showpie(l, sort)
+        return
+    elif options.bar:
+        showbar(l, columns, sort)
+        return
+
+    if not options.no_header:
+        print header
+
+    for k,r in l:
+        print format % tuple([f(v) for f,v in zip(formatter, r)])
+
+    if options.totals:
+        # totals
+        t = []
+        for c in columns:
+            f = fields[c][3]
+            if f:
+                t.append(f([fields[c][1](n) for n in rows]))
+            else:
+                t.append("")
+
+        print "-" * len(header)
+        print format % tuple([f(v) for f,v in zip(formatter, t)])
+
+def showpie(l, sort):
+    try:
+        import pylab
+    except ImportError:
+        sys.stderr.write("pie chart requires matplotlib\n")
+        sys.exit(-1)
+
+    if (l[0][0] < l[-1][0]):
+        l.reverse()
+
+    labels = [r[1][-1] for r in l]
+    values = [r[0] for r in l] # sort field
+
+    tm = totalmem()
+    s = sum(values)
+    unused = tm - s
+    t = 0
+    c = 0
+    while values and (t + values[-1 - c] < (tm * .02) or
+                      values[-1 - c] < (tm * .005)):
+        c += 1
+        t += values.pop()
+        labels.pop()
+    if c > 1:
+        values = values[:-c]
+        labels = labels[:-c]
+        values.append(t)
+        labels.append('other')
+
+    explode = [0] * len(values)
+    if unused > 0:
+        values.insert(0, unused)
+        labels.insert(0, 'unused')
+        explode.insert(0, .05)
+
+    pylab.figure(1, figsize=(6,6))
+    ax = pylab.axes([0.1, 0.1, 0.8, 0.8])
+    pylab.pie(values, explode = explode, labels=labels,
+              autopct="%.2f%%", shadow=True)
+    pylab.title('%s by %s' % (options.pie, sort))
+    pylab.show()
+
+def showbar(l, columns, sort):
+    try:
+        import pylab, numpy
+    except ImportError:
+        sys.stderr.write("bar chart requires matplotlib\n")
+        sys.exit(-1)
+
+    if (l[0][0] < l[-1][0]):
+        l.reverse()
+
+    rc = []
+    key = []
+    for n in range(len(columns) - 1):
+        try:
+            if columns[n] in 'pid user group'.split():
+                continue
+            float(l[0][1][n])
+            rc.append(n)
+            key.append(columns[n])
+        except:
+            pass
+
+    width = 1.0 / (len(rc) + 1)
+    offset = width / 2
+
+    def gc(n):
+        return 'bgrcmyw'[n % 7]
+
+    pl = []
+    ind = numpy.arange(len(l))
+    for n in xrange(len(rc)):
+        pl.append(pylab.bar(ind + offset + width * n,
+                             [x[1][rc[n]] for x in l], width, color=gc(n)))
+
+    #plt.xticks(ind + .5, )
+    pylab.gca().set_xticks(ind + .5)
+    pylab.gca().set_xticklabels([x[1][-1] for x in l], rotation=45)
+    pylab.legend([p[0] for p in pl], key)
+    pylab.show()
+
+def kernel_version_check():
+    kernel_release = src.version().split()[2].split('-')[0]
+    if kernel_release < "2.6.27":
+        name = os.path.basename(sys.argv[0])
+        sys.stderr.write(name + " requires a kernel >= 2.6.27\n")
+        sys.exit(-1)
+
+parser = optparse.OptionParser("%prog [options]")
+parser.add_option("-H", "--no-header", action="store_true",
+                  help="disable header line")
+parser.add_option("-c", "--columns", type="str",
+                  help="columns to show")
+parser.add_option("-t", "--totals", action="store_true",
+                  help="show totals")
+
+parser.add_option("-R", "--realmem", type="str",
+                  help="amount of physical RAM")
+parser.add_option("-K", "--kernel", type="str",
+                  help="path to kernel image")
+
+parser.add_option("-m", "--mappings", action="store_true",
+                  help="show mappings")
+parser.add_option("-u", "--users", action="store_true",
+                  help="show users")
+parser.add_option("-w", "--system", action="store_true",
+                  help="show whole system")
+
+parser.add_option("-P", "--processfilter", type="str",
+                  help="process filter regex")
+parser.add_option("-M", "--mapfilter", type="str",
+                  help="map filter regex")
+parser.add_option("-U", "--userfilter", type="str",
+                  help="user filter regex")
+
+parser.add_option("-n", "--numeric", action="store_true",
+                  help="numeric output")
+parser.add_option("-s", "--sort", type="str",
+                  help="field to sort on")
+parser.add_option("-r", "--reverse", action="store_true",
+                  help="reverse sort")
+
+parser.add_option("-p", "--percent", action="store_true",
+                  help="show percentage")
+parser.add_option("-k", "--abbreviate", action="store_true",
+                  help="show unit suffixes")
+
+parser.add_option("", "--pie", type='str',
+                  help="show pie graph")
+parser.add_option("", "--bar", type='str',
+                  help="show bar graph")
+
+parser.add_option("-S", "--source", type="str",
+                  help="/proc data source")
+
+
+defaults = {}
+parser.set_defaults(**defaults)
+(options, args) = parser.parse_args()
+
+try:
+    src = tardata(options.source)
+except:
+    src = procdata(options.source)
+
+kernel_version_check()
+
+try:
+    if options.mappings:
+        showmaps()
+    elif options.users:
+        showusers()
+    elif options.system:
+        showsystem()
+    else:
+        showpids()
+except IOError, e:
+    if e.errno == errno.EPIPE:
+        pass
+except KeyboardInterrupt:
+    pass
diff --git a/Tools/PyUtils/python/xmldict.py b/Tools/PyUtils/python/xmldict.py
new file mode 100644
index 00000000000..298f0756da4
--- /dev/null
+++ b/Tools/PyUtils/python/xmldict.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
+
+# @file PyUtils/python/xmldict.py
+# @purpose converts an XML file into a python dict, back and forth
+# @author http://code.activestate.com/recipes/573463
+#         slightly adapted to follow PEP8 conventions
+
+__version__ = "$Revision$"
+__doc__ = """\
+functions to convert an XML file into a python dict, back and forth
+"""
+__author__ = "Sebastien Binet <binet@cern.ch>"
+
+
+# hack: LCGCMT had the py-2.5 xml.etree module hidden by mistake.
+#       this is to import it, by hook or by crook
+def import_etree():
+    import xml
+    # first try the usual way
+    try:
+        import xml.etree
+        return xml.etree
+    except ImportError:
+        pass
+    # do it by hook or by crook...
+    import sys, os, imp
+    xml_site_package = os.path.join(os.path.dirname(os.__file__), 'xml')
+    m = imp.find_module('etree', [xml_site_package])
+
+    etree = imp.load_module('xml.etree', *m)
+    setattr(xml, 'etree', etree)
+    return etree
+
+etree = import_etree()
+from xml.etree import ElementTree
+
+from xml.sax.saxutils import escape as _xml_escape
+from xml.sax.saxutils import unescape as _xml_unescape
+
+## module data ----------------------------------------------------------------
+__all__ = [
+    'xml2dict',
+    'dict2xml',
+    ]
+
+## module implementation ------------------------------------------------------
+class XmlDictObject(dict):
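+    """dict subclass allowing attribute-style access to keys; the special
+    key '_text' carries an element's text content."""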
+    def __init__(self, initdict=None):
+        if initdict is None:
+            initdict = {}
+        dict.__init__(self, initdict)
+    
+    def __getattr__(self, item):
+        return self.__getitem__(item)
+    
+    def __setattr__(self, item, value):
+        self.__setitem__(item, value)
+    
+    ## def __getitem__(self, item):
+    ##     o = dict.__getitem__(self, item)
+    ##     if isinstance(o, basestring):
+    ##         return _xml_unescape(o)
+    ##     return o
+
+    ## def __setitem__(self, item, value):
+    ##     if isinstance(value, basestring):
+    ##         value = _xml_unescape(value)
+    ##     dict.__setitem__(self, item, value)
+        
+    def __str__(self):
+        if '_text' in self:
+            return self['_text']
+        else:
+            return dict.__str__(self)
+
+    @staticmethod
+    def wrap(x):
+        if isinstance(x, dict):
+            return XmlDictObject ((k, XmlDictObject.wrap(v))
+                                  for (k, v) in x.iteritems())
+        elif isinstance(x, list):
+            return [XmlDictObject.wrap(v) for v in x]
+        else:
+            return x
+
+    @staticmethod
+    def _unwrap(x):
+        if isinstance(x, dict):
+            return dict ((k, XmlDictObject._unwrap(v))
+                         for (k, v) in x.iteritems())
+        elif isinstance(x, list):
+            return [XmlDictObject._unwrap(v) for v in x]
+        else:
+            return x
+        
+    def unwrap(self):
+        return XmlDictObject._unwrap(self)
+
+def _dict2xml_recurse(parent, dictitem):
+    assert not isinstance(dictitem, list)
+
+    if isinstance(dictitem, dict):
+        for (tag, child) in dictitem.iteritems():
+            if isinstance(child, basestring):
+                child = _xml_escape(child)
+            if str(tag) == '_text':
+                parent.text = str(child)
+            elif isinstance(child, list):
+                for listchild in child:
+                    elem = ElementTree.Element(tag)
+                    parent.append(elem)
+                    _dict2xml_recurse (elem, listchild)
+            else:                
+                elem = ElementTree.Element(tag)
+                parent.append(elem)
+                _dict2xml_recurse (elem, child)
+    else:
+        parent.text = str(dictitem)
+    
+def dict2xml(xmldict):
+    """convert a python dictionary into an XML tree"""
+    roottag = xmldict.keys()[0]
+    root = ElementTree.Element(roottag)
+    _dict2xml_recurse (root, xmldict[roottag])
+    return root
+
+def _xml2dict_recurse (node, dictclass):
+    nodedict = dictclass()
+    
+    if len(node.items()) > 0:
+        # if we have attributes, set them
+        nodedict.update(dict((k, _xml_unescape(v) if isinstance(v, basestring) else v)
+                             for k,v in node.items()))
+    
+    for child in node:
+        # recursively add the element's children
+        newitem = _xml2dict_recurse (child, dictclass)
+        if isinstance(newitem, basestring):
+            newitem = _xml_unescape(newitem)
+        if nodedict.has_key(child.tag):
+            # found duplicate tag, force a list
+            if isinstance(nodedict[child.tag], list):
+                # append to existing list
+                nodedict[child.tag].append(newitem)
+            else:
+                # convert to list
+                nodedict[child.tag] = [nodedict[child.tag], newitem]
+        else:
+            # only one, directly set the dictionary
+            nodedict[child.tag] = newitem
+
+    if node.text is None: 
+        text = ''
+    else: 
+        text = node.text.strip()
+    
+    if len(nodedict) > 0:            
+        # if we have a dictionary add the text as a dictionary value
+        # (if there is any)
+        if len(text) > 0:
+            nodedict['_text'] = text
+    else:
+        # if we don't have child nodes or attributes, just set the text
+        if node.text: nodedict = node.text.strip()
+        else:         nodedict = ""
+        
+    return nodedict
+        
+def xml2dict (root, dictclass=XmlDictObject):
+    """convert an xml tree into a python dictionary
+    """
+    return dictclass({root.tag: _xml2dict_recurse (root, dictclass)})
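+
+# Illustrative use (input is a made-up snippet; repeated tags become a list):
+#   >>> from xml.etree import ElementTree
+#   >>> root = ElementTree.fromstring('<cfg><n v="1"/><n v="2"/></cfg>')
+#   >>> xml2dict(root)['cfg']['n']
+#   [{'v': '1'}, {'v': '2'}]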
diff --git a/Tools/PyUtils/test/PyUtils.xml b/Tools/PyUtils/test/PyUtils.xml
new file mode 100755
index 00000000000..77ef09804d8
--- /dev/null
+++ b/Tools/PyUtils/test/PyUtils.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<atn>
+   <TEST name="pyutils.athfile" type="script" suite="pyutils">
+      <package_atn>Tools/PyUtils</package_atn>
+      <!-- <options_atn>python -m PyUtils.AthFile.tests</options_atn> -->
+      <options_atn>python -c 'print "OK"'</options_atn>
+      <timelimit>30</timelimit>
+      <author> Sebastien Binet </author>
+      <mailto> binet@cern.ch </mailto>
+      <expectations>
+         <successMessage>OK</successMessage>
+         <returnValue>0</returnValue>
+      </expectations>
+   </TEST>
+
+   <TEST name="pyutils.rootutils" type="script" suite="pyutils">
+      <package_atn>Tools/PyUtils</package_atn>
+      <options_atn>python -ttc 'import PyUtils.RootUtils as ru; ru._test_main()'</options_atn>
+      <timelimit>30</timelimit>
+      <author> Sebastien Binet </author>
+      <mailto> binet@cern.ch </mailto>
+      <expectations>
+         <successMessage>OK</successMessage>
+         <returnValue>0</returnValue>
+      </expectations>
+   </TEST>
+
+</atn>
-- 
GitLab