diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..55890e24a2c4a66d470265d9f3b89389726d2f35
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,35 @@
+variables:
+  TARGET_BRANCH: master
+
+stages:
+  - check
+  - test
+
+default:
+  image: cern/cc7-base:latest
+  tags:
+    - cvmfs
+  before_script:
+    - . /cvmfs/lhcb.cern.ch/lib/LbEnv.sh
+
+check-formatting:
+  stage: check
+  script:
+    - . /cvmfs/lhcb.cern.ch/lib/LbEnv.sh
+    - curl -o lb-format "https://gitlab.cern.ch/lhcb-core/LbDevTools/raw/master/LbDevTools/SourceTools.py?inline=false"
+    - python lb-format --format-patch apply-formatting.patch origin/${TARGET_BRANCH}
+  artifacts:
+    paths:
+      - apply-formatting.patch
+    when: on_failure
+    expire_in: 1 week
+
+# test-python2:
+#   stage: test
+#   script:
+#     - python2 -m compileall -q .
+
+test-python3:
+  stage: test
+  script:
+    - python3 -m compileall -q .
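
The `check-formatting` job above only reports violations; the actual fix is exported as the `apply-formatting.patch` artifact. A minimal sketch of picking that artifact up locally, assuming it has already been downloaded (via the GitLab UI or API) into the repository root:

```python
import subprocess

# Apply the formatting patch produced by a failed check-formatting job.
# Assumes apply-formatting.patch sits in the repository root.
subprocess.run(["git", "apply", "apply-formatting.patch"], check=True)
```
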
diff --git a/collectRunResults.py b/collectRunResults.py
index 74dde0606cf8650513ebad4d69ac328e906bc79c..6721af99909612f740490f6120bd5c5ddee1f3c0 100755
--- a/collectRunResults.py
+++ b/collectRunResults.py
@@ -12,15 +12,16 @@ import ntpath
 import sendToDB
 import argparse
 import urllib.request, urllib.error, urllib.parse
-from datetime import (timedelta, datetime, tzinfo)
+from datetime import timedelta, datetime, tzinfo
 import requests
 
+
 def send_notification_mattermost(webhook, message):
-    payload={"text": message}
+    payload = {"text": message}
     r = requests.post(webhook, json=payload)
 
-class FixedOffset(tzinfo):
 
+class FixedOffset(tzinfo):
     """Fixed offset in minutes: `time = utc_time + utc_offset`."""
 
     def __init__(self, offset):
@@ -29,7 +30,7 @@ class FixedOffset(tzinfo):
         # NOTE: the last part is to remind about deprecated POSIX GMT+h timezones
         #  that have the opposite sign in the name;
         #  the corresponding numeric value is not used e.g., no minutes
-        self.__name = '<%+03d%02d>%+d' % (hours, minutes, -hours)
+        self.__name = "<%+03d%02d>%+d" % (hours, minutes, -hours)
 
     def utcoffset(self, dt=None):
         return self.__offset
@@ -41,12 +42,12 @@ class FixedOffset(tzinfo):
         return timedelta(0)
 
     def __repr__(self):
-        return 'FixedOffset(%d)' % (self.utcoffset().total_seconds() / 60)
+        return "FixedOffset(%d)" % (self.utcoffset().total_seconds() / 60)
 
 
 def mkdatetime(datestr):
-    naive_date_str, _, offset_str = datestr.rpartition(' ')
-    naive_dt = datetime.strptime(naive_date_str, '%Y-%m-%d %H:%M:%S')
+    naive_date_str, _, offset_str = datestr.rpartition(" ")
+    naive_dt = datetime.strptime(naive_date_str, "%Y-%m-%d %H:%M:%S")
     offset = int(offset_str[-4:-2]) * 60 + int(offset_str[-2:])
     if offset_str[0] == "-":
         offset = -offset
@@ -54,44 +55,66 @@ def mkdatetime(datestr):
     return dt
 
 
-def JobDictionary(hostname, starttime, endtime, cmtconfig, appname, appversion,
-                  appversiondatetime, execname, execcontent, optname, optcontent,
-                  optstandalone, setupname, setupcontent, status, cpu_info, memoryinfo):
+def JobDictionary(
+        hostname,
+        starttime,
+        endtime,
+        cmtconfig,
+        appname,
+        appversion,
+        appversiondatetime,
+        execname,
+        execcontent,
+        optname,
+        optcontent,
+        optstandalone,
+        setupname,
+        setupcontent,
+        status,
+        cpu_info,
+        memoryinfo,
+):
     """
     This method creates a dictionary with information about the job (like time_start/end etc)
     which will be added to json_results along with the execution results
     """
 
-    hostDict = {'hostname': hostname, 'cpu_info': cpu_info, 'memoryinfo': memoryinfo}
-    cmtconfigDict = {'platform': cmtconfig}
+    hostDict = {
+        "hostname": hostname,
+        "cpu_info": cpu_info,
+        "memoryinfo": memoryinfo
+    }
+    cmtconfigDict = {"platform": cmtconfig}
     DataDict = {
-        'HOST': hostDict,
-        'CMTCONFIG': cmtconfigDict,
-        'time_start': starttime,
-        'time_end': endtime,
-        'status': status,
-        'app_name': appname,
-        'app_version': appversion,
-        'app_version_datetime': appversiondatetime,
-        'exec_name': execname,
-        'exec_content': execcontent,
-        'opt_name': optname,
-        'opt_content': optcontent,
-        'opt_standalone': optstandalone,
-        'setup_name': setupname,
-        'setup_content': setupcontent
+        "HOST": hostDict,
+        "CMTCONFIG": cmtconfigDict,
+        "time_start": starttime,
+        "time_end": endtime,
+        "status": status,
+        "app_name": appname,
+        "app_version": appversion,
+        "app_version_datetime": appversiondatetime,
+        "exec_name": execname,
+        "exec_content": execcontent,
+        "opt_name": optname,
+        "opt_content": optcontent,
+        "opt_standalone": optstandalone,
+        "setup_name": setupname,
+        "setup_content": setupcontent,
     }
 
     return DataDict
 
+
 def urlopen(url):
-    '''
+    """
     Wrapper for urllib2.urlopen to enable or disable SSL verification.
-    '''
+    """
     if sys.version_info >= (2, 7, 9):
         # with Python >= 2.7.9 SSL certificates are validated by default
         # but we can ignore them
         from ssl import SSLContext, PROTOCOL_SSLv23
+
         return urllib.request.urlopen(url, context=SSLContext(PROTOCOL_SSLv23))
     return urllib.request.urlopen(url)
 
@@ -101,90 +124,172 @@ def main():
     the runned job(platform,host,status etc) along with the execution results, the output(logs, root files,xml files)
      of a job are collected by handlers. Each handler knows which file must parse, so this script imports dynamically
      each handler(from the input handler list, --list-handlers option) and calls the collectResults function, of each handler, and
-     passes to the function the directory(the default is the . <-- current directory) to the results(output of the runned job)"""
+     passes to the function the directory (the default is the current directory) containing the results (output of the run job)
+    """
     # this is used for checking
-    outputfile = 'json_results'
+    outputfile = "json_results"
 
     description = """The program needs all the input arguments(options in order to run properly)"""
-    parser = argparse.ArgumentParser(description=description,
-                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-    parser.add_argument('-r', '--results', default=".",
-                        help='Directory which contains results, default is the current directory')
-
-    parser.add_argument('--app-name',
-                        help='Application name (Brunel, Gauss, Moore, ...)',
-                        required=True)
-    parser.add_argument('--app-version',
-                        help='Application release/build version (v42r0, lhcb-gaudi-header-111,...)',
-                        required=True)
-    parser.add_argument('--app-version-datetime',
-                        help='Application release/build creation time (2015-10-13 11:00:00 +0200)',
-                        type=mkdatetime,
-                        required=True)
-    parser.add_argument('--exec-name',
-                        help='Executable name',
-                        required=True)
-    parser.add_argument('--exec-content',
-                        help='Executable command (lb-run, gaudirun.py,...)',
-                        required=True)
-    parser.add_argument('--opt-name',
-                        help='Option name (PRTEST-COLLISION12-1000, PRTEST-Callgrind-300evts,...)',
-                        required=True)
-    parser.add_argument('--opt-content',
-                        help='Option content ("${PRCONFIGOPTS}/Moore/PRTEST-Callgrind-300evts.py",...)',
-                        required=True)
-    parser.add_argument('--opt-standalone', action='store_true',
-                        help='Set flag if option is shell script and not job option',
-                        default=False)
-    parser.add_argument('--setup-name',
-                        help='Setup name (UsePRConfig, UserAreaPRConfig, ...)',
-                        required=False)
-    parser.add_argument('--setup-content',
-                        help='Setup content ("--no-user-area --use PRConfig", "--use PRConfig", ...)',
-                        required=False)
-
-    parser.add_argument('-s', '--start-time',
-                        dest='startTime', help='The start time of the job.',
-                        required=True)
-    parser.add_argument('-e', '--end-time',
-                        dest="endTime", help="The end time of the job.",
-                        required=True)
-    parser.add_argument("-p", "--hostname",
-                        dest="hostname", help="The name of the host who runned the job.",
-                        required=True)
-    parser.add_argument("-u", "--cpu_info",
-                        dest="cpu_info", help="The cpu_info of the host who runned the job.",
-                        required=True)
-    parser.add_argument("-m", "--memoryinfo",
-                        dest="memoryinfo", help="The memoryinfo of the host who runned the job.",
-                        required=True)
-    parser.add_argument("-c", "--platform",
-                        dest="platform", help="The platform(cmtconfig) of the job.",
-                        required=True)
-    parser.add_argument("-l", "--list-handlers",
-                        dest="handlers", help="The list of handlers(comma separated.",
-                        required=True)
-    parser.add_argument("-q", "--quiet", action="store_const", const=logging.WARNING,
-                        dest="loglevel", default=logging.INFO,
-                        help="Just be quiet (do not print info from logger)")
-    parser.add_argument("-d", "--debug", action="store_const", const=logging.DEBUG,
-                        dest="loglevel", default=logging.INFO,
-                        help="Print additional debug info from logger")
-    parser.add_argument("-i", "--count",
-                        dest="count", default="1",
-                        help="Iteration number of the test in a given jenkins build")
-    parser.add_argument("-t", "--status",
-                        dest="status", default="0",
-                        help="Return code of the test job")
-    parser.add_argument("-a", "--auto-send-results", action="store_true",
-                        dest="send", default=False,
-                        help="Automatically send the zip results to the database and job info to couchdb.")
+    parser = argparse.ArgumentParser(
+        description=description,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument(
+        "-r",
+        "--results",
+        default=".",
+        help=
+        "Directory which contains results, default is the current directory",
+    )
+
+    parser.add_argument(
+        "--app-name",
+        help="Application name (Brunel, Gauss, Moore, ...)",
+        required=True)
+    parser.add_argument(
+        "--app-version",
+        help=
+        "Application release/build version (v42r0, lhcb-gaudi-header-111,...)",
+        required=True,
+    )
+    parser.add_argument(
+        "--app-version-datetime",
+        help=
+        "Application release/build creation time (2015-10-13 11:00:00 +0200)",
+        type=mkdatetime,
+        required=True,
+    )
+    parser.add_argument("--exec-name", help="Executable name", required=True)
+    parser.add_argument(
+        "--exec-content",
+        help="Executable command (lb-run, gaudirun.py,...)",
+        required=True,
+    )
+    parser.add_argument(
+        "--opt-name",
+        help=
+        "Option name (PRTEST-COLLISION12-1000, PRTEST-Callgrind-300evts,...)",
+        required=True,
+    )
+    parser.add_argument(
+        "--opt-content",
+        help=
+        'Option content ("${PRCONFIGOPTS}/Moore/PRTEST-Callgrind-300evts.py",...)',
+        required=True,
+    )
+    parser.add_argument(
+        "--opt-standalone",
+        action="store_true",
+        help="Set flag if option is shell script and not job option",
+        default=False,
+    )
+    parser.add_argument(
+        "--setup-name",
+        help="Setup name (UsePRConfig, UserAreaPRConfig, ...)",
+        required=False,
+    )
+    parser.add_argument(
+        "--setup-content",
+        help=
+        'Setup content ("--no-user-area --use PRConfig", "--use PRConfig", ...)',
+        required=False,
+    )
+
+    parser.add_argument(
+        "-s",
+        "--start-time",
+        dest="startTime",
+        help="The start time of the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-e",
+        "--end-time",
+        dest="endTime",
+        help="The end time of the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-p",
+        "--hostname",
+        dest="hostname",
+        help="The name of the host who runned the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-u",
+        "--cpu_info",
+        dest="cpu_info",
+        help="The cpu_info of the host who runned the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-m",
+        "--memoryinfo",
+        dest="memoryinfo",
+        help="The memoryinfo of the host who runned the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-c",
+        "--platform",
+        dest="platform",
+        help="The platform(cmtconfig) of the job.",
+        required=True,
+    )
+    parser.add_argument(
+        "-l",
+        "--list-handlers",
+        dest="handlers",
+        help="The list of handlers(comma separated.",
+        required=True,
+    )
+    parser.add_argument(
+        "-q",
+        "--quiet",
+        action="store_const",
+        const=logging.WARNING,
+        dest="loglevel",
+        default=logging.INFO,
+        help="Just be quiet (do not print info from logger)",
+    )
+    parser.add_argument(
+        "-d",
+        "--debug",
+        action="store_const",
+        const=logging.DEBUG,
+        dest="loglevel",
+        default=logging.INFO,
+        help="Print additional debug info from logger",
+    )
+    parser.add_argument(
+        "-i",
+        "--count",
+        dest="count",
+        default="1",
+        help="Iteration number of the test in a given jenkins build",
+    )
+    parser.add_argument(
+        "-t",
+        "--status",
+        dest="status",
+        default="0",
+        help="Return code of the test job")
+    parser.add_argument(
+        "-a",
+        "--auto-send-results",
+        action="store_true",
+        dest="send",
+        default=False,
+        help=
+        "Automatically send the zip results to the database and job info to couchdb.",
+    )
 
     options = parser.parse_args()
 
-    fh = logging.FileHandler(os.path.join(options.results,'collect.log'))
+    fh = logging.FileHandler(os.path.join(options.results, "collect.log"))
     ch = logging.StreamHandler()
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
     ch.setFormatter(formatter)
     fh.setFormatter(formatter)
     root_logger = logging.getLogger()
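
For reference, a hypothetical invocation that satisfies all of the required options above; every value here is illustrative, not real build metadata:

```python
import subprocess

# Hypothetical smoke-test invocation; option names match the parser above,
# all values are placeholders only.
subprocess.run([
    "./collectRunResults.py",
    "--app-name", "Moore",
    "--app-version", "lhcb-master.1234",
    "--app-version-datetime", "2015-10-13 11:00:00 +0200",
    "--exec-name", "gaudirun.py",
    "--exec-content", "gaudirun.py",
    "--opt-name", "PRTEST-Callgrind-300evts",
    "--opt-content", "${PRCONFIGOPTS}/Moore/PRTEST-Callgrind-300evts.py",
    "-s", "2015-10-13 12:00:00 +0200",
    "-e", "2015-10-13 12:30:00 +0200",
    "-p", "lbhltperf01.cern.ch",
    "-u", "Intel(R) Xeon(R)",
    "-m", "64 GB",
    "-c", "x86_64-centos7-gcc9-opt",
    "-l", "ThroughputProfileHandler",
], check=True)
```
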
@@ -192,7 +297,7 @@ def main():
     root_logger.addHandler(ch)
     root_logger.addHandler(fh)
 
-    logger = logging.getLogger('collectRunResults.py')
+    logger = logging.getLogger("collectRunResults.py")
 
     dataDict = JobDictionary(
         options.hostname,
@@ -222,47 +327,46 @@ def main():
         try:
             from LbNightlyTools.Utils import Dashboard
 
-            dash = Dashboard(credentials=None,
-                             flavour='periodic')
-            build_id = options.startTime.replace(' ', '_')
-            if 'BUILD_ID' in os.environ:
-                build_id = os.environ.get('BUILD_ID')
+            dash = Dashboard(credentials=None, flavour="periodic")
+            build_id = options.startTime.replace(" ", "_")
+            if "BUILD_ID" in os.environ:
+                build_id = os.environ.get("BUILD_ID")
             doc_name = build_id + "." + options.count
 
-            dataDict['handlers_info'] = handlers_result
-            dataDict['JobAttributes'] = jobAttributes
-            if 'BUILD_URL' in os.environ:
-                dataDict['build_url'] = os.environ.get('BUILD_URL') + '/console'
+            dataDict["handlers_info"] = handlers_result
+            dataDict["JobAttributes"] = jobAttributes
+            if "BUILD_URL" in os.environ:
+                dataDict["build_url"] = os.environ.get(
+                    "BUILD_URL") + "/console"
 
         except Exception as ex:
-            logger.warning(
-                'Problem with sending information to couchdb: %s', ex
-            )
+            logger.warning("Problem with sending information to couchdb: %s",
+                           ex)
 
     # no point to run the handlers if the test job failed
     if options.status != "0":
-        logger.warning('Test failed, handlers will not be executed')
+        logger.warning("Test failed, handlers will not be executed")
 
         if "MATTERMOST_HOOK" in os.environ:
-            log_url = "https://lhcb-nightlies.cern.ch/periodic/summary/" + urllib.parse.quote(options.startTime)
-            content=":alarm: Test failed for: `"\
-                +options.app_name+"`, `"\
-                +options.app_version+"`, `"\
-                +options.platform+"`, `"\
-                +options.opt_name+"`"\
-                +", see [here]("+ log_url + ") for details :alarm:"
-            send_notification_mattermost(os.environ['MATTERMOST_HOOK'], content)
+            log_url = ("https://lhcb-nightlies.cern.ch/periodic/summary/" +
+                       urllib.parse.quote(options.startTime))
+            content = (":alarm: Test failed for: `" + options.app_name + "`, `"
+                       + options.app_version + "`, `" + options.platform +
+                       "`, `" + options.opt_name + "`" + ", see [here](" +
+                       log_url + ") for details :alarm:")
+            send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
+                                         content)
 
     else:
         # for each handler in the handlers list
-        for handler in options.handlers.split(','):
-            module = ''.join(['handlers','.',handler])
+        for handler in options.handlers.split(","):
+            module = "".join(["handlers", ".", handler])
             # import the current handler
             try:
                 mod = __import__(module, fromlist=[module])
             except ImportError as e:
                 logger.exception(
-                    'Please check your script or handlers directory: %s', e)
+                    "Please check your script or handlers directory: %s", e)
             else:
                 # create an instance of a the current handler
                 try:
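
The handler-loading convention here is worth spelling out: each name passed via `-l/--list-handlers` must resolve to a module `handlers.<name>` defining a class of the same name. A standalone sketch (the handler name is hypothetical, and the `getattr` lookup is an assumption based on the "Is the class name same as file name?" hint in the error message):

```python
# Hypothetical handler name; real names come from the -l/--list-handlers option.
handler = "MyTimingHandler"
module = "".join(["handlers", ".", handler])  # "handlers.MyTimingHandler"
mod = __import__(module, fromlist=[module])   # import handlers.MyTimingHandler
klass = getattr(mod, handler)                 # class must match the file name
currentHandler = klass()
```
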
@@ -270,76 +374,86 @@ def main():
                     currentHandler = klass()
                 except Exception as ex:
                     logger.exception(
-                        'Could not instantiate handler class.'
-                        'Is the class name same as file name? : %s', ex)
+                        "Could not instantiate handler class."
+                        "Is the class name same as file name? : %s",
+                        ex,
+                    )
 
                 try:
                     # collect results from the given directory(--results-directory, -r)
-                    collectresext = getattr(currentHandler, "collectResultsExt", None)
+                    collectresext = getattr(currentHandler,
+                                            "collectResultsExt", None)
                     if collectresext == None:
                         currentHandler.collectResults(options.results)
                     else:
-                        currentHandler.collectResultsExt(options.results,
-                                                         project= options.app_name,
-                                                         version= options.app_version,
-                                                         platform=options.platform,
-                                                         hostname=options.hostname,
-                                                         cpu_info=options.cpu_info,
-                                                         memoryinfo=options.memoryinfo,
-                                                         startTime=options.startTime,
-                                                         endTime=options.endTime,
-                                                         options=options.opt_name)
+                        currentHandler.collectResultsExt(
+                            options.results,
+                            project=options.app_name,
+                            version=options.app_version,
+                            platform=options.platform,
+                            hostname=options.hostname,
+                            cpu_info=options.cpu_info,
+                            memoryinfo=options.memoryinfo,
+                            startTime=options.startTime,
+                            endTime=options.endTime,
+                            options=options.opt_name,
+                        )
                 except Exception as ex:
                     # if any error occurs and the handler fails, inform the user
                     # using the logger and save that the current handler failed
-                    logger.exception('Handler exception: %s', ex)
-                    handlers_result.append(
-                        {'handler': handler, 'successful': False})
+                    logger.exception("Handler exception: %s", ex)
+                    handlers_result.append({
+                        "handler": handler,
+                        "successful": False
+                    })
                 else:
                     # in case everything is fine , save that the current handler
                     # worked successfully
                     jobAttributes.extend(currentHandler.getResults())
-                    handlers_result.append(
-                        {'handler': handler, 'successful': True})
+                    handlers_result.append({
+                        "handler": handler,
+                        "successful": True
+                    })
 
         if not jobAttributes:
             if "MATTERMOST_HOOK" in os.environ:
-                log_url = "https://lhcb-nightlies.cern.ch/periodic/summary/"\
-                    + urllib.parse.quote(options.startTime)
-                content="Results were not collected for test: `"\
-                    +options.app_name+"`, `"\
-                    +options.app_version+"`, `"\
-                    +options.platform+"`, `"\
-                    +options.opt_name+"`"\
-                    +" \nHandlers failed (unexpected output?) \nSee "\
-                    + log_url\
-                    +" for details "
-                send_notification_mattermost(os.environ['MATTERMOST_HOOK'], content)
-            exit('All handlers failed, no results were collected...')
+                log_url = ("https://lhcb-nightlies.cern.ch/periodic/summary/" +
+                           urllib.parse.quote(options.startTime))
+                content = (
+                    "Results were not collected for test: `" + options.app_name
+                    + "`, `" + options.app_version + "`, `" + options.platform
+                    + "`, `" + options.opt_name + "`" +
+                    " \nHandlers failed (unexpected output?) \nSee " + log_url
+                    + " for details ")
+                send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
+                                             content)
+            exit("All handlers failed, no results were collected...")
         else:
             unique_results_id = str(uuid.uuid1())
-            zipper = zipfile.ZipFile(unique_results_id + '.zip', mode='w')
+            zipper = zipfile.ZipFile(unique_results_id + ".zip", mode="w")
 
             for i in range(len(jobAttributes)):
-                if jobAttributes[i]['type'] == 'File':
-                    head, tail = ntpath.split(jobAttributes[i]['filename'])
+                if jobAttributes[i]["type"] == "File":
+                    head, tail = ntpath.split(jobAttributes[i]["filename"])
 
                     try:
                         # write to the zip file the root file with a unique name
-                        zipper.write(jobAttributes[i]['filename'], tail)
+                        zipper.write(jobAttributes[i]["filename"], tail)
                     except Exception as ex:
-                        logger.warning('Could not write the root file to the zip file: %s', ex)
+                        logger.warning(
+                            "Could not write the root file to the zip file: %s",
+                            ex)
                         pass
 
                     # update in the json_results the uuid new filename
-                    jobAttributes[i]['filename'] = tail
+                    jobAttributes[i]["filename"] = tail
 
             # add the collected results to the final data dictionary
-            dataDict['JobAttributes'] = jobAttributes
-            dataDict['results_id'] = unique_results_id
-            dataDict['handlers_info'] = handlers_result
+            dataDict["JobAttributes"] = jobAttributes
+            dataDict["results_id"] = unique_results_id
+            dataDict["handlers_info"] = handlers_result
 
-            f = open(outputfile, 'w')
+            f = open(outputfile, "w")
             f.write(json.dumps(dataDict))
             f.close()
 
@@ -349,56 +463,63 @@ def main():
             # close the zipfile object
             zipper.close()
 
-            logger.info(unique_results_id + '.zip')
+            logger.info(unique_results_id + ".zip")
 
             if options.send:
-                with open('unique_results_id_zip', 'w') as file:
-                    file.write(unique_results_id + '.zip')
+                with open("unique_results_id_zip", "w") as file:
+                    file.write(unique_results_id + ".zip")
 
                 id_app = 0
                 id_opt = 0
                 id_ver = 0
 
                 # add to dictionary path to lhcbpr dashboard
-                if options.app_name.startswith("Moore") and "throughput" in options.handlers.lower():
-                    dataDict['lhcbpr_url'] = f"https://cern.ch/lhcbpr-hlt/" \
-                                             f"PerfTests/UpgradeThroughput/" \
-                                             f"Throughput_{options.app_version}_" \
-                                             f"{str(options.opt_name)}_" \
-                                             f"{str(options.platform)}_" \
-                                             f"{options.startTime.replace(' ', '_')}"
-                elif options.app_name.startswith("Moore") and "ratetest" in options.handlers.lower():
-                    dataDict['lhcbpr_url'] = f"https://cern.ch/lhcbpr-hlt/" \
-                                             f"UpgradeRateTest/" \
-                                             f"RateTest_{options.app_version}_" \
-                                             f"{str(options.opt_name)}_" \
-                                             f"{str(options.platform)}_" \
-                                             f"{options.startTime.replace(' ', '_')}"
-                elif options.app_name.startswith("Moore") and "bandwidthtest" in options.handlers.lower():
-                    dataDict['lhcbpr_url'] = f"https://cern.ch/lhcbpr-hlt/" \
-                                             f"UpgradeRateTest/" \
-                                             f"BandwidthTest_{options.app_version}_" \
-                                             f"{str(options.opt_name)}_" \
-                                             f"{str(options.platform)}_" \
-                                             f"{options.startTime.replace(' ', '_')}"
+                if (options.app_name.startswith("Moore")
+                        and "throughput" in options.handlers.lower()):
+                    dataDict["lhcbpr_url"] = (
+                        f"https://cern.ch/lhcbpr-hlt/"
+                        f"PerfTests/UpgradeThroughput/"
+                        f"Throughput_{options.app_version}_"
+                        f"{str(options.opt_name)}_"
+                        f"{str(options.platform)}_"
+                        f"{options.startTime.replace(' ', '_')}")
+                elif (options.app_name.startswith("Moore")
+                      and "ratetest" in options.handlers.lower()):
+                    dataDict["lhcbpr_url"] = (
+                        f"https://cern.ch/lhcbpr-hlt/"
+                        f"UpgradeRateTest/"
+                        f"RateTest_{options.app_version}_"
+                        f"{str(options.opt_name)}_"
+                        f"{str(options.platform)}_"
+                        f"{options.startTime.replace(' ', '_')}")
+                elif (options.app_name.startswith("Moore")
+                      and "bandwidthtest" in options.handlers.lower()):
+                    dataDict["lhcbpr_url"] = (
+                        f"https://cern.ch/lhcbpr-hlt/"
+                        f"UpgradeRateTest/"
+                        f"BandwidthTest_{options.app_version}_"
+                        f"{str(options.opt_name)}_"
+                        f"{str(options.platform)}_"
+                        f"{options.startTime.replace(' ', '_')}")
                 else:
-                    dataDict['lhcbpr_url'] = f"https://lblhcbpr.cern.ch/{options.app_name}"
+                    dataDict[
+                        "lhcbpr_url"] = f"https://lblhcbpr.cern.ch/{options.app_name}"
 
     if options.send:
         try:
             # removing information unnecessary for the couchdb dashboard
-            del dataDict['JobAttributes']
-            del dataDict['exec_name']
-            del dataDict['exec_content']
-            del dataDict['setup_name']
-            del dataDict['setup_content']
-            del dataDict['opt_standalone']
+            del dataDict["JobAttributes"]
+            del dataDict["exec_name"]
+            del dataDict["exec_content"]
+            del dataDict["setup_name"]
+            del dataDict["setup_content"]
+            del dataDict["opt_standalone"]
             # updating the entry
             dash.update(doc_name, dataDict)
         except Exception as ex:
-            logger.warning(
-                'Problem with sending information to couchdb: %s', ex
-            )
+            logger.warning("Problem with sending information to couchdb: %s",
+                           ex)
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
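
The dashboard URLs assembled above follow a fixed naming scheme; a worked example with illustrative values shows the resulting throughput link:

```python
# Illustrative values only; the scheme matches the f-strings above.
app_version = "lhcb-master.1234"
opt_name = "hlt2_pp_thor"
platform = "x86_64_v2-centos7-gcc11-opt"
start_time = "2015-10-13 12:00:00"

url = (f"https://cern.ch/lhcbpr-hlt/"
       f"PerfTests/UpgradeThroughput/"
       f"Throughput_{app_version}_"
       f"{opt_name}_"
       f"{platform}_"
       f"{start_time.replace(' ', '_')}")
# -> .../Throughput_lhcb-master.1234_hlt2_pp_thor_x86_64_v2-centos7-gcc11-opt_2015-10-13_12:00:00
```
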
diff --git a/handlers/AllPlotsHandler.py b/handlers/AllPlotsHandler.py
index 746e30079fb70f99f5cd49a48f114a9a4faa0918..b0498df3b3abc15312d64fe2bf37597d5891def6 100644
--- a/handlers/AllPlotsHandler.py
+++ b/handlers/AllPlotsHandler.py
@@ -4,6 +4,7 @@ import ROOT
 
 from .BaseHandler import BaseHandler
 
+
 def _TList__iter__(self):
     next_item = ROOT.TIter(self)
     while True:
@@ -13,8 +14,10 @@ def _TList__iter__(self):
         else:
             return
 
+
 ROOT.TList.__iter__ = _TList__iter__
 
+
 def make_key(prefix, obj):
     the_type = type(obj)
     return "{}_{}_{}".format(
@@ -23,11 +26,10 @@ def make_key(prefix, obj):
         obj.GetName(),
     )
 
+
 def sanitize(string):
-    return (string
-            .replace("/", "_bs_")
-            .replace(".", "_ps_")
-            )
+    return string.replace("/", "_bs_").replace(".", "_ps_")
+
 
 def all_plots_in_files(filenames):
     def loop_over(key, tdirectory):
@@ -73,20 +75,19 @@ class AllPlotsHandler(BaseHandler):
     """
 
     def collectResults(self, directory):
-        filenames = [os.path.join(directory, i) for i in type(self).files_to_search]
+        filenames = [
+            os.path.join(directory, i) for i in type(self).files_to_search
+        ]
 
         allplots_keys = []
         for key, obj in all_plots_in_files(filenames):
-            self.saveJSON(
-                key,
-                obj,
-                group="_allplots"
-                )
+            self.saveJSON(key, obj, group="_allplots")
             allplots_keys.append(key)
 
         self.saveJSON(
             "allplots_keys",
             allplots_keys,
-            description="A list of all names of JSON'd plottables recorded by AllPlotsHandler.",
+            description=
+            "A list of all names of JSON'd plottables recorded by AllPlotsHandler.",
             group="_allplots_keys",
         )
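
A quick check of the path-escaping convention defined by `sanitize` above, which lets ROOT directory paths survive as flat JSON key names:

```python
def sanitize(string):
    # same replacement rules as the handler above
    return string.replace("/", "_bs_").replace(".", "_ps_")

assert sanitize("RICH/plots.root") == "RICH_bs_plots_ps_root"
```
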
diff --git a/handlers/BandwidthTestHandler.py b/handlers/BandwidthTestHandler.py
index 33971403be44ed9e8040a4da07ceeb59dba03dce..244f18ab86f9cbb5c416520f70dce61363814480 100644
--- a/handlers/BandwidthTestHandler.py
+++ b/handlers/BandwidthTestHandler.py
@@ -17,31 +17,37 @@ log = logging.getLogger(__name__)
 
 WWW_BASE_URL = "https://cern.ch/lhcbpr-hlt/UpgradeRateTest"
 compare_str = f'<li><a href="{WWW_BASE_URL}/$$dirname$$/comparisons.html"> Comparisons with reference build</a></li>'
-
-'''
+"""
     Writes results to: /eos/lhcb/storage/lhcbpr/www/UpgradeRateTest
 
     To run a local test of this handler: ./run python -m pytest --log-cli-level=DEBUG tests/test_BandwidthHandler.py
     For local testing: set env variable 'LHCBPR_WWW_EOS' to '/eos/lhcb/storage/lhcbpr/www'
 
-'''
+"""
+
 
-def process_prmon_output(prmon_file, prmon_json_file, process='Hlt2'):
+def process_prmon_output(prmon_file, prmon_json_file, process="Hlt2"):
     """copied and modified from prmon plotting script
-    https://github.com/HSF/prmon/blob/main/package/scripts/prmon_plot.py 
+    https://github.com/HSF/prmon/blob/main/package/scripts/prmon_plot.py
     """
 
     # Load the data
     data = pd.read_csv(prmon_file, sep="\t")
     data["Time"] = pd.to_datetime(data["Time"], unit="s")
-    make_prmon_plots(data, xvar='wtime', ylist=['vmem', 'pss', 'rss', 'swap'], process=process)
+    make_prmon_plots(
+        data,
+        xvar="wtime",
+        ylist=["vmem", "pss", "rss", "swap"],
+        process=process)
 
-    with open(prmon_json_file) as f: prmon_summary = json.load(f)
-    max_rss = prmon_summary["Max"]["rss"] / 1024.0 / 1024.0 # In GB
-    max_pss = prmon_summary["Max"]["pss"] / 1024.0 / 1024.0 # In GB
+    with open(prmon_json_file) as f:
+        prmon_summary = json.load(f)
+    max_rss = prmon_summary["Max"]["rss"] / 1024.0 / 1024.0  # In GB
+    max_pss = prmon_summary["Max"]["pss"] / 1024.0 / 1024.0  # In GB
 
     return max_rss, max_pss
 
+
 def make_prmon_plots(data, xvar, ylist, process):
     # Labels and output information
     legend_names = {
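
The unit arithmetic in `process_prmon_output` above is easy to misread: prmon reports sizes in kB, so two divisions by 1024 yield GB. A self-contained check with an illustrative summary dictionary matching the JSON layout the handler reads:

```python
prmon_summary = {"Max": {"rss": 3145728, "pss": 2097152}}  # kB, illustrative
max_rss = prmon_summary["Max"]["rss"] / 1024.0 / 1024.0  # 3.0 GB
max_pss = prmon_summary["Max"]["pss"] / 1024.0 / 1024.0  # 2.0 GB
assert (max_rss, max_pss) == (3.0, 2.0)
```
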
@@ -51,56 +57,73 @@ def make_prmon_plots(data, xvar, ylist, process):
         "rss": "Resident Set Size",
         "swap": "Swap Size",
     }
-    xlabel = 'Wall-time'
-    ylabel = 'Memory'
-    xunit = 's'
-    yunit = 'GB'
+    xlabel = "Wall-time"
+    ylabel = "Memory"
+    xunit = "s"
+    yunit = "GB"
     xmultiplier = 1.0
-    ymultiplier = 1.0 / 1024.0 / 1024.0 # Raw data is in kB
+    ymultiplier = 1.0 / 1024.0 / 1024.0  # Raw data is in kB
 
     # Here comes the figure and data extraction
     fig = plt.figure()
     xdata = np.array(data[xvar]) * xmultiplier
     ydlist = []
-    for carg in ylist: ydlist.append(np.array(data[carg]) * ymultiplier)
-    for cidx, cdata in enumerate(ydlist): plt.plot(xdata, cdata, lw=2, label=legend_names[ylist[cidx]])
+    for carg in ylist:
+        ydlist.append(np.array(data[carg]) * ymultiplier)
+    for cidx, cdata in enumerate(ydlist):
+        plt.plot(xdata, cdata, lw=2, label=legend_names[ylist[cidx]])
     plt.legend(loc=0)
-    plt.title("Plot of {} vs {} for {} test".format(ylabel, xlabel, process), y=1.05)
+    plt.title(
+        "Plot of {} vs {} for {} test".format(ylabel, xlabel, process), y=1.05)
     plt.xlabel((xlabel + " [" + xunit + "]"))
     plt.ylabel((ylabel + " [" + yunit + "]"))
     plt.tight_layout()
-    fig.savefig('memory_consumption.png')
+    fig.savefig("memory_consumption.png")
+
 
 def get_from_message(message, str_to_find):
     return [match for match in re.finditer(str_to_find, message)][0].group(1)
 
+
 def make_dirname(version, options, platform, start_time):
-    return "_".join(["BandwidthTest", version, options, platform, start_time.replace(' ', '_')])
+    return "_".join([
+        "BandwidthTest", version, options, platform,
+        start_time.replace(" ", "_")
+    ])
+
 
 def get_info_from_comparison_build(version, options, csv_rate_table_names):
-    '''
+    """
     Obtain info from another build given version.
     This is presumably the reference build for comparison with the current build's results.
     Copied from ThroughputProfileHandler.
-    '''
+    """
     test = dashboard.get_test_doc(version, options)
-    dirname = make_dirname(test['app_version'], test['opt_name'], test['CMTCONFIG']['platform'], test['time_start'] )
-    directory = os.path.join(os.getenv("LHCBPR_WWW_EOS")[23:], "UpgradeRateTest", dirname)
+    dirname = make_dirname(
+        test["app_version"],
+        test["opt_name"],
+        test["CMTCONFIG"]["platform"],
+        test["time_start"],
+    )
+    directory = os.path.join(
+        os.getenv("LHCBPR_WWW_EOS")[23:], "UpgradeRateTest", dirname)
 
-    dfs = {csv_name: pd.read_csv(os.path.join(directory, csv_name)) for csv_name in csv_rate_table_names}
+    dfs = {
+        csv_name: pd.read_csv(os.path.join(directory, csv_name))
+        for csv_name in csv_rate_table_names
+    }
 
-    web_link = test.get("lhcbpr_url", f"https://Failed_to_retrieve_lhcbpr_link_for_{version}")
-    with open(os.path.join(directory, 'message.txt'), 'r') as message_file:
+    web_link = test.get(
+        "lhcbpr_url", f"https://Failed_to_retrieve_lhcbpr_link_for_{version}")
+    with open(os.path.join(directory, "message.txt"), "r") as message_file:
         message = message_file.read()
-        tot_rate = float(get_from_message(message, r"total_rate = ([\d.]+) kHz"))
-        tot_bandwidth = float(get_from_message(message, r"total_bandwidth = ([\d.]+) GB/s"))
+        tot_rate = float(
+            get_from_message(message, r"total_rate = ([\d.]+) kHz"))
+        tot_bandwidth = float(
+            get_from_message(message, r"total_bandwidth = ([\d.]+) GB/s"))
+
+    return (dfs, web_link, dirname, tot_rate, tot_bandwidth)
 
-    return (dfs,
-        web_link,
-        dirname,
-        tot_rate,
-        tot_bandwidth
-        )
 
 def compare_column(ref_df, new_df, name_col, compare_col):
     data1 = ref_df[[name_col, compare_col]]
@@ -108,9 +131,9 @@ def compare_column(ref_df, new_df, name_col, compare_col):
     diff = (data2[compare_col] - data1[compare_col]) / data1[compare_col] * 1e2
     new_df = pd.DataFrame({
         name_col: ref_df[name_col],
-        'New': ref_df[compare_col],
-        'Ref': new_df[compare_col],
-        'Change (%)': diff
+        "New": ref_df[compare_col],
+        "Ref": new_df[compare_col],
+        "Change (%)": diff,
     })
     return new_df
 
@@ -121,105 +144,133 @@ def make_comparison_page(dfs_for_comparison, ref_version, new_version):
     n_new_lines = -1
 
     def highlight_vals(val):
-        if val > 0: return 'background-color: red'
-        if val < 0: return 'background-color: green'
-        else: return ''
+        if val > 0:
+            return "background-color: red"
+        if val < 0:
+            return "background-color: green"
+        else:
+            return ""
 
     html_tables = {}
-    for bw_table_name, new_bw_df in dfs_for_comparison[new_version].items(): 
+    for bw_table_name, new_bw_df in dfs_for_comparison[new_version].items():
         ref_bw_df = dfs_for_comparison[ref_version][bw_table_name]
-        if ('Stream' not in new_bw_df.columns and 'Line' not in new_bw_df.columns):
-            log.warning(f'Expected "Stream" or "Line" to be one of the rate table columns. Found {new_bw_df.columns}')
+        if "Stream" not in new_bw_df.columns and "Line" not in new_bw_df.columns:
+            log.warning(
+                f'Expected "Stream" or "Line" to be one of the rate table columns. Found {new_bw_df.columns}'
+            )
             success = False
             continue
 
-        is_table_per_line = 'Line' in new_bw_df.columns # other tables have 'Stream' if streamed
-        columns_to_compare = ['Rate (kHz)'] if is_table_per_line else new_bw_df.columns[2:] # First two cols are a count and the stream name
+        is_table_per_line = ("Line" in new_bw_df.columns
+                             )  # other tables have 'Stream' if streamed
+        columns_to_compare = (
+            ["Rate (kHz)"] if is_table_per_line else new_bw_df.columns[2:]
+        )  # First two cols are a count and the stream name
 
-        name_col = 'Line' if is_table_per_line else 'Stream'
+        name_col = "Line" if is_table_per_line else "Stream"
         if name_col != new_bw_df.columns[1]:
-            log.warning(f'Expected 1st column of df "{name_col}" != the actual 1st col "{new_bw_df.columns[1]}". Comparison will fail.')
+            log.warning(
+                f'Expected name column "{name_col}" but found "{new_bw_df.columns[1]}" as the first named column. Comparison will fail.'
+            )
             success = False
             continue
 
-        comparison_df = pd.DataFrame({name_col: new_bw_df[name_col]}) 
+        comparison_df = pd.DataFrame({name_col: new_bw_df[name_col]})
         for col_to_compare in columns_to_compare:
             # Compare each column, make a small sub-df with 3 cols (old, new, diff) for that column, then merge the 3 into comparison df
-            old_new_diff_subdf = compare_column(ref_bw_df, new_bw_df, name_col, col_to_compare)
-            comparison_df = pd.merge(comparison_df, old_new_diff_subdf, on=[name_col], suffixes=(f'{col_to_compare}_ref', f'{col_to_compare}_new'))
+            old_new_diff_subdf = compare_column(ref_bw_df, new_bw_df, name_col,
+                                                col_to_compare)
+            comparison_df = pd.merge(
+                comparison_df,
+                old_new_diff_subdf,
+                on=[name_col],
+                suffixes=(f"{col_to_compare}_ref", f"{col_to_compare}_new"),
+            )
 
         # Now arrange the columns so that e.g. the Rate column has 3 sub-columns under it: old, new and diff
         # name_col is only column that doesn't have sub-columns
-        multi_columns = [(name_col, '')] + pd.MultiIndex.from_product([columns_to_compare, ['Ref', 'New', 'Change (%)']]).to_list()
+        multi_columns = [(name_col, "")] + pd.MultiIndex.from_product(
+            [columns_to_compare, ["Ref", "New", "Change (%)"]]).to_list()
         comparison_df.columns = pd.MultiIndex.from_tuples(multi_columns)
-        if name_col == 'Line':
+        if name_col == "Line":
             # Table will be too large; show only those that have changed
-            comparison_df = comparison_df[comparison_df['Rate (kHz)']['Ref'] != comparison_df['Rate (kHz)']['New']]
+            comparison_df = comparison_df[comparison_df["Rate (kHz)"]["Ref"] !=
+                                          comparison_df["Rate (kHz)"]["New"]]
 
         # Apply highlighting and formatting
         styler = comparison_df.style
         for column in columns_to_compare:
-            styler = styler.applymap(highlight_vals, subset=[(column, 'Change (%)')])
-            styler = styler.format('{:.2f}', subset=[(column, 'Change (%)')])
-            styler = styler.format('{:.3g}', subset=[(column, 'New'), (column, 'Ref')])
+            styler = styler.applymap(
+                highlight_vals, subset=[(column, "Change (%)")])
+            styler = styler.format("{:.2f}", subset=[(column, "Change (%)")])
+            styler = styler.format(
+                "{:.3g}", subset=[(column, "New"), (column, "Ref")])
 
         # Work out what to call the table
         if is_table_per_line:
-            table_name_for_html = 'rates per line'
+            table_name_for_html = "rates per line"
         else:
-            if 'turbo' in [col.lower() for col in comparison_df[name_col].values]:
-                table_name_for_html = 'rates per production stream'
-            elif 'qee' in [col.lower() for col in comparison_df[name_col].values]:
-                table_name_for_html = 'rates per WG stream'
+            if "turbo" in [
+                    col.lower() for col in comparison_df[name_col].values
+            ]:
+                table_name_for_html = "rates per production stream"
+            elif "qee" in [
+                    col.lower() for col in comparison_df[name_col].values
+            ]:
+                table_name_for_html = "rates per WG stream"
             else:
-                table_name_for_html = 'unknown table'
+                table_name_for_html = "unknown table"
 
         # Finally - how many new lines?
         if is_table_per_line:
             n_new_lines = 0
             for line in new_bw_df[name_col]:
-                if line not in ref_bw_df[name_col].values: n_new_lines += 1
+                if line not in ref_bw_df[name_col].values:
+                    n_new_lines += 1
 
-        html_tables[table_name_for_html] = styler.set_table_attributes("border=1").to_html()
+        html_tables[table_name_for_html] = styler.set_table_attributes(
+            "border=1").to_html()
 
     # Put together the html page
-    html_str = f'''
+    html_str = f"""
         <p>
             Comparison between {ref_version} and {new_version} under different streaming configurations
         </p>
         <p style="color:{'green' if success else 'red'}">
             <b>{'All comparison tables were made successfully' if success else 'The comparison page failed to build cleanly. Please see warnings in logfile.'}</b>
         </p>
-    '''
+    """
     for bw_table_name, html_table in html_tables.items():
-        html_str += f'''
+        html_str += f"""
             <p>
                 Changes in {bw_table_name}:
             </p>
-        '''
+        """
         html_str += html_table
-        
+
     return html_str, n_new_lines
 
+
 def rate_tolerance(process):
-    return 1000 if process == 'Hlt2' else 500 # Hz
+    return 1000 if process == "Hlt2" else 500  # Hz
+
 
 def send_gitlab_feedback(
-    n_new_lines,
-    n_low_rate,
-    n_high_rate,
-    tot_rate_new,
-    tot_rate_ref,
-    tot_bandwidth_new,
-    tot_bandwidth_ref,
-    options,
-    web_link_new,
-    web_link_ref,
-    trigger,
-    status_msg,
-    process
+        n_new_lines,
+        n_low_rate,
+        n_high_rate,
+        tot_rate_new,
+        tot_rate_ref,
+        tot_bandwidth_new,
+        tot_bandwidth_ref,
+        options,
+        web_link_new,
+        web_link_ref,
+        trigger,
+        status_msg,
+        process,
 ):
-    '''
+    """
     Post info to GitLab MR.
 
     Arguments:
@@ -236,12 +287,16 @@ def send_gitlab_feedback(
         trigger: note in MR that triggers this test
         status_msg: message to sum up whether all sub-jobs were successful
         process: either `Hlt2` or `Sprucing`
-    '''
+    """
 
     tot_rate_change = (tot_rate_new - tot_rate_ref) / tot_rate_ref
-    tot_bandwidth_change = (tot_bandwidth_new - tot_bandwidth_ref) / tot_bandwidth_ref
-    status_msg = status_msg.replace(':bluetick:', ':ballot_box_with_check:') # `bluetick` not supported on GitLab
-    status_msg = status_msg.replace(':alarm:', ':warning:')                  # `alarm` not supported on GitLab
+    tot_bandwidth_change = (
+        tot_bandwidth_new - tot_bandwidth_ref) / tot_bandwidth_ref
+    status_msg = status_msg.replace(
+        ":bluetick:",
+        ":ballot_box_with_check:")  # `bluetick` not supported on GitLab
+    status_msg = status_msg.replace(
+        ":alarm:", ":warning:")  # `alarm` not supported on GitLab
 
     message = (
         f"Bandwidth test [{options}]({web_link_new}): "
@@ -251,31 +306,37 @@ def send_gitlab_feedback(
         f"vs. [reference]({web_link_ref}). "
         f"New lines added: {n_new_lines}. "
         f"Lines with rate of 0 Hz: {n_low_rate}. "
-        f"Lines with rate > {rate_tolerance(process)} Hz: {n_high_rate}. "
-    )
+        f"Lines with rate > {rate_tolerance(process)} Hz: {n_high_rate}. ")
     message += status_msg
 
-    publish.post_gitlab_feedback(
-        trigger,
-        message
-    )
+    publish.post_gitlab_feedback(trigger, message)
 
 
-def send_mattermost_feedback(n_low_rate, n_high_rate, tot_rate, tot_bandwidth, options, version, web_link, status_msg, process ):
-    '''
+def send_mattermost_feedback(
+        n_low_rate,
+        n_high_rate,
+        tot_rate,
+        tot_bandwidth,
+        options,
+        version,
+        web_link,
+        status_msg,
+        process,
+):
+    """
     Post info to Mattermost LHCbPR throughput channel.
 
     Arguments:
         n_low_rate: number of lines which have a rate of 0 Hz
         n_high_rate: number of lines with rate above tolerance
-        tot_rate: total rate from test 
-        tot_bandwidth: total bandwidth from test 
+        tot_rate: total rate from test
+        tot_bandwidth: total bandwidth from test
         options: the name of this test
         version: the CI/nightly test slot.build_id of test
         web_link: web link of test build
         status_msg: message to sum up whether all sub-jobs were successful
         process: either `Hlt2` or `Sprucing`
-    '''
+    """
     message = (
         "The results of latest bandwidth test "
         f"[{options} {version}]({web_link}):\n"
@@ -283,41 +344,34 @@ def send_mattermost_feedback(n_low_rate, n_high_rate, tot_rate, tot_bandwidth, o
         f"Lines with rate of 0 Hz: {n_low_rate}. "
         f"Lines with rate > {rate_tolerance(process)} Hz: {n_high_rate}.\n"
     ) + status_msg
-    log.info(
-        f"Posting Mattermost feedback:\n message={message}"
-    )
+    log.info(f"Posting Mattermost feedback:\n message={message}")
     publish.post_mattermost(message)
 
 
 class BandwidthTestHandler(BaseHandler):
-
     def __init__(self):
         super().__init__()
 
     def collectResultsExt(
-        self,
-        directory,
-        project,
-        version,
-        platform,
-        hostname,
-        cpu_info,
-        memoryinfo,
-        startTime,
-        endTime,
-        options,
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
     ):
-
         try:
             slot, build_id = version.split(".")
             build_id = int(build_id)
         except:
             raise RuntimeError("Handler is only supported for nightly builds")
 
-        process = {
-            'hlt2': 'Hlt2',
-            'spruce': 'Sprucing'
-        }[options.split('_')[1]]
+        process = {"hlt2": "Hlt2", "spruce": "Sprucing"}[options.split("_")[1]]
 
         dirname = make_dirname(version, options, platform, startTime)
         targetRootWebDir = os.path.join(WWW_BASE_URL, dirname)
@@ -325,7 +379,8 @@ class BandwidthTestHandler(BaseHandler):
         def version_str(slot_and_build_id):
             return ".".join([str(s) for s in slot_and_build_id])
 
-        base_dir = os.path.join(directory, 'tmp/Output')
+        base_dir = os.path.join(directory, "tmp/Output")
+
         def full_output_path(end_path):
             return os.path.join(base_dir, end_path)
 
@@ -333,54 +388,62 @@ class BandwidthTestHandler(BaseHandler):
             return os.listdir(base_dir)
 
         # process `prmon` output to monitor memory consumption
-        prmon_file = os.path.join(directory, 'prmon.txt')
-        prmon_json_file = os.path.join(directory, 'prmon.json')
+        prmon_file = os.path.join(directory, "prmon.txt")
+        prmon_json_file = os.path.join(directory, "prmon.json")
         if os.path.exists(prmon_file) and os.path.exists(prmon_json_file):
             monitor_memory = True
-            max_rss, max_pss = process_prmon_output(prmon_file, prmon_json_file, process=process)
-            max_rss = f'{max_rss:.2f}'
-            max_pss = f'{max_pss:.2f}'
+            max_rss, max_pss = process_prmon_output(
+                prmon_file, prmon_json_file, process=process)
+            max_rss = f"{max_rss:.2f}"
+            max_pss = f"{max_pss:.2f}"
         else:
             monitor_memory = False
-            log.warning('No prmon output files found. Seems this test was not wrapped with `prmon` yet.')
-            max_rss = 'N/A'
-            max_pss = 'N/A'
+            log.warning(
+                "No prmon output files found. Seems this test was not wrapped with `prmon` yet."
+            )
+            max_rss = "N/A"
+            max_pss = "N/A"
 
         replacements = {
             # for the index page, add comparison for lhcb-master-mr builds
-            'comparison': compare_str if slot == 'lhcb-master-mr' else '',
-            'dirname': dirname,
-            'start_time': startTime,
-            'end_time': endTime,
-            'version': version,
-            'platform': platform,
-            'hostname': hostname,
-            'cpu_info': cpu_info,
-            'max_rss': max_rss,
-            'max_pss': max_pss
+            "comparison": compare_str if slot == "lhcb-master-mr" else "",
+            "dirname": dirname,
+            "start_time": startTime,
+            "end_time": endTime,
+            "version": version,
+            "platform": platform,
+            "hostname": hostname,
+            "cpu_info": cpu_info,
+            "max_rss": max_rss,
+            "max_pss": max_pss,
         }
 
-        main_page = 'index.html'
-        with open(main_page, 'w') as new_html_file:
-            with open(full_output_path(main_page), 'r') as old_html_file:
+        main_page = "index.html"
+        with open(main_page, "w") as new_html_file:
+            with open(full_output_path(main_page), "r") as old_html_file:
                 # Will fail if index.html not there, as it should
                 content = old_html_file.read()
                 for old, new in replacements.items():
-                    content = content.replace(f'$${old}$$', new)
+                    content = content.replace(f"$${old}$$", new)
                 new_html_file.write(content)
 
         extensions = [".html", ".csv", ".json", ".txt", ".png"]
         files_to_upload = []
         for ext in extensions:
-            files_to_upload += [full_output_path(f) for f in list_outputs() if f.endswith(ext)]
+            files_to_upload += [
+                full_output_path(f) for f in list_outputs() if f.endswith(ext)
+            ]
 
-        log_files = [os.path.join(directory, f)
-            for f in os.listdir(directory)
-            if f.endswith(".log")]
+        log_files = [
+            os.path.join(directory, f) for f in os.listdir(directory)
+            if f.endswith(".log")
+        ]
 
         files_to_upload += log_files
         # replace old index.html (picked up by list_outputs()) with new, edited version
-        files_to_upload = [fl for fl in files_to_upload if main_page not in fl] + [main_page]
+        files_to_upload = [
+            fl for fl in files_to_upload if main_page not in fl
+        ] + [main_page]
         for f in files_to_upload:
             log.info(f"Found file {f} to upload.")
 
@@ -388,7 +451,8 @@ class BandwidthTestHandler(BaseHandler):
             publish.upload_eos_www(
                 fname,
                 os.path.join(diro, os.path.basename(fname)),
-                baseurl=os.path.join(os.getenv("LHCBPR_WWW_EOS"), "UpgradeRateTest")
+                baseurl=os.path.join(
+                    os.getenv("LHCBPR_WWW_EOS"), "UpgradeRateTest"),
             )
 
         for file in files_to_upload:
@@ -396,66 +460,98 @@ class BandwidthTestHandler(BaseHandler):
 
         if monitor_memory:
             for mon_file in [
-                prmon_file,
-                prmon_json_file,
-                'memory_consumption.png'
+                    prmon_file, prmon_json_file, "memory_consumption.png"
             ]:
                 upload_file(dirname, mon_file)
 
-        # Read message to post 
-        with open(full_output_path('message.txt'), 'r') as message_file:
+        # Read message to post
+        with open(full_output_path("message.txt"), "r") as message_file:
             message = message_file.read()
-            tot_rate = float(get_from_message(message, r"total_rate = ([\d.]+) kHz"))
-            tot_bandwidth = float(get_from_message(message, r"total_bandwidth = ([\d.]+) GB/s"))
-            n_low_rate = int(get_from_message(message, r"n_low_rate = ([\d.]+)"))
-            n_high_rate = int(get_from_message(message, r"n_high_rate = ([\d.]+)"))
-            status = bool(int(get_from_message(message, r"all_jobs_successful_bool = ([\d.]+)"))) # get_from_message will return a string '0' or '1', and bool(string) is always True, so cast to int first.
-        for metric_name, metric_val in {'rate': tot_rate, 'bandwidth': tot_bandwidth}.items():
+            tot_rate = float(
+                get_from_message(message, r"total_rate = ([\d.]+) kHz"))
+            tot_bandwidth = float(
+                get_from_message(message, r"total_bandwidth = ([\d.]+) GB/s"))
+            n_low_rate = int(
+                get_from_message(message, r"n_low_rate = ([\d.]+)"))
+            n_high_rate = int(
+                get_from_message(message, r"n_high_rate = ([\d.]+)"))
+            status = bool(
+                int(
+                    get_from_message(message,
+                                     r"all_jobs_successful_bool = ([\d.]+)"))
+            )  # get_from_message returns the string '0' or '1'; bool() of any non-empty string is True, so cast to int first.
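+            # For example, bool("0") is True (any non-empty string is
+            # truthy), while bool(int("0")) is False.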
+        for metric_name, metric_val in {
+                "rate": tot_rate,
+                "bandwidth": tot_bandwidth,
+        }.items():
             self.saveFloat(
-                f'total_{metric_name}',
+                f"total_{metric_name}",
                 metric_val,
-                description=f'total {metric_name} of {process} lines',
-                group=metric_name
+                description=f"total {metric_name} of {process} lines",
+                group=metric_name,
             )
-        status_sentence = ":bluetick: All sub-jobs in this test exited successfully.\n" if status else ":alarm: **There were errors in some of the sub-jobs of this test; please see the logs.** :alarm:\n"
+        status_sentence = (
+            ":bluetick: All sub-jobs in this test exited successfully.\n"
+            if status else
+            ":alarm: **There were errors in some of the sub-jobs of this test; please see the logs.** :alarm:\n"
+        )
 
-        send_mattermost_feedback(n_low_rate, n_high_rate, tot_rate, tot_bandwidth, options, version, targetRootWebDir, status_sentence, process)
+        send_mattermost_feedback(
+            n_low_rate,
+            n_high_rate,
+            tot_rate,
+            tot_bandwidth,
+            options,
+            version,
+            targetRootWebDir,
+            status_sentence,
+            process,
+        )
 
         # Post GitLab MR feedback; a bit more complex as it compares against a reference build.
-        if (slot 
-            in [
+        if (slot in [
                 "lhcb-master-mr",
                 "lhcb-master",
-            ]
-        ) and (
-            options
-            in [
+        ]) and (options in [
                 "Moore_hlt2_bandwidth",
                 "Moore_hlt2_2023_bandwidth",
                 "Moore_spruce_bandwidth",
+        ]):
+            csv_rate_table_names = [
+                f for f in list_outputs() if f.endswith(".csv")
             ]
-        ):
-            csv_rate_table_names = [f for f in list_outputs() if f.endswith(".csv")]
-            for ref, test, trigger in dashboard.get_ci_test_pairs(slot, build_id):
+            for ref, test, trigger in dashboard.get_ci_test_pairs(
+                    slot, build_id):
                 try:
-
-                    dfs_for_comparison = {version_str(ref): {}, version_str(test): {}}
+                    dfs_for_comparison = {
+                        version_str(ref): {},
+                        version_str(test): {}
+                    }
                     ref_version = version_str(ref)
                     new_version = version_str(test)
-                    
+
                     if test == (slot, build_id):
                         # The current build is the *test*, not the reference build
                         web_link_new = targetRootWebDir
                         dirname_new = dirname
                         tot_rate_new = tot_rate
                         tot_bandwidth_new = tot_bandwidth
-                    
+
                         # The build we're comparing to is therefore the reference build
-                        dfs_for_comparison[ref_version], web_link_ref, _, tot_rate_ref, tot_bandwidth_ref = get_info_from_comparison_build(ref_version, options, csv_rate_table_names)
+                        (
+                            dfs_for_comparison[ref_version],
+                            web_link_ref,
+                            _,
+                            tot_rate_ref,
+                            tot_bandwidth_ref,
+                        ) = get_info_from_comparison_build(
+                            ref_version, options, csv_rate_table_names)
 
                         for csv_name in csv_rate_table_names:
                             log.info(f"Trying comparison with {csv_name}.")
-                            dfs_for_comparison[new_version][csv_name] = pd.read_csv(full_output_path(csv_name))
+                            dfs_for_comparison[new_version][
+                                csv_name] = pd.read_csv(
+                                    full_output_path(csv_name))
 
                     elif ref == (slot, build_id):
                         # The current build is the *reference*, not the test build
@@ -464,11 +560,20 @@ class BandwidthTestHandler(BaseHandler):
                         tot_bandwidth_ref = tot_bandwidth
 
                         # The build we're comparing to is therefore the test build
-                        dfs_for_comparison[new_version], web_link_new, dirname_new, tot_rate_new, tot_bandwidth_new = get_info_from_comparison_build(new_version, options, csv_rate_table_names)
+                        (
+                            dfs_for_comparison[new_version],
+                            web_link_new,
+                            dirname_new,
+                            tot_rate_new,
+                            tot_bandwidth_new,
+                        ) = get_info_from_comparison_build(
+                            new_version, options, csv_rate_table_names)
 
                         for csv_name in csv_rate_table_names:
                             log.info(f"Trying comparison with {csv_name}.")
-                            dfs_for_comparison[ref_version][csv_name] = pd.read_csv(full_output_path(csv_name))
+                            dfs_for_comparison[ref_version][
+                                csv_name] = pd.read_csv(
+                                    full_output_path(csv_name))
 
                     else:
                         assert False
@@ -477,11 +582,12 @@ class BandwidthTestHandler(BaseHandler):
                         "Could not fetch results for other slot, not posting reply."
                     )
                 else:
-                    html_str, n_new_lines = make_comparison_page(dfs_for_comparison, ref_version, new_version)
+                    html_str, n_new_lines = make_comparison_page(
+                        dfs_for_comparison, ref_version, new_version)
                     with open("comparisons.html", "w") as html_file:
                         html_file.write(html_str)
                     # always upload comparison table to test dirs
-                    upload_file(dirname_new, 'comparisons.html')
+                    upload_file(dirname_new, "comparisons.html")
                     send_gitlab_feedback(
                         n_new_lines,
                         n_low_rate,
@@ -495,5 +601,5 @@ class BandwidthTestHandler(BaseHandler):
                         web_link_ref,
                         trigger,
                         status_sentence,
-                        process
+                        process,
                     )
diff --git a/handlers/BaseHandler.py b/handlers/BaseHandler.py
index f0ad91367ae110a6a4e04194d7c2af7ed4578287..72f67861607722f1462081cc990cdb6ea9423915 100644
--- a/handlers/BaseHandler.py
+++ b/handlers/BaseHandler.py
@@ -7,8 +7,8 @@ try:
 except ImportError:
     logger.warning("ROOT not imported. Skipping...")
 
-class BaseHandler(object):
 
+class BaseHandler(object):
     """For using this class in order to build a handler check the documentation
     to learn how to deploy a new handler"""
 
@@ -26,42 +26,42 @@ class BaseHandler(object):
         want to group your attributes, specify a group, e.g. "Timing"
         """
         dataDict = {
-            'name': name,
-            'data': data,
-            'description': description,
-            'group': group,
+            "name": name,
+            "data": data,
+            "description": description,
+            "group": group,
         }
         return dataDict
 
     def saveInt(self, name, data, description="", group=""):
-        if name == '' or data == '':
+        if name == "" or data == "":
             return False
 
         dataDict = self.__save(name, data, description, group)
-        dataDict['type'] = 'Integer'
+        dataDict["type"] = "Integer"
 
         self.__results.append(dataDict)
 
     def saveFloat(self, name, data, description="", group=""):
-        if name == '' or data == '':
+        if name == "" or data == "":
             return False
 
         dataDict = self.__save(name, data, description, group)
-        dataDict['type'] = 'Float'
+        dataDict["type"] = "Float"
 
         self.__results.append(dataDict)
 
     def saveString(self, name, data, description="", group=""):
-        if name == '' or data == '':
+        if name == "" or data == "":
             return False
 
         dataDict = self.__save(name, data, description, group)
-        dataDict['type'] = 'String'
+        dataDict["type"] = "String"
 
         self.__results.append(dataDict)
 
     def saveJSON(self, name, data, description="", group=""):
-        if name == '' or data == '':
+        if name == "" or data == "":
             return False
 
         # If the object is a ROOT object, use ROOT method to save JSON.
@@ -70,16 +70,18 @@ class BaseHandler(object):
             if issubclass(type(data), ROOT.TObject):
                 if ROOT.gROOT.GetVersionInt() < 60800:
                     raise NotImplementedError(
-                            "Converting ROOT objects to JSON is only supported with ROOT versions >=6.08.")
+                        "Converting ROOT objects to JSON is only supported with ROOT versions >=6.08."
+                    )
                 json_to_save = str(ROOT.TBufferJSON.ConvertToJSON(data))
             else:
                 json_to_save = json.dumps(data)
         except NameError:
-            logger.warning("Not using ROOT to save JSON as ROOT is not imported!")
+            logger.warning(
+                "Not using ROOT to save JSON as ROOT is not imported!")
             json_to_save = json.dumps(data)
 
         dataDict = self.__save(name, json_to_save, description, group)
-        dataDict['type'] = 'JSON'
+        dataDict["type"] = "JSON"
 
         self.__results.append(dataDict)
 
@@ -90,15 +92,15 @@ class BaseHandler(object):
         to the file you want to save, e.g. saveFile("Gauss-histogram.root",
         "/afs/cern.ch/user/.../tests/Gauss-30000000-100ev-20130425-histos.root")
         """
-        if name == '' or filename == '':
+        if name == "" or filename == "":
             return False
 
         dataDict = {
-            'name': name,
-            'filename': filename,
-            'description': description,
-            'group': group,
-            'type': 'File'
+            "name": name,
+            "filename": filename,
+            "description": description,
+            "group": group,
+            "type": "File",
         }
 
         self.__results.append(dataDict)
@@ -106,5 +108,5 @@ class BaseHandler(object):
     def getResults(self):
         return self.__results
 
-    def collectResults(self, directory='.'):
+    def collectResults(self, directory="."):
-        return NotImplementedError()
+        raise NotImplementedError()
diff --git a/handlers/BooleMoniROOTFileHandler.py b/handlers/BooleMoniROOTFileHandler.py
index a78626581ec4cf0bd55a6d0f20e0c2077ae44efd..2c32823288cd4adeced2d51dc9de7a86a4869a20 100644
--- a/handlers/BooleMoniROOTFileHandler.py
+++ b/handlers/BooleMoniROOTFileHandler.py
@@ -3,14 +3,12 @@ from .BaseHandler import BaseHandler
 
 
 class BooleMoniROOTFileHandler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
         files = [
-            'PR-UPG-SpillOver25ns-FT-1000ev-histos.root',
-            'Boole-histos.root'
+            "PR-UPG-SpillOver25ns-FT-1000ev-histos.root", "Boole-histos.root"
         ]
 
         fileFound = False
@@ -19,7 +17,10 @@ class BooleMoniROOTFileHandler(BaseHandler):
             print("Checking for ", f)
             if os.path.isfile(os.path.join(directory, f)):
                 fileFound = True
-                self.saveFile('BooleROOTMoniOutput', os.path.join(directory, f))
+                self.saveFile("BooleROOTMoniOutput", os.path.join(
+                    directory, f))
 
         if not fileFound:
-            raise Exception('Could not locate any supported monitoring histograms ROOT files')
+            raise Exception(
+                "Could not locate any supported monitoring histograms ROOT files"
+            )
diff --git a/handlers/BrunelMemHandler.py b/handlers/BrunelMemHandler.py
index c8ed5e68a2b8680d1c355fe8fc5edad1ff00191d..39879cfd00221579b2c2cf91968024d8978af45c 100644
--- a/handlers/BrunelMemHandler.py
+++ b/handlers/BrunelMemHandler.py
@@ -3,22 +3,23 @@ import os
 import re
 from .BaseHandler import BaseHandler
 
-class BrunelMemHandler(BaseHandler):
 
+class BrunelMemHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
+    def collectResults(self, directory):
         l = self.findHistoFile(directory)
         if len(l) != 1:
-            raise Exception("Could not locate just 1 histo file, found:" + str(l))
+            raise Exception("Could not locate just 1 histo file, found:" +
+                            str(l))
 
         f = ROOT.TFile(os.path.join(directory, l[0]))
         b = f.Get("Brunel/MemoryTool/Total Memory [MB]")
 
-        self.saveFloat("TotalMemory", b.GetMean(), "Memory [MB]", "Memory");
+        self.saveFloat("TotalMemory", b.GetMean(), "Memory [MB]", "Memory")
 
     def findHistoFile(self, dir):
         return [f for f in os.listdir(dir) if re.match(".*histos.root", f)]
diff --git a/handlers/CallgrindHandler.py b/handlers/CallgrindHandler.py
index 3041e35c8b59c72456bb523d83c4a598f1e97394..a90000bad969bc0e2e262488749dab7c10255196 100644
--- a/handlers/CallgrindHandler.py
+++ b/handlers/CallgrindHandler.py
@@ -1,9 +1,7 @@
-
 import os
 from .BaseHandler import BaseHandler
 from .parser.GaudiSequenceParser import GaudiSequenceParser
 from .timing.CallgrindLogParser import CallgrindLogParser
-
 """
 The CallGrindHandler module extracts callgrind metrics from a data structure
 which is passed to it from the timing.CallgrindLogParser. The function numbers
@@ -12,15 +10,16 @@ to extract from the CallgrindLogParser are extracted by the GaudiSequenceParser
 
 
 class CallgrindHandler(BaseHandler):
-
-    def __init__(self, directory=''):
+    def __init__(self, directory=""):
         super(self.__class__, self).__init__()
         self.directory = directory
         if not self.directory:
             self.directory = os.path.realpath(os.curdir)
         # variables used for parsing the cachegrind annotated log file
-        self.algoselect = ['GaudiSequencer/RecoDecodingSeq',
-                           'GaudiSequencer/RecoTrFastSeq']
+        self.algoselect = [
+            "GaudiSequencer/RecoDecodingSeq",
+            "GaudiSequencer/RecoTrFastSeq",
+        ]
 
     def collectResults(self, directory):
         gsp = GaudiSequenceParser(dir=directory)
@@ -31,19 +30,19 @@ class CallgrindHandler(BaseHandler):
 
         for alg in callgrindmetrics:
             metr = callgrindmetrics[alg]
-            btot = metr['bc'] + metr['bi']
-            itot = metr['ir'] + metr['dr'] + metr['dw']
-            l1m = metr['i1mr'] + metr['d1mr'] + metr['d1mw']
-            llm = metr['ilmr'] + metr['dlmr'] + metr['dlmw']
-            bm = metr['bim'] + metr['bcm']
-            cest = metr['ir'] + 10 * bm + 10 * l1m + 100 * llm
-            fp32 = metr['ifp32x1'] + 2 * metr['ifp32x2'] + 4 * metr['ifp32x4']\
-                + 8 * metr['ifp32x8']
-            fp64 = metr['ifp64x1'] + 2 * metr['ifp64x2'] + 4 * metr['ifp64x4']
-            vfp128 = 4 * metr['ifp32x4'] + 2 * metr['ifp64x4']
-            vfp256 = 8 * metr['ifp32x8'] + 4 * metr['ifp64x4']
-            vfp = 2 * metr['ifp32x2'] + vfp128 + vfp256
-            sfp = metr['ifp32x1'] + metr['ifp64x1']
+            btot = metr["bc"] + metr["bi"]
+            itot = metr["ir"] + metr["dr"] + metr["dw"]
+            l1m = metr["i1mr"] + metr["d1mr"] + metr["d1mw"]
+            llm = metr["ilmr"] + metr["dlmr"] + metr["dlmw"]
+            bm = metr["bim"] + metr["bcm"]
+            cest = metr["ir"] + 10 * bm + 10 * l1m + 100 * llm
+            fp32 = (metr["ifp32x1"] + 2 * metr["ifp32x2"] + 4 * metr["ifp32x4"]
+                    + 8 * metr["ifp32x8"])
+            fp64 = metr["ifp64x1"] + 2 * metr["ifp64x2"] + 4 * metr["ifp64x4"]
+            vfp128 = 4 * metr["ifp32x4"] + 2 * metr["ifp64x4"]
+            vfp256 = 8 * metr["ifp32x8"] + 4 * metr["ifp64x4"]
+            vfp = 2 * metr["ifp32x2"] + vfp128 + vfp256
+            sfp = metr["ifp32x1"] + metr["ifp64x1"]
             flop = fp32 + fp64
 
             rsimd = 0
@@ -61,41 +60,41 @@ class CallgrindHandler(BaseHandler):
             for val in callgrindvalues:
                 valname = val[0]
                 valdesc = val[1]
-                metname = '%s_%s' % (valname, alg)
+                metname = "%s_%s" % (valname, alg)
                 self.saveInt(metname, metr[valname], valdesc,
-                             'callgrind_metric')
-
-            self.saveInt('l1m_' + alg, l1m, 'L1 Miss Sum', 'callgrind_metric')
-            self.saveInt('llm_' + alg, llm, 'Last Levl Miss Sum',
-                         'callgrind_metric')
-            self.saveInt('bm_' + alg, bm, 'Branch Missprediction',
-                         'callgrind_metric')
-            self.saveInt('cest_' + alg, cest, 'Cycle Estimation',
-                         'callgrind_metric')
-            self.saveInt('fp32_' + alg, fp32, 'fp 32 operations',
-                         'callgrind_metric')
-            self.saveInt('fp64_' + alg, fp64, 'fp 64 operations',
-                         'callgrind_metric')
-            self.saveInt('vfp128_' + alg, vfp128, 'simd 128 fp operations',
-                         'callgrind_metric')
-            self.saveInt('vfp256_' + alg, vfp256, 'simd 256 fp operations',
-                         'callgrind_metric')
-            self.saveInt('vfp_' + alg, vfp, 'simd fp operations',
-                         'callgrind_metric')
-            self.saveInt('sfp_' + alg, sfp, 'scalar fp operations',
-                         'callgrind_metric')
-            self.saveInt('flop_' + alg, flop, 'fp operations',
-                         'callgrind_metric')
-            self.saveFloat('rsimd_' + alg, rsimd, 'ratio simd operations',
-                           'callgrind_metric')
-            self.saveFloat('rbm_' + alg, rbm, 'ratio branch misspredictions',
-                           'callgrind_metric')
-            self.saveFloat('rcm_' + alg, rcm, 'ratio cache misses',
-                           'callgrind_metric')
+                             "callgrind_metric")
+
+            self.saveInt("l1m_" + alg, l1m, "L1 Miss Sum", "callgrind_metric")
+            self.saveInt("llm_" + alg, llm, "Last Levl Miss Sum",
+                         "callgrind_metric")
+            self.saveInt("bm_" + alg, bm, "Branch Missprediction",
+                         "callgrind_metric")
+            self.saveInt("cest_" + alg, cest, "Cycle Estimation",
+                         "callgrind_metric")
+            self.saveInt("fp32_" + alg, fp32, "fp 32 operations",
+                         "callgrind_metric")
+            self.saveInt("fp64_" + alg, fp64, "fp 64 operations",
+                         "callgrind_metric")
+            self.saveInt("vfp128_" + alg, vfp128, "simd 128 fp operations",
+                         "callgrind_metric")
+            self.saveInt("vfp256_" + alg, vfp256, "simd 256 fp operations",
+                         "callgrind_metric")
+            self.saveInt("vfp_" + alg, vfp, "simd fp operations",
+                         "callgrind_metric")
+            self.saveInt("sfp_" + alg, sfp, "scalar fp operations",
+                         "callgrind_metric")
+            self.saveInt("flop_" + alg, flop, "fp operations",
+                         "callgrind_metric")
+            self.saveFloat("rsimd_" + alg, rsimd, "ratio simd operations",
+                           "callgrind_metric")
+            self.saveFloat("rbm_" + alg, rbm, "ratio branch misspredictions",
+                           "callgrind_metric")
+            self.saveFloat("rcm_" + alg, rcm, "ratio cache misses",
+                           "callgrind_metric")
 
     def run(self):
         self.collectResults()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     CallgrindHandler().run()
diff --git a/handlers/CommentClassHandler.py b/handlers/CommentClassHandler.py
index 51d23c8dfc2aff47072df63814160d2a210a0506..c671b0e55e91a501e4554adf38ba2c415e1e109e 100644
--- a/handlers/CommentClassHandler.py
+++ b/handlers/CommentClassHandler.py
@@ -3,36 +3,43 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class CommentClassHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
-        self.results  = []
+        self.results = []
 
     def collectResults(self, directory):
-        logfile = 'profile_info.txt'
+        logfile = "profile_info.txt"
         run_path = os.path.join(directory, logfile)
 
-        regxp = "^comment\s*=\s*\"(.*)\s*/\s*(.*)\""
+        regxp = r'^comment\s*=\s*"(.*)\s*/\s*(.*)"'
         comment = ""
         cclass = ""
         try:
-           loglines = open(run_path, 'r')
-           for l in loglines.readlines():
-              m = re.match(regxp, l)
-              if m != None:
-                 comment = m.group(1)
-                 cclass = m.group(2)
-                 break
-           loglines.close()
+            loglines = open(run_path, "r")
+            for l in loglines.readlines():
+                m = re.match(regxp, l)
+                if m is not None:
+                    comment = m.group(1)
+                    cclass = m.group(2)
+                    break
+            loglines.close()
         except IOError:
-           raise Exception(str(self.__class__)+": File not found, this handler expects 'profile_info.txt' file in the result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": File not found, this handler expects 'profile_info.txt' file in the result directory"
+            )
 
         self.saveString("Comment", comment, "Comment Results", "JobInfo")
         self.saveString("Class", cclass, "Classify Results", "JobInfo")
 
         print(comment, cclass)
 
+
 if __name__ == "__main__":
     cch = CommentClassHandler()
-    cch.collectResults('/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v20r1p1/x86_64-slc6-gcc46-opt/20130919_1659_time')
+    cch.collectResults(
+        "/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v20r1p1/x86_64-slc6-gcc46-opt/20130919_1659_time"
+    )
diff --git a/handlers/DataChallengesROOTFileHandler.py b/handlers/DataChallengesROOTFileHandler.py
index 76a371f0481798137a7e75864249a918c09044c0..5de3bc9dcd532d17314f2981c78b3df4250d81cc 100644
--- a/handlers/DataChallengesROOTFileHandler.py
+++ b/handlers/DataChallengesROOTFileHandler.py
@@ -3,19 +3,18 @@ from .BaseHandler import BaseHandler
 
 
 class DataChallengesROOTFileHandler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
         files = [
-            'hlt2_reco_baseline_DC.root',
-            'hlt1_allen_track_reconstruction.root',
-            'Hlt1SeedAndMatchTrackingResolutionAllen.root',
-            'HLT1HLT2Checker_fitted_profile.root',
-            'HLT1HLT2Checker_fitted_profile_new.root',
-            'histos_hlt2_light_reco_pr_kf_without_UT_on_data_with_monitoring.root',
-            'MCMatching_baseline_MiniBias.root',
+            "hlt2_reco_baseline_DC.root",
+            "hlt1_allen_track_reconstruction.root",
+            "Hlt1SeedAndMatchTrackingResolutionAllen.root",
+            "HLT1HLT2Checker_fitted_profile.root",
+            "HLT1HLT2Checker_fitted_profile_new.root",
+            "histos_hlt2_light_reco_pr_kf_without_UT_on_data_with_monitoring.root",
+            "MCMatching_baseline_MiniBias.root",
         ]
 
         fileFound = False
@@ -27,4 +26,4 @@ class DataChallengesROOTFileHandler(BaseHandler):
                 self.saveFile(f, os.path.join(directory, f))
 
         if not fileFound:
-            raise Exception('No ROOT files found')
+            raise Exception("No ROOT files found")
diff --git a/handlers/DetailedTimingInVolumesHandler.py b/handlers/DetailedTimingInVolumesHandler.py
index 66f0e118578c34c9673f27750dd7e4145e449975..c029ded90a1f693a6dbdf86a8255769b62c68586 100644
--- a/handlers/DetailedTimingInVolumesHandler.py
+++ b/handlers/DetailedTimingInVolumesHandler.py
@@ -5,78 +5,129 @@ import re
 import json
 from .BaseHandler import BaseHandler
 
+
 class DetailedTimingInVolumesHandler(BaseHandler):
-	def __init__(self):
-		super(self.__class__, self).__init__()
-		self.resulting_data = {}
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.resulting_data = {}
+
+    # Read in the log file and return its content as a string
+    def read_logfile(self, log_file=None):
+        if not os.path.exists(log_file):
+            raise Exception("File %s does not exist" % log_file)
 
-	# Read in the log file and return its content as a string
-	def read_logfile(self, log_file=None):
-		if not os.path.exists(log_file):
-			raise Exception("File %s does not exist" % log_file)
+        with open(log_file, mode="r") as log_file_handler:
+            log_data = ""
+            for line in log_file_handler:
+                log_data += line
+            return log_data
 
-		with open(log_file, mode="r") as log_file_handler:
-			log_data = ""
-			for line in log_file_handler:
-				log_data += line
-			return log_data
+    # Parse the log data and return an array of lists of tuples
+    def parse_log_data(self, log_data=None):
+        resulting_data = {}
 
-	# Parse the log data and return an array of lists of tuples
-	def parse_log_data(self, log_data=None):
-		resulting_data = {}
+        # Match the blocks of data in the log
+        matched_log_blocks = re.finditer(
+            "\*(.+?)\n\n", log_data, flags=re.DOTALL)
+        if matched_log_blocks:
+            for match in matched_log_blocks:
+                data_block_title = ""
+                output_data_block = []
 
-		# Match the blocks of data in the log
-		matched_log_blocks = re.finditer("\*(.+?)\n\n", log_data, flags=re.DOTALL)
-		if matched_log_blocks:
-			for match in matched_log_blocks:
-				data_block_title = ""
-				output_data_block = []
+                input_data_block = match.group(1).split("\n")
+                for block_line in input_data_block:
+                    # A new block with a title
+                    if block_line.startswith("*"):
+                        match_block_title = re.search(
+                            "\*\s(.+?)\s\*", block_line, flags=re.IGNORECASE)
+                        if match_block_title:
+                            data_block_title = (
+                                match_block_title.group(1).lower().replace(
+                                    " ", "_").split("_(", 1)[0])
+                    # Data within a block
+                    else:
+                        # Volume or Process section
+                        if block_line.lower().startswith(
+                                "volume") or block_line.lower().startswith(
+                                    "process"):
+                            block_contents_match = re.search(
+                                ".+?: (.+?) cumulated time (.+?) seconds",
+                                block_line,
+                                flags=re.IGNORECASE,
+                            )
+                            if block_contents_match:
+                                output_data_block.append([
+                                    block_contents_match.group(1).lower(),
+                                    block_contents_match.group(2),
+                                ])
+                        # Timing per particle in specific detectors section
+                        elif data_block_title.startswith(
+                                "timing_per_particle"):
+                            block_contents_match = re.search(
+                                "(.+?)\s+?: cumulated time (.+?) seconds \((.+?)\)",
+                                block_line,
+                                flags=re.IGNORECASE,
+                            )
+                            if block_contents_match:
+                                output_data_block.append([
+                                    block_contents_match.group(1).lower(),
+                                    block_contents_match.group(2),
+                                    block_contents_match.group(3),
+                                ])
+                            # Cumulative record of the section
+                            elif block_line.lower().startswith("time in"):
+                                block_contents_match = re.search(
+                                    "Time in (.+?): (.+?) seconds \((.+?)\s",
+                                    block_line,
+                                    flags=re.IGNORECASE,
+                                )
+                                if block_contents_match:
+                                    output_data_block.append([
+                                        block_contents_match.group(1).lower(),
+                                        block_contents_match.group(2),
+                                        block_contents_match.group(3),
+                                    ])
+                        # Summary section
+                        elif data_block_title.startswith("summary"):
+                            block_contents_match = re.search(
+                                "Total time in (.+?): (.+?) seconds \((.+?)\s",
+                                block_line,
+                                flags=re.IGNORECASE,
+                            )
+                            if block_contents_match:
+                                output_data_block.append([
+                                    block_contents_match.group(1).lower(),
+                                    block_contents_match.group(2),
+                                    block_contents_match.group(3),
+                                ])
+                        # Other volumes section
+                        elif data_block_title.startswith("other"):
+                            block_contents_match = re.search(
+                                "(.+?): (.+?)$",
+                                block_line,
+                                flags=re.IGNORECASE)
+                            if block_contents_match:
+                                output_data_block.append([
+                                    block_contents_match.group(1).lower(),
+                                    block_contents_match.group(2).lower(),
+                                ])
 
-				imput_data_block = match.group(1).split("\n")
-				for block_line in imput_data_block:
-					# A new block with a title
-					if block_line.startswith("*"):
-						match_block_title = re.search("\*\s(.+?)\s\*", block_line, flags=re.IGNORECASE)
-						if match_block_title:
-							data_block_title = match_block_title.group(1).lower().replace(" ", "_").split('_(', 1)[0]
-					# Data within a block
-					else:
-						# Volume or Process section
-						if block_line.lower().startswith("volume") or block_line.lower().startswith("process"):
-							block_contents_match = re.search(".+?: (.+?) cumulated time (.+?) seconds", block_line, flags=re.IGNORECASE)
-							if block_contents_match:
-								output_data_block.append([block_contents_match.group(1).lower(), block_contents_match.group(2)])
-						# Timing per particle in specific detectors section
-						elif data_block_title.startswith("timing_per_particle"):
-							block_contents_match = re.search("(.+?)\s+?: cumulated time (.+?) seconds \((.+?)\)", block_line, flags=re.IGNORECASE)
-							if block_contents_match:
-								output_data_block.append([block_contents_match.group(1).lower(), block_contents_match.group(2), block_contents_match.group(3)])
-							# Cumulative record of the section
-							elif block_line.lower().startswith("time in"):
-								block_contents_match = re.search("Time in (.+?): (.+?) seconds \((.+?)\s", block_line, flags=re.IGNORECASE)
-								if block_contents_match:
-									output_data_block.append([block_contents_match.group(1).lower(), block_contents_match.group(2), block_contents_match.group(3)])
-						# Summary section
-						elif data_block_title.startswith("summary"):
-							block_contents_match = re.search("Total time in (.+?): (.+?) seconds \((.+?)\s", block_line, flags=re.IGNORECASE)
-							if block_contents_match:
-								output_data_block.append([block_contents_match.group(1).lower(), block_contents_match.group(2), block_contents_match.group(3)])
-						# Other volumes section
-						elif data_block_title.startswith("other"):
-							block_contents_match = re.search("(.+?): (.+?)$", block_line, flags=re.IGNORECASE)
-							if block_contents_match:
-								output_data_block.append([block_contents_match.group(1).lower(), block_contents_match.group(2).lower()])
+                    resulting_data[data_block_title] = output_data_block
 
-					resulting_data[data_block_title] = output_data_block
+        return resulting_data
 
-		return resulting_data
+    def collectResults(self, directory):
+        log_file = os.path.join(directory, "Timing.log")
+        resulting_data = self.parse_log_data(self.read_logfile(log_file))
+        self.saveJSON(
+            "detailed_timing_in_volumes",
+            resulting_data,
+            "Full information on detailed timing in volumes",
+            "detailed_timing_in_volumes",
+        )
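+        # The saved JSON maps each block title to the list of
+        # [name, seconds(, percentage)] entries collected by parse_log_data.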
 
-	def collectResults(self, directory):
-		log_file = os.path.join(directory, "Timing.log")
-		resulting_data = self.parse_log_data(self.read_logfile(log_file))
-		self.saveJSON("detailed_timing_in_volumes", resulting_data, "Full information on detailed timing in volumes", "detailed_timing_in_volumes")
 
 if __name__ == "__main__":
-	dtvh = DetailedTimingInVolumesHandler()
+    dtvh = DetailedTimingInVolumesHandler()
 
 # EOF
diff --git a/handlers/DummyHandler.py b/handlers/DummyHandler.py
index e4a70ca8b9394b9dade1cd92b598a8c5f05c2fc6..23635df964d3af5502629f8f8637bf0a2fcb023a 100644
--- a/handlers/DummyHandler.py
+++ b/handlers/DummyHandler.py
@@ -2,15 +2,13 @@ import os, sys, re
 from .BaseHandler import BaseHandler
 import random
 
+
 class DummyHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-        self.saveFloat("MyMeasure", random.gauss(42, 5), "Dummy timing value [ms]", "Timing")
-
-
-
+    def collectResults(self, directory):
+        self.saveFloat("MyMeasure", random.gauss(42, 5),
+                       "Dummy timing value [ms]", "Timing")
diff --git a/handlers/EMHandler.py b/handlers/EMHandler.py
index d4e6ca0cc871dbfd7af14ef67100ef3eefc78166..9d02110d145be12849136404a779a672b5430f03 100644
--- a/handlers/EMHandler.py
+++ b/handlers/EMHandler.py
@@ -14,6 +14,9 @@ class EMHandler(BaseHandler):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        rootFiles = glob(directory+"*.root" if directory.endswith("/") else directory+"/*.root")
+        rootFiles = glob(directory + "*.root" if directory.endswith("/")
+                         else directory + "/*.root")
         for files in rootFiles:
-            self.saveFile(re.sub(".root","",files.split("RootFile",1)[1]), files)
+            self.saveFile(
+                re.sub(".root", "",
+                       files.split("RootFile", 1)[1]), files)
diff --git a/handlers/EMHandlerSummary.py b/handlers/EMHandlerSummary.py
index b2b804af7082f093e0ce439e96ae9f21926ebb9f..f0e863654d3ef967e8b2f649b58045a298fca330 100644
--- a/handlers/EMHandlerSummary.py
+++ b/handlers/EMHandlerSummary.py
@@ -18,346 +18,381 @@ from array import array
 
 class landau:
     ############################################################################################
-    def landau_fit(self,fileName,histoName,betagamma):
-        '''
+    def landau_fit(self, fileName, histoName, betagamma):
+        """
         FIT LANDAU : Get the ROOT file that contains the energy deposit in VELO
                      and get MPV and FWHM. The root file that contains the histogram
                      with the energy deposits is loaded. In this method a parabolic fit
                      around the peak using up to second orders of polynomials is performed.
                      The method returns the MPV and FWHM.
-        '''
-        file  = ROOT.TFile(fileName)
+        """
+        file = ROOT.TFile(fileName)
         histo = file.Get(histoName)
 
         histo.Rebin(1)
-        fpol = ROOT.TF1("f1","pol2",0,2000)
+        fpol = ROOT.TF1("f1", "pol2", 0, 2000)
         # this limit is set in order to avoid getting inverted parabola around the peak due to
         # low statistics
-        fpol.SetParLimits(2,-100,0)
-        xmax, xval = self.landau_getMPV(histo,fpol,fileName,betagamma)
-        right,left = self.landau_getFWHM(histo,xval)
-        scale = (right - left)
-        return xmax,scale
-
-
+        fpol.SetParLimits(2, -100, 0)
+        xmax, xval = self.landau_getMPV(histo, fpol, fileName, betagamma)
+        right, left = self.landau_getFWHM(histo, xval)
+        scale = right - left
+        return xmax, scale
 
     ##############################################################################################
-    def landau_getMPV(self,histo,pol,fileName,betagamma):
-        '''
+    def landau_getMPV(self, histo, pol, fileName, betagamma):
+        """
         FIT LANDAU : Find the MPV from the landau distribution. A search for the MPV is performed
                      around the peak using a parabolic fit. The function returns the position and
                      the value of the peak
-        '''
+        """
         x = histo.GetBinCenter(histo.GetMaximumBin())
         for time in range(3):
             # the +/- is ad hoc, but in general the larger the statistics in the histogram
             # the smaller the range that we need.
-            histo.Fit(pol,"QB","",x-8, x+8)
-            x = self.landau_getMax(pol,x-8,x+8)
+            histo.Fit(pol, "QB", "", x - 8, x + 8)
+            x = self.landau_getMax(pol, x - 8, x + 8)
         return x, pol.Eval(x)
 
-
     ###############################################################################################
-    def landau_fitgen(self,fileName,histo,betagamma):
-        '''
+    def landau_fitgen(self, fileName, histo, betagamma):
+        """
         FIT LANDAU GEN : Fit the histogram with the generated landau distribution. The function
                          returns MPV and width of the landau distribution.
-        '''
-        fpol = ROOT.TF1("f1","pol2",0,2000)
-        xmax, xval = self.landau_getMPV(histo,fpol,fileName,betagamma)
-
-        right,left = self.landau_getFWHM(histo,xval)
-        scale = (right - left)
-        return xmax,scale
-
+        """
+        fpol = ROOT.TF1("f1", "pol2", 0, 2000)
+        xmax, xval = self.landau_getMPV(histo, fpol, fileName, betagamma)
 
+        right, left = self.landau_getFWHM(histo, xval)
+        scale = right - left
+        return xmax, scale
 
     ##############################################################################################
-    def landau_getMax(self,pol,min,max):
-        '''
+    def landau_getMax(self, pol, min, max):
+        """
         FIT LANDAU : Find maximum value of distribution. Takes a range and the parabolic fit result
                      around the peak and scans for the maximum value from the fit. The function returns
                      the position of the maximum
-        '''
-        start=min
-        end  =max
-        step =(max-min)/1000
-        maxVal=pol.Eval(start)
-        maxX  =start
-        while(start<end):
-            if(pol.Eval(start)>maxVal):
-                maxX  = start
-                maxVal= pol.Eval(start)
-            start=start+step
+        """
+        start = min
+        end = max
+        step = (max - min) / 1000
+        maxVal = pol.Eval(start)
+        maxX = start
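+        # Linear scan in 1000 steps; for a pol2 the extremum could also be
+        # computed analytically at x = -p1 / (2 * p2), but scanning keeps
+        # the result inside [min, max].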
+        while start < end:
+            if pol.Eval(start) > maxVal:
+                maxX = start
+                maxVal = pol.Eval(start)
+            start = start + step
         return maxX
 
-
     ################################################################################################
-    def landau_getFWHM(self,histo,mpv):
-        '''
+    def landau_getFWHM(self, histo, mpv):
+        """
         FIT LANDAU : Find FWHM
-        '''
-        maxBin   = histo.GetMaximumBin()
-        reqLevel = 0.5*mpv
-
-        ileft    = maxBin
-        while(ileft > 0 and (histo.GetBinContent(ileft)>reqLevel)):
-            ileft=ileft-1
-            left     = self.landau_interpolate(ileft,reqLevel,-1,histo)
-
-        iright   = maxBin
-        while( iright < histo.GetNbinsX() and histo.GetBinContent(iright)>reqLevel):
-            iright=iright+1
-            right    = self.landau_interpolate(iright,reqLevel,1,histo)
+        """
+        maxBin = histo.GetMaximumBin()
+        reqLevel = 0.5 * mpv
+
+        ileft = maxBin
+        while ileft > 0 and (histo.GetBinContent(ileft) > reqLevel):
+            ileft = ileft - 1
+            left = self.landau_interpolate(ileft, reqLevel, -1, histo)
+
+        iright = maxBin
+        while iright < histo.GetNbinsX() and histo.GetBinContent(
+                iright) > reqLevel:
+            iright = iright + 1
+            right = self.landau_interpolate(iright, reqLevel, 1, histo)
         return right, left
 
-
     ######################################################################################
-    def landau_interpolate(self,bin, req, off, histo):
-        '''
+    def landau_interpolate(self, bin, req, off, histo):
+        """
         FIT LANDAU : Interpolate. The function performs an interpolation of the parabolic
                      fit.
-        '''
-        tmp1 = histo.GetBinContent(bin) - histo.GetBinContent(bin+off)
-        tmp2 = (histo.GetBinCenter(bin) - histo.GetBinCenter(bin+off))
-        step=off
-        while(tmp1==0 or tmp2==0):
-            off=off+1
-            tmp1 = histo.GetBinContent(bin) - histo.GetBinContent(bin+off)
-            tmp2 = (histo.GetBinCenter(bin) - histo.GetBinCenter(bin+off))
-
-        slope=tmp1/tmp2
-        const = histo.GetBinContent(bin) - slope*histo.GetBinCenter(bin)
-        return (req-const)/slope
-
+        """
+        tmp1 = histo.GetBinContent(bin) - histo.GetBinContent(bin + off)
+        tmp2 = histo.GetBinCenter(bin) - histo.GetBinCenter(bin + off)
+        step = off
+        while tmp1 == 0 or tmp2 == 0:
+            off = off + 1
+            tmp1 = histo.GetBinContent(bin) - histo.GetBinContent(bin + off)
+            tmp2 = histo.GetBinCenter(bin) - histo.GetBinCenter(bin + off)
+
+        slope = tmp1 / tmp2
+        const = histo.GetBinContent(bin) - slope * histo.GetBinCenter(bin)
+        return (req - const) / slope
 
     #######################################################################################
-    def landau_genlandau(self,beta,gamma):
-        '''
+    def landau_genlandau(self, beta, gamma):
+        """
         FIT LANDAU : Generate landau distribution. The function generates a landau distribution
                      including smearing effects. The function returns a histogram with the
                      energy deposits.
-        '''
+        """
         thikness = 300
-        b = self.landau_genlandauWidth(beta,thikness)
-        a = self.landau_genlandauMPV(b,beta,gamma) + (0.226*b)
-
-        fL = ROOT.TF1("fL","landau",0.,600,)
-        fL.SetParameters(100000,a,b)
-
-        fG = ROOT.TF1("fG","gaus",-50,50)
+        b = self.landau_genlandauWidth(beta, thikness)
+        a = self.landau_genlandauMPV(b, beta, gamma) + (0.226 * b)
+
+        fL = ROOT.TF1("fL", "landau", 0.0, 600)
+        fL.SetParameters(100000, a, b)
+
+        fG = ROOT.TF1("fG", "gaus", -50, 50)
         bindingEffect = self.landau_atomicBinding(thikness)
-        fG.SetParameters(1,0.,bindingEffect)
+        fG.SetParameters(1, 0.0, bindingEffect)
 
-        fGN = ROOT.TF1("fGN","gaus",-50,50)
-        fGN.SetParameters(1,0.,3.8)
+        fGN = ROOT.TF1("fGN", "gaus", -50, 50)
+        fGN.SetParameters(1, 0.0, 3.8)
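+        # Note: fGN is defined but never folded into val below; only fL and
+        # fG contribute to the generated deposits.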
 
-        fhisto = ROOT.TH1F("fhisto","",1000,0,500)
+        fhisto = ROOT.TH1F("fhisto", "", 1000, 0, 500)
         for i in range(100000):
-            val = fL.GetRandom()+fG.GetRandom()
+            val = fL.GetRandom() + fG.GetRandom()
             fhisto.Fill(val)
 
         return fhisto
 
     ##########################################################################################
-    def landau_genlandauWidth(self,beta,t):
-        '''
+    def landau_genlandauWidth(self, beta, t):
+        """
         FIT LANDAU : Generate landau width. The function generates the width of the landau
                      distribution. The function returns the width of the landau.
-        '''
-        return (0.017825*t/(beta**2))
-
+        """
+        return 0.017825 * t / (beta**2)
 
     #########################################################################################
-    def landau_densityEffect(self,beta,gamma):
-        '''
+    def landau_densityEffect(self, beta, gamma):
+        """
         FIT LANDAU : Generate density effects. The function calculates density effects. The
                      function returns density effects.
-        '''
-        x = math.log(beta*gamma,10)
+        """
+        x = math.log(beta * gamma, 10)
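+        # x = log10(beta * gamma); the 4.606 prefactor equals 2 * ln(10), as
+        # in Sternheimer-style density-effect parameterisations.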
         dEffect = 0
-        if(x>0.09666 and x<2.5):
-            dEffect = 4.606*x -4.435 + (0.3755*pow(2.5-x,2.75))
+        if x > 0.09666 and x < 2.5:
+            dEffect = 4.606 * x - 4.435 + (0.3755 * pow(2.5 - x, 2.75))
         else:
-            dEffect=4.606*x-4.435
+            dEffect = 4.606 * x - 4.435
         return dEffect
 
     ##############################################################################################
-    def landau_genlandauMPV(self,b,beta,gamma):
-        '''
+    def landau_genlandauMPV(self, b, beta, gamma):
+        """
         FIT LANDAU :  Generate MPV. The function generates the MPV. The function returns MPV.
-        '''
-        return b*(math.log(2.0*511.0*(beta*gamma)**2/0.174)+
-                  math.log(b/0.174)+0.2-beta**2-self.landau_densityEffect(beta,gamma))
-
+        """
+        return b * (math.log(2.0 * 511.0 *
+                             (beta * gamma)**2 / 0.174) + math.log(b / 0.174) +
+                    0.2 - beta**2 - self.landau_densityEffect(beta, gamma))
 
     ##############################################################################################
-    def landau_atomicBinding(self,t):
-        '''
+    def landau_atomicBinding(self, t):
+        """
         FIT LANDAU :  Generate atomic binding effects. The function generates atomic binding
                       effects. The function returns atomic binding effects.
-        '''
-        return math.sqrt(0.18*t)
-
+        """
+        return math.sqrt(0.18 * t)
 
 
 class toolsPlots:
     ###############################################################################################
-    def getParams(self,id,p):
-        '''
+    def getParams(self, id, p):
+        """
         BETA - GAMMA : Get beta expression. The function returns beta and gamma
-        '''
-        mass=0
-        if(id==11.0 ):mass=0.000511
-        if(id==13.0 ):mass=0.10566
-        if(id==211.0):mass=0.13498
-
-        energy =math.sqrt(p**2 + mass**2)
-        gamma = energy/mass
-        beta  = p/(gamma*mass)
-        return beta,gamma
+        """
+        mass = 0
+        if id == 11.0:
+            mass = 0.000511
+        if id == 13.0:
+            mass = 0.10566
+        if id == 211.0:
+            mass = 0.13498
+
+        energy = math.sqrt(p**2 + mass**2)
+        gamma = energy / mass
+        beta = p / (gamma * mass)
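+        # Worked example: a 10 GeV/c muon (id == 13.0, mass 0.10566 GeV)
+        # gives gamma ~ 94.6 and beta ~ 0.99994.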
+        return beta, gamma
 
     ###############################################################################################
-    def loadFiles(self,path):
-        '''
+    def loadFiles(self, path):
+        """
         LOAD Files :  The function loads all root files and returns a list of them.
-        '''
-        return glob(path+"*.root")
+        """
+        return glob(path + "*.root")
 
     ###############################################################################################
-    '''
+    """
     LOAD Files : Only files with more than 1000 entries will be considered. Root files with fewer
                  entries will not be used, since the fit in the peak does not make much sense any
                  more. The function returns a binary decision.
-    '''
-    def checkhisto(self,file,histo):
-        f=ROOT.TFile(file)
-        goodHisto=False
-        if(ROOT.gROOT.FindObject("energy_deposit")):
-            h=f.Get("energy_deposit")
-            if(h.GetEntries()>1000):
-                goodHisto=True
+    """
+
+    def checkhisto(self, file, histo):
+        f = ROOT.TFile(file)
+        goodHisto = False
+        if ROOT.gROOT.FindObject("energy_deposit"):
+            h = f.Get("energy_deposit")
+            if h.GetEntries() > 1000:
+                goodHisto = True
             else:
-                goodHisto=False
+                goodHisto = False
         else:
-            goodHisto=False
+            goodHisto = False
         return goodHisto
 
-
     ##########################################################################################################################
-    '''
+    """
     Make Graphs : The function makes the graphs that will be presented; most of the style is already attached. Returns a list
                   of tuples that contain the TGraphs and info used for the final massage.
-    '''
-    def getGraphs(self,objects,MPV,FWHM):
-        tg   = []
-        book = dict()
-        book = {11.0 : {'NoCuts':(array('d'),array('d')),'Opt1'  :(array('d'),array('d')),'Opt2'  :(array('d'),array('d')),
-                        'Opt3'  :(array('d'),array('d')),'LHCb'  :(array('d'),array('d'))},
-                13.0 : {'NoCuts':(array('d'),array('d')),'Opt1'  :(array('d'),array('d')),'Opt2'  :(array('d'),array('d')),
-                        'Opt3'  :(array('d'),array('d')),'LHCb'  :(array('d'),array('d'))},
-                211.0: {'NoCuts':(array('d'),array('d')),'Opt1'  :(array('d'),array('d')),'Opt2'  :(array('d'),array('d')),
-                        'Opt3'  :(array('d'),array('d')),'LHCb'  :(array('d'),array('d'))},
-                0.0  : {'theory':(array('d'),array('d'))}}
+    """
 
+    def getGraphs(self, objects, MPV, FWHM):
+        tg = []
+        book = dict()
+        book = {
+            11.0: {
+                "NoCuts": (array("d"), array("d")),
+                "Opt1": (array("d"), array("d")),
+                "Opt2": (array("d"), array("d")),
+                "Opt3": (array("d"), array("d")),
+                "LHCb": (array("d"), array("d")),
+            },
+            13.0: {
+                "NoCuts": (array("d"), array("d")),
+                "Opt1": (array("d"), array("d")),
+                "Opt2": (array("d"), array("d")),
+                "Opt3": (array("d"), array("d")),
+                "LHCb": (array("d"), array("d")),
+            },
+            211.0: {
+                "NoCuts": (array("d"), array("d")),
+                "Opt1": (array("d"), array("d")),
+                "Opt2": (array("d"), array("d")),
+                "Opt3": (array("d"), array("d")),
+                "LHCb": (array("d"), array("d")),
+            },
+            0.0: {
+                "theory": (array("d"), array("d"))
+            },
+        }
 
         for key in objects:
-            book[objects[key]["ID"]][objects[key]["PL"]][0].append(objects[key]["BETAGAMMA"])
-            book[objects[key]["ID"]][objects[key]["PL"]][1].append(objects[key]["MPV"] if MPV else objects[key]["MPV"]*1./objects[key]["FWHM"])
-
-
+            book[objects[key]["ID"]][objects[key]["PL"]][0].append(
+                objects[key]["BETAGAMMA"])
+            book[objects[key]["ID"]][objects[key]["PL"]][1].append(
+                objects[key]["MPV"] if MPV else objects[key]["MPV"] * 1.0 /
+                objects[key]["FWHM"])
 
         for key in book:
             for kkey in book[key]:
-                if(len(book[key][kkey][0])!=0):
-                    name=" "
-                    if key==11.0 :name="e^{#pm}"
-                    if key==13.0 :name="#mu^{#pm}"
-                    if key==211.0:name="#pi^{#pm}"
-
-                    tg.append((ROOT.TGraph(len(book[key][kkey][0]),book[key][kkey][0],book[key][kkey][1]),(name,kkey)))
-
-
-        if(MPV):
-            tg[0][0].GetYaxis().SetRangeUser(70,220)
-            tg[0][0].GetXaxis().SetLimits(0.9,1000000)
+                if len(book[key][kkey][0]) != 0:
+                    name = " "
+                    if key == 11.0:
+                        name = "e^{#pm}"
+                    if key == 13.0:
+                        name = "#mu^{#pm}"
+                    if key == 211.0:
+                        name = "#pi^{#pm}"
+
+                    tg.append((
+                        ROOT.TGraph(
+                            len(book[key][kkey][0]),
+                            book[key][kkey][0],
+                            book[key][kkey][1],
+                        ),
+                        (name, kkey),
+                    ))
+
+        if MPV:
+            tg[0][0].GetYaxis().SetRangeUser(70, 220)
+            tg[0][0].GetXaxis().SetLimits(0.9, 1000000)
             tg[0][0].SetTitle("")
         else:
-            tg[0][0].GetYaxis().SetRangeUser(2,8)
-            tg[0][0].GetXaxis().SetLimits(0.9,1000000)
+            tg[0][0].GetYaxis().SetRangeUser(2, 8)
+            tg[0][0].GetXaxis().SetLimits(0.9, 1000000)
             tg[0][0].SetTitle("")
 
-
         tg[0][0].GetXaxis().SetTitle("#beta#gamma")
         tg[0][0].GetYaxis().SetTitle("MPV [eV]" if MPV else "MPV/FWHM")
         tg[0][0].SetMarkerStyle(24)
         tg[0][0].SetMarkerSize(2)
         tg[0][0].SetMarkerColor(ROOT.kBlack)
 
-        index=4
-        for i in range(0,len(tg)):
+        index = 4
+        for i in range(0, len(tg)):
             tg[i][0].SetMarkerStyle(24)
             tg[i][0].SetMarkerSize(1)
-            tg[i][0].SetMarkerColor(1+index)
-            index=index+4
+            tg[i][0].SetMarkerColor(1 + index)
+            index = index + 4
 
         return tg
 
-
-
-
     ###########################################################################################################################
-    def makePlots(self,objects,opt,nameFile,operType):
-        '''
+    def makePlots(self, objects, opt, nameFile, operType):
+        """
         Build the final canvas used in LHCbPR and write it to the output ROOT file.
-        '''
-        gr = self.getGraphs(objects,opt[0],opt[1])
-        c1 = ROOT.TCanvas("","")
+        """
+        gr = self.getGraphs(objects, opt[0], opt[1])
+        c1 = ROOT.TCanvas("", "")
         c1.SetTitle(" ")
         c1.SetName("MPV" if opt[0] else "MPV_FWHM")
         c1.SetLogx()
         c1.SetGrid()
 
-        gr[0][0].Draw("AP");
+        gr[0][0].Draw("AP")
 
-        for i in range(0,len(gr)):
+        for i in range(0, len(gr)):
             gr[i][0].Draw("P SAME")
 
-        leg = ROOT.TLegend(0.6,0.4,0.8,0.8)
+        leg = ROOT.TLegend(0.6, 0.4, 0.8, 0.8)
         for i in gr:
-            if(i[1][1]!="theory"):
-                leg.AddEntry(i[0],(i[1][0]+" "+i[1][1]),"P")
+            if i[1][1] != "theory":
+                leg.AddEntry(i[0], (i[1][0] + " " + i[1][1]), "P")
             else:
                 i[0].SetMarkerStyle(7)
                 i[0].SetMarkerColor(1)
-                leg.AddEntry(i[0],i[1][1],"P")
+                leg.AddEntry(i[0], i[1][1], "P")
         leg.Draw("SAME")
-        File = ROOT.TFile(nameFile,operType)
+        File = ROOT.TFile(nameFile, operType)
         c1.Write()
         File.Close()
 
 
-
-
 class EMHandlerSummary(BaseHandler):
     def __init__(self):
-                super(self.__class__, self).__init__()
-
+        super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
         ROOT.gROOT.SetBatch(ROOT.kTRUE)
 
-        rootFiles = glob(directory+"*.root" if directory.endswith("/") else directory+"/*.root")
+        rootFiles = glob(directory + "*.root" if directory.endswith("/")
+                         else directory + "/*.root")
 
         tools = toolsPlots()
-        fit   = landau()
+        fit = landau()
         obsInfo = dict()
 
-
-        for id in [11.0,13.0,211.0]:
-            for pl in ['NoCuts','Opt1','Opt2','Opt3','LHCb']:
-                for en in [0.1, 0.2, 0.4, 1., 5., 10., 16.8, 50., 100., 120., 168.]:
-                    stringFile = "RootFileSimMonitor_%s_%s_%s_velo-histos_LHCbPR.root"%(pl,id,en)
+        for id in [11.0, 13.0, 211.0]:
+            for pl in ["NoCuts", "Opt1", "Opt2", "Opt3", "LHCb"]:
+                for en in [
+                        0.1,
+                        0.2,
+                        0.4,
+                        1.0,
+                        5.0,
+                        10.0,
+                        16.8,
+                        50.0,
+                        100.0,
+                        120.0,
+                        168.0,
+                ]:
+                    stringFile = (
+                        "RootFileSimMonitor_%s_%s_%s_velo-histos_LHCbPR.root" %
+                        (pl, id, en))
                     if directory.endswith("/"):
                         stringFile = directory + stringFile
                     else:
@@ -365,33 +400,85 @@ class EMHandlerSummary(BaseHandler):
                     for strFile in rootFiles:
                         if stringFile in strFile:
                             print(stringFile)
-                            if(tools.checkhisto(strFile,"energy_deposit")):
-                                b,g=tools.getParams(id,en)
-                                mpv,fwhm=fit.landau_fit(stringFile,"energy_deposit",b*g)
-                                obsInfo[stringFile]={"ID":id,"PL":pl,"EN":en,"MPV":mpv,"FWHM":fwhm,"BETAGAMMA":b*g}
-
-
-        energy=[0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1.0]
-
-        index=0.1
+                            if tools.checkhisto(strFile, "energy_deposit"):
+                                b, g = tools.getParams(id, en)
+                                mpv, fwhm = fit.landau_fit(
+                                    stringFile, "energy_deposit", b * g)
+                                obsInfo[stringFile] = {
+                                    "ID": id,
+                                    "PL": pl,
+                                    "EN": en,
+                                    "MPV": mpv,
+                                    "FWHM": fwhm,
+                                    "BETAGAMMA": b * g,
+                                }
+
+        energy = [
+            0.1,
+            0.15,
+            0.2,
+            0.25,
+            0.3,
+            0.35,
+            0.4,
+            0.45,
+            0.5,
+            0.55,
+            0.6,
+            0.65,
+            0.7,
+            0.75,
+            0.8,
+            0.85,
+            0.9,
+            0.95,
+            1.0,
+        ]
+
+        index = 0.1
         for i in range(100):
-            index=index+1./100
+            index = index + 1.0 / 100
             energy.append(index)
 
-        index=0
+        index = 0
         for i in range(100):
-            index=index+250./50
+            index = index + 250.0 / 50
             energy.append(index)
 
-        for id in [11.0,13.0]:
-            for pl in ['theory']:
+        for id in [11.0, 13.0]:
+            for pl in ["theory"]:
                 for en in energy:
-                    b,g=tools.getParams(id,en)
-                    stringFile=pl+"_"+str(id)+"_"+str(en)+"_"+str(b*g)+".root"
-                    mpv,fwhm=fit.landau_fitgen(stringFile,fit.landau_genlandau(b,g),tools.getParams(id,en))
-                    obsInfo[stringFile]={"ID":0.0,"PL":pl,"EN":en,"MPV":mpv,"FWHM":fwhm,"BETAGAMMA":b*g}
-
-
-        tools.makePlots(obsInfo,[True,False],(directory+"EMSummary.root" if directory.endswith("/") else directory+"/EMSummary.root"),"RECREATE")
-        tools.makePlots(obsInfo,[False,True],(directory+"EMSummary.root" if directory.endswith("/") else directory+"/EMSummary.root"),"UPDATE")
-        self.saveFile("summary",(directory+"EMSummary.root" if directory.endswith("/") else directory+"/EMSummary.root"))
+                    b, g = tools.getParams(id, en)
+                    stringFile = (pl + "_" + str(id) + "_" + str(en) + "_" +
+                                  str(b * g) + ".root")
+                    mpv, fwhm = fit.landau_fitgen(stringFile,
+                                                  fit.landau_genlandau(b, g),
+                                                  tools.getParams(id, en))
+                    obsInfo[stringFile] = {
+                        "ID": 0.0,
+                        "PL": pl,
+                        "EN": en,
+                        "MPV": mpv,
+                        "FWHM": fwhm,
+                        "BETAGAMMA": b * g,
+                    }
+
+        tools.makePlots(
+            obsInfo,
+            [True, False],
+            (directory + "EMSummary.root"
+             if directory.endswith("/") else directory + "/EMSummary.root"),
+            "RECREATE",
+        )
+        tools.makePlots(
+            obsInfo,
+            [False, True],
+            (directory + "EMSummary.root"
+             if directory.endswith("/") else directory + "/EMSummary.root"),
+            "UPDATE",
+        )
+        self.saveFile(
+            "summary",
+            (directory + "EMSummary.root"
+             if directory.endswith("/") else directory + "/EMSummary.root"),
+        )
diff --git a/handlers/FilePathHandler.py b/handlers/FilePathHandler.py
index 860e3d15c1dccb4007c091c8ba058a3489695559..3bd0a40c61a6221e94f44e5010942213f6b3f22c 100644
--- a/handlers/FilePathHandler.py
+++ b/handlers/FilePathHandler.py
@@ -3,37 +3,46 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class FilePathHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-        logfile = 'run.log'
+    def collectResults(self, directory):
+        logfile = "run.log"
         run_path = os.path.join(directory, logfile)
 
         regxp = ".*/afs/cern.ch/lhcb/software/profiling/releases(/[A-Z0-9]+/[A-Z0-9]+_[\w-]+.*)"
         path_line = ""
         try:
-           loglines = open(run_path, 'r')
-           for l in loglines.readlines():
-              m = re.match(regxp, l)
-              if m != None:
-                 path_line = m.group(1)
-                 continue
-           loglines.close()
+            loglines = open(run_path, "r")
+            for l in loglines.readlines():
+                m = re.match(regxp, l)
+                if m != None:
+                    path_line = m.group(1)
+                    continue
+            loglines.close()
         except IOError:
-           raise Exception(str(self.__class__)+": File not found, this handler expects 'run.log' file in the result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": File not found, this handler expects 'run.log' file in the result directory"
+            )
 
-        if os.path.exists(run_path) :
-           path = "$AFS_PROF" + path_line
-           self.saveString("Path", path, "Results Location", "JobInfo")
-           print(path)
+        if os.path.exists(run_path):
+            path = "$AFS_PROF" + path_line
+            self.saveString("Path", path, "Results Location", "JobInfo")
+            print(path)
         else:
-           print('File or path does not exist (file: ' + run_path + ')')
+            print("File or path does not exist (file: " + run_path + ")")
+
 
 if __name__ == "__main__":
     fh = FilePathHandler()
-    fh.collectResults('/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time')
-    fh.collectResults('/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_lhcb-head-131111/x86_64-slc6-gcc46-opt/20131111_1931_time')
+    fh.collectResults(
+        "/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time"
+    )
+    fh.collectResults(
+        "/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_lhcb-head-131111/x86_64-slc6-gcc46-opt/20131111_1931_time"
+    )
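A quick check of the path regexp used above, fed with one of the sample AFS paths from the __main__ block (a sketch; the handler itself reads matching lines from run.log):

import re

regxp = r".*/afs/cern.ch/lhcb/software/profiling/releases(/[A-Z0-9]+/[A-Z0-9]+_[\w-]+.*)"
line = "results in /afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time"
m = re.match(regxp, line)
if m is not None:
    # The captured suffix is re-rooted under the $AFS_PROF placeholder.
    print("$AFS_PROF" + m.group(1))
# -> $AFS_PROF/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time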
diff --git a/handlers/G4RichTbSimHandler.py b/handlers/G4RichTbSimHandler.py
index 27d1561211e297548f71e2f213b260f3d4f9a95b..f017ffc80ec97d34d51456236b6b1e4206d86700 100644
--- a/handlers/G4RichTbSimHandler.py
+++ b/handlers/G4RichTbSimHandler.py
@@ -3,7 +3,7 @@ from .BaseHandler import BaseHandler
 
 
 class G4RichTbSimHandler(BaseHandler):
-
     def collectResults(self, directory):
-        mc_histos_file = os.path.join(directory, 'G4RichTbSimHTestOutput', 'RichTbSim_MC_Histograms.root')
-        self.saveFile('G4RichTbSim_MCHistograms', mc_histos_file)
+        mc_histos_file = os.path.join(directory, "G4RichTbSimHTestOutput",
+                                      "RichTbSim_MC_Histograms.root")
+        self.saveFile("G4RichTbSim_MCHistograms", mc_histos_file)
diff --git a/handlers/GammaConversionHandler.py b/handlers/GammaConversionHandler.py
index 23042cdcec15b48be1ef39101af1b304a8c0dd4f..9fc8bc7d6b643afbe533d599ff71e6c345d2eabd 100644
--- a/handlers/GammaConversionHandler.py
+++ b/handlers/GammaConversionHandler.py
@@ -11,10 +11,18 @@ import json
 from .BaseHandler import BaseHandler
 import logging
 
-class GammaConversionHandler(BaseHandler):	
-    def __init__(self, debug='INFO'):
-        super(self.__class__,self).__init__()
+
+class GammaConversionHandler(BaseHandler):
+    def __init__(self, debug="INFO"):
+        super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        _gamma_dilepton_file = ('GammaToDiLeptonConversionTest.root', os.path.join(directory, 'G4GammaCVTestROOTFiles', 'G4GammaToDiLeptonConversionTest.root'))
+        _gamma_dilepton_file = (
+            "GammaToDiLeptonConversionTest.root",
+            os.path.join(
+                directory,
+                "G4GammaCVTestROOTFiles",
+                "G4GammaToDiLeptonConversionTest.root",
+            ),
+        )
         self.saveFile(*_gamma_dilepton_file)
diff --git a/handlers/GaussMemHandler.py b/handlers/GaussMemHandler.py
index 0b8f8c38dddd7edfd7323a58da423738d82a3934..24dae76605566093271839b1996e48e91c85c6dd 100644
--- a/handlers/GaussMemHandler.py
+++ b/handlers/GaussMemHandler.py
@@ -3,33 +3,61 @@ import os
 import re
 from .BaseHandler import BaseHandler
 
-class GaussMemHandler(BaseHandler):
 
+class GaussMemHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
+    def collectResults(self, directory):
         l = self.findHistoFile(directory)
         if len(l) != 1:
-            raise Exception("Could not locate just 1 histo file, found:" + str(l))
+            raise Exception("Could not locate just 1 histo file, found:" +
+                            str(l))
 
         f = ROOT.TFile(os.path.join(directory, l[0]))
         gaussGenTotal = f.Get("GaussGen.GaussGenMemory/Total Memory [MB]")
         gaussGenDelta = f.Get("GaussGen.GaussGenMemory/Delta Memory [MB]")
-        mainEventGaussSimTotal = f.Get("MainEventGaussSim.MainEventGaussSimMemory/Total Memory [MB]")
-        mainEventGaussSimDelta = f.Get("MainEventGaussSim.MainEventGaussSimMemory/Delta Memory [MB]")
+        mainEventGaussSimTotal = f.Get(
+            "MainEventGaussSim.MainEventGaussSimMemory/Total Memory [MB]")
+        mainEventGaussSimDelta = f.Get(
+            "MainEventGaussSim.MainEventGaussSimMemory/Delta Memory [MB]")
 
         # mem vars for Lamarr have a different path
-        if l[0].startswith('Lamarr'):
-            mainEventGaussSimTotal = f.Get("InitLamarr.InitLamarrMemory/Total Memory [MB]")
-            mainEventGaussSimDelta = f.Get("InitLamarr.InitLamarrMemory/Delta Memory [MB]")
+        if l[0].startswith("Lamarr"):
+            mainEventGaussSimTotal = f.Get(
+                "InitLamarr.InitLamarrMemory/Total Memory [MB]")
+            mainEventGaussSimDelta = f.Get(
+                "InitLamarr.InitLamarrMemory/Delta Memory [MB]")
 
-        self.saveFloat("TotalMemoryGaussGen", gaussGenTotal.GetMean(), "Total Memory [MB]", "Memory");
-        self.saveFloat("DeltaMemoryGaussGen", gaussGenDelta.GetMean(), "TotalDelta Memory [MB]", "Memory");
-        self.saveFloat("TotalMemoryMainEventGaussSim", mainEventGaussSimTotal.GetMean(), "Total Memory [MB]", "Memory");
-        self.saveFloat("DeltaMemoryMainEventGaussSim", mainEventGaussSimDelta.GetMean(), "TotalDelta Memory [MB]", "Memory");
+        self.saveFloat(
+            "TotalMemoryGaussGen",
+            gaussGenTotal.GetMean(),
+            "Total Memory [MB]",
+            "Memory",
+        )
+        self.saveFloat(
+            "DeltaMemoryGaussGen",
+            gaussGenDelta.GetMean(),
+            "TotalDelta Memory [MB]",
+            "Memory",
+        )
+        self.saveFloat(
+            "TotalMemoryMainEventGaussSim",
+            mainEventGaussSimTotal.GetMean(),
+            "Total Memory [MB]",
+            "Memory",
+        )
+        self.saveFloat(
+            "DeltaMemoryMainEventGaussSim",
+            mainEventGaussSimDelta.GetMean(),
+            "TotalDelta Memory [MB]",
+            "Memory",
+        )
 
     def findHistoFile(self, dir):
-        return [f for f in os.listdir(dir) if re.match("(Gauss|Lamarr).*histos.root", f)]
+        return [
+            f for f in os.listdir(dir)
+            if re.match("(Gauss|Lamarr).*histos.root", f)
+        ]
diff --git a/handlers/GeantStandaloneHandler.py b/handlers/GeantStandaloneHandler.py
index 6d5d5d790797a6bf122dee81b6c1f804b14209b9..1f34cbacffeb80d6dbb1fbf944b61e4887a9b408 100644
--- a/handlers/GeantStandaloneHandler.py
+++ b/handlers/GeantStandaloneHandler.py
@@ -2,34 +2,31 @@ import os
 import fnmatch
 import glob
 import csv
-#import random
+
+# import random
 
 from .BaseHandler import BaseHandler
 
 
 class GeantStandaloneHandler(BaseHandler):
-
-    """ LHCbPR Handler for Geant standalone tests.
-          SetupProject --nightly lhcb-gauss-def Geant4 Head (--build-env)
-          getpack Geant/G4examples
-          make
-          hadronis_tests
+    """LHCbPR Handler for Geant standalone tests.
+    SetupProject --nightly lhcb-gauss-def Geant4 Head (--build-env)
+    getpack Geant/G4examples
+    make
+    hadronis_tests
     """
 
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        """ Collect  results """
-        #self.saveInt("TestValue", random.randint(1, 10));
+        """Collect  results"""
+        # self.saveInt("TestValue", random.randint(1, 10));
         # Files
-        exts = ['*.root']
-        base = os.path.join(directory,'root')
+        exts = ["*.root"]
+        base = os.path.join(directory, "root")
         for file in os.listdir(base):
             for ext in exts:
                 if fnmatch.fnmatch(file, ext):
-                    
                     self.saveFile(
-                        os.path.basename(file),
-                        os.path.join(base, file)
-                    )
+                        os.path.basename(file), os.path.join(base, file))
diff --git a/handlers/GeantTestEm3Handler.py b/handlers/GeantTestEm3Handler.py
index 2c1fc88a1f94c8c048e0c79bf9933719af742c31..890a6e9c086389ed182ccf5b2546f4cdd6e87aa6 100644
--- a/handlers/GeantTestEm3Handler.py
+++ b/handlers/GeantTestEm3Handler.py
@@ -5,17 +5,17 @@ from .BaseHandler import BaseHandler
 
 
 class GeantTestEm3Handler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        """ Collect  results """
+        """Collect  results"""
 
-        file = 'Selectedresults.root'
-        txtfile = 'selectedresults.txt'
-        filename = os.path.join(directory, 'G4SamplingCaloTestOutput', file)
-        txtfilename = os.path.join(directory, 'G4SamplingCaloTestOutput', txtfile)
+        file = "Selectedresults.root"
+        txtfile = "selectedresults.txt"
+        filename = os.path.join(directory, "G4SamplingCaloTestOutput", file)
+        txtfilename = os.path.join(directory, "G4SamplingCaloTestOutput",
+                                   txtfile)
 
         if not os.path.exists(filename):
             raise Exception("File %s does not exist" % filename)
@@ -25,19 +25,24 @@ class GeantTestEm3Handler(BaseHandler):
 
         self.saveFile(file, filename)
 
-        with open(txtfilename, mode='r') as f:
+        with open(txtfilename, mode="r") as f:
             lines = f.readlines()
-            _, res_val, res_err = lines[1].split(',')
+            _, res_val, res_err = lines[1].split(",")
             print((res_val, res_err))
-            res_val, res_err = float(res_val), float(res_err.split(';')[0])
+            res_val, res_err = float(res_val), float(res_err.split(";")[0])
 
-            _, const_val, const_err = lines[2].split(',')
-            const_val, const_err = float(const_val), float(const_err.split(';')[0])
+            _, const_val, const_err = lines[2].split(",")
+            const_val, const_err = float(const_val), float(
+                const_err.split(";")[0])
 
             table = []
             for line in lines[6:]:
-                e_en, e_val, e_err = line.split(',')
-                e_en, e_val, e_err = float(e_en), float(e_val), float(e_err.split(';')[0])
+                e_en, e_val, e_err = line.split(",")
+                e_en, e_val, e_err = (
+                    float(e_en),
+                    float(e_val),
+                    float(e_err.split(";")[0]),
+                )
                 table.append((e_en, e_val, e_err))
 
             self.saveFloat("TESTEM3_FIT_RESOLUTION_VALUE", res_val)
diff --git a/handlers/GeantTestEm5Handler.py b/handlers/GeantTestEm5Handler.py
index 50160607c947537737f646fb21a0c49bd68a94ae..e259b5a86a52fa9d72182d3e1b79ba61a67555aa 100644
--- a/handlers/GeantTestEm5Handler.py
+++ b/handlers/GeantTestEm5Handler.py
@@ -5,7 +5,6 @@ from .BaseHandler import BaseHandler
 
 
 class GeantTestEm5Handler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
@@ -14,27 +13,41 @@ class GeantTestEm5Handler(BaseHandler):
             raise Exception("File %s does not exist" % _file)
 
     def ReadTable(self, _file):
-        with open(_file, mode='r') as f:
+        with open(_file, mode="r") as f:
             AllLines = f.readlines()
             AllLines.pop(0)
             for Line in AllLines:
-                Column = Line.split(',')
-                self.saveFloat('TESTEM5_{energy}MeV RMS'.format(energy=Column[0]),
-                               float(Column[1]),
-                               description='Multiple Scattering test: RMS of electron scattering angle at {energy} MeV'.format(energy=Column[0]))
-                self.saveFloat('TESTEM5_{energy}MeV RMS Uncertainty'.format(energy=Column[0]),
-                               float(Column[2].strip('\n')),
-                               description='Multiple Scattering test: Uncertainty on RMS of electron scattering angle at {energy} MeV'.format(energy=Column[0]))
+                Column = Line.split(",")
+                self.saveFloat(
+                    "TESTEM5_{energy}MeV RMS".format(energy=Column[0]),
+                    float(Column[1]),
+                    description=
+                    "Multiple Scattering test: RMS of electron scattering angle at {energy} MeV"
+                    .format(energy=Column[0]),
+                )
+                self.saveFloat(
+                    "TESTEM5_{energy}MeV RMS Uncertainty".format(
+                        energy=Column[0]),
+                    float(Column[2].strip("\n")),
+                    description=
+                    "Multiple Scattering test: Uncertainty on RMS of electron scattering angle at {energy} MeV"
+                    .format(energy=Column[0]),
+                )
 
     def collectResults(self, directory):
-        ResultsTable = os.path.join(directory, "G4MScInThinLayerTestOutput", "Results_Table.txt")
+        ResultsTable = os.path.join(directory, "G4MScInThinLayerTestOutput",
+                                    "Results_Table.txt")
 
         self.CheckFile(ResultsTable)
 
         self.ReadTable(ResultsTable)
 
-        ResultsFile = os.path.join(directory, "G4MScInThinLayerTestOutput", "RMSResults.root")
+        ResultsFile = os.path.join(directory, "G4MScInThinLayerTestOutput",
+                                   "RMSResults.root")
 
-        self.saveFile("TESTEM5_RMSResults.root",
-                      ResultsFile,
-                      description="Multiple Scattering test: File containing Distributions of RMS at each energy and graph of RMS vs. Energy")
+        self.saveFile(
+            "TESTEM5_RMSResults.root",
+            ResultsFile,
+            description=
+            "Multiple Scattering test: File containing Distributions of RMS at each energy and graph of RMS vs. Energy",
+        )
diff --git a/handlers/HLTIndependenceHandler.py b/handlers/HLTIndependenceHandler.py
index bcf1612d0ada1bf2079c4e4356531b52dd1caa05..3d58facc74e56349d933c668979c9bae00672dbe 100644
--- a/handlers/HLTIndependenceHandler.py
+++ b/handlers/HLTIndependenceHandler.py
@@ -4,15 +4,15 @@ from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 from .hlt import HLTIndependenceParser
 
+
 class HLTIndependenceHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
     def _parseHLTIndepLog(self, filename):
-        """ Parse the log of the HLT rate file to send to the DB """
+        """Parse the log of the HLT rate file to send to the DB"""
 
         # extracted rate table from the log
         table = []
@@ -33,29 +33,29 @@ class HLTIndependenceHandler(BaseHandler):
 
         data = HLTIndependenceParser.parseHLTIndependenceTable("".join(table))
         globalGroup = "HTLIndep_Global"
-        globalStatsKeys = ['requested', 'completed', 'nomismatch', 'processed']
+        globalStatsKeys = ["requested", "completed", "nomismatch", "processed"]
         for k in globalStatsKeys:
             self.saveFloat(k, float(data[k]), group=globalGroup)
-        
+
         # for each line we have:
         #  <line number>, <line name>, <All>, <Single>, <AnS>, <SnA> ]
         #                             |All(A) Single(S)  A!S    S!A
         # e.g.
         # ['2', 'Hlt1B2HH_LTUNB_KK:', '0', '0', '0', '0']
         def saveStatsLine(prefix, statsGroup, data):
-            """ Format the stats for LHCbPR """
+            """Format the stats for LHCbPR"""
             (num, name, dsingle, dall, dans, dsna) = data
             name = name.rstrip(":")
-            getname = lambda par: "_".join([ prefix, name, par])
+            getname = lambda par: "_".join([prefix, name, par])
+
+            self.saveFloat(getname("nb"), int(num), group=statsGroup)
+            self.saveFloat(getname("Single"), float(dsingle), group=statsGroup)
+            self.saveFloat(getname("All"), float(dall), group=statsGroup)
+            self.saveFloat(getname("A!S"), float(dans), group=statsGroup)
+            self.saveFloat(getname("S!A"), float(dsna), group=statsGroup)
 
-            self.saveFloat(getname("nb") , int(num), group=statsGroup)
-            self.saveFloat(getname("Single") , float(dsingle), group=statsGroup)
-            self.saveFloat(getname("All") , float(dall), group=statsGroup)
-            self.saveFloat(getname("A!S") , float(dans), group=statsGroup)
-            self.saveFloat(getname("S!A") , float(dsna), group=statsGroup)
         for d in data["HLT1LineStats"]:
             saveStatsLine("HLT1Indep", "HLTRate_HLT1IndepStats", d)
 
     def collectResults(self, directory):
-        self._parseHLTIndepLog(os.path.join(directory,'run.log'))
-
+        self._parseHLTIndepLog(os.path.join(directory, "run.log"))
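For the sample row quoted in the comment above, saveStatsLine produces metric names like these (a sketch of the naming scheme only):

row = ["2", "Hlt1B2HH_LTUNB_KK:", "0", "0", "0", "0"]
num, name = row[0], row[1].rstrip(":")
for par in ("nb", "Single", "All", "A!S", "S!A"):
    print("_".join(["HLT1Indep", name, par]))
# -> HLT1Indep_Hlt1B2HH_LTUNB_KK_nb, ..._Single, ..._All, ..._A!S, ..._S!A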
diff --git a/handlers/HLTJSONHandler.py b/handlers/HLTJSONHandler.py
index 13f7fb076e06145091e189aedf8788eac819298c..3e0c0a4225e7d12f35d778164b8365b6d87315a7 100644
--- a/handlers/HLTJSONHandler.py
+++ b/handlers/HLTJSONHandler.py
@@ -5,6 +5,7 @@ from .hlt.EventSizeParser import eventSizeParser
 from .hlt.HLT1TupleParser import ParseHLT1Output
 from .hlt.HLT2TupleParser import ParseHLT2Output
 
+
 class HLTJSONHandler(BaseHandler):
     """
     This is the JSON based handler for the HLT data
@@ -24,13 +25,14 @@ class HLTJSONHandler(BaseHandler):
         HLT Rate Tests.
         This has support for parsing the output tuple from a split test or
         from a single tuple from a combined test.
-        """    
-   
+        """
+
         hlt1_results = os.path.join(directory, "tuples_hlt1.root")
         hlt2_results = os.path.join(directory, "tuples_hlt2.root")
 
-        if not os.path.isfile(hlt1_results) and not os.path.isfile(hlt2_results):
-            default_results = os.path.join(directory, 'tuples.root')
+        if not os.path.isfile(hlt1_results) and not os.path.isfile(
+                hlt2_results):
+            default_results = os.path.join(directory, "tuples.root")
             if os.path.isfile(default_results):
                 hlt1_results = default_results
                 hlt2_results = default_results
@@ -42,46 +44,47 @@ class HLTJSONHandler(BaseHandler):
         results_hlt1 = ParseHLT1Output(hlt1_results)
         results_hlt2 = ParseHLT2Output(hlt1_results, hlt2_results)
 
-        eventSizes = results_EventSize['eventSizes']
+        eventSizes = results_EventSize["eventSizes"]
         for k, v in list(eventSizes.items()):
-            self.saveJSON( 'eventSizes_'+k, v )
+            self.saveJSON("eventSizes_" + k, v)
 
-        self.saveFile(os.path.basename(results_EventSize['eventFileName']), results_EventSize['eventFileName'])
+        self.saveFile(
+            os.path.basename(results_EventSize["eventFileName"]),
+            results_EventSize["eventFileName"],
+        )
 
-        hlt1_ByRegex = results_hlt1['ByRegex']
-        hlt1_Hlt1Lines = results_hlt1['Hlt1Lines']
+        hlt1_ByRegex = results_hlt1["ByRegex"]
+        hlt1_Hlt1Lines = results_hlt1["Hlt1Lines"]
 
         for k, v in list(hlt1_ByRegex.items()):
-            self.saveJSON( 'hlt1_ByRegex_'+k, v )
+            self.saveJSON("hlt1_ByRegex_" + k, v)
         for k, v in list(hlt1_Hlt1Lines.items()):
-            self.saveJSON( 'hlt1_Hlt1Lines_'+k, v )
+            self.saveJSON("hlt1_Hlt1Lines_" + k, v)
 
-        hlt2_unaccounted = results_hlt2['unaccounted']
+        hlt2_unaccounted = results_hlt2["unaccounted"]
         for k, v in list(hlt2_unaccounted.items()):
-            self.saveJSON( 'hlt2_unaccounted_'+k, list(v) )
-        hlt2_decisions = results_hlt2['decisions']
+            self.saveJSON("hlt2_unaccounted_" + k, list(v))
+        hlt2_decisions = results_hlt2["decisions"]
         for k, v in list(hlt2_decisions.items()):
-            self.saveJSON( 'hlt2_decisions_'+k, v )
+            self.saveJSON("hlt2_decisions_" + k, v)
 
         # List
-        hlt2_persistRecoLines = results_hlt2['persistRecoLines']
-        self.saveJSON( 'hlt2_persistRecoLines_'+k, hlt2_persistRecoLines )
+        hlt2_persistRecoLines = results_hlt2["persistRecoLines"]
+        self.saveJSON("hlt2_persistRecoLines_" + k, hlt2_persistRecoLines)
 
-        hlt2_Streams = results_hlt2['Streams']
+        hlt2_Streams = results_hlt2["Streams"]
         for k, v in list(hlt2_Streams.items()):
             for k2, v2 in list(v.items()):
-                self.saveJSON( 'hlt2_Streams_'+k+'_'+k2, v2 )
+                self.saveJSON("hlt2_Streams_" + k + "_" + k2, v2)
 
-        hlt2_RateMatrix = results_hlt2['RateMatrix']
-        hlt2_RateMatrixDict = results_hlt2['RateMatrixDict']
-        hlt2_RateMatrixLines = results_hlt2['RateMatrixLines']
-        hlt2_RateMatrixLineNames = results_hlt2['RateMatrixLineNames']
+        hlt2_RateMatrix = results_hlt2["RateMatrix"]
+        hlt2_RateMatrixDict = results_hlt2["RateMatrixDict"]
+        hlt2_RateMatrixLines = results_hlt2["RateMatrixLines"]
+        hlt2_RateMatrixLineNames = results_hlt2["RateMatrixLineNames"]
 
         # List
-        self.saveJSON( 'hlt2_RateMatrix', hlt2_RateMatrix )
+        self.saveJSON("hlt2_RateMatrix", hlt2_RateMatrix)
         for k, v in list(hlt2_RateMatrixDict.items()):
-            self.saveJSON( 'hlt2_RateMatrixDict_'+k, v )
-        self.saveJSON( 'hlt2_RateMatrixLines', hlt2_RateMatrixLines )
-        self.saveJSON( 'hlt2_RateMatrixLineNames', hlt2_RateMatrixLineNames )
-
-
+            self.saveJSON("hlt2_RateMatrixDict_" + k, v)
+        self.saveJSON("hlt2_RateMatrixLines", hlt2_RateMatrixLines)
+        self.saveJSON("hlt2_RateMatrixLineNames", hlt2_RateMatrixLineNames)
diff --git a/handlers/HLTRateHandler.py b/handlers/HLTRateHandler.py
index 09cb2174543c6afe7506c54ea0522a356391e956..2972700c3fa22c9bae0a42a473a3b3e046687217 100644
--- a/handlers/HLTRateHandler.py
+++ b/handlers/HLTRateHandler.py
@@ -7,29 +7,27 @@ import subprocess
 import shutil
 import logging
 
-
 log = logging.getLogger(__name__)
 
-class HLTRateHandler(BaseHandler):
 
+class HLTRateHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-
     RE_NBWERROR = "([0-9\.]+)\+\-([0-9\.]+)"
+
     def _parseValError(self, valstr):
-        """ Parse a number in the format: 150.00+-35.71 """
+        """Parse a number in the format: 150.00+-35.71"""
         m = re.match(self.RE_NBWERROR, valstr)
         if m != None:
             return (m.group(1), m.group(2))
         else:
             return (None, None)
 
-
     def _parseHLTRateLog(self, filename, directory):
-        """ Parse the log of the HLT rate file to send to the DB """
+        """Parse the log of the HLT rate file to send to the DB"""
 
         # extracted rate table from the log
         rateTable = []
@@ -50,14 +48,21 @@ class HLTRateHandler(BaseHandler):
 
         # Parsing the rate table to extract the figures
         from .hlt import HLTRateParser
+
         data = HLTRateParser.parseHLTRateList("".join(rateTable))
 
         globalGroup = "HTLRate_Global"
-        globalStatsKeys = ['Hlt1Lines', 'Hlt2Lines', 'nbevents', 'ratesAssume']
+        globalStatsKeys = ["Hlt1Lines", "Hlt2Lines", "nbevents", "ratesAssume"]
         for k in globalStatsKeys:
             self.saveFloat(k, float(data[k]), group=globalGroup)
 
-        globalStatsWithErrKeys = ['TurcalRate', 'TurboRate', 'Hlt2GlobalRate', 'Hlt1GlobalRate', 'FullRate' ]
+        globalStatsWithErrKeys = [
+            "TurcalRate",
+            "TurboRate",
+            "Hlt2GlobalRate",
+            "Hlt1GlobalRate",
+            "FullRate",
+        ]
         for k in globalStatsWithErrKeys:
             self.saveFloat(k, float(data[k][1]), group=globalGroup)
             self.saveFloat(k + "_error", float(data[k][2]), group=globalGroup)
@@ -67,16 +72,20 @@ class HLTRateHandler(BaseHandler):
         # ['1', 'Hlt1TrackMVA', '150.00', '35.71', '80.00', '27.13']
 
         def saveStatsLine(prefix, statsGroup, data):
-            """ Format the stats for LHCbPR """
+            """Format the stats for LHCbPR"""
             (num, name, irate, irateerr, erate, erateerr) = data
 
-            getname = lambda par: "_".join([ prefix, name, par])
+            getname = lambda par: "_".join([prefix, name, par])
 
-            self.saveFloat(getname("nb") , int(num), group=statsGroup)
-            self.saveFloat(getname("Incl_rate") , float(irate), group=statsGroup)
-            self.saveFloat(getname("Incl_rate_err") , float(irateerr), group=statsGroup)
-            self.saveFloat(getname("Excl_rate") , float(erate), group=statsGroup)
-            self.saveFloat(getname("Excl_rate_err") , float(erateerr), group=statsGroup)
+            self.saveFloat(getname("nb"), int(num), group=statsGroup)
+            self.saveFloat(
+                getname("Incl_rate"), float(irate), group=statsGroup)
+            self.saveFloat(
+                getname("Incl_rate_err"), float(irateerr), group=statsGroup)
+            self.saveFloat(
+                getname("Excl_rate"), float(erate), group=statsGroup)
+            self.saveFloat(
+                getname("Excl_rate_err"), float(erateerr), group=statsGroup)
 
         for d in data["Hlt1RegexStats"]:
             saveStatsLine("HLT1Regex", "HLTRate_HLT1RegexLineStats", d)
@@ -90,10 +99,14 @@ class HLTRateHandler(BaseHandler):
         for d in data["Hlt2Stats"]:
             saveStatsLine("HLT2", "HLTRate_HLT2LineStats", d)
 
-        self.saveFile("tuples.root", os.path.join(directory,'tuples.root'),  group="HTLRate_Global")
+        self.saveFile(
+            "tuples.root",
+            os.path.join(directory, "tuples.root"),
+            group="HTLRate_Global",
+        )
 
     def _publishStaticHTML(self, directory, project, version):
-        """ Generate the static HTML files for Mika and copies them to AFS """
+        """Generate the static HTML files for Mika and copies them to AFS"""
         # This is a hack to boostrap the the process, until LHCbPR
         # has the required functionality
         wwwDir = os.environ.get("LHCBPR_WWW")
@@ -105,27 +118,33 @@ class HLTRateHandler(BaseHandler):
 
         wwwDirEos = os.environ.get("LHCBPR_WWW_EOS")
         if wwwDirEos == None:
-            raise Exception("No web dir on EOS defined, will not run extraction")
+            raise Exception(
+                "No web dir on EOS defined, will not run extraction")
 
         # Getting the repo with the transform code
         print("Publish HTML files to AFS and EOS")
         try:
             import shutil
+
             shutil.rmtree("HLTRateScripts")
         except:
             pass
 
         if not os.path.exists("HLTRateScripts"):
-            rc = subprocess.call("git clone --quiet ssh://git@gitlab.cern.ch:7999/lhcb-core/HLTRateScripts.git", shell=True)
+            rc = subprocess.call(
+                "git clone --quiet ssh://git@gitlab.cern.ch:7999/lhcb-core/HLTRateScripts.git",
+                shell=True,
+            )
 
         # Now calling the scripts and creating the directory
         import datetime
+
         i = datetime.datetime.now()
         dirname = str(version) + "_" + i.strftime("%Y-%m-%dT%H:%M:%S")
 
         # Checking if there is a JobSetting.txt file
         settingsfile = os.path.join(directory, "JobSettings.txt")
-        RateTestDir =  "RateTests"
+        RateTestDir = "RateTests"
         settings = None
 
         # We ignore errors in this part and keep default name in that case...
@@ -142,14 +161,18 @@ class HLTRateHandler(BaseHandler):
         rateScriptOutputDir = os.path.join(directory, "rateScriptOutput")
         os.makedirs(rateScriptOutputDir)
 
-        subprocess.call(["HLTRateScripts/scripts/analyse.sh", directory + "/", rateScriptOutputDir])
-        logname = os.path.join(directory,'run.log')
-        tuplename = os.path.join(directory,'tuples.root')
+        subprocess.call([
+            "HLTRateScripts/scripts/analyse.sh", directory + "/",
+            rateScriptOutputDir
+        ])
+        logname = os.path.join(directory, "run.log")
+        tuplename = os.path.join(directory, "tuples.root")
         try:
             for ifile in os.listdir(rateScriptOutputDir):
-                shutil.copy(os.path.join(rateScriptOutputDir, ifile), targetDir)
+                shutil.copy(
+                    os.path.join(rateScriptOutputDir, ifile), targetDir)
             shutil.copy(logname, targetDir)
-            shutil.copy(tuplename , targetDir)
+            shutil.copy(tuplename, targetDir)
         except:
             print("Error copying log and tuple")
 
@@ -157,25 +180,41 @@ class HLTRateHandler(BaseHandler):
         targetRootEosDir = wwwDirEos + "/" + RateTestDir + "/" + dirname
         try:
             for ifile in os.listdir(rateScriptOutputDir):
-                subprocess.call(['xrdcp', rateScriptOutputDir + "/" + ifile, targetRootEosDir + "/" + ifile])
-            subprocess.call(['xrdcp', logname, targetRootEosDir + "/run.log"])
-            subprocess.call(['xrdcp', tuplename, targetRootEosDir + "/tuples.root"])
+                subprocess.call([
+                    "xrdcp",
+                    rateScriptOutputDir + "/" + ifile,
+                    targetRootEosDir + "/" + ifile,
+                ])
+            subprocess.call(["xrdcp", logname, targetRootEosDir + "/run.log"])
+            subprocess.call(
+                ["xrdcp", tuplename, targetRootEosDir + "/tuples.root"])
         except Exception as ex:
-            log.warning('Error copying html files to eos: %s', ex)
-
-    def collectResultsExt(self, directory, project, version, platform, hostname, cpu_info, memoryinfo, startTime, endTime, options):
+            log.warning("Error copying html files to eos: %s", ex)
+
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
         try:
             self._publishStaticHTML(directory, project, version)
         except:
             (e, value, trace) = sys.exc_info()
             print("Could not publish the static HTML", e, value)
             import traceback
-            traceback.print_tb(trace)
-
-        #Now parsing the logs
-        self._parseHLTRateLog(os.path.join(directory,'run.log'), directory)
 
+            traceback.print_tb(trace)
 
+        # Now parsing the logs
+        self._parseHLTRateLog(os.path.join(directory, "run.log"), directory)
 
     def collectResults(self, directory):
-        self._parseHLTRateLog(os.path.join(directory,'run.log'), directory)
+        self._parseHLTRateLog(os.path.join(directory, "run.log"), directory)
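A doctest-style check of _parseValError, using the format quoted in its docstring:

import re

RE_NBWERROR = r"([0-9\.]+)\+\-([0-9\.]+)"
m = re.match(RE_NBWERROR, "150.00+-35.71")
print(m.group(1), m.group(2))  # -> 150.00 35.71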
diff --git a/handlers/HLTThroughputHandler.py b/handlers/HLTThroughputHandler.py
index ac48f1d379a170e29f3407014196045cef760cce..1aca4e32ffa41ccbdd5817a048be86dbd4b09db7 100644
--- a/handlers/HLTThroughputHandler.py
+++ b/handlers/HLTThroughputHandler.py
@@ -2,8 +2,8 @@ import os, sys, re
 from .BaseHandler import BaseHandler
 from .hlt.HLTPerfParser import process_directory
 
-class HLTThroughputHandler(BaseHandler):
 
+class HLTThroughputHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
@@ -12,30 +12,37 @@ class HLTThroughputHandler(BaseHandler):
     RE_NBWERROR = "\s*(\w+\s*\w+).*\s+([0-9\.]+)\s\+\-\s([0-9\.]+)"
 
     def collectResults(self, directory):
-
-        with open(os.path.join(directory,'averages.log')) as f:
+        with open(os.path.join(directory, "averages.log")) as f:
             for line in f.readlines():
                 metric = re.match(self.RE_NBWERROR, line)
                 if metric != None:
-                    self.saveFloat(metric.group(1).replace(" ", "_"), metric.group(2), group="HLTThroughput")
-                    self.saveFloat(metric.group(1).replace(" ", "_")+"_error", metric.group(3), group="HLTThroughput")
+                    self.saveFloat(
+                        metric.group(1).replace(" ", "_"),
+                        metric.group(2),
+                        group="HLTThroughput",
+                    )
+                    self.saveFloat(
+                        metric.group(1).replace(" ", "_") + "_error",
+                        metric.group(3),
+                        group="HLTThroughput",
+                    )
 
         try:
-
             collected_results = process_directory(directory)
 
-            if 'Hlt1' in collected_results['timing_results']:
-                prefix = 'HLTPerfHlt1_'
+            if "Hlt1" in collected_results["timing_results"]:
+                prefix = "HLTPerfHlt1_"
             else:
-                prefix = 'HLTPerfHlt2_'
+                prefix = "HLTPerfHlt2_"
 
             for k, v in list(collected_results.items()):
-                if k == 'timing_results':
+                if k == "timing_results":
                     for k2, v2 in list(v.items()):
-                        self.saveJSON(prefix+str(k2), v2)
+                        self.saveJSON(prefix + str(k2), v2)
                 else:
-                    self.saveJSON(prefix+str(k), v)
+                    self.saveJSON(prefix + str(k), v)
 
         except Exception as err:
-            print("ERROR: in new '%s' code:\n%s\n" % (self.__class__.__name__, str(err)))
+            print("ERROR: in new '%s' code:\n%s\n" % (self.__class__.__name__,
+                                                      str(err)))
             pass
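To illustrate RE_NBWERROR above, here is how a hypothetical averages.log entry (the file format itself is not shown in this patch) would be parsed:

import re

RE_NBWERROR = r"\s*(\w+\s*\w+).*\s+([0-9\.]+)\s\+\-\s([0-9\.]+)"
line = "Event throughput (avg)   123.45 +- 6.78"  # hypothetical log line
m = re.match(RE_NBWERROR, line)
if m is not None:
    print(m.group(1).replace(" ", "_"), m.group(2), m.group(3))
# -> Event_throughput 123.45 6.78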
diff --git a/handlers/HashRemoveHandler.py b/handlers/HashRemoveHandler.py
index e78a745fb572b34544e56baba1bd9f38f9e64b6a..4a7e5c69bd7ded5eadec4dbc94bda6f59d940f79 100644
--- a/handlers/HashRemoveHandler.py
+++ b/handlers/HashRemoveHandler.py
@@ -5,14 +5,14 @@ from .BaseHandler import BaseHandler
 
 
 class HashRemoveHandler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        ext = '*.root'
+        ext = "*.root"
         for root, _, files in os.walk(directory):
             for file in files:
                 if fnmatch.fnmatch(file, ext):
-                    modifier = HashRemover.HashRemover(os.path.join(root, file))
+                    modifier = HashRemover.HashRemover(
+                        os.path.join(root, file))
                     modifier.modify_root_file()
diff --git a/handlers/IgprofMemHandler.py b/handlers/IgprofMemHandler.py
index 0bb5ab220ad21f786aacc6d37c42c6a2ae5a036c..1bb98f96bf0505df3726621327ed4a9ddcb96bea 100644
--- a/handlers/IgprofMemHandler.py
+++ b/handlers/IgprofMemHandler.py
@@ -1,12 +1,13 @@
 import os, re
 from .BaseHandler import BaseHandler
 
-class IgprofMemHandler(BaseHandler):
 
+class IgprofMemHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    REG="\s*(\d+\.\d+)\s+([\d\']+)\s+([\d\']+)\s+(\S+)\s+\[\d+\]$"
+    REG = "\s*(\d+\.\d+)\s+([\d']+)\s+([\d']+)\s+(\S+)\s+\[\d+\]$"
+
     # 21.13   41'314'528          968  _ZN9RichHpdQE12setAnHpdQEenEiRKSt6vectorIdSaIdEES4_ [81]
 
     def parseIgprof(self, directory, filename, prof_type):
@@ -20,17 +21,23 @@ class IgprofMemHandler(BaseHandler):
                 start = True
                 metric = re.match(self.REG, line)
                 if metric != None:
-                    self.saveFloat(metric.group(4)+"_percent",
-                                   metric.group(1),
-                                   description="memory " + prof_type + " in %",
-                                   group="mem" + prof_type + "_percent")
-                    self.saveInt(metric.group(4)+"_byte",
-                                 metric.group(2).replace("'",""),
-                                 description="memory "+ prof_type + " in bytes",
-                                 group="mem" + prof_type +"_byte")
+                    self.saveFloat(
+                        metric.group(4) + "_percent",
+                        metric.group(1),
+                        description="memory " + prof_type + " in %",
+                        group="mem" + prof_type + "_percent",
+                    )
+                    self.saveInt(
+                        metric.group(4) + "_byte",
+                        metric.group(2).replace("'", ""),
+                        description="memory " + prof_type + " in bytes",
+                        group="mem" + prof_type + "_byte",
+                    )
 
     def collectResults(self, directory):
         self.parseIgprof(directory, "igout.mp.live.txt", "leak")
         self.parseIgprof(directory, "igout.mp.total.txt", "alloc")
-        self.saveFile('igprof_MemLive' , os.path.join(directory,'igout.mp.live.txt') )
-        self.saveFile('igprof_MemTotal' , os.path.join(directory,'igout.mp.total.txt') )
+        self.saveFile("igprof_MemLive",
+                      os.path.join(directory, "igout.mp.live.txt"))
+        self.saveFile("igprof_MemTotal",
+                      os.path.join(directory, "igout.mp.total.txt"))
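The sample line in the comment above exercises REG as follows:

import re

REG = r"\s*(\d+\.\d+)\s+([\d']+)\s+([\d']+)\s+(\S+)\s+\[\d+\]$"
line = "21.13   41'314'528          968  _ZN9RichHpdQE12setAnHpdQEenEiRKSt6vectorIdSaIdEES4_ [81]"
m = re.match(REG, line)
print(m.group(1))                   # -> 21.13, saved as <symbol>_percent
print(m.group(2).replace("'", ""))  # -> 41314528, saved as <symbol>_byte
print(m.group(4))                   # -> the mangled C++ symbol name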
diff --git a/handlers/JemallocHandler.py b/handlers/JemallocHandler.py
index 15a69482b085692fc83353fc6df8a8fac6a19c11..fcc6b3514fbf77c158c8dab955b827a5229bbe4d 100644
--- a/handlers/JemallocHandler.py
+++ b/handlers/JemallocHandler.py
@@ -6,29 +6,35 @@ from .BaseHandler import BaseHandler
 #
 ################################################################################
 
+
 def findHeapFiles(data, rundir):
-    """ Find the heap files related to the main PID """
-    heapfiles = [ f for f in os.listdir(rundir) \
-                  if f.endswith(".heap") \
-                  and ".%d." %  data["pid"] in f ]
+    """Find the heap files related to the main PID"""
+    heapfiles = [
+        f for f in os.listdir(rundir)
+        if f.endswith(".heap") and ".%d." % data["pid"] in f
+    ]
     return sorted(heapfiles, key=lambda x: int(x.split(".")[2]))
 
-def execute(command):    
-    with open(os.devnull, 'w') as DEVNULL:
-        popen = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=DEVNULL, shell=True)
+
+def execute(command):
+    with open(os.devnull, "w") as DEVNULL:
+        popen = subprocess.Popen(
+            command, stdout=subprocess.PIPE, stderr=DEVNULL, shell=True)
         lines_iterator = iter(popen.stdout.readline, b"")
         for line in lines_iterator:
-            yield line # yield line
+            yield line  # yield line
+
 
 def processPprofText(data, basefile, comparefile):
     import re
+
     total = None
     totalUnit = None
     allocs = []
-    for l in execute("pprof -text --base=%s %s %s" % (basefile, data["exe"], comparefile)):
-
+    for l in execute("pprof -text --base=%s %s %s" % (basefile, data["exe"],
+                                                      comparefile)):
         # Looking for:
-        #Total: 16.7 MB
+        # Total: 16.7 MB
         m = re.match("^\s*Total:\s+([\-\d\.]+)\s+(\w+).*", l)
         if m != None:
             total = m.group(1)
@@ -36,73 +42,75 @@ def processPprofText(data, basefile, comparefile):
 
         # Looking for:
         # 4.0  24.0%  24.0%      4.0  24.0% TrackMasterFitter::makeNodes
-        m2 = re.match("\s*([\-\d\.]+)\s+([\-\d\.]+)%\s+([\-\d\.]+)%\s+([\-\d\.]+)\s+([\-\d\.]+)%\s+(.*)", l)
+        m2 = re.match(
+            "\s*([\-\d\.]+)\s+([\-\d\.]+)%\s+([\-\d\.]+)%\s+([\-\d\.]+)\s+([\-\d\.]+)%\s+(.*)",
+            l,
+        )
         if m2 != None:
-            allocs.append([ m2.group(i) for i in range(1, 7) ])
+            allocs.append([m2.group(i) for i in range(1, 7)])
     return (total, totalUnit, allocs)
 
+
 def processPprofPs(data, basefile, comparefile, outfile):
     import re
+
     total = None
     totalUnit = None
     allocs = []
     with open(outfile, "w") as f:
-        for l in execute("pprof -ps --base=%s %s %s" % (basefile, data["exe"], comparefile)):
+        for l in execute("pprof -ps --base=%s %s %s" % (basefile, data["exe"],
+                                                        comparefile)):
             f.write(l)
 
 
-
 class JemallocHandler(BaseHandler):
-   """ LHCbPR Handler to extract information from Jemalloc heap files
-   """
-   
-   def __init__(self):
-      super(self.__class__, self).__init__()
-      self.finished = False
-      self.results = []
-      self.basefilename = "runinfo.json"
-
-      
-   def collectResults(self,directory):
-      """ Collect un results """
-      
-      # First check that we have the log file...
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-      # Parse the JSON input file
-      data = {}
-      import json
-      with open(self.basefilename) as f:
-         data = json.load(f)
-             
-      # Now find the files, sorted in order
-      rundir = os.path.dirname(os.path.abspath(self.basefilename))
-      heapfiles = findHeapFiles(data, rundir)
-
-      # Choose the files to be compared...
-      basefile = heapfiles[0]
-      comparefile = [f for f in heapfiles if not f.endswith(".f.heap") ][-1]
-
-      # Get the top algorithms
-      (total, totalUnit, allocs) = processPprofText(data, basefile, comparefile)
-      totalf = float(total)
-      if totalUnit.upper() == "GB":
-         totalf = totalf * 1024
-
-         
-      self.saveFloat("TOTAL", totalf, "Total diff (MB)", "Jemalloc")
-      for f in allocs:
-          print(f)
-          methodname = f[-1]
-          methodlost = float(f[0])
-          self.saveFloat(methodname, methodlost, "Memory diff in method", "Jemalloc")
-         
-      # Get the display in postscript
-      processPprofPs(data, basefile, comparefile, "jemalloc.ps")
-      self.saveFile("jemalloc.ps", "jemalloc.ps", "Diff between memory snapshots", "Jemalloc")
-      
-
-
-      
+    """LHCbPR Handler to extract information from Jemalloc heap files"""
+
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.finished = False
+        self.results = []
+        self.basefilename = "runinfo.json"
+
+    def collectResults(self, directory):
+        """Collect un results"""
+
+        # First check that we have the log file...
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        # Parse the JSON input file
+        data = {}
+        import json
+
+        with open(self.basefilename) as f:
+            data = json.load(f)
+
+        # Now find the files, sorted in order
+        rundir = os.path.dirname(os.path.abspath(self.basefilename))
+        heapfiles = findHeapFiles(data, rundir)
+
+        # Choose the files to be compared...
+        basefile = heapfiles[0]
+        comparefile = [f for f in heapfiles if not f.endswith(".f.heap")][-1]
+
+        # Get the top algorithms
+        (total, totalUnit, allocs) = processPprofText(data, basefile,
+                                                      comparefile)
+        totalf = float(total)
+        if totalUnit.upper() == "GB":
+            totalf = totalf * 1024
+
+        self.saveFloat("TOTAL", totalf, "Total diff (MB)", "Jemalloc")
+        for f in allocs:
+            print(f)
+            methodname = f[-1]
+            methodlost = float(f[0])
+            self.saveFloat(methodname, methodlost, "Memory diff in method",
+                           "Jemalloc")
+
+        # Get the display in postscript
+        processPprofPs(data, basefile, comparefile, "jemalloc.ps")
+        self.saveFile("jemalloc.ps", "jemalloc.ps",
+                      "Diff between memory snapshots", "Jemalloc")
diff --git a/handlers/LogFileHandler.py b/handlers/LogFileHandler.py
index 8c708d7a1dec928d63ffd31d7410dd8f327fd512..dfe0b07da8a2b8044454aacfaf5d9122b64704c9 100644
--- a/handlers/LogFileHandler.py
+++ b/handlers/LogFileHandler.py
@@ -3,21 +3,26 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class LogFileHandler(BaseHandler):
-    """ Stores a log file called run.log. """
+    """Stores a log file called run.log."""
+
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-        logfile  = 'run.log'
+    def collectResults(self, directory):
+        logfile = "run.log"
         filename = os.path.join(directory, logfile)
         if not os.path.exists(filename):
             raise Exception("File %s does not exist" % filename)
 
         self.saveFile(logfile, filename, "Logfile", "")
 
+
 if __name__ == "__main__":
     lfh = LogFileHandler()
-    lfh.collectResults('/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time')
+    lfh.collectResults(
+        "/afs/cern.ch/lhcb/software/profiling/releases/MOORE/MOORE_v14r11/x86_64-slc6-gcc46-opt/20131112_1712_time"
+    )
diff --git a/handlers/MemoryHandler.py b/handlers/MemoryHandler.py
index ea7349932b9182474cfa4ea93e13b1a86e8bdece..3f39655d25bde1c6e0a2da4fb355bb21c93baef2 100644
--- a/handlers/MemoryHandler.py
+++ b/handlers/MemoryHandler.py
@@ -3,31 +3,31 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
-class MemoryHandler(BaseHandler):
 
-   def __init__(self):
-      super(self.__class__, self).__init__()
-      self.finished = False
-      self.results = []
+class MemoryHandler(BaseHandler):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.finished = False
+        self.results = []
 
-   def collectResults(self,directory):
-      from .timing.MemoryParser import MemoryParser, MemNode
-      mp = MemoryParser(os.path.join(directory,'run.log'));
+    def collectResults(self, directory):
+        from .timing.MemoryParser import MemoryParser, MemNode
 
-      # Now saving all the nodes
-      peak_rs, peak_vm = MemNode.getPeakMemory()
-      self.saveFloat("Total Virt. Memory", peak_vm, "Memory [MB]", "Memory")
-      self.saveFloat("Total Res. Memory", peak_rs, "Memory [MB]", "Memory")
-      init_rs, init_vm = MemNode.getInitializationMemory()
-      self.saveFloat("Init. Virt. Memory", init_vm, "Memory [MB]", "Memory")
-      self.saveFloat("Init. Res. Memory", init_rs, "Memory [MB]", "Memory")
-      exec_rs, exec_vm = MemNode.getExecutionMemory()
-      self.saveFloat("Exec. Virt. Memory", exec_vm, "Memory [MB]", "Memory")
-      self.saveFloat("Exec. Res. Memory", exec_rs, "Memory [MB]", "Memory")
-      fini_rs, fini_vm = MemNode.getFinalizationMemory()
-      self.saveFloat("Fini. Virt. Memory", fini_vm, "Memory [MB]", "Memory")
-      self.saveFloat("Fini. Res. Memory", fini_rs, "Memory [MB]", "Memory")
-      evts_rs, evts_vm = MemNode.getMemPerEvent()
-      self.saveFloat("Virt. Memory / Ev.", evts_vm, "Memory [MB]", "Memory")
-      self.saveFloat("Res. Memory / Ev.", evts_rs, "Memory [MB]", "Memory")
+        mp = MemoryParser(os.path.join(directory, "run.log"))
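+        # Parsing run.log presumably populates MemNode's aggregate statistics queried below.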
 
+        # Now saving all the nodes
+        peak_rs, peak_vm = MemNode.getPeakMemory()
+        self.saveFloat("Total Virt. Memory", peak_vm, "Memory [MB]", "Memory")
+        self.saveFloat("Total Res. Memory", peak_rs, "Memory [MB]", "Memory")
+        init_rs, init_vm = MemNode.getInitializationMemory()
+        self.saveFloat("Init. Virt. Memory", init_vm, "Memory [MB]", "Memory")
+        self.saveFloat("Init. Res. Memory", init_rs, "Memory [MB]", "Memory")
+        exec_rs, exec_vm = MemNode.getExecutionMemory()
+        self.saveFloat("Exec. Virt. Memory", exec_vm, "Memory [MB]", "Memory")
+        self.saveFloat("Exec. Res. Memory", exec_rs, "Memory [MB]", "Memory")
+        fini_rs, fini_vm = MemNode.getFinalizationMemory()
+        self.saveFloat("Fini. Virt. Memory", fini_vm, "Memory [MB]", "Memory")
+        self.saveFloat("Fini. Res. Memory", fini_rs, "Memory [MB]", "Memory")
+        evts_rs, evts_vm = MemNode.getMemPerEvent()
+        self.saveFloat("Virt. Memory / Ev.", evts_vm, "Memory [MB]", "Memory")
+        self.saveFloat("Res. Memory / Ev.", evts_rs, "Memory [MB]", "Memory")
diff --git a/handlers/MuonMoniPLHandler.py b/handlers/MuonMoniPLHandler.py
index 5f9415b27793a50b4c6095b392a4f508493552df..72e001f4d90e8e1bf8fb3bd3f0f2820a8029430e 100644
--- a/handlers/MuonMoniPLHandler.py
+++ b/handlers/MuonMoniPLHandler.py
@@ -12,101 +12,223 @@ from ROOT import TFile, TH1D, TTree, gDirectory
 
 from .BaseHandler import BaseHandler
 
+
 def labelHist(hist, xtitle, ytitle, phys_list):
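+    # NOTE: phys_list is accepted for symmetry with the callers but not used here.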
     hist.SetXTitle(xtitle)
     hist.GetXaxis().CenterTitle()
     hist.SetYTitle(ytitle)
     hist.GetYaxis().CenterTitle()
 
-def getHist(in_file, directory,out_file, phys_list):
-       inroot = in_file
-
-       inpath = os.path.join(directory,inroot)
-
-       print('Opening rootfile' + inpath)
-
-       infile = TFile.Open(inpath)
-
-       hist_MuSct = []
-       for i in range(1,5):
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}_prof'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}_prof'.format(i)))
 
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}_prof'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}_prof'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}_prof'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}_prof'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}_prof'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}_prof'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}_prof'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}_prof'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}_prof'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]',phys_list)
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}_prof'.format(i)))
-
-       for histogram in hist_MuSct:
-           old_name = histogram.GetName()
-           histogram.SetName(phys_list + '_' + old_name)
-
-       MuSct_dict = {j.GetName():j for j in hist_MuSct} # dictionary of form 'name of histogram' : 'histogram'
-
-
-       out_file.mkdir(phys_list)
-       out_file.cd(phys_list)
-
-       for key in MuSct_dict:
-           MuSct_dict[key].Write(key)
+def getHist(in_file, directory, out_file, phys_list):
+    inroot = in_file
+
+    inpath = os.path.join(directory, inroot)
+
+    print("Opening rootfile" + inpath)
+
+    infile = TFile.Open(inpath)
+
+    # Histogram base names and their axis titles; each exists for four muon
+    # filters (MF1-MF4), both as a 2D histogram and as a "_prof" profile.
+    base = "MuonMultipleScatteringChecker/MuonMultipleScatteringTest/"
+    axis_titles = {
+        "dxdTx": ("Angular Deviation [mRad]", "Displacement [mm]"),
+        "dydTy": ("Angular Deviation [mRad]", "Displacement [mm]"),
+        "pdx": ("Displacement [mm]", "Particle Momentum [GeV/c]"),
+        "pdy": ("Displacement [mm]", "Particle Momentum [GeV/c]"),
+        "pdTx": ("Angular Deviation [mRad]", "Particle Momentum [GeV/c]"),
+        "pdTy": ("Angular Deviation [mRad]", "Particle Momentum [GeV/c]"),
+    }
+
+    hist_MuSct = []
+    for i in range(1, 5):
+        for suffix in ("", "_prof"):
+            for name, (xtitle, ytitle) in axis_titles.items():
+                hist = infile.Get("{}{}_MF{}{}".format(base, name, i, suffix))
+                labelHist(hist, xtitle, ytitle, phys_list)
+                hist_MuSct.append(hist)
+
+    # Prefix every histogram name with its physics list to keep keys unique.
+    for histogram in hist_MuSct:
+        histogram.SetName(phys_list + "_" + histogram.GetName())
+
+    # dictionary of form {'name of histogram': histogram}
+    MuSct_dict = {j.GetName(): j for j in hist_MuSct}
+
+    out_file.mkdir(phys_list)
+    out_file.cd(phys_list)
+
+    for key in MuSct_dict:
+        MuSct_dict[key].Write(key)
 
 
 class MuonMoniPLHandler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-       # Placeholder for holding histograms
+        # Locations of the per-physics-list MuonMoniSim output files
 
-       prefix = 'MuonTestResults/MuonMoniSim_'
-       physics_lists = ['EmOpt1','EmNoCuts', 'EmNoCutsNoLHCb']
-       postfix = '.root'
+        prefix = "MuonTestResults/MuonMoniSim_"
+        physics_lists = ["EmOpt1", "EmNoCuts", "EmNoCutsNoLHCb"]
+        postfix = ".root"
 
-       present_lists = []
+        present_lists = []
 
-       for i in physics_lists:
-           root_loc =  prefix + i + postfix
-           file_loc = os.path.join(directory,root_loc)
+        for i in physics_lists:
+            root_loc = prefix + i + postfix
+            file_loc = os.path.join(directory, root_loc)
 
-           if os.path.isfile(file_loc) == True:
+            if os.path.isfile(file_loc):
                 present_lists.append(i)
-                print('\n Found file : ' + file_loc + '\n')
+                print("\n Found file : " + file_loc + "\n")
 
-       assert len(present_lists) is not 0
+        assert len(present_lists) != 0, "no MuonMoniSim input files found"
 
-       outfile = TFile('MuonMoniOut.root', 'RECREATE')
+        outfile = TFile("MuonMoniOut.root", "RECREATE")
 
-       for G4list in present_lists:
-           getHist(prefix + G4list + postfix, directory, outfile, G4list)
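+        # getHist writes each list's histograms into a matching subdirectory of outfile.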
+        for G4list in present_lists:
+            getHist(prefix + G4list + postfix, directory, outfile, G4list)
 
-       # #save your file
-       self.saveFile('MuonMoniSimRes', './MuonMoniOut.root')
+        # save the output file
+        self.saveFile("MuonMoniSimRes", "./MuonMoniOut.root")
diff --git a/handlers/MuonMoniTestHandler.py b/handlers/MuonMoniTestHandler.py
index a0e9475835ed2217720cec10702200be8d0224bb..660a45d1b8abffe24ceb5ff047e9f96054b2664c 100644
--- a/handlers/MuonMoniTestHandler.py
+++ b/handlers/MuonMoniTestHandler.py
@@ -12,6 +12,7 @@ from ROOT import TFile, TH1D, TTree, gDirectory
 
 from .BaseHandler import BaseHandler
 
+
 def labelHist(hist, xtitle, ytitle):
     hist.SetXTitle(xtitle)
     hist.GetXaxis().CenterTitle()
@@ -19,84 +20,137 @@ def labelHist(hist, xtitle, ytitle):
     hist.GetYaxis().CenterTitle()
 
 
-
 class MuonMoniTestHandler(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-       inroot = "MuonMoniSim_histos.root"
-       inpath = os.path.join(directory,inroot) 
-       infile = TFile.Open(inpath) 
-
-    #####  Collects data from MuonMoniSim output  #####
-   
-    #   MuonHitChecker Histograms in the input file are numbered 1000-1019 for time multiplicities
-    #                                            2000-2019 for radial multiplicites
-    #  Below gets the histograms and stores them in lists
-
-       #histID_TM = [infile.Get('MuonHitChecker/1{0:03}'.format(i)) for i in range(0, 20)]
-       #histID_RM = [infile.Get('MuonHitChecker/2{0:03}'.format(i)) for i in range(0, 20)]  
-
-    #  Collects histograms for the MuonMultipleScatteringChecker and labels the axes
-       hist_MuSct = []
-       for i in range(1,5):
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]')
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dxdTx_MF{}'.format(i)))
-           
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Displacement [mm]')
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/dydTy_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]')
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdx_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}'.format(i)), 'Displacement [mm]', 'Particle Momentum [GeV/c]')
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdy_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]')
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTx_MF{}'.format(i)))
-
-           labelHist(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}'.format(i)), 'Angular Deviation [mRad]', 'Particle Momentum [GeV/c]')         
-           hist_MuSct.append(infile.Get('MuonMultipleScatteringChecker/MuonMultipleScatteringTest/pdTy_MF{}'.format(i)))
-
-    # Performs manipulation of the MuonHitChecker histograms
-       #Mu_Station = 0
-    # Creates a dictionary to hold the histograms, key being their titles and their values being 
-    # the data from the corresponding histograms
-       #MuHitCheck_dict = {}
-           
-    # Adds each of the 4 histograms for each of the muon stations together
-       #for n in range(0,20):
-           #if n % 4 == 0:
-               #Mu_Station += 1
-               #TM_hist = histID_TM[n].Clone("Time_multiplicity_M{mu}".format(mu=Mu_Station))
-               #RM_hist = histID_RM[n].Clone("Radial_multiplicity_M{mu}".format(mu=Mu_Station))
-
-    # Ensures every four histograms are added together, remembering we start from 1000 or 2000
-           #else:
-               #TM_hist.Add(histID_TM[n],1)
-               #RM_hist.Add(histID_RM[n],1)
-           #if n != 0 and n % 4 == 3:
-               #TM_hist.SetTitle('Time Multiplicity M{mu}'.format(mu = Mu_Station))
-               #RM_hist.SetTitle('Radial Multiplicity M{mu}'.format(mu = Mu_Station))
-               #MuHitCheck_dict[TM_hist.GetName()] = TM_hist
-               #MuHitCheck_dict[RM_hist.GetName()] = RM_hist
-       
-    # Puts MuonMultipleScatteringChecker into a dictionary of nice format for writing to ROOT file
-
-       print(hist_MuSct)
-
-    
-       MuSct_dict = {j.GetName():j for j in hist_MuSct} # dictionary of form 'name of histogram' : 'histogram'
-   
-    # Saves histograms in a root file
-       outfile = TFile('MuonMoniOut.root', 'NEW')
-       
-       #for key in MuHitCheck_dict:
-          # MuHitCheck_dict[key].Write(key)
-
-       for key in MuSct_dict:
-           MuSct_dict[key].Write(key)
-       
-       self.saveFile('MuonMoniSimRes', './MuonMoniOut.root')
+        inroot = "MuonMoniSim_histos.root"
+        inpath = os.path.join(directory, inroot)
+        infile = TFile.Open(inpath)
+
+        #####  Collects data from MuonMoniSim output  #####
+
+        # MuonHitChecker histograms in the input file are numbered 1000-1019
+        # for time multiplicities and 2000-2019 for radial multiplicities.
+        # Below gets the histograms and stores them in lists.
+
+        # histID_TM = [infile.Get('MuonHitChecker/1{0:03}'.format(i)) for i in range(0, 20)]
+        # histID_RM = [infile.Get('MuonHitChecker/2{0:03}'.format(i)) for i in range(0, 20)]
+
+        #  Collects histograms for the MuonMultipleScatteringChecker and labels the axes
+        base = "MuonMultipleScatteringChecker/MuonMultipleScatteringTest/"
+        axis_titles = {
+            "dxdTx": ("Angular Deviation [mRad]", "Displacement [mm]"),
+            "dydTy": ("Angular Deviation [mRad]", "Displacement [mm]"),
+            "pdx": ("Displacement [mm]", "Particle Momentum [GeV/c]"),
+            "pdy": ("Displacement [mm]", "Particle Momentum [GeV/c]"),
+            "pdTx": ("Angular Deviation [mRad]", "Particle Momentum [GeV/c]"),
+            "pdTy": ("Angular Deviation [mRad]", "Particle Momentum [GeV/c]"),
+        }
+        hist_MuSct = []
+        for i in range(1, 5):
+            for name, (xtitle, ytitle) in axis_titles.items():
+                hist = infile.Get("{}{}_MF{}".format(base, name, i))
+                labelHist(hist, xtitle, ytitle)
+                hist_MuSct.append(hist)
+
+        # Performs manipulation of the MuonHitChecker histograms
+        # Mu_Station = 0
+        # Creates a dictionary to hold the histograms, key being their titles and their values being
+        # the data from the corresponding histograms
+        # MuHitCheck_dict = {}
+
+        # Adds each of the 4 histograms for each of the muon stations together
+        # for n in range(0, 20):
+        #     if n % 4 == 0:
+        #         Mu_Station += 1
+        #         TM_hist = histID_TM[n].Clone("Time_multiplicity_M{mu}".format(mu=Mu_Station))
+        #         RM_hist = histID_RM[n].Clone("Radial_multiplicity_M{mu}".format(mu=Mu_Station))
+        #     # Ensures every four histograms are added together, remembering we start from 1000 or 2000
+        #     else:
+        #         TM_hist.Add(histID_TM[n], 1)
+        #         RM_hist.Add(histID_RM[n], 1)
+        #     if n != 0 and n % 4 == 3:
+        #         TM_hist.SetTitle('Time Multiplicity M{mu}'.format(mu=Mu_Station))
+        #         RM_hist.SetTitle('Radial Multiplicity M{mu}'.format(mu=Mu_Station))
+        #         MuHitCheck_dict[TM_hist.GetName()] = TM_hist
+        #         MuHitCheck_dict[RM_hist.GetName()] = RM_hist
+
+        # Puts MuonMultipleScatteringChecker into a dictionary of nice format for writing to ROOT file
+
+        print(hist_MuSct)
+
+        # dictionary of form {'name of histogram': histogram}
+        MuSct_dict = {j.GetName(): j for j in hist_MuSct}
+
+        # Saves histograms in a root file
+        outfile = TFile("MuonMoniOut.root", "NEW")
+
+        # for key in MuHitCheck_dict:
+        # MuHitCheck_dict[key].Write(key)
+
+        for key in MuSct_dict:
+            MuSct_dict[key].Write(key)
+
+        self.saveFile("MuonMoniSimRes", "./MuonMoniOut.root")
diff --git a/handlers/PVCheckerHandler.py b/handlers/PVCheckerHandler.py
index aaf53d49f8db2293aeae1a83b77d0c373fea3089..1ee204344dd0bd455faa4ac52ae9b0f7978b45c6 100644
--- a/handlers/PVCheckerHandler.py
+++ b/handlers/PVCheckerHandler.py
@@ -1,4 +1,3 @@
-
 import os
 import re
 import math
@@ -6,142 +5,241 @@ from .BaseHandler import BaseHandler
 import ROOT as r
 from collections import OrderedDict
 
-class PVCheckerHandler(BaseHandler):
 
+class PVCheckerHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-
     def getEff(self, num, denom):
-        '''
+        """
-        Return the tuple (eff, err_eff) ginving the numerator and the denominator
-        if denom == 0  returns (0, 0)
+        Return the tuple (eff, err_eff) given the numerator and the denominator;
+        if denom == 0, returns (0, 0).
-        '''
+        """
         if denom == 0:
             return (0, 0)
         else:
-            eff  = num/denom
-            eff_err = math.sqrt(eff*(1-eff)/denom)
+            eff = num / denom
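+            # binomial uncertainty on the efficiency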
+            eff_err = math.sqrt(eff * (1 - eff) / denom)
             return eff, eff_err
 
-
     def extractPerf(self, infile):
-        '''
+        """
-        Extract the interesting performance numbers from the log file
-        returns a dictionary {key: (value, error, explanation)}
+        Extract the interesting performance numbers from the log file;
+        returns a dictionary {key: (value, error, explanation)}.
-        '''
+        """
 
         numbers = OrderedDict()
 
-        for line in open(infile,'r'):
-            if line.find('PVChecker') > -1:
-                if (line.find("All") > -1) and 'EffPVall' not in numbers: # Of 'All' I want only the first occurence in the log file because it appears also later
-                    num, denom = re.findall('PVChecker.*?All.*?\(([ 0-9\.]*?)/([ 0-9\.]*?)\)', line)[0]
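+        # Each matched PVChecker line carries either counts "(num/denom)" or
+        # "mean = m +/- dm, RMS = r +/- dr" summaries.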
+        for line in open(infile, "r"):
+            if line.find("PVChecker") > -1:
+                # Only the first 'All' occurrence matters; it appears again later in the log.
+                if line.find("All") > -1 and "EffPVall" not in numbers:
+                    num, denom = re.findall(
+                        "PVChecker.*?All.*?\(([ 0-9\.]*?)/([ 0-9\.]*?)\)",
+                        line)[0]
                     num, denom = float(num), float(denom)
                     eff, eff_err = self.getEff(num, denom)
-                    numbers['EffPVall'] = (eff, eff_err, 'PV efficiency')
+                    numbers["EffPVall"] = (eff, eff_err, "PV efficiency")
                 elif line.find("False rate") > -1:
-                    num, denom = re.findall('PVChecker.*?False rate.*?\(([ 0-9\.]*?)/([ 0-9\.]*?)\)', line)[0]
+                    num, denom = re.findall(
+                        "PVChecker.*?False rate.*?\(([ 0-9\.]*?)/([ 0-9\.]*?)\)",
+                        line)[0]
                     num, denom = float(num), float(denom)
                     eff, eff_err = self.getEff(num, denom)
-                    numbers['falseRate'] = (eff, eff_err, 'PV fake rate')
+                    numbers["falseRate"] = (eff, eff_err, "PV fake rate")
                 elif line.find("INFO dx:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['dx_mean'] = (mean, mean_err, 'mean x distance between reco and MC vertices')
-                    numbers['dx_rms'] = (rms, rms_err, 'rms x distance between reco and MC vertices')
+                    numbers["dx_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean x distance between reco and MC vertices",
+                    )
+                    numbers["dx_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms x distance between reco and MC vertices",
+                    )
                 elif line.find("INFO dy:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['dy_mean'] = (mean, mean_err, 'mean y distance between reco and MC vertices')
-                    numbers['dy_rms'] = (rms, rms_err, 'rms y distance between reco and MC vertices')
+                    numbers["dy_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean y distance between reco and MC vertices",
+                    )
+                    numbers["dy_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms y distance between reco and MC vertices",
+                    )
                 elif line.find("INFO dz:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['dz_mean'] = (mean, mean_err, 'mean z distance between reco and MC vertices')
-                    numbers['dz_rms'] = (rms, rms_err, 'rms z distance between reco and MC vertices')
+                    numbers["dz_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean z distance between reco and MC vertices",
+                    )
+                    numbers["dz_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms z distance between reco and MC vertices",
+                    )
                 elif line.find("INFO pullx:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['pullx_mean'] = (mean, mean_err, 'mean pull of the x distance between reco and MC vertices')
-                    numbers['pullx_rms'] = (rms, rms_err, 'rms pull of the x distance between reco and MC vertices')
+                    numbers["pullx_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean pull of the x distance between reco and MC vertices",
+                    )
+                    numbers["pullx_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms pull of the x distance between reco and MC vertices",
+                    )
                 elif line.find("INFO pully:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['pully_mean'] = (mean, mean_err, 'mean pull of the y distance between reco and MC vertices')
-                    numbers['pully_rms'] = (rms, rms_err, 'rms pull of the y distance between reco and MC vertices')
+                    numbers["pully_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean pull of the y distance between reco and MC vertices",
+                    )
+                    numbers["pully_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms pull of the y distance between reco and MC vertices",
+                    )
                 elif line.find("INFO pullz:") > -1:
-                    mean, mean_err, rms, rms_err = re.findall('PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$', line)[0]
+                    mean, mean_err, rms, rms_err = re.findall(
+                        r"PVChecker.*? mean = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?), RMS = ([ 0-9\.\-]*?) \+/\- ([ 0-9\.\-]*?)$",
+                        line,
+                    )[0]
-                    for i in [mean, mean_err, rms, rms_err]:
-                        i = float(i)
+                    mean, mean_err, rms, rms_err = map(
+                        float, (mean, mean_err, rms, rms_err))
-                    numbers['pullz_mean'] = (mean, mean_err, 'mean pull of the z distance between reco and MC vertices')
-                    numbers['pullz_rms'] = (rms, rms_err, 'rms pull of the z distance between reco and MC vertices')
+                    numbers["pullz_mean"] = (
+                        mean,
+                        mean_err,
+                        "mean pull of the z distance between reco and MC vertices",
+                    )
+                    numbers["pullz_rms"] = (
+                        rms,
+                        rms_err,
+                        "rms pull of the z distance between reco and MC vertices",
+                    )
                     break
 
         return numbers
 
-
     def extractHists(self, infile):
-        '''
+        """
         Extract the interesting histograms from the root file
-        (N.B The root file should have already been opened)
-        returns a dictionary with {key : (histo, explanation)}
+        (N.B. the root file should have already been opened);
+        returns a dictionary with {key: (histo, explanation)}.
-        '''
+        """
 
         histos = OrderedDict()
 
-        histos['PV_dx'] = (infile.Get('PVChecker/1021').Clone(), 'x distance between reco and MC vertices')
-        histos['PV_dy'] = (infile.Get('PVChecker/1022'), 'y distance between reco and MC vertices')
-        histos['PV_dz'] = (infile.Get('PVChecker/1023'), 'z distance between reco and MC vertices')
-        histos['PV_pullx'] = (infile.Get('PVChecker/1031'), 'pull of x distance between reco and MC vertices')
-        histos['PV_pully'] = (infile.Get('PVChecker/1032'), 'pull of y distance between reco and MC vertices')
-        histos['PV_pullz'] = (infile.Get('PVChecker/1033'), 'pull of z distance between reco and MC vertices')
-        histos['PV_nTr4PVrec'] = (infile.Get('PVChecker/1041'), 'Number of tracks per reconstructed PV')
-        histos['PV_dnTr4PV_MCvsRec'] = (infile.Get('PVChecker/1041'), 'Difference in number of tracks between MC and reconstructed PV')
-        histos['PV_nPVperEvt'] = (infile.Get('PVChecker/1051'), 'Number of PVs per event')
+        histos["PV_dx"] = (
+            infile.Get("PVChecker/1021").Clone(),
+            "x distance between reco and MC vertices",
+        )
+        histos["PV_dy"] = (
+            infile.Get("PVChecker/1022"),
+            "y distance between reco and MC vertices",
+        )
+        histos["PV_dz"] = (
+            infile.Get("PVChecker/1023"),
+            "z distance between reco and MC vertices",
+        )
+        histos["PV_pullx"] = (
+            infile.Get("PVChecker/1031"),
+            "pull of x distance between reco and MC vertices",
+        )
+        histos["PV_pully"] = (
+            infile.Get("PVChecker/1032"),
+            "pull of y distance between reco and MC vertices",
+        )
+        histos["PV_pullz"] = (
+            infile.Get("PVChecker/1033"),
+            "pull of z distance between reco and MC vertices",
+        )
+        histos["PV_nTr4PVrec"] = (
+            infile.Get("PVChecker/1041"),
+            "Number of tracks per reconstructed PV",
+        )
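+        # NOTE: the next entry reads the same histogram ID (1041) as PV_nTr4PVrec.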
+        histos["PV_dnTr4PV_MCvsRec"] = (
+            infile.Get("PVChecker/1041"),
+            "Difference in number of tracks between MC and reconstructed PV",
+        )
+        histos["PV_nPVperEvt"] = (
+            infile.Get("PVChecker/1051"),
+            "Number of PVs per event",
+        )
 
         return histos
 
-
     def collectResults(self, directory):
-
         # get efficiency and fake rate from PVChecker log
-        possible_files = [i for i in os.listdir(directory) if re.match('run.log', i)]
+        possible_files = [
+            i for i in os.listdir(directory) if re.match("run.log", i)
+        ]
         if len(possible_files) == 0:
-            raise IOError('Input log file not found')
+            raise IOError("Input log file not found")
 
         numbers = self.extractPerf(os.path.join(directory, "run.log"))
 
         # save floats
         for key, (val, err, description) in list(numbers.items()):
-            self.saveFloat(key,
-                           val,
-                           description=description,
-                           group="performance")
-            self.saveFloat(key+'_err',
-                           err,
-                           description='Error of '+description,
-                           group="performance")
-
+            self.saveFloat(
+                key, val, description=description, group="performance")
+            self.saveFloat(
+                key + "_err",
+                err,
+                description="Error of " + description,
+                group="performance",
+            )
 
         # get histograms
-        possible_files = [i for i in os.listdir(directory) if re.match('Brunel-.*ev-histos.root', i)]
+        possible_files = [
+            i for i in os.listdir(directory)
+            if re.match("Brunel-.*ev-histos.root", i)
+        ]
         if len(possible_files) == 0:
-            raise IOError('Input root file with histograms not found')
+            raise IOError("Input root file with histograms not found")
 
         infile_root = r.TFile(os.path.join(directory, possible_files[0]))
         histos = self.extractHists(infile_root)
 
-        outfile = r.TFile('PVChecker.root', 'RECREATE')
+        outfile = r.TFile("PVChecker.root", "RECREATE")
 
         # save histograms
         for key, (histo, description) in list(histos.items()):
-            self.saveJSON(key, histo, description=description, group='performance')
+            self.saveJSON(
+                key, histo, description=description, group="performance")
             histo.Write()
 
-        self.saveFile('PVChecker', './PVChecker.root')
+        self.saveFile("PVChecker", "./PVChecker.root")
diff --git a/handlers/PerfHandler.py b/handlers/PerfHandler.py
index c7048e2294f5bbd43dbb0edd8b18aea68daf44c2..3077600588e34946b28251f87d8d99ef26816e89 100644
--- a/handlers/PerfHandler.py
+++ b/handlers/PerfHandler.py
@@ -3,34 +3,45 @@ import re
 import subprocess
 from .BaseHandler import BaseHandler
 
-class PerfHandler(BaseHandler):
 
+class PerfHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    REG="\s*(\d+\.\d+)\%\s+(\d+\.\d+)\%\s+(\w+)\s+(\w+\.\w+)\s+\[\.\]\s+(\w+\:\:\w+)$"
+    REG = "\s*(\d+\.\d+)\%\s+(\d+\.\d+)\%\s+(\w+)\s+(\w+\.\w+)\s+\[\.\]\s+(\w+\:\:\w+)$"
+
     # 53.76%     0.12%  python           libG4tracking.so                             [.] G4TrackingManager::ProcessOneTrack
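+    # REG groups: (1) children %, (2) self %, (3) command, (4) shared object, (5) symbol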
 
     def collectResults(self, directory):
-        self.saveFile('perf.lbr' , os.path.join(directory,'perf.lbr.txt') )
-        with open(os.path.join(directory,'perf.lbr.txt')) as f:
-
+        self.saveFile("perf.lbr", os.path.join(directory, "perf.lbr.txt"))
+        with open(os.path.join(directory, "perf.lbr.txt")) as f:
             for line in f.readlines():
                 metric = re.match(self.REG, line)
 
-                if metric != None and float(metric.group(2)) > 0.0:
+                if metric is not None and float(metric.group(2)) > 0.0:
-                    self.saveFloat(metric.group(5)+"_self",
-                                   metric.group(2),
-                                   description="time spent by function itself in %",
-                                   group="self")
-                    self.saveFloat(metric.group(5)+"_children",
-                                   metric.group(1),
-                                   description="time spent by children of function in %",
-                                   group="children")
+                    self.saveFloat(
+                        metric.group(5) + "_self",
+                        float(metric.group(2)),
+                        description="time spent by function itself in %",
+                        group="self",
+                    )
+                    self.saveFloat(
+                        metric.group(5) + "_children",
+                        float(metric.group(1)),
+                        description="time spent by children of function in %",
+                        group="children",
+                    )
 
         if not os.path.exists("FlameGraph"):
-            subprocess.call("git clone --depth 1 https://github.com/brendangregg/FlameGraph",
-                                 shell=True)
-
-        subprocess.call("perf script -i " + os.path.join(directory,"perf.log") + " | ./FlameGraph/stackcollapse-perf.pl | ./FlameGraph/flamegraph.pl > " + os.path.join(directory,"perf.svg"), shell=True)
-        self.saveFile('perf.svg' , os.path.join(directory,'perf.svg') )
+            subprocess.call(
+                "git clone --depth 1 https://github.com/brendangregg/FlameGraph",
+                shell=True,
+            )
+
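+        # Collapse the recorded perf stacks and render them as a flame-graph SVG.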
+        subprocess.call(
+            "perf script -i " + os.path.join(directory, "perf.log") +
+            " | ./FlameGraph/stackcollapse-perf.pl | ./FlameGraph/flamegraph.pl > "
+            + os.path.join(directory, "perf.svg"),
+            shell=True,
+        )
+        self.saveFile("perf.svg", os.path.join(directory, "perf.svg"))
diff --git a/handlers/PrCheckerCounterJSONhandler.py b/handlers/PrCheckerCounterJSONhandler.py
index e864a92fb5e8453eca557e1813937da2787342af..f838ca825bfe484e69958f2b2dc656c090c5ec3b 100644
--- a/handlers/PrCheckerCounterJSONhandler.py
+++ b/handlers/PrCheckerCounterJSONhandler.py
@@ -14,12 +14,22 @@ DEBUG = False
 def initCounterMap():
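+    """Expected counter names, keyed by the component that publishes them."""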
     basedict = {
         "TrackMonitor": {
-            "#Tracks", "#Long", "#Downstream", "#Ttrack", "#Upstream", "#Velo",
-            "#VeloBackward"
+            "#Tracks",
+            "#Long",
+            "#Downstream",
+            "#Ttrack",
+            "#Upstream",
+            "#Velo",
+            "#VeloBackward",
         },
         "fromPrTracksV1Tracks": {"Seeding", "Velo", "Forward", "Match"},
-        "CaloClusterEff":
-        {"ET250", "ET3000", "Brem_ET50", "mergedPi0_All", "resolvedPi0_All"},
+        "CaloClusterEff": {
+            "ET250",
+            "ET3000",
+            "Brem_ET50",
+            "mergedPi0_All",
+            "resolvedPi0_All",
+        },
         "CaloHypo_BkgFrac": {"Electron hypos", "Photon hypos"},
         "CaloHypo_Eff": {"Electron hypo", "Photon hypo"},
     }
@@ -40,12 +50,14 @@ def handler_TracksV1Track(dicname, dic, _list):
         if listname == "Velo":
             location.append([
                 "fromPr{}TracksV1TracksMerger".format(listname),
-                "Nb of converted Tracks", listname
+                "Nb of converted Tracks",
+                listname,
             ])
         else:
             location.append([
                 "fromPr{}TracksV1Tracks".format(listname),
-                "Nb of converted Tracks", listname
+                "Nb of converted Tracks",
+                listname,
             ])
     dicname = "fromPrTracksV1Tracks"
     drawhistogram(dicname, location, dic, "sum", "{}_sum".format(dicname))
@@ -65,12 +77,14 @@ def handler_CaloHypoEff_BkgFrac(dicname, dic, _list):
     location = []
     for listname in sorted(list(_list)):
         location.append([
-            "CaloHypoEff_ET250", "Bkg fraction in {}".format(listname),
-            "{}(ET250)".format(listname)
+            "CaloHypoEff_ET250",
+            "Bkg fraction in {}".format(listname),
+            "{}(ET250)".format(listname),
         ])
         location.append([
-            "CaloHypoEff_Brem_ET50", "Bkg fraction in {}".format(listname),
-            "{}(Brem_ET50)".format(listname)
+            "CaloHypoEff_Brem_ET50",
+            "Bkg fraction in {}".format(listname),
+            "{}(Brem_ET50)".format(listname),
         ])
     drawhistogram(dicname, location, dic, "efficiency", dicname)
 
@@ -79,18 +93,20 @@ def handler_CaloHypoEff_Eff(dicname, dic, _list):
     location = []
     for listname in sorted(list(_list)):
         location.append([
-            "CaloHypoEff_ET250", "{} efficiency".format(listname),
-            "{}(ET250)".format(listname)
+            "CaloHypoEff_ET250",
+            "{} efficiency".format(listname),
+            "{}(ET250)".format(listname),
         ])
     location.append([
-        "CaloHypoEff_Brem_ET50", "Photon hypo efficiency".format(listname),
-        "Photon hypo(Brem_ET50)".format(listname)
+        "CaloHypoEff_Brem_ET50",
+        "Photon hypo efficiency".format(listname),
+        "Photon hypo(Brem_ET50)".format(listname),
     ])
     drawhistogram(dicname, location, dic, "efficiency", dicname)
 
 
 def drawhistogram(dicname, location, dic, tupname, title):
-    ''' location(component,name,xtitle) '''
+    """location(component,name,xtitle)"""
     lisnum = len(location)
     canvas = ROOT.TCanvas(dicname, dicname)
     canvas.SetGrid()
@@ -98,7 +114,7 @@ def drawhistogram(dicname, location, dic, tupname, title):
     for index, entry in zip(range(lisnum), location):
         entity = [
             c for c in dic
-            if c['component'] == entry[0] and c['name'] == entry[1]
+            if c["component"] == entry[0] and c["name"] == entry[1]
         ]
         if len(entity) == 0:
             entity = [{
@@ -107,28 +123,36 @@ def drawhistogram(dicname, location, dic, tupname, title):
                     tupname: 0.0,
                     "empty": True
                 },
-                "name": entry[1]
+                "name": entry[1],
             }]
         if DEBUG:
             print(entity, entry)
         assert len(entity) == 1
-        if entity[0]['entity']["empty"] == False:
-            histo.SetBinContent( index + 1, entity[0]['entity'][tupname])
+        if entity[0]["entity"]["empty"] == False:
+            histo.SetBinContent(index + 1, entity[0]["entity"][tupname])
             if tupname == "mean":
-                histo.SetBinError(index + 1, entity[0]['entity']['standard_deviation'])
+                histo.SetBinError(index + 1,
+                                  entity[0]["entity"]["standard_deviation"])
             elif tupname == "efficiency":
-                histo.SetBinError(index + 1, entity[0]['entity']['efficiencyErr'])
-        elif entity[0]['entity']["empty"] == True:
-            '''zero will not shown in the plot (1.e-20 in case)'''
-            histo.SetBinContent(index + 1, 0.)
-            histo.SetBinError(index + 1, 0.)
+                histo.SetBinError(index + 1,
+                                  entity[0]["entity"]["efficiencyErr"])
+        elif entity[0]["entity"]["empty"] == True:
+            """zero will not shown in the plot (1.e-20 in case)"""
+            histo.SetBinContent(index + 1, 0.0)
+            histo.SetBinError(index + 1, 0.0)
         else:
-            print("ERROR: Counter ", entity[0]["component"], " : ", entity[0]["name"], "is missing empty tuple")
+            print(
+                "ERROR: Counter ",
+                entity[0]["component"],
+                " : ",
+                entity[0]["name"],
+                "is missing empty tuple",
+            )
         histo.GetXaxis().SetBinLabel(index + 1, entry[2])
-    maxYaxis=(histo.GetMaximum()+histo.GetBinError(histo.GetMaximumBin()))
-    minYaxis=(histo.GetMinimum()-histo.GetBinError(histo.GetMinimumBin()))
-    histo.SetMaximum(maxYaxis+(maxYaxis-minYaxis)/18.)
-    histo.SetMinimum(minYaxis-(maxYaxis-minYaxis)/18.)
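+    # Pad the Y range so error bars at the extremes stay inside the frame.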
+    maxYaxis = histo.GetMaximum() + histo.GetBinError(histo.GetMaximumBin())
+    minYaxis = histo.GetMinimum() - histo.GetBinError(histo.GetMinimumBin())
+    histo.SetMaximum(maxYaxis + (maxYaxis - minYaxis) / 18.0)
+    histo.SetMinimum(minYaxis - (maxYaxis - minYaxis) / 18.0)
     histo.SetName(dicname)
     histo.SetStats(0)
     histo.SetBarWidth(0.3)
@@ -147,47 +171,57 @@ def drawhistogram(dicname, location, dic, tupname, title):
     histo.GetYaxis().SetTitleSize(0.06)
     histo.GetYaxis().SetDecimals()
     histo.GetYaxis().CenterTitle(True)
-    #histo.GetYaxis().SetTitleOffset(0.8)
-    #histo.GetYaxis().SetTitle(ytitle)
+    # histo.GetYaxis().SetTitleOffset(0.8)
+    # histo.GetYaxis().SetTitle(ytitle)
     histo.SetTitle(title)
     histo.SetMarkerStyle(20)
     histo.SetMarkerColor(ROOT.kRed)
     histo.Draw("EP")
-    canvas.Write(dicname + '_' + tupname + "_canvas")
+    canvas.Write(dicname + "_" + tupname + "_canvas")
 
 
 class PrCheckerCounterJSONhandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform,
-                          hostname, cpu_info, memoryinfo, startTime, endTime,
-                          options):
-
-        logfile = os.path.join(directory, 'run.log')
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        logfile = os.path.join(directory, "run.log")
         jsonfile = grepPattern(
-            'INFO Writing JSON file (\S+)',
-            open(logfile, 'r', encoding='ISO-8859-1').read())
+            "INFO Writing JSON file (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
         if DEBUG:
-            print('directory', directory)
-            print('logfile', logfile)
-            print('jsonfile', jsonfile)
+            print("directory", directory)
+            print("logfile", logfile)
+            print("jsonfile", jsonfile)
 
-        of_full_name = os.path.join(directory, 'PrCheckerCountersPlots.root')
-        outputfile = ROOT.TFile(of_full_name, 'recreate')
+        of_full_name = os.path.join(directory, "PrCheckerCountersPlots.root")
+        outputfile = ROOT.TFile(of_full_name, "recreate")
 
-        with open(os.path.join(directory, jsonfile), 'r') as inputfile:
+        with open(os.path.join(directory, jsonfile), "r") as inputfile:
             json_data = json.load(inputfile)
             basedict = initCounterMap()
-            '''TrackMonitor'''
-            dicname = 'TrackMonitor'
+            """TrackMonitor"""
+            dicname = "TrackMonitor"
             subdic = [c for c in json_data if c["component"] == dicname]
             if DEBUG:
                 print(json.dumps(subdic, ensure_ascii=False, indent=4))
             outputfile.mkdir(dicname)
             outputfile.cd(dicname)
             handler_TrackMonitor(dicname, subdic, basedict[dicname])
-            '''TracksV1Tracks'''
+            """TracksV1Tracks"""
             dicname = "fromPrTracksV1Tracks"
             subdic = [
                 c for c in json_data
@@ -199,18 +233,18 @@ class PrCheckerCounterJSONhandler(BaseHandler):
             outputfile.mkdir(dicname)
             outputfile.cd(dicname)
             handler_TracksV1Track(dicname, subdic, basedict[dicname])
-            '''CaloClusterEff'''
+            """CaloClusterEff"""
             dicname = "CaloClusterEff"
             subdic = [
                 c for c in json_data if match(c["component"], dicname + "_*")
-                and c['name'] == "reco efficiency"
+                and c["name"] == "reco efficiency"
             ]
             if DEBUG:
                 print(json.dumps(subdic, ensure_ascii=False, indent=4))
             outputfile.mkdir(dicname)
             outputfile.cd(dicname)
             handler_CaloClusterEff(dicname, subdic, basedict[dicname])
-            '''CaloHypo_BkgFrac'''
+            """CaloHypo_BkgFrac"""
             dicname = "CaloHypo_BkgFrac"
             subdic = [
                 c for c in json_data if match(c["component"], "CaloHypoEff_*")
@@ -221,7 +255,7 @@ class PrCheckerCounterJSONhandler(BaseHandler):
             outputfile.mkdir(dicname)
             outputfile.cd(dicname)
             handler_CaloHypoEff_BkgFrac(dicname, subdic, basedict[dicname])
-            '''CaloHypo_Eff'''
+            """CaloHypo_Eff"""
             dicname = "CaloHypo_Eff"
             subdic = [
                 c for c in json_data if match(c["component"], "CaloHypoEff_*")
@@ -235,4 +269,4 @@ class PrCheckerCounterJSONhandler(BaseHandler):
 
         outputfile.Write()
         outputfile.Close()
-        self.saveFile('PrCheckerCountersPlots', of_full_name)
+        self.saveFile("PrCheckerCountersPlots", of_full_name)
diff --git a/handlers/PrCheckerEfficiencyHandler.py b/handlers/PrCheckerEfficiencyHandler.py
index 356f00e4685ddaadd70aa86ee105ed577974c7d5..28f12f0a84fd53a8ce42786746bf3cf14e206284 100644
--- a/handlers/PrCheckerEfficiencyHandler.py
+++ b/handlers/PrCheckerEfficiencyHandler.py
@@ -21,73 +21,92 @@ from collectRunResults import send_notification_mattermost
 
 log = logging.getLogger(__name__)
 
-def getEfficiencyHistoNames() :
+
+def getEfficiencyHistoNames():
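+    """Variables in which the efficiency histograms are binned."""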
     return ["eta", "p", "pt", "phi", "nPV"]
 
-def getTrackers() :
+
+def getTrackers():
     return ["Velo", "Upstream", "Forward"]
 
-def getOriginFolders() :
-     basedict = {
-        "Velo" : {},
-        "Upstream" : {},
-        "Forward" : {}
-        }
 
-     basedict["Velo"]["folder"] = "VeloMCChecker/"
-     basedict["Upstream"]["folder"] = "UpMCChecker/"
-     basedict["Forward"]["folder"] = "ForwardMCChecker/"
+def getOriginFolders():
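+    """ROOT folder of the MC-checker output for each tracker."""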
+    basedict = {"Velo": {}, "Upstream": {}, "Forward": {}}
+
+    basedict["Velo"]["folder"] = "VeloMCChecker/"
+    basedict["Upstream"]["folder"] = "UpMCChecker/"
+    basedict["Forward"]["folder"] = "ForwardMCChecker/"
+
+    return basedict
 
-     return basedict
 
-def getGhostHistoNames() :
+def getGhostHistoNames():
     return ["eta", "nPV"]
 
-class PrCheckerEfficiencyHandler(BaseHandler):
 
+class PrCheckerEfficiencyHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform, hostname, cpu_info, memoryinfo, startTime, endTime, options):
-
-        f = ROOT.TFile.Open(os.path.join(directory,'PrCheckerPlots.root'), 'read')
-        outputfile = ROOT.TFile( "efficiency_plots.root", "recreate" )
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        f = ROOT.TFile.Open(
+            os.path.join(directory, "PrCheckerPlots.root"), "read")
+        outputfile = ROOT.TFile("efficiency_plots.root", "recreate")
 
         if "EFFICIENCY_PLOTS_PRCHECKER_REFERENCE" not in os.environ:
-            raise Exception("Environment variable with the reference file is not set. Check node configuration")
+            raise Exception(
+                "Environment variable with the reference file is not set. Check node configuration"
+            )
         else:
-            file_ref=urlopen(os.environ["EFFICIENCY_PLOTS_PRCHECKER_REFERENCE"])
-            file_=open("./efficiency_plots_ref.root",'wb')
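+            # download the reference histogram file pointed to by the
+            # environment variable and cache it locally for the comparison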
+            file_ref = urlopen(
+                os.environ["EFFICIENCY_PLOTS_PRCHECKER_REFERENCE"])
+            file_ = open("./efficiency_plots_ref.root", "wb")
             file_.write(file_ref.read())
             file_.close()
-            f_ref = ROOT.TFile.Open(os.path.join('efficiency_plots_ref.root'), 'read')
+            f_ref = ROOT.TFile.Open(
+                os.path.join("efficiency_plots_ref.root"), "read")
 
         if not f_ref:
-            raise Exception("Couldn't get the file with the reference distribution")
+            raise Exception(
+                "Couldn't get the file with the reference distribution")
 
         from .utils.LHCbStyle import setLHCbStyle
-        from .utils.ConfigHistos import (efficiencyHistoDict,
-                                        ghostHistoDict,
-                                        categoriesDict,
-                                        getCuts)
+        from .utils.ConfigHistos import (
+            efficiencyHistoDict,
+            ghostHistoDict,
+            categoriesDict,
+            getCuts,
+        )
         from .utils.Legend import place_legend
 
         setLHCbStyle()
 
-        latex=ROOT.TLatex()
+        latex = ROOT.TLatex()
         latex.SetNDC()
         latex.SetTextSize(0.04)
 
         efficiencyHistoDict = efficiencyHistoDict()
-        efficiencyHistos    = getEfficiencyHistoNames()
-        ghostHistos         = getGhostHistoNames()
-        ghostHistoDict      = ghostHistoDict()
-        categories          = categoriesDict()
-        cuts                = getCuts()
-        trackers            = getTrackers()
-        folders             = getOriginFolders()
-
-        for tracker in trackers :
+        efficiencyHistos = getEfficiencyHistoNames()
+        ghostHistos = getGhostHistoNames()
+        ghostHistoDict = ghostHistoDict()
+        categories = categoriesDict()
+        cuts = getCuts()
+        trackers = getTrackers()
+        folders = getOriginFolders()
+
+        for tracker in trackers:
             outputfile.cd()
             trackerDir = outputfile.mkdir(tracker)
             trackerDir.cd()
@@ -101,107 +120,123 @@ class PrCheckerEfficiencyHandler(BaseHandler):
 
                 # calculate efficiency
                 for histo in efficiencyHistos:
-                    title = "efficiency vs. " + histo + ", " + categories[tracker][cut]["title"]
+                    title = ("efficiency vs. " + histo + ", " +
+                             categories[tracker][cut]["title"])
                     name = "efficiency vs. " + histo
                     canvas = ROOT.TCanvas(name, title)
                     ROOT.gPad.SetTicks()
                     # get efficiency for not electrons category
-                    histoName       = histoBaseName + "notElectrons_" + efficiencyHistoDict[histo]["variable"]
+                    histoName = (histoBaseName + "notElectrons_" +
+                                 efficiencyHistoDict[histo]["variable"])
                     print("not electrons: " + histoName)
-                    numeratorName   = histoName + "_reconstructed"
-                    numerator       = f.Get(numeratorName)
+                    numeratorName = histoName + "_reconstructed"
+                    numerator = f.Get(numeratorName)
                     denominatorName = histoName + "_reconstructible"
-                    denominator     = f.Get(denominatorName)
-                    if numerator.GetEntries() == 0 or denominator.GetEntries() == 0 :
+                    denominator = f.Get(denominatorName)
+                    if (numerator.GetEntries() == 0
+                            or denominator.GetEntries() == 0):
                         continue
 
                     numerator.Sumw2()
                     denominator.Sumw2()
 
                     g_efficiency_notElectrons = ROOT.TGraphAsymmErrors()
-                    g_efficiency_notElectrons.SetName("g_efficiency_notElectrons")
-                    g_efficiency_notElectrons.Divide(numerator, denominator, "w")
+                    g_efficiency_notElectrons.SetName(
+                        "g_efficiency_notElectrons")
+                    g_efficiency_notElectrons.Divide(numerator, denominator,
+                                                     "w")
                     g_efficiency_notElectrons.SetTitle("not electrons")
 
                     # get efficiency for electrons category
-                    if categories[tracker][cut]["plotElectrons"] :
-                        histoName       = histoBaseName + "electrons_" + efficiencyHistoDict[histo]["variable"]
+                    if categories[tracker][cut]["plotElectrons"]:
+                        histoName = (histoBaseName + "electrons_" +
+                                     efficiencyHistoDict[histo]["variable"])
                         print("electrons: " + histoName)
-                        numeratorName   = histoName + "_reconstructed"
-                        numerator       = f.Get(numeratorName)
+                        numeratorName = histoName + "_reconstructed"
+                        numerator = f.Get(numeratorName)
                         denominatorName = histoName + "_reconstructible"
-                        denominator     = f.Get(denominatorName)
-                        if numerator.GetEntries() == 0 or denominator.GetEntries() == 0 :
+                        denominator = f.Get(denominatorName)
+                        if (numerator.GetEntries() == 0
+                                or denominator.GetEntries() == 0):
                             continue
 
                         numerator.Sumw2()
                         denominator.Sumw2()
 
                         g_efficiency_electrons = ROOT.TGraphAsymmErrors()
-                        g_efficiency_electrons.SetName("g_efficiency_electrons")
-                        g_efficiency_electrons.Divide(numerator, denominator, "w")
+                        g_efficiency_electrons.SetName(
+                            "g_efficiency_electrons")
+                        g_efficiency_electrons.Divide(numerator, denominator,
+                                                      "w")
                         g_efficiency_electrons.SetTitle("electrons")
-                        g_efficiency_electrons.SetMarkerColor(ROOT.kAzure-3)
+                        g_efficiency_electrons.SetMarkerColor(ROOT.kAzure - 3)
                         g_efficiency_electrons.SetMarkerStyle(24)
-                        g_efficiency_electrons.SetLineColor(ROOT.kAzure-3)
+                        g_efficiency_electrons.SetLineColor(ROOT.kAzure - 3)
 
                     # draw them both
                     mg = ROOT.TMultiGraph()
                     mg.Add(g_efficiency_notElectrons)
-                    if categories[tracker][cut]["plotElectrons"] :
+                    if categories[tracker][cut]["plotElectrons"]:
                         mg.Add(g_efficiency_electrons)
 
                     mg.Draw("ap")
                     xtitle = efficiencyHistoDict[histo]["xTitle"]
                     mg.GetXaxis().SetTitle(xtitle)
                     mg.GetYaxis().SetTitle("efficiency")
-                    mg.GetYaxis().SetRangeUser(0,1)
+                    mg.GetYaxis().SetRangeUser(0, 1)
 
-                    if categories[tracker][cut]["plotElectrons"] :
+                    if categories[tracker][cut]["plotElectrons"]:
                         canvas.PlaceLegend()
 
                     mg.SetName("efficiency vs. " + histo + " ref")
-                    mg.SetTitle(tracker+" " +cut)
+                    mg.SetTitle(tracker + " " + cut)
                     mg.Write()
                     canvas.Write()
 
-                    mg_ref = f_ref.Get(tracker + "/" + cut + "/" + name + " ref")
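+                    # overlay the same graph from the reference file; its points
+                    # are restyled below (open squares) to stand out from the new ones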
+                    mg_ref = f_ref.Get(tracker + "/" + cut + "/" + name +
+                                       " ref")
                     for graph in mg_ref.GetListOfGraphs():
                         graph.SetMarkerStyle(25)
                         graph.SetMarkerSize(2)
-                        graph.SetTitle(graph.GetTitle()+" ref")
+                        graph.SetTitle(graph.GetTitle() + " ref")
                         graph.Draw("psame")
 
                     canvas.PlaceLegend()
-                    canvas.SetName(canvas.GetName()+" with ref")
+                    canvas.SetName(canvas.GetName() + " with ref")
                     canvas.Write()
 
-                    if tracker == "Forward"\
-                       and cut == "Long_eta25_triggerNumbers"\
-                       and histo == "pt":
-                        latex.DrawLatex(0.45,0.3,"Long, 2 < eta < 5, p > 3 GeV, pt > 500 MeV")
+                    if (tracker == "Forward"
+                            and cut == "Long_eta25_triggerNumbers"
+                            and histo == "pt"):
+                        latex.DrawLatex(
+                            0.45, 0.3,
+                            "Long, 2 < eta < 5, p > 3 GeV, pt > 500 MeV")
                         canvas.SaveAs("forward_long_eta25_effpt.png")
 
                     # add plot description
                     _root_str = ROOT.TNamed()
-                    _root_str.SetTitle(tracker+" "+cut)
-                    _root_str.SetName(canvas.GetName()+'__description')
+                    _root_str.SetTitle(tracker + " " + cut)
+                    _root_str.SetName(canvas.GetName() + "__description")
                     _root_str.Write()
-                    _root_str.SetName(canvas.GetName().replace(" with ref","")+'__description')
+                    _root_str.SetName(canvas.GetName().replace(
+                        " with ref", "") + "__description")
                     _root_str.Write()
 
             # calculate ghost rate
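+            # ghost rate = fake ("ghost") tracks over all reconstructed tracks,
+            # built from the "_Ghosts" and "_Total" histograms per variable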
             histoBaseName = "Track/" + folder + tracker + "/"
-            for histo in ghostHistos :
+            for histo in ghostHistos:
                 trackerDir.cd()
                 title = "ghost rate vs " + histo
                 canvas = ROOT.TCanvas(title, title)
                 ROOT.gPad.SetTicks()
-                numeratorName   = histoBaseName + ghostHistoDict[histo]["variable"] + "_Ghosts"
-                denominatorName = histoBaseName + ghostHistoDict[histo]["variable"] + "_Total"
+                numeratorName = (histoBaseName +
+                                 ghostHistoDict[histo]["variable"] + "_Ghosts")
+                denominatorName = (
+                    histoBaseName + ghostHistoDict[histo]["variable"] +
+                    "_Total")
                 print("ghost histo: " + histoBaseName)
-                numerator       = f.Get(numeratorName)
-                denominator     = f.Get(denominatorName)
+                numerator = f.Get(numeratorName)
+                denominator = f.Get(denominatorName)
                 numerator.Sumw2()
                 denominator.Sumw2()
 
@@ -222,39 +257,52 @@ class PrCheckerEfficiencyHandler(BaseHandler):
                 g_efficiency_ref.SetMarkerSize(2)
                 g_efficiency_ref.Draw("psame")
 
-                g_efficiency.SetName(g_efficiency.GetName().replace(" ref",""))
+                g_efficiency.SetName(g_efficiency.GetName().replace(
+                    " ref", ""))
                 canvas.PlaceLegend()
-                canvas.SetName(canvas.GetName()+" with ref")
+                canvas.SetName(canvas.GetName() + " with ref")
                 canvas.Write()
 
                 # add plot description
                 _root_str = ROOT.TNamed()
-                _root_str.SetTitle(tracker+" "+cut)
-                _root_str.SetName(canvas.GetName()+'__description')
+                _root_str.SetTitle(tracker + " " + cut)
+                _root_str.SetName(canvas.GetName() + "__description")
                 _root_str.Write()
-                _root_str.SetName(g_efficiency.GetName()+'__description')
+                _root_str.SetName(g_efficiency.GetName() + "__description")
                 _root_str.Write()
 
         outputfile.Write()
         outputfile.Close()
         f.Close()
-        self.saveFile('PrCheckerEfficiency','./efficiency_plots.root')
+        self.saveFile("PrCheckerEfficiency", "./efficiency_plots.root")
 
         # send plot to EOS
         wwwDirEos = os.environ.get("LHCBPR_WWW_EOS")
         if wwwDirEos == None:
-            raise Exception("No web dir on EOS defined, will not run extraction")
+            raise Exception(
+                "No web dir on EOS defined, will not run extraction")
         else:
-            time=str(startTime).replace(" +0200","").replace(" +0100","").replace(" ","_").replace("-","").replace(":","")
-            dirname="efficiency"
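+            # build a filesystem-safe timestamp: drop the " +0100"/" +0200"
+            # offset and strip spaces, dashes and colons for use in file names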
+            time = (str(startTime).replace(" +0200", "").replace(
+                " +0100", "").replace(" ", "_").replace("-", "").replace(
+                    ":", ""))
+            dirname = "efficiency"
             targetRootEosDir = os.path.join(wwwDirEos, dirname)
             try:
-                subprocess.call(['xrdcp', '-f', 'forward_long_eta25_effpt.png',
-                                 targetRootEosDir + "/forward_long_eta25_effpt_"+str(version)+"_"+str(platform)+"_"+time+".png"])
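+                # copy the snapshot plot to the EOS web area via XRootD;
+                # "-f" makes xrdcp overwrite an existing file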
+                subprocess.call([
+                    "xrdcp",
+                    "-f",
+                    "forward_long_eta25_effpt.png",
+                    targetRootEosDir + "/forward_long_eta25_effpt_" +
+                    str(version) + "_" + str(platform) + "_" + time + ".png",
+                ])
             except Exception as ex:
-                log.warning('Error copying to eos: %s', ex)
-
-            self.saveString("forward_long_eta25_effpt",
-                            "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"+dirname+"/forward_long_eta25_effpt_"+str(version)+"_"+str(platform)+"_"+time+".png",
-                            description="link to forward_long_eta25_effpt plot",
-                            group="prchecker_efficiency_plots")
+                log.warning("Error copying to eos: %s", ex)
+
+            self.saveString(
+                "forward_long_eta25_effpt",
+                "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/" + dirname +
+                "/forward_long_eta25_effpt_" + str(version) + "_" +
+                str(platform) + "_" + time + ".png",
+                description="link to forward_long_eta25_effpt plot",
+                group="prchecker_efficiency_plots",
+            )
diff --git a/handlers/PrCheckerEfficiencyHandler_HLT2.py b/handlers/PrCheckerEfficiencyHandler_HLT2.py
index 87263019c70a7981d9358ed6493a5c43e7cf1a39..b5f63b605c632ef8e4598e1f999491498ed381c2 100644
--- a/handlers/PrCheckerEfficiencyHandler_HLT2.py
+++ b/handlers/PrCheckerEfficiencyHandler_HLT2.py
@@ -18,156 +18,208 @@ from .BaseHandler import BaseHandler
 import logging
 import subprocess
 
-
 log = logging.getLogger(__name__)
 
-#from BaseHandler import BaseHandler
-
-def getEfficiencyHistoNames() :
-    return ["eta", "p", "pt", "phi", "nPV", "docaz"]
-
-def getTrackers() :
-    return ["Velo", "Upstream",#Made by PrChecker2Fast
-            "Forward", "TTrack", "Downstream", "Match", "Best", "BestDown", "BestLong"] #Made by PrChecker2
-
-def getOriginFolders() :
-     basedict = {
-        "Velo" : {},
-        "Upstream" : {},
-        "Forward" : {},
-        "TTrack" : {},
-        "Downstream" : {},
-        "Match" : {},
-        "Best" : {},
-        "BestDown" : {},
-        "BestLong" : {},
-        }
-
-     basedict["Velo"]["folder"] = "VeloMCChecker/"
-     basedict["Upstream"]["folder"] = "UpMCChecker/"
-     basedict["Forward"]["folder"] = "ForwardMCChecker/"
-     basedict["TTrack"]["folder"] = "TMCChecker/"
-     basedict["Match"]["folder"] = "MatchMCChecker/"
-     basedict["Best"]["folder"] = "BestMCChecker/"
-     basedict["Downstream"]["folder"] = "DownMCChecker/"
-     basedict["BestDown"]["folder"] = "BestDownMCChecker/"
-     basedict["BestLong"]["folder"] = "BestLongMCChecker/"
-
-     return basedict
+# from BaseHandler import BaseHandler
 
 
+def getEfficiencyHistoNames():
+    return ["eta", "p", "pt", "phi", "nPV", "docaz"]
 
 
-#def PrCheckerEfficiencyHandler_HLT2(outputname):
+def getTrackers():
+    return [
+        # Velo and Upstream are made by PrChecker2Fast
+        "Velo",
+        "Upstream",
+        # the remaining trackers are made by PrChecker2
+        "Forward",
+        "TTrack",
+        "Downstream",
+        "Match",
+        "Best",
+        "BestDown",
+        "BestLong",
+    ]
+
+
+def getOriginFolders():
+    basedict = {
+        "Velo": {},
+        "Upstream": {},
+        "Forward": {},
+        "TTrack": {},
+        "Downstream": {},
+        "Match": {},
+        "Best": {},
+        "BestDown": {},
+        "BestLong": {},
+    }
+
+    basedict["Velo"]["folder"] = "VeloMCChecker/"
+    basedict["Upstream"]["folder"] = "UpMCChecker/"
+    basedict["Forward"]["folder"] = "ForwardMCChecker/"
+    basedict["TTrack"]["folder"] = "TMCChecker/"
+    basedict["Match"]["folder"] = "MatchMCChecker/"
+    basedict["Best"]["folder"] = "BestMCChecker/"
+    basedict["Downstream"]["folder"] = "DownMCChecker/"
+    basedict["BestDown"]["folder"] = "BestDownMCChecker/"
+    basedict["BestLong"]["folder"] = "BestLongMCChecker/"
+
+    return basedict
+
+
+# def PrCheckerEfficiencyHandler_HLT2(outputname):
 class PrCheckerEfficiencyHandler_HLT2(BaseHandler):
-#outputname: the location and name for the output root file, which saves the efficiency plots
+    # outputname: location and name of the output ROOT file that stores the efficiency plots
 
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform, hostname, cpu_info, memoryinfo, startTime, endTime, options):
-        #print "directory: ", directory
-        #print "project: ", project
-        #print "version: ", version
-        #print "platform: ", platform
-        #print "hostname: ", hostname
-        #print "startTime: ", startTime
-        #print "endTime: ", endTime
-        #print "options: ", options
-
-        #Import LHCb style
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        # print "directory: ", directory
+        # print "project: ", project
+        # print "version: ", version
+        # print "platform: ", platform
+        # print "hostname: ", hostname
+        # print "startTime: ", startTime
+        # print "endTime: ", endTime
+        # print "options: ", options
+
+        # Import LHCb style
         from .utils.LHCbStyle import setLHCbStyle
-        #Import configurations, to determine which plots to draw
-        from .utils.ConfigHistos_HLT2 import (efficiencyHistoDict, #Include the variable in Xaxis, the X title.
-                                        categoriesDict,#Category of the tracks, and the corresponding title for the plot
-                                        getCuts) #Category of tracks. Used to construct the location to find the histogram in PrCheckerPlots_HLT2.root
-        #Import the method to draw TLegend in the graph
+
+        # Import configurations, to determine which plots to draw
+        from .utils.ConfigHistos_HLT2 import (
+            efficiencyHistoDict,  # x-axis variable and title for each histogram
+            categoriesDict,  # track categories and the corresponding plot titles
+            getCuts,  # track selections; used to build the histogram paths in PrCheckerPlots_HLT2.root
+        )
+
+        # Import the method to draw TLegend in the graph
         from .utils.Legend import place_legend
 
         setLHCbStyle()
 
-
         efficiencyHistoDict = efficiencyHistoDict()
-        efficiencyHistos    = getEfficiencyHistoNames()
-        categories          = categoriesDict()
-        cuts                = getCuts()
-        trackers            = getTrackers()
-        folders             = getOriginFolders()
-
-        #Define the location of the input root file, which contains the reconstructible & reconstructed histograms for drawing the efficiency plots.
-        inputfile = os.path.join(directory,'PrCheckerPlots_HLT2.root')
-        #Define the location of the output rootfile, which contains the efficiency plots
-        outputfile = ROOT.TFile( "./efficiency_hlt2_plots.root", "recreate" )
-
-        for tracker in trackers :#Loop for track category
+        efficiencyHistos = getEfficiencyHistoNames()
+        categories = categoriesDict()
+        cuts = getCuts()
+        trackers = getTrackers()
+        folders = getOriginFolders()
+
+        # Define the location of the input root file, which contains the reconstructible & reconstructed histograms for drawing the efficiency plots.
+        inputfile = os.path.join(directory, "PrCheckerPlots_HLT2.root")
+        # Define the location of the output rootfile, which contains the efficiency plots
+        outputfile = ROOT.TFile("./efficiency_hlt2_plots.root", "recreate")
+
+        for tracker in trackers:  # Loop for track category
             outputfile.cd()
             trackerDir = outputfile.mkdir(tracker)
             trackerDir.cd()
 
-            for cut in cuts[tracker]:#Loop for different selections on the tracks
+            # Loop over the different selections on the tracks
+            for cut in cuts[tracker]:
                 cutDir = trackerDir.mkdir(cut)
                 cutDir.cd()
                 folder = folders[tracker]["folder"]
                 histoBaseName = "Track/" + folder + tracker + "/" + cut + "_"
 
-
                 # calculate efficiency
-                for histo in efficiencyHistos:#Loop for kinematic variables
-                    draw_electron = True #Turn to False if no corresponding histograms are found
-                    draw_non_electron = True #Turn to False if no corresponding histograms are found
-                    title = "efficiency vs. " + histo + ", " + categories[tracker][cut]["title"]
+                for histo in efficiencyHistos:  # Loop for kinematic variables
+                    # Turned to False if no corresponding histograms are found
+                    draw_electron = True
+                    draw_non_electron = True
+                    title = ("efficiency vs. " + histo + ", " +
+                             categories[tracker][cut]["title"])
                     name = "efficiency vs. " + histo
                     canvas = ROOT.TCanvas(name, title)
                     ROOT.gPad.SetTicks()
                     # get efficiency plots for 'not electrons' category
                     # The base of histogram names is defined in utils.ConfigHistos_HLT2
-                    histoName       = histoBaseName + "notElectrons_" + efficiencyHistoDict[histo]["variable"]
-                    #read the input file and get the histograms
-                    f = ROOT.TFile.Open(inputfile,'read')
-                    numeratorName   = histoName + "_reconstructed"
-                    numerator       = f.Get(numeratorName)
+                    histoName = (histoBaseName + "notElectrons_" +
+                                 efficiencyHistoDict[histo]["variable"])
+                    # read the input file and get the histograms
+                    f = ROOT.TFile.Open(inputfile, "read")
+                    numeratorName = histoName + "_reconstructed"
+                    numerator = f.Get(numeratorName)
                     denominatorName = histoName + "_reconstructible"
-                    denominator     = f.Get(denominatorName)
+                    denominator = f.Get(denominatorName)
                     print("************")
                     print(histoName)
                     if not numerator or not denominator:
-                    #Print some information to tell whether a histogram is found or not
-                        print("Not found in input root file: ",numeratorName, " or ",denominatorName)
+                        # Print some information to tell whether a histogram is found or not
+                        print(
+                            "Not found in input root file: ",
+                            numeratorName,
+                            " or ",
+                            denominatorName,
+                        )
                         draw_non_electron = False
                     if draw_non_electron:
-                        print("reconstructed: " ,numerator.GetEntries(), "; reconsructible: ", denominator.GetEntries())
+                        print(
+                            "reconstructed: ",
+                            numerator.GetEntries(),
+                            "; reconsructible: ",
+                            denominator.GetEntries(),
+                        )
                     print("************")
                     if draw_non_electron:
                         numerator.Sumw2()
                         denominator.Sumw2()
-                        #Define the TGraphAsymmErrors which shows the efficiency distributions
+                        # Define the TGraphAsymmErrors which shows the efficiency distributions
                         g_efficiency_notElectrons = ROOT.TGraphAsymmErrors()
-                        g_efficiency_notElectrons.Divide(numerator, denominator, "cl=0.683 b(1,1) mode")
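+                        # binomial errors from a Bayesian interval: 68.3% CL,
+                        # flat Beta(1,1) prior, posterior mode as the estimate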
+                        g_efficiency_notElectrons.Divide(
+                            numerator, denominator, "cl=0.683 b(1,1) mode")
                         g_efficiency_notElectrons.SetTitle("not electrons")
 
-
                     # get efficiency plots for electrons category. The structure of script is the same as the non-electron one.
-                    histoName       = histoBaseName + "electrons_" + efficiencyHistoDict[histo]["variable"]
-                    f = ROOT.TFile.Open(inputfile,'read')
-                    numeratorName   = histoName + "_reconstructed"
-                    numerator       = f.Get(numeratorName)
+                    histoName = (histoBaseName + "electrons_" +
+                                 efficiencyHistoDict[histo]["variable"])
+                    f = ROOT.TFile.Open(inputfile, "read")
+                    numeratorName = histoName + "_reconstructed"
+                    numerator = f.Get(numeratorName)
                     denominatorName = histoName + "_reconstructible"
-                    denominator     = f.Get(denominatorName)
+                    denominator = f.Get(denominatorName)
                     if not numerator or not denominator:
-                        print("Not found in input root file: ",numeratorName, " or ",denominatorName)
+                        print(
+                            "Not found in input root file: ",
+                            numeratorName,
+                            " or ",
+                            denominatorName,
+                        )
                         draw_electron = False
                     if draw_electron:
-                        print("reconstructed: " ,numerator.GetEntries(), "; reconsructible: ", denominator.GetEntries())
+                        print(
+                            "reconstructed: ",
+                            numerator.GetEntries(),
+                            "; reconsructible: ",
+                            denominator.GetEntries(),
+                        )
                     print("************")
 
                     if draw_electron:
                         numerator.Sumw2()
                         denominator.Sumw2()
                         g_efficiency_electrons = ROOT.TGraphAsymmErrors()
-                        g_efficiency_electrons.Divide(numerator, denominator, "cl=0.683 b(1,1) mode")
+                        g_efficiency_electrons.Divide(numerator, denominator,
+                                                      "cl=0.683 b(1,1) mode")
                         g_efficiency_electrons.SetTitle("electrons")
-                        g_efficiency_electrons.SetMarkerColor(ROOT.kAzure-3)
-                        g_efficiency_electrons.SetLineColor(ROOT.kAzure-3)
+                        g_efficiency_electrons.SetMarkerColor(ROOT.kAzure - 3)
+                        g_efficiency_electrons.SetLineColor(ROOT.kAzure - 3)
 
                     cutDir.cd()
 
@@ -182,49 +234,58 @@ class PrCheckerEfficiencyHandler_HLT2(BaseHandler):
                         xtitle = efficiencyHistoDict[histo]["xTitle"]
                         mg.GetXaxis().SetTitle(xtitle)
                         mg.GetYaxis().SetTitle("efficiency")
-                        mg.GetYaxis().SetRangeUser(0,1)
+                        mg.GetYaxis().SetRangeUser(0, 1)
 
                         canvas.PlaceLegend()
                     f.Close()
                     if draw_electron or draw_non_electron:
                         canvas.Write()
 
-                        #Make the plots to be sent to eos
-                        if tracker == "BestLong" \
-                            and cut == "Long_eta25" \
-                            and histo == "pt":
-                            latex=ROOT.TLatex()
+                        # Make the plots to be sent to eos
+                        if (tracker == "BestLong" and cut == "Long_eta25"
+                                and histo == "pt"):
+                            latex = ROOT.TLatex()
                             latex.SetNDC()
                             latex.SetTextSize(0.04)
-                            latex.DrawLatex(0.45,0.3,"BestLong, 2 < eta < 5")
+                            latex.DrawLatex(0.45, 0.3, "BestLong, 2 < eta < 5")
                             canvas.SaveAs("BestLong_eta25_effpt.png")
 
-
-
-
-
         outputfile.Write()
         outputfile.Close()
 
-        self.saveFile("PrCheckerEfficiency_HLT2","./efficiency_hlt2_plots.root")
+        self.saveFile("PrCheckerEfficiency_HLT2",
+                      "./efficiency_hlt2_plots.root")
 
-        #send plot to EOS
+        # send plot to EOS
         wwwDirEos = os.environ.get("LHCBPR_WWW_EOS")
         if wwwDirEos == None:
-            raise Exception("No web dir on EOS defined, will not run extraction")
+            raise Exception(
+                "No web dir on EOS defined, will not run extraction")
         else:
-            time=str(startTime).replace(" +0200","").replace(" +0100","").replace(" ","_").replace("-","").replace(":","")
-            dirname="efficiency"
+            time = (str(startTime).replace(" +0200", "").replace(
+                " +0100", "").replace(" ", "_").replace("-", "").replace(
+                    ":", ""))
+            dirname = "efficiency"
             targetRootEosDir = os.path.join(wwwDirEos, dirname)
             print("targetRootEosDir: ", targetRootEosDir)
             output_png_list = ["BestLong_eta25_effpt"]
             for output_png in output_png_list:
                 try:
-                    subprocess.call(['xrdcp', '-f', output_png + '.png',
-                        targetRootEosDir+"/"+output_png+"_"+str(version)+"_"+str(platform)+"_"+time+".png"])
+                    subprocess.call([
+                        "xrdcp",
+                        "-f",
+                        output_png + ".png",
+                        targetRootEosDir + "/" + output_png + "_" +
+                        str(version) + "_" + str(platform) + "_" + time +
+                        ".png",
+                    ])
                 except Exception as ex:
-                    log.warning('Error copying to eos: %s', ex)
-                self.saveString(output_png,
-                    "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"+dirname+"/"+output_png+"_"+str(version)+"_"+str(platform)+"_"+time+".png",
+                    log.warning("Error copying to eos: %s", ex)
+                self.saveString(
+                    output_png,
+                    "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/" + dirname + "/"
+                    + output_png + "_" + str(version) + "_" + str(platform) +
+                    "_" + time + ".png",
                     description="link to " + output_png + "  plot",
-                    group="prchecker_efficiency_plots")
+                    group="prchecker_efficiency_plots",
+                )
diff --git a/handlers/PrCheckerHandler.py b/handlers/PrCheckerHandler.py
index d41d5d62a31974c761aa5c353a8a82b8db7edb80..a7c5fcac942c545265e38d526c2b736cc6a77a29 100644
--- a/handlers/PrCheckerHandler.py
+++ b/handlers/PrCheckerHandler.py
@@ -1,24 +1,24 @@
 import os, sys, re
 from .BaseHandler import BaseHandler
-
 """ PrCheckerHandler will parse the PrChecker2 output of a run.log file."""
 
-ReRecoHead   = r"PrChecker2(.+?)\*{4}\s(.+?)\s{2,}(\d+)\stracks.+?(\d+)"
-ReRecoLine   = r"PrChecker2.+?(\d{2}.+?)\s.+?(\d+).+?(\d+)\s\[.+?\]\s+(\d+).+?purity:\s*(\d+.\d+).+?hitEff:\s*(\d+.\d+)"
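+# Regexes for the PrChecker2 table in run.log: the *Head patterns match a
+# tracker's summary header (name plus track/ghost totals), the *Line patterns
+# match the individual per-category efficiency rows.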
+ReRecoHead = r"PrChecker2(.+?)\*{4}\s(.+?)\s{2,}(\d+)\stracks.+?(\d+)"
+ReRecoLine = r"PrChecker2.+?(\d{2}.+?)\s.+?(\d+).+?(\d+)\s\[.+?\]\s+(\d+).+?purity:\s*(\d+.\d+).+?hitEff:\s*(\d+.\d+)"
+
+ReTTHead = r"PrChecker2(.+?)\*{4}\s(.+?)\s\*{4}\s*(\d+)\sghost.+?(\d+.\d+)"
+ReTTLine = r"PrChecker2.+?(\d{2}.+?)[:].+?(\d+).+?(\d+.\d+).+?(\d+.\d+).+?\[.+?\].+?(\d+.\d+)\sghost"
 
-ReTTHead     = r"PrChecker2(.+?)\*{4}\s(.+?)\s\*{4}\s*(\d+)\sghost.+?(\d+.\d+)"
-ReTTLine     = r"PrChecker2.+?(\d{2}.+?)[:].+?(\d+).+?(\d+.\d+).+?(\d+.\d+).+?\[.+?\].+?(\d+.\d+)\sghost"
 
 class PrCheckerHandler(BaseHandler):
-    def __init__(self, directory=''):
+    def __init__(self, directory=""):
         super(self.__class__, self).__init__()
         self.directory = directory
         if not self.directory:
             self.directory = os.path.realpath(os.curdir)
 
     def collectResults(self, directory):
-        print(("reading", directory + os.sep+ 'run.log'))
-        file = open(directory + os.sep + 'run.log', 'r')
+        print(("reading", directory + os.sep + "run.log"))
+        file = open(directory + os.sep + "run.log", "r")
 
         # handle the cases with two header lines
         resetPrefix = True
@@ -39,26 +39,46 @@ class PrCheckerHandler(BaseHandler):
             if res:
                 groups = res.groups()
                 if resetPrefix:
-                    prefix = groups[1] + 'Fast' if (groups[1] == 'Forward' and groups[0].find('Fast') is not -1 ) else groups[1]
+                    prefix = (groups[1] + "Fast" if
+                              (groups[1] == "Forward"
+                               and groups[0].find("Fast") != -1) else
+                              groups[1])
                     resetPrefix = False
                 else:
                     prefixtmp = prefix
-                    prefix = prefix + ' ' + groups[1]
+                    prefix = prefix + " " + groups[1]
                     resetPrefix = True
 
                 if not isTT:
-                    self.saveInt(prefix + '_tracks'  , int(groups[2]), description="", group=prefix)
-                    self.saveInt(prefix + '_ghosts' , int(groups[3]), description="", group=prefix)
+                    self.saveInt(
+                        prefix + "_tracks",
+                        int(groups[2]),
+                        description="",
+                        group=prefix)
+                    self.saveInt(
+                        prefix + "_ghosts",
+                        int(groups[3]),
+                        description="",
+                        group=prefix)
                 else:
-                    self.saveInt(prefix + '_ghosts'  , int(groups[2]), description="", group=prefix)
-                    self.saveFloat(prefix + '_TT/tr' , float(groups[3]), description="", group=prefix)
+                    self.saveInt(
+                        prefix + "_ghosts",
+                        int(groups[2]),
+                        description="",
+                        group=prefix)
+                    self.saveFloat(
+                        prefix + "_TT/tr",
+                        float(groups[3]),
+                        description="",
+                        group=prefix,
+                    )
 
                 if resetPrefix:
                     prefix = prefixtmp
                 continue
 
             if not res:
-                res = re.match(ReRecoLine,i)
+                res = re.match(ReRecoLine, i)
 
             if not res:
                 isTT = True
@@ -67,23 +87,64 @@ class PrCheckerHandler(BaseHandler):
             if res:
                 resetPrefix = True
                 groups = res.groups()
-                name = prefix + '_' + groups[0]
+                name = prefix + "_" + groups[0]
 
                 if not isTT:
-                    self.saveInt(name + '_reconstructed'  , int(groups[1]), description="", group=prefix)
-                    self.saveInt(name + '_reconstrucible' , int(groups[2]), description="", group=prefix)
-                    self.saveInt(name + '_clones'         , int(groups[3]), description="", group=prefix)
-                    self.saveFloat(name + '_purity'       , float(groups[4]), description="", group=prefix)
-                    self.saveFloat(name + '_hitEff'       , float(groups[5]), description="", group=prefix)
+                    self.saveInt(
+                        name + "_reconstructed",
+                        int(groups[1]),
+                        description="",
+                        group=prefix,
+                    )
+                    self.saveInt(
+                        name + "_reconstrucible",
+                        int(groups[2]),
+                        description="",
+                        group=prefix,
+                    )
+                    self.saveInt(
+                        name + "_clones",
+                        int(groups[3]),
+                        description="",
+                        group=prefix)
+                    self.saveFloat(
+                        name + "_purity",
+                        float(groups[4]),
+                        description="",
+                        group=prefix)
+                    self.saveFloat(
+                        name + "_hitEff",
+                        float(groups[5]),
+                        description="",
+                        group=prefix)
                 else:
-                    self.saveInt(name + '_tr'                      , int(groups[1]), description="", group=prefix)
-                    self.saveFloat(name + '_reconstructed'         , float(groups[2]), description="", group=prefix)
-                    self.saveFloat(name + '_reconstrucible'        , float(groups[3]), description="", group=prefix)
-                    self.saveFloat(name + '_ghost_hits_real_track' , float(groups[4]), description="", group=prefix)
+                    self.saveInt(
+                        name + "_tr",
+                        int(groups[1]),
+                        description="",
+                        group=prefix)
+                    self.saveFloat(
+                        name + "_reconstructed",
+                        float(groups[2]),
+                        description="",
+                        group=prefix,
+                    )
+                    self.saveFloat(
+                        name + "_reconstrucible",
+                        float(groups[3]),
+                        description="",
+                        group=prefix,
+                    )
+                    self.saveFloat(
+                        name + "_ghost_hits_real_track",
+                        float(groups[4]),
+                        description="",
+                        group=prefix,
+                    )
 
-        def run(self):
-            self.collectResults(self.directory)
+    def run(self):
+        self.collectResults(self.directory)
 
-if __name__ == '__main__':
-    PrCheckerHandler().run()
 
+if __name__ == "__main__":
+    PrCheckerHandler().run()
diff --git a/handlers/PrCheckerSummaryHLT1Handler.py b/handlers/PrCheckerSummaryHLT1Handler.py
index 0f36f48cce93770a5fdc0847f310907fcc49a16e..178fd4303e4861846ed3088a7c583a799ff108c4 100644
--- a/handlers/PrCheckerSummaryHLT1Handler.py
+++ b/handlers/PrCheckerSummaryHLT1Handler.py
@@ -11,11 +11,11 @@
 # Based on: PrCheckerSummaryHLT1Handler
 # author: Miroslav Saur (miroslav.saur@cern.ch)
 # Version:
-#	   - 0.1 (2021/02): basic plots and efficiencies
+# 	   - 0.1 (2021/02): basic plots and efficiencies
 #          - 0.2 (2022/07): keeping only forward tracks, as no other are currently used
 # author: Guanyue Wan (wanguanyue@cern.ch)
 # Version:
-#	   - 0.3 (2023/04): adding other tracks, including types of `Forward, Velo, Upstream` and `ForwardUTHits`, as well as categories of `Long`, `Long_fromB`, `Long_electron`, etc.
+# 	   - 0.3 (2023/04): adding other tracks, including types of `Forward, Velo, Upstream` and `ForwardUTHits`, as well as categories of `Long`, `Long_fromB`, `Long_electron`, etc.
 #                       also complete branch names with certain hash numbers.
 
 import os
@@ -37,22 +37,21 @@ from collections import defaultdict
 
 
 def effPlotsTypes():
-    """Return required types of track
-	"""
+    """Return required types of track"""
     return ["Forward", "Velo", "Upstream", "ForwardUTHits"]
 
 
 def effPlotsDict(track_type, track_category, kinematics):
     """
-	Set the directory to required histograms in root file,
- 	the title along x&y axis of output efficiency histograms.
-
-	Args:
-		track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
-		track_category (string): Category of required tracks within `Long`, `Long_fromB`, `Long_electron` etc,
-			should be different for different `track_type`s.
-		kinematics (list): Required kinematic variables within `["Eta","Phi","nPV","P","Pt"]`
-	"""
+    Set the directory to required histograms in root file,
+    the title along x&y axis of output efficiency histograms.
+
+    Args:
+            track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
+            track_category (string): Category of required tracks within `Long`, `Long_fromB`, `Long_electron` etc,
+                    should be different for different `track_type`s.
+            kinematics (list): Required kinematic variables within `["Eta","Phi","nPV","P","Pt"]`
+    """
 
     # directory name in the root file
     # the hash numbers are manually given for certain build version
@@ -89,11 +88,11 @@ def effPlotsDict(track_type, track_category, kinematics):
 
 def getEffPlots(track_type):
     """
-	Return the kinematic variables for certain category.
+    Return the kinematic variables for certain category.
 
-	Args:
-		track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
-	"""
+    Args:
+            track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
+    """
 
     kinematics = ["Eta", "Phi", "nPV", "P", "Pt"]
     if track_type == "ForwardUTHits":
@@ -103,20 +102,27 @@ def getEffPlots(track_type):
 
 def effPlotsCategories(track_type):
     """
-	Return the required categories of different track types.
+    Return the required categories of different track types.
 
-	Args:
-		track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
-	"""
+    Args:
+            track_type (string): Type of track, within `Forward, Velo, Upstream` and `ForwardUTHits`.
+    """
     Categories = {
         "Forward": [
-            "01_long", "05_long_fromB", "07_long_electrons",
-            "08_long_fromB_electrons", "Ghosts"
+            "01_long",
+            "05_long_fromB",
+            "07_long_electrons",
+            "08_long_fromB_electrons",
+            "Ghosts",
         ],
         "ForwardUTHits":
         ["01_long", "03_long_fromB_P>3GeV_Pt>0.5GeV", "Ghosts"],
-        "Upstream":
-        ["02_velo+UT", "09_long_fromB", "12_long_fromB_electrons", "Ghosts"],
+        "Upstream": [
+            "02_velo+UT",
+            "09_long_fromB",
+            "12_long_fromB_electrons",
+            "Ghosts",
+        ],
         "Velo":
         ["01_velo", "06_long_fromB", "09_long_fromB_electrons", "Ghosts"],
     }[track_type]
@@ -128,16 +134,27 @@ class PrCheckerSummaryHLT1Handler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform,
-                          hostname, cpu_info, memoryinfo, startTime, endTime,
-                          options):
-        logfile = os.path.join(directory, 'run.log')
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        logfile = os.path.join(directory, "run.log")
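+        # recover the histogram file name from the Gaudi log line
+        # "INFO Writing ROOT histograms to: <file>"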
         rootfile = grepPattern(
-            'INFO Writing ROOT histograms to: (\S+)',
-            open(logfile, 'r', encoding='ISO-8859-1').read())
+            "INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
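+        # HashRemover rewrites the ROOT file to drop the build-specific hash
+        # suffixes from directory names, keeping histogram paths stable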
         modifier = HashRemover.HashRemover(os.path.join(directory, rootfile))
         modifier.modify_root_file()
-        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), 'read')
+        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), "read")
         outputfile = ROOT.TFile("PrCheckerSummaryHLT1.root", "recreate")
 
         from .utils.LHCbStyle import setLHCbStyle
@@ -244,4 +261,4 @@ class PrCheckerSummaryHLT1Handler(BaseHandler):
         outputfile.Write()
         outputfile.Close()
         inputfile.Close()
-        self.saveFile('PrCheckerSummaryHLT1', 'PrCheckerSummaryHLT1.root')
+        self.saveFile("PrCheckerSummaryHLT1", "PrCheckerSummaryHLT1.root")
diff --git a/handlers/PrCheckerSummaryHandler.py b/handlers/PrCheckerSummaryHandler.py
index d3d20b6a487efca55131bf27147b22292dad5310..0532edbdc4f216cbc0445089275cad3886120a93 100644
--- a/handlers/PrCheckerSummaryHandler.py
+++ b/handlers/PrCheckerSummaryHandler.py
@@ -11,10 +11,10 @@
 # Based on: PrCheckerEfficieincyHandler and _HLT2
 # author: Miroslav Saur (miroslav.saur@cern.ch) & Suzanne Klaver (suzanne.klaver@cern.ch)
 # Version:
-#	   - 0.1 (2020/01): basic plots and efficiencies as in LHCb-FIGURE-2019-
-#	   - 0.2 (2020/04): updated to work with python3
-#	   - 0.3 (2020/06): removal of basic plots, adding efficiecies for different track types
-#	   - 0.4 (2021/01); update following switching to Moore
+# 	   - 0.1 (2020/01): basic plots and efficiencies as in LHCb-FIGURE-2019-
+# 	   - 0.2 (2020/04): updated to work with python3
+# 	   - 0.3 (2020/06): removal of basic plots, adding efficiencies for different track types
+# 	   - 0.4 (2021/01): update following the switch to Moore
 #
 
 import os, sys
@@ -43,14 +43,10 @@ def effPlotsDict_long():
         "docaz": {},
     }
 
-    basedict["Pt"][
-        "path"] = "Track/BestLongTrackChecker/BestLong/01_long_Pt"
-    basedict["P"][
-        "path"] = "Track/BestLongTrackChecker/BestLong/01_long_P"
-    basedict["Eta"][
-        "path"] = "Track/BestLongTrackChecker/BestLong/01_long_Eta"
-    basedict["nPV"][
-        "path"] = "Track/BestLongTrackChecker/BestLong/01_long_nPV"
+    basedict["Pt"]["path"] = "Track/BestLongTrackChecker/BestLong/01_long_Pt"
+    basedict["P"]["path"] = "Track/BestLongTrackChecker/BestLong/01_long_P"
+    basedict["Eta"]["path"] = "Track/BestLongTrackChecker/BestLong/01_long_Eta"
+    basedict["nPV"]["path"] = "Track/BestLongTrackChecker/BestLong/01_long_nPV"
     basedict["docaz"][
         "path"] = "Track/BestLongTrackChecker/BestLong/01_long_docaz"
     basedict["Pt"]["unit"] = "pT [MeV]"
@@ -115,14 +111,10 @@ def effPlotsDict_forward():
         "docaz": {},
     }
 
-    basedict["Pt"][
-        "path"] = "Track/ForwardTrackChecker/Forward/01_long_Pt"
-    basedict["P"][
-        "path"] = "Track/ForwardTrackChecker/Forward/01_long_P"
-    basedict["Eta"][
-        "path"] = "Track/ForwardTrackChecker/Forward/01_long_Eta"
-    basedict["nPV"][
-        "path"] = "Track/ForwardTrackChecker/Forward/01_long_nPV"
+    basedict["Pt"]["path"] = "Track/ForwardTrackChecker/Forward/01_long_Pt"
+    basedict["P"]["path"] = "Track/ForwardTrackChecker/Forward/01_long_P"
+    basedict["Eta"]["path"] = "Track/ForwardTrackChecker/Forward/01_long_Eta"
+    basedict["nPV"]["path"] = "Track/ForwardTrackChecker/Forward/01_long_nPV"
     basedict["docaz"][
         "path"] = "Track/ForwardTrackChecker/Forward/01_long_docaz"
     basedict["Pt"]["unit"] = "pT [MeV]"
@@ -189,12 +181,9 @@ def effPlotsDict_ttrack():
 
     basedict["Pt"]["path"] = "Track/SeedTrackChecker/Seed/01_hasT_Pt"
     basedict["P"]["path"] = "Track/SeedTrackChecker/Seed/01_hasT_P"
-    basedict["Eta"][
-        "path"] = "Track/SeedTrackChecker/Seed/01_hasT_Eta"
-    basedict["nPV"][
-        "path"] = "Track/SeedTrackChecker/Seed/01_hasT_nPV"
-    basedict["docaz"][
-        "path"] = "Track/SeedTrackChecker/Seed/01_hasT_docaz"
+    basedict["Eta"]["path"] = "Track/SeedTrackChecker/Seed/01_hasT_Eta"
+    basedict["nPV"]["path"] = "Track/SeedTrackChecker/Seed/01_hasT_nPV"
+    basedict["docaz"]["path"] = "Track/SeedTrackChecker/Seed/01_hasT_docaz"
     basedict["Pt"]["unit"] = "pT [MeV]"
     basedict["P"]["unit"] = "p [MeV]"
     basedict["Eta"]["unit"] = "#eta [-]"
@@ -216,13 +205,24 @@ class PrCheckerSummaryHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform,
-                          hostname, cpu_info, memoryinfo, startTime, endTime,
-                          options):
-        logfile = os.path.join(directory, 'run.log')
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        logfile = os.path.join(directory, "run.log")
         rootfile = grepPattern(
-            'INFO Writing ROOT histograms to: (\S+)',
-            open(logfile, 'r', encoding='ISO-8859-1').read())
+            "INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
         """
                 # This should be modified to at first check if required files are available, commenting out for now
 		# combining the root files produces from old style and new style histograms
@@ -238,7 +238,7 @@ class PrCheckerSummaryHandler(BaseHandler):
                 """
         modifier = HashRemover.HashRemover(os.path.join(directory, rootfile))
         modifier.modify_root_file()
-        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), 'read')
+        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), "read")
         outputfile = ROOT.TFile("PrCheckerSummary.root", "recreate")
 
         from .utils.LHCbStyle import setLHCbStyle
@@ -250,11 +250,11 @@ class PrCheckerSummaryHandler(BaseHandler):
         latex.SetNDC()
         latex.SetTextSize(0.03)
 
-        outputfile.mkdir('long')
-        outputfile.mkdir('down')
-        outputfile.mkdir('forward')
-        outputfile.mkdir('velo')
-        outputfile.mkdir('ttrack')
+        outputfile.mkdir("long")
+        outputfile.mkdir("down")
+        outputfile.mkdir("forward")
+        outputfile.mkdir("velo")
+        outputfile.mkdir("ttrack")
 
         effPlots_long = getEffPlots_long()
         eff_dict_long = effPlotsDict_long()
@@ -450,4 +450,4 @@ class PrCheckerSummaryHandler(BaseHandler):
         outputfile.Write()
         outputfile.Close()
         inputfile.Close()
-        self.saveFile('PrCheckerSummary', 'PrCheckerSummary.root')
+        self.saveFile("PrCheckerSummary", "PrCheckerSummary.root")
diff --git a/handlers/PrCheckerSummaryHandler_withoutUT.py b/handlers/PrCheckerSummaryHandler_withoutUT.py
index 08ba71f8f045fafff10420b53a5274acd991fdee..7984c24030f3a05d58afcd7770119a5aac03d2aa 100644
--- a/handlers/PrCheckerSummaryHandler_withoutUT.py
+++ b/handlers/PrCheckerSummaryHandler_withoutUT.py
@@ -11,7 +11,7 @@
 # Based on: PrCheckerSummaryhandler
 # author: Miroslav Saur (miroslav.saur@cern.ch) & Suzanne Klaver (suzanne.klaver@cern.ch)
 # Version:
-#	   - 0.1 (2021/12): version of PrCheckerSummaryhandler without downstream and forward tracks
+# 	   - 0.1 (2021/12): version of PrCheckerSummaryhandler without downstream and forward tracks
 
 import os, sys
 import argparse
@@ -133,13 +133,24 @@ class PrCheckerSummaryHandler_withoutUT(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version, platform,
-                          hostname, cpu_info, memoryinfo, startTime, endTime,
-                          options):
-        logfile = os.path.join(directory, 'run.log')
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
+        logfile = os.path.join(directory, "run.log")
         rootfile = grepPattern(
-            'INFO Writing ROOT histograms to: (\S+)',
-            open(logfile, 'r', encoding='ISO-8859-1').read())
+            "INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
         """
         # This should be modified to at first check if required files are available, commenting out for now
         # combining the root files produces from old style and new style histograms
@@ -156,7 +167,7 @@ class PrCheckerSummaryHandler_withoutUT(BaseHandler):
 
         modifier = HashRemover.HashRemover(os.path.join(directory, rootfile))
         modifier.modify_root_file()
-        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), 'read')
+        inputfile = ROOT.TFile.Open(os.path.join(directory, rootfile), "read")
         outputfile = ROOT.TFile("PrCheckerSummary.root", "recreate")
 
         from .utils.LHCbStyle import setLHCbStyle
@@ -168,9 +179,9 @@ class PrCheckerSummaryHandler_withoutUT(BaseHandler):
         latex.SetNDC()
         latex.SetTextSize(0.03)
 
-        outputfile.mkdir('long')
-        outputfile.mkdir('velo')
-        outputfile.mkdir('ttrack')
+        outputfile.mkdir("long")
+        outputfile.mkdir("velo")
+        outputfile.mkdir("ttrack")
 
         effPlots_long = getEffPlots_long()
         eff_dict_long = effPlotsDict_long()
@@ -290,4 +301,4 @@ class PrCheckerSummaryHandler_withoutUT(BaseHandler):
         outputfile.Write()
         outputfile.Close()
         inputfile.Close()
-        self.saveFile('PrCheckerSummary', 'PrCheckerSummary.root')
+        self.saveFile("PrCheckerSummary", "PrCheckerSummary.root")
diff --git a/handlers/PrmonHandler.py b/handlers/PrmonHandler.py
index 3204a3eefb616b341547878648ea8e82676e943f..15a89ee9d74714a9e1bb7dda64e22b50ada187db 100644
--- a/handlers/PrmonHandler.py
+++ b/handlers/PrmonHandler.py
@@ -3,55 +3,77 @@ import json
 import subprocess
 from .BaseHandler import BaseHandler
 
-class PrmonHandler(BaseHandler):
 
+class PrmonHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        self.saveFile('prmon.json' , os.path.join(directory,'prmon.json') )
+        self.saveFile("prmon.json", os.path.join(directory, "prmon.json"))
 
-        with open(os.path.join(directory,'prmon.json')) as infile:
-            prmon_values=json.load(infile)
-            self.saveInt("vmem_max",
-                         prmon_values['Max']['vmem'],
-                         description="maximum of virtual memory in kb",
-                         group="vmem")
-            self.saveInt("vmem_avg",
-                         prmon_values['Avg']['vmem'],
-                         description="average of virtual memory in kb",
-                         group="vmem")
-            self.saveInt("rss_max",
-                         prmon_values['Max']['rss'],
-                         description="maximum of resident set size in kb",
-                         group="rss")
-            self.saveInt("rss_avg",
-                         prmon_values['Avg']['rss'],
-                         description="average of resident set size in kb",
-                         group="rss")
-            self.saveInt("pss_max",
-                         prmon_values['Max']['pss'],
-                         description="maximum of proportional set size in kb",
-                         group="pss")
-            self.saveInt("pss_avg",
-                         prmon_values['Avg']['pss'],
-                         description="average of proportional set size in kb",
-                         group="pss")
+        with open(os.path.join(directory, "prmon.json")) as infile:
+            prmon_values = json.load(infile)
+            self.saveInt(
+                "vmem_max",
+                prmon_values["Max"]["vmem"],
+                description="maximum of virtual memory in kb",
+                group="vmem",
+            )
+            self.saveInt(
+                "vmem_avg",
+                prmon_values["Avg"]["vmem"],
+                description="average of virtual memory in kb",
+                group="vmem",
+            )
+            self.saveInt(
+                "rss_max",
+                prmon_values["Max"]["rss"],
+                description="maximum of resident set size in kb",
+                group="rss",
+            )
+            self.saveInt(
+                "rss_avg",
+                prmon_values["Avg"]["rss"],
+                description="average of resident set size in kb",
+                group="rss",
+            )
+            self.saveInt(
+                "pss_max",
+                prmon_values["Max"]["pss"],
+                description="maximum of proportional set size in kb",
+                group="pss",
+            )
+            self.saveInt(
+                "pss_avg",
+                prmon_values["Avg"]["pss"],
+                description="average of proportional set size in kb",
+                group="pss",
+            )
 
-        subprocess.call("prmon_plot.py --input  "\
-                        + os.path.join(directory,"prmon.txt")\
-                        + " --xvar wtime --yvar vmem,pss,rss,swap",
-                        shell=True)
-        self.saveFile('PrMon_wtime_vs_vmem_pss_rss_swap.png' , 'PrMon_wtime_vs_vmem_pss_rss_swap.png' )
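+        # prmon_plot.py names its output PNG after the chosen axes, e.g.
+        # PrMon_wtime_vs_vmem_pss_rss_swap.png for this call.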
+        subprocess.call(
+            "prmon_plot.py --input  " + os.path.join(directory, "prmon.txt") +
+            " --xvar wtime --yvar vmem,pss,rss,swap",
+            shell=True,
+        )
+        self.saveFile(
+            "PrMon_wtime_vs_vmem_pss_rss_swap.png",
+            "PrMon_wtime_vs_vmem_pss_rss_swap.png",
+        )
 
-        subprocess.call("prmon_plot.py --input  "\
-                        + os.path.join(directory,"prmon.txt")\
-                        + " --xvar wtime --yvar vmem,pss,rss,swap --diff",
-                        shell=True)
-        self.saveFile('PrMon_wtime_vs_diff_vmem_pss_rss_swap.png' , 'PrMon_wtime_vs_diff_vmem_pss_rss_swap.png' )
+        subprocess.call(
+            "prmon_plot.py --input  " + os.path.join(directory, "prmon.txt") +
+            " --xvar wtime --yvar vmem,pss,rss,swap --diff",
+            shell=True,
+        )
+        self.saveFile(
+            "PrMon_wtime_vs_diff_vmem_pss_rss_swap.png",
+            "PrMon_wtime_vs_diff_vmem_pss_rss_swap.png",
+        )
 
-        subprocess.call("prmon_plot.py --input  "\
-                        + os.path.join(directory,"prmon.txt")\
-                        + " --xvar wtime --yvar utime,stime --diff --stacked",
-                        shell=True)
-        self.saveFile('PrMon_wtime_vs_diff_utime_stime.png' , 'PrMon_wtime_vs_diff_utime_stime.png' )
+        subprocess.call(
+            "prmon_plot.py --input  " + os.path.join(directory, "prmon.txt") +
+            " --xvar wtime --yvar utime,stime --diff --stacked",
+            shell=True,
+        )
+        self.saveFile("PrMon_wtime_vs_diff_utime_stime.png",
+                      "PrMon_wtime_vs_diff_utime_stime.png")
diff --git a/handlers/RadLengthHandler.py b/handlers/RadLengthHandler.py
index 9dba474f06d0935cfd16d39d6727251ee3e07d22..37f4e618cc927423dae205c8218e72804a069eee 100644
--- a/handlers/RadLengthHandler.py
+++ b/handlers/RadLengthHandler.py
@@ -12,13 +12,25 @@ import json
 from .BaseHandler import BaseHandler
 import logging
 
-class RadLengthHandler(BaseHandler):	
-    def __init__(self, debug='INFO'):
-        super(self.__class__,self).__init__()
+
+class RadLengthHandler(BaseHandler):
+    def __init__(self, debug="INFO"):
+        super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        _rad_length_detec_scan_dir = ('RadLengthDetectorScan', os.path.join(directory, 'RadLengthDetectorScan/root_files/RadLengthSubDetectorPlots.root'))
-        _rad_length_velo_scan_dir  = ('RadLengthVeloScan', os.path.join(directory, 'RadLengthVeloScan/root_files/RadLengthVeloScan.root')) 
+        _rad_length_detec_scan_dir = (
+            "RadLengthDetectorScan",
+            os.path.join(
+                directory,
+                "RadLengthDetectorScan/root_files/RadLengthSubDetectorPlots.root",
+            ),
+        )
+        _rad_length_velo_scan_dir = (
+            "RadLengthVeloScan",
+            os.path.join(
+                directory,
+                "RadLengthVeloScan/root_files/RadLengthVeloScan.root"),
+        )
 
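+        # Each tuple is (name, path); saveFile(*file_) registers each scan's
+        # ROOT output under that name.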
         for file_ in [_rad_length_detec_scan_dir, _rad_length_velo_scan_dir]:
             self.saveFile(*file_)
diff --git a/handlers/RootFilesHandler.py b/handlers/RootFilesHandler.py
index 327b9ccb9afff693af8bb178755811bad531c510..d9d496090a65ac30b695579e23ba98db9e598cca 100644
--- a/handlers/RootFilesHandler.py
+++ b/handlers/RootFilesHandler.py
@@ -7,27 +7,23 @@ from .BaseHandler import BaseHandler
 
 
 class RootFilesHandler(BaseHandler):
-
-    """ LHCbPR Handler for Geant standalone tests.
-          SetupProject --nightly lhcb-gauss-def Geant4 Head (--build-env)
-          getpack Geant/G4examples
-          make
-          hadronis_tests
+    """LHCbPR Handler for Geant standalone tests.
+    SetupProject --nightly lhcb-gauss-def Geant4 Head (--build-env)
+    getpack Geant/G4examples
+    make
+    hadronis_tests
     """
 
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def collectResults(self, directory):
-        """ Collect  results """
+        """Collect  results"""
 
         # Files
-        exts = ['*.root']
+        exts = ["*.root"]
         for root, _, files in os.walk(directory):
             for file in files:
                 for ext in exts:
                     if fnmatch.fnmatch(file, ext):
-                        self.saveFile(
-                            file,
-                            os.path.join(root, file)
-                        )
+                        self.saveFile(file, os.path.join(root, file))
diff --git a/handlers/StrippingTimingHandler.py b/handlers/StrippingTimingHandler.py
index 841a4e6d336253b4f230da2e0ffbd2fa8119acb2..9ac838111793bb05726e3ad10cc531f3da865d7c 100644
--- a/handlers/StrippingTimingHandler.py
+++ b/handlers/StrippingTimingHandler.py
@@ -3,25 +3,24 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class StrippingTimingHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-
+    def collectResults(self, directory):
         # Parsing the log
         from .timing.TimingParser import TimingParser
-        tp = TimingParser(os.path.join(directory,'run.log'))
+
+        tp = TimingParser(os.path.join(directory, "run.log"))
 
         # Collecting the interesting nodes
         nodelist = set()
         eventLoop = tp.getRoot()
         nodelist.add(eventLoop)
 
-        
         # Looking for all the nodes which name finishes with line
         foundnodes = eventLoop.getNodesMatching(".*Line$")
 
@@ -29,19 +28,22 @@ class StrippingTimingHandler(BaseHandler):
         for n in foundnodes:
             nodelist.add(n)
             nodelist |= n.getParentNodes()
-        
 
-        #eventLoop.printChildrenList(8)
+        # eventLoop.printChildrenList(8)
 
         # Now saving the results
         for node in nodelist:
-            self.saveFloat(node.name, node.value, "Time per Ev. [ms]", "Timing")
-            self.saveInt(node.name + "_count", node.entries, group="TimingCount")
+            self.saveFloat(node.name, node.value, "Time per Ev. [ms]",
+                           "Timing")
+            self.saveInt(
+                node.name + "_count", node.entries, group="TimingCount")
             self.saveInt(node.name + "_rank", node.rank, group="TimingRank")
             if node.parent != None:
-                self.saveString(node.name + "_parent", node.parent.name, group="TimingTree")
+                self.saveString(
+                    node.name + "_parent",
+                    node.parent.name,
+                    group="TimingTree")
             else:
-                self.saveString(node.name + "_parent", "None", group="TimingTree")
+                self.saveString(
+                    node.name + "_parent", "None", group="TimingTree")
             self.saveInt(node.name + "_id", node.id, group="TimingID")
-
-
diff --git a/handlers/TargetTestHandler.py b/handlers/TargetTestHandler.py
index 4ec1b44671b54b8c3e349ac3668703cb724f5486..f25b9a4db0daf17884ccb5c9f26b2c4bf498efce 100644
--- a/handlers/TargetTestHandler.py
+++ b/handlers/TargetTestHandler.py
@@ -6,102 +6,132 @@ from .BaseHandler import BaseHandler
 
 
 class TargetTestHandler(BaseHandler):
-
     # init is implicit
 
-    def _create_comp_plot(self, name, plots_file, components, out_file, titles):
-         plots_file = rt.TFile.Open(plots_file)
-         out_file   = rt.TFile.Open(out_file, 'UPDATE')
-         canvas = rt.TCanvas(name, name, 800,600)
-         legend = rt.TLegend(0.1,0.7,0.48,0.9)
-         first = True
-         color_list = [rt.kRed, rt.kGreen, rt.kBlue, rt.kViolet]
-         style_list = [1, 7, 1, 7]
+    def _create_comp_plot(self, name, plots_file, components, out_file,
+                          titles):
+        plots_file = rt.TFile.Open(plots_file)
+        out_file = rt.TFile.Open(out_file, "UPDATE")
+        canvas = rt.TCanvas(name, name, 800, 600)
+        legend = rt.TLegend(0.1, 0.7, 0.48, 0.9)
+        first = True
+        color_list = [rt.kRed, rt.kGreen, rt.kBlue, rt.kViolet]
+        style_list = [1, 7, 1, 7]
 
-         for plot, color, title, style in zip(components, color_list, titles, style_list):
-             plot = plots_file.Get(plot)
-             plot.SetLineColor(color)
-             plot.SetLineStyle(style)
-             plot.SetMarkerColor(color)
-             plot.SetMarkerStyle(20)
-             legend.AddEntry(plot, title, "lp")
-             try: 
-                 if first:
-                     plot.Draw("ALP")
-                     first = False
-                 else:
-                     plot.Draw("LP SAME")
-                     canvas.Update()
-             except:
-                 raise AssertionError
-         legend.Draw()
-         canvas.Update()
-         canvas.Write()
-         plots_file.Close()
-         out_file.Close()
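+        # Overlay every component on one canvas: the first graph is drawn with
+        # axes ("ALP"), the rest are drawn on top ("LP SAME").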
+        for plot, color, title, style in zip(components, color_list, titles,
+                                             style_list):
+            plot = plots_file.Get(plot)
+            plot.SetLineColor(color)
+            plot.SetLineStyle(style)
+            plot.SetMarkerColor(color)
+            plot.SetMarkerStyle(20)
+            legend.AddEntry(plot, title, "lp")
+            try:
+                if first:
+                    plot.Draw("ALP")
+                    first = False
+                else:
+                    plot.Draw("LP SAME")
+                    canvas.Update()
+            except Exception as exc:
+                raise AssertionError(
+                    "failed to draw component plot %s" % name) from exc
+        legend.Draw()
+        canvas.Update()
+        canvas.Write()
+        plots_file.Close()
+        out_file.Close()
 
-    def _make_desc_string(self, name, out_file, plot_type, thickness, mat, part):
-        out_file = rt.TFile.Open(out_file, 'UPDATE')
+    def _make_desc_string(self, name, out_file, plot_type, thickness, mat,
+                          part):
+        out_file = rt.TFile.Open(out_file, "UPDATE")
         _root_str = rt.TNamed()
-        _root_str.SetName(name+'__description')
-        _root_str.SetTitle('Comparison of {} for {} fired at various energies into a {}mm target of {}'+
-                            ' using different physics lists in Geant4.'.format(plot_type, part, thickness, mat))
+        _root_str.SetName(name + "__description")
+        _root_str.SetTitle(
+            ("Comparison of {} for {} fired at various energies into a {}mm "
+             "target of {} using different physics lists in Geant4.").format(
+                 plot_type, part, thickness, mat))
         _root_str.Write()
         out_file.Close()
 
     def make_comp_plots(self, plots_file, output_file):
-        gr_1_template='InElastic_CrossSection_Thickness-10mm_Mat-Al_Mod-{}_PGun-{}'
-        gr_1_name = 'InElastic_XSec_Kaon_10mm_Al'
-        self._create_comp_plot( gr_1_name                                     , 
-                                plots_file                                    , 
-                                [ gr_1_template.format('QGSP_BERT', 'Kplus') ,
-                                  gr_1_template.format('QGSP_BERT', 'Kminus'), 
-                                  gr_1_template.format('FTFP_BERT', 'Kplus') , 
-                                  gr_1_template.format('FTFP_BERT', 'Kminus')], 
-                                output_file                                   , 
-                                [ 'K^{+} QGSP_BERT',
-                                  'K^{-} QGSP_BERT',
-                                  'K^{+} FTFP_BERT',
-                                  'K^{-} FTFP_BERT']                          )
+        gr_1_template = "InElastic_CrossSection_Thickness-10mm_Mat-Al_Mod-{}_PGun-{}"
+        gr_1_name = "InElastic_XSec_Kaon_10mm_Al"
+        self._create_comp_plot(
+            gr_1_name,
+            plots_file,
+            [
+                gr_1_template.format("QGSP_BERT", "Kplus"),
+                gr_1_template.format("QGSP_BERT", "Kminus"),
+                gr_1_template.format("FTFP_BERT", "Kplus"),
+                gr_1_template.format("FTFP_BERT", "Kminus"),
+            ],
+            output_file,
+            [
+                "K^{+} QGSP_BERT",
+                "K^{-} QGSP_BERT",
+                "K^{+} FTFP_BERT",
+                "K^{-} FTFP_BERT",
+            ],
+        )
+
+        self._make_desc_string(gr_1_name, output_file,
+                               "inelastic cross sections", 10, "aluminium",
+                               "kaons")
 
-        self._make_desc_string( gr_1_name, output_file, 'inelastic cross sections', 10, 'aluminium', 'kaons')
-        
-        gr_2_template='Multiplicity_Thickness-10mm_Mat-Be_Mod-{}_PGun-{}'
-        gr_2_name = 'Multiplicity_Proton_10mm_Be'
-        self._create_comp_plot( gr_2_name                                    , 
-                                plots_file                                   , 
-                                [ gr_2_template.format('QGSP_BERT', 'p')   ,
-                                  gr_2_template.format('QGSP_BERT', 'pbar'), 
-                                  gr_2_template.format('FTFP_BERT', 'p')   , 
-                                  gr_2_template.format('FTFP_BERT', 'pbar')] ,
-                                output_file                                  ,
-                                [ 'p^{+} QGSP_BERT',
-                                  'p^{-} QGSP_BERT',
-                                  'p^{+} FTFP_BERT',
-                                  'p^{-} FTFP_BERT']                         )
+        gr_2_template = "Multiplicity_Thickness-10mm_Mat-Be_Mod-{}_PGun-{}"
+        gr_2_name = "Multiplicity_Proton_10mm_Be"
+        self._create_comp_plot(
+            gr_2_name,
+            plots_file,
+            [
+                gr_2_template.format("QGSP_BERT", "p"),
+                gr_2_template.format("QGSP_BERT", "pbar"),
+                gr_2_template.format("FTFP_BERT", "p"),
+                gr_2_template.format("FTFP_BERT", "pbar"),
+            ],
+            output_file,
+            [
+                "p^{+} QGSP_BERT",
+                "p^{-} QGSP_BERT",
+                "p^{+} FTFP_BERT",
+                "p^{-} FTFP_BERT",
+            ],
+        )
 
-        self._make_desc_string( gr_2_name, output_file, 'multiplicities', 10, 'beryllium', 'protons')
+        self._make_desc_string(gr_2_name, output_file, "multiplicities", 10,
+                               "beryllium", "protons")
 
-        gr_3_template='Elastic_CrossSection_Thickness-10mm_Mat-Si_Mod-{}_PGun-{}'
-        gr_3_name = 'Elastic_XSec_Pion_10mm_Si' 
-        self._create_comp_plot( gr_3_name                                       , 
-                                plots_file                                      , 
-                                [ gr_2_template.format('QGSP_BERT', 'Piplus') ,
-                                  gr_2_template.format('QGSP_BERT', 'Piminus'), 
-                                  gr_2_template.format('FTFP_BERT', 'Piplus') , 
-                                  gr_2_template.format('FTFP_BERT', 'Piminus')] ,
-                                  output_file                                   , 
-                                  [ '#pi^{+} QGSP_BERT',
-                                    '#pi^{-} QGSP_BERT',
-                                    '#pi^{+} FTFP_BERT',
-                                    '#pi^{-} FTFP_BERT']                        )
+        gr_3_template = "Elastic_CrossSection_Thickness-10mm_Mat-Si_Mod-{}_PGun-{}"
+        gr_3_name = "Elastic_XSec_Pion_10mm_Si"
+        self._create_comp_plot(
+            gr_3_name,
+            plots_file,
+            [
+                gr_2_template.format("QGSP_BERT", "Piplus"),
+                gr_2_template.format("QGSP_BERT", "Piminus"),
+                gr_2_template.format("FTFP_BERT", "Piplus"),
+                gr_2_template.format("FTFP_BERT", "Piminus"),
+            ],
+            output_file,
+            [
+                "#pi^{+} QGSP_BERT",
+                "#pi^{-} QGSP_BERT",
+                "#pi^{+} FTFP_BERT",
+                "#pi^{-} FTFP_BERT",
+            ],
+        )
 
-        self._make_desc_string( gr_3_name, output_file, 'elastic cross sections', 10, 'silicon', 'pions')
-        
+        self._make_desc_string(gr_3_name, output_file,
+                               "elastic cross sections", 10, "silicon",
+                               "pions")
 
     def collectResults(self, directory):
-        plots_file = os.path.join(directory, 'TargetOutput', 'ROOTGraphs', 'TargetTestAllPlots.root')
-        comp_file  = os.path.join(directory, 'TargetOutput', 'ROOTGraphs', 'TargetTestCompPlots.root')
+        plots_file = os.path.join(directory, "TargetOutput", "ROOTGraphs",
+                                  "TargetTestAllPlots.root")
+        comp_file = os.path.join(directory, "TargetOutput", "ROOTGraphs",
+                                 "TargetTestCompPlots.root")
         self.make_comp_plots(plots_file, comp_file)
-        self.saveFile('TargetTestAllPlots.root', plots_file) #All plots now contained in single file
-        self.saveFile('TargetTestCompPlots.root', comp_file) #All plots now contained in single file
+        self.saveFile("TargetTestAllPlots.root",
+                      plots_file)  # All plots now contained in single file
+        self.saveFile("TargetTestCompPlots.root",
+                      comp_file)  # All plots now contained in single file
diff --git a/handlers/ThroughputProfileHandler.py b/handlers/ThroughputProfileHandler.py
index 5771133c60a482a143dc616e04d289ead3b4beb4..c360ae7f9fbe621d62dacfbfaff6354668c5908c 100644
--- a/handlers/ThroughputProfileHandler.py
+++ b/handlers/ThroughputProfileHandler.py
@@ -13,8 +13,7 @@ log = logging.getLogger(__name__)
 
 WWW_BASE_URL = "https://cern.ch/lhcbpr-hlt/PerfTests/UpgradeThroughput"
 
-REPORT_TEMPLATE = jinja2.Template(
-    """
+REPORT_TEMPLATE = jinja2.Template("""
 <html>
 <head></head>
 <body>
@@ -57,13 +56,14 @@ REPORT_TEMPLATE = jinja2.Template(
 </ul>
 </body>
 </html>
-"""
-)
+""")
 
 
 def get_throughput(file, pattern=r"Evts\/s = ([\d.]+)"):
     with open(file) as f:
-        values = [float(match.group(1)) for match in re.finditer(pattern, f.read())]
+        values = [
+            float(match.group(1)) for match in re.finditer(pattern, f.read())
+        ]
     if not values:
         log.warning(f"No throughput match for {pattern} in {file}.")
         return 0
@@ -72,9 +72,12 @@ def get_throughput(file, pattern=r"Evts\/s = ([\d.]+)"):
         return 0
     return values[0]
 
+
 def get_bandwidth(file, pattern=r"MB\/s = ([\d.]+)"):
     with open(file) as f:
-        values = [float(match.group(1)) for match in re.finditer(pattern, f.read())]
+        values = [
+            float(match.group(1)) for match in re.finditer(pattern, f.read())
+        ]
     if not values:
         log.warning(f"No bandwidth match for {pattern} in {file}.")
         return 0
@@ -83,6 +86,7 @@ def get_bandwidth(file, pattern=r"MB\/s = ([\d.]+)"):
         return 0
     return values[0]
 
+
 def get_couchdb_throughput_link(slot, build_id, options):
     """Get throughput value and report page URL from CouchDB."""
     # WARNING the gymnastics below of obtaining the throughput
@@ -96,9 +100,12 @@ def get_couchdb_throughput_link(slot, build_id, options):
     # get path to the run.log file on EOS, and get rid of the https stuff in the front
     # since eos is mounted on lbhltperf01 so we can simply open it via absolute path
     # Then use that path to read the throughput from the run.log file
-    throughput =  get_throughput(test['run_log'][28:])
+    throughput = get_throughput(test["run_log"][28:])
     # TODO should we handle the url differently?
-    return throughput, test.get("lhcbpr_url", f"https://Failed_to_retrieve_lhcbpr_link_for_{slot}.{build_id}")
+    return throughput, test.get(
+        "lhcbpr_url",
+        f"https://Failed_to_retrieve_lhcbpr_link_for_{slot}.{build_id}")
+
 
 def get_couchdb_bandwidth(slot, build_id, options):
     """Get bandwidth value from CouchDB."""
@@ -111,35 +118,37 @@ def get_couchdb_bandwidth(slot, build_id, options):
     # get path to the run.log file on EOS, and get rid of the https stuff in the front
     # since eos is mounted on lbhltperf01 so we can simply open it via absolute path
     # Then use that path to read the throughput from the run.log file
-    bandwidth = get_bandwidth(test['run_log'][28:])
+    bandwidth = get_bandwidth(test["run_log"][28:])
 
     return bandwidth
 
 
 def send_gitlab_feedback(
-    new_throughput,
-    ref_throughput,
-    new_bandwidth,
-    ref_bandwidth,
-    options,
-    web_link,
-    ref_web_link,
-    trigger_source,
+        new_throughput,
+        ref_throughput,
+        new_bandwidth,
+        ref_bandwidth,
+        options,
+        web_link,
+        ref_web_link,
+        trigger_source,
 ):
     throughput_change = (new_throughput - ref_throughput) / ref_throughput
 
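+    # tol is the relative throughput change beyond which labels are applied:
+    # 0.5% for hlt1, 2.5% for sprucing, 1% for hlt2.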
     if "hlt1" in options:
         tol = 0.005
         prefix = "hlt1"
-    elif 'spruce' in options:
+    elif "spruce" in options:
         tol = 0.025
-        prefix = 'spruce'
+        prefix = "spruce"
     else:
         tol = 0.01
         prefix = "hlt2"
 
     add_labels = []
-    remove_labels = [prefix + "-throughput-increased", prefix + "-throughput-decreased"]
+    remove_labels = [
+        prefix + "-throughput-increased", prefix + "-throughput-decreased"
+    ]
     thumb = ""
 
     if throughput_change > tol:
@@ -153,15 +162,13 @@ def send_gitlab_feedback(
     message = (
         f"Throughput Test [{options}]({web_link}): "
         f"{new_throughput:.1f} Events/s -- change of {throughput_change:.2%} "
-        f"vs. [reference]({ref_web_link}) {thumb}"
-    )
+        f"vs. [reference]({ref_web_link}) {thumb}")
     # for Sprucing test, add extra info of bandwidth
-    if options in ['Moore_spruce_all_lines']:
+    if options in ["Moore_spruce_all_lines"]:
         bandwidth_change = (new_bandwidth - ref_bandwidth) / ref_bandwidth
         message += (
             f".\n Total bandwidth {new_bandwidth:.3g} GB/s -- change of {bandwidth_change:.2%} "
-            f"vs. [reference]({ref_web_link})"
-        )
+            f"vs. [reference]({ref_web_link})")
 
     # we only want to actually apply labels based on these two
     # each represents the current baseline for hlt1 and hlt2
@@ -182,19 +189,18 @@ class ThroughputProfileHandler(BaseHandler):
         super().__init__()
 
     def collectResultsExt(
-        self,
-        directory,
-        project,
-        version,
-        platform,
-        hostname,
-        cpu_info,
-        memoryinfo,
-        startTime,
-        endTime,
-        options,
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
     ):
-
         try:
             slot, build_id = version.split(".")
             build_id = int(build_id)
@@ -203,12 +209,12 @@ class ThroughputProfileHandler(BaseHandler):
 
         # grab the correct files to get the throughput
         log_files = [
-            os.path.join(directory, f)
-            for f in os.listdir(directory)
+            os.path.join(directory, f) for f in os.listdir(directory)
             if f.endswith(".log")
         ]
 
-        throughput = sum(get_throughput(f) for f in log_files if "ThroughputTest" in f)
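+        # Total throughput is summed over every ThroughputTest log found in
+        # the run directory.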
+        throughput = sum(
+            get_throughput(f) for f in log_files if "ThroughputTest" in f)
         str_tput = "{:.1f}".format(throughput)
         self.saveFloat(
             "max_throughput",
@@ -219,13 +225,13 @@ class ThroughputProfileHandler(BaseHandler):
 
         # measure the total bandwidth
         # only for sprucing for now
-        measure_bandwidth = options in ['Moore_spruce_all_lines']
+        measure_bandwidth = options in ["Moore_spruce_all_lines"]
         if measure_bandwidth:
             run_log = os.path.join(directory, "run.log")
             if not os.path.isfile(run_log):
-                log.warning('There is no run.log!')
+                log.warning("There is no run.log!")
                 measure_bandwidth = False
-        bandwidth = get_bandwidth(run_log) / 1e3 if measure_bandwidth else 0.
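+        # run.log reports bandwidth in MB/s; convert it to GB/s here.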
+        bandwidth = get_bandwidth(run_log) / 1e3 if measure_bandwidth else 0.0
 
         dirname = (
             f"Throughput_{version}_{options}_{platform}_{startTime.replace(' ', '_')}"
@@ -235,14 +241,14 @@ class ThroughputProfileHandler(BaseHandler):
         # concatenate log files into one file
         with open("tests.log", "w") as outfile:
             for fname in log_files:
-                outfile.write(
-                    "\n{sep}\n{fname}\n{sep}\n\n".format(sep="=" * 80, fname=fname)
-                )
+                outfile.write("\n{sep}\n{fname}\n{sep}\n\n".format(
+                    sep="=" * 80, fname=fname))
                 with open(fname) as infile:
                     for line in infile:
                         outfile.write(line)
 
-        trend_url = os.path.join(WWW_BASE_URL, f"trend_throughput_{options}_{slot}.png")
+        trend_url = os.path.join(WWW_BASE_URL,
+                                 f"trend_throughput_{options}_{slot}.png")
         request = requests.get(trend_url)
         if request.status_code != 200:
             trend_url = None
@@ -263,12 +269,12 @@ class ThroughputProfileHandler(BaseHandler):
             log.debug("Generated HTML report:\n" + html)
 
         for filename in [
-            os.path.join(directory, "flamy.svg"),
-            os.path.join(directory, "flamy.svg"),
-            os.path.join(directory, "FlameBars.pdf"),
-            os.path.join(directory, "FlameBars.png"),
-            "index.html",
-            "tests.log",
+                os.path.join(directory, "flamy.svg"),
+                os.path.join(directory, "flamy.svg"),
+                os.path.join(directory, "FlameBars.pdf"),
+                os.path.join(directory, "FlameBars.png"),
+                "index.html",
+                "tests.log",
         ]:
             publish.upload_eos_www(
                 filename,
@@ -283,33 +289,30 @@ class ThroughputProfileHandler(BaseHandler):
         )
 
         # send notification on mattermost channel
-        cpu_model = cpu_info.split(" @")[0].replace("(R)", "").replace(" ", "-")
+        cpu_model = cpu_info.split(" @")[0].replace("(R)", "").replace(
+            " ", "-")
         mattermost_message = (
             "The results of latest throughput test "
             f"[{options} {version} {platform} {cpu_model}]({targetRootWebDir}):\n"
-            f"`Throughput = {str_tput} Events/s`"
-        )
+            f"`Throughput = {str_tput} Events/s`")
         if measure_bandwidth:
-            mattermost_message += (
-                f", `Bandwidth = {bandwidth:.3g} GB/s`"
-            )
+            mattermost_message += f", `Bandwidth = {bandwidth:.3g} GB/s`"
         publish.post_mattermost(mattermost_message)
         # let's post a reply to gitlab about the throughput test result
-        if (slot in ["lhcb-master-mr", "lhcb-master-ref", "lhcb-master"]) and (
-            options
-            in [
-                "Moore_hlt1_pp_default",
-                "Moore_hlt2_reco_baseline",
-                "Moore_hlt2_fastest_reco",
-                "Moore_hlt2_pp_thor",
-                "Moore_spruce_all_lines",
-            ]
-        ):
+        if (slot in ["lhcb-master-mr", "lhcb-master-ref", "lhcb-master"
+                     ]) and (options in [
+                         "Moore_hlt1_pp_default",
+                         "Moore_hlt2_reco_baseline",
+                         "Moore_hlt2_fastest_reco",
+                         "Moore_hlt2_pp_thor",
+                         "Moore_spruce_all_lines",
+                     ]):
             # The feedback needs to compare the results from the reference (*-ref or master)
             # and the -mr builds. We don't know which completes first,
             # so we must try both cases.
             # For a better treatment in the future, see LBCORE-1984
-            for ref, test, trigger in dashboard.get_ci_test_pairs(slot, build_id):
+            for ref, test, trigger in dashboard.get_ci_test_pairs(
+                    slot, build_id):
                 try:
                     if test == (slot, build_id):
                         # The handler runs for the -mr build, so fetch the -ref results
@@ -317,26 +320,24 @@ class ThroughputProfileHandler(BaseHandler):
                         web_link = targetRootWebDir
                         new_bandwidth = bandwidth
                         ref_throughput, ref_web_link = get_couchdb_throughput_link(
-                            ref[0], ref[1], options
-                        )
+                            ref[0], ref[1], options)
                         if measure_bandwidth:
                             ref_bandwidth = get_couchdb_bandwidth(
-                                ref[0], ref[1], options
-                            )
-                        else: ref_bandwidth = 0.
+                                ref[0], ref[1], options)
+                        else:
+                            ref_bandwidth = 0.0
                     elif ref == (slot, build_id):
                         # The handler runs for the -ref build, so fetch the -mr results
                         ref_throughput = throughput
                         ref_web_link = targetRootWebDir
                         ref_bandwidth = bandwidth
                         new_throughput, web_link = get_couchdb_throughput_link(
-                            test[0], test[1], options
-                        )
+                            test[0], test[1], options)
                         if measure_bandwidth:
                             new_bandwidth = get_couchdb_bandwidth(
-                                test[0], test[1], options
-                            )
-                        else: new_bandwidth = 0.
+                                test[0], test[1], options)
+                        else:
+                            new_bandwidth = 0.0
                     else:
                         assert False
                 except dashboard.ResourceNotFound:
diff --git a/handlers/ThroughputScalingHandler.py b/handlers/ThroughputScalingHandler.py
index d5c2aa62d7f8ee662d897a0e0d723719d4619a50..fc1ba9ca8efb31216eb697b408eb2876c9d455f7 100644
--- a/handlers/ThroughputScalingHandler.py
+++ b/handlers/ThroughputScalingHandler.py
@@ -9,21 +9,35 @@ from collectRunResults import send_notification_mattermost
 
 log = logging.getLogger(__name__)
 
-class ThroughputScalingHandler(BaseHandler):
 
+class ThroughputScalingHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResultsExt(self, directory, project, version,
-                            platform, hostname, cpu_info, memoryinfo, startTime, endTime, options):
-
-        regex = re.compile("Max reached throughput ([\d.]+) at ([\d]+) jobs with ([\d]+) threads")
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
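+        # plotScaling.py prints a summary line of the form
+        # "Max reached throughput <T> at <J> jobs with <N> threads";
+        # capture throughput, job count and thread count from it.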
+        regex = re.compile(
+            "Max reached throughput ([\d.]+) at ([\d]+) jobs with ([\d]+) threads"
+        )
 
         if os.path.exists("lhcb-benchmark-scripts"):
             output = subprocess.check_output([
-                                'python',
-                                './lhcb-benchmark-scripts/plotScaling.py',
-                                "--directory", directory])
+                "python",
+                "./lhcb-benchmark-scripts/plotScaling.py",
+                "--directory",
+                directory,
+            ])
         else:
             raise RuntimeError("./lhcb-benchmark-scripts does not exist,"
                                " can't run plotScaling.py")
@@ -36,10 +50,12 @@ class ThroughputScalingHandler(BaseHandler):
         else:
             raise RuntimeError("can't find match in regex")
 
-        self.saveFloat("max_throughput",
-                       maxThroughput,
-                       description="maximum throughput",
-                       group="throughput")
+        self.saveFloat(
+            "max_throughput",
+            maxThroughput,
+            description="maximum throughput",
+            group="throughput",
+        )
 
         # send plot to eos  as html
         wwwDirEos = os.environ.get("LHCBPR_WWW_EOS")
@@ -47,66 +63,58 @@ class ThroughputScalingHandler(BaseHandler):
             raise Exception("No web dir on EOS defined,"
                             " will not run extraction")
         else:
-            dirname = "Throughput_"\
-                      + str(version)\
-                      + "_"\
-                      + str(options)\
-                      + "_"\
-                      + str(platform)\
-                      + "_"\
-                      + startTime.replace(' ', '_')
-
-            html_code = "<html>"\
-                        "<head></head> "\
-                        "<body> "\
-                        "<p>"\
-                        "slot.build_id: "+str(version)+"<br>"\
-                        "platform: "+str(platform)+"<br>"\
-                        "hostname: "+str(hostname)+"<br>"\
-                        "options file: <a href='https://gitlab.cern.ch/lhcb-nightlies/Brunel/blob/"\
-                        + str(version).replace(".", "/")\
-                        + "/Rec/Brunel/python/upgrade_options/"\
-                        + str(options).replace("Scaling", "")\
-                        + ".py'>"\
-                        + str(options)\
-                        + "</a></p>"\
-                        "<ul>"\
-                        "  <li>Maximum throughput at "\
-                        + mtJobs\
-                        + " jobs with "\
-                        + mtThreads\
-                        + " threads = "\
-                        + str(maxThroughput)\
-                        + " Events/s"+"</li>"\
-                        "</ul>"\
-                        "<img src="'scalingTest.png'">"\
-                        "<p>Here's the <a href='scalingTest.pdf'>pdf</a> version.</p>"\
-                        "</body>"\
-                        "</html>"
+            dirname = ("Throughput_" + str(version) + "_" + str(options) + "_"
+                       + str(platform) + "_" + startTime.replace(" ", "_"))
+
+            html_code = (
+                "<html>"
+                "<head></head> "
+                "<body> "
+                "<p>"
+                "slot.build_id: " + str(version) + "<br>"
+                "platform: " + str(platform) + "<br>"
+                "hostname: " + str(hostname) + "<br>"
+                "options file: <a href='https://gitlab.cern.ch/lhcb-nightlies/Brunel/blob/"
+                + str(version).replace(".", "/") +
+                "/Rec/Brunel/python/upgrade_options/" + str(options).replace(
+                    "Scaling", "") + ".py'>" + str(options) + "</a></p>"
+                "<ul>"
+                "  <li>Maximum throughput at " + mtJobs + " jobs with " +
+                mtThreads + " threads = " + str(maxThroughput) + " Events/s" +
+                "</li>"
+                "</ul>"
+                "<img src="
+                "scalingTest.png"
+                ">"
+                "<p>Here's the <a href='scalingTest.pdf'>pdf</a> version.</p>"
+                "</body>"
+                "</html>")
 
             with open("index.html", "w") as html_file:
                 html_file.write(html_code)
 
             targetRootEosDir = os.path.join(wwwDirEos, dirname)
             try:
-                subprocess.call(['xrdcp',
-                                'scalingTest.png',
-                                targetRootEosDir + "/scalingTest.png"])
-                subprocess.call(['xrdcp',
-                                'scalingTest.pdf',
-                                targetRootEosDir + "/scalingTest.pdf"])
-                subprocess.call(['xrdcp',
-                                'index.html',
-                                targetRootEosDir + "/index.html"])
+                subprocess.call([
+                    "xrdcp", "scalingTest.png",
+                    targetRootEosDir + "/scalingTest.png"
+                ])
+                subprocess.call([
+                    "xrdcp", "scalingTest.pdf",
+                    targetRootEosDir + "/scalingTest.pdf"
+                ])
+                subprocess.call(
+                    ["xrdcp", "index.html", targetRootEosDir + "/index.html"])
             except Exception as ex:
-                log.warning('Error copying html files to eos: %s', ex)
+                log.warning("Error copying html files to eos: %s", ex)
 
-            self.saveString("throughput",
-                           "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"
-                           + dirname
-                           + "scalingTest.png",
-                           description="link to throughput vs parallelisation plot",
-                           group="performance")
+            self.saveString(
+                "throughput",
+                "cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/" + dirname +
+                "scalingTest.png",
+                description="link to throughput vs parallelisation plot",
+                group="performance",
+            )
 
             # hardcoded info about machines
             # to be replaced by using cpu_info set in lbpr-get-command
@@ -117,20 +125,15 @@ class ThroughputScalingHandler(BaseHandler):
 
             # send notification on mattermost channel
             if "MATTERMOST_HOOK" in os.environ:
-                content = "The results of latest throughput test ["\
-                    + str(options).replace('_', ' ')\
-                    + " "\
-                    + str(platform)\
-                    + " "\
-                    + hostname\
-                    + "](https://cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"\
-                    + dirname\
-                    + "):\n"\
-                    + "`Maximum throughput = "\
-                    + str(maxThroughput)\
-                    + " Events/s`"
-                send_notification_mattermost(os.environ['MATTERMOST_HOOK'],
-                                            content)
+                content = (
+                    "The results of latest throughput test [" +
+                    str(options).replace(
+                        "_", " ") + " " + str(platform) + " " + hostname +
+                    "](https://cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/" +
+                    dirname + "):\n" + "`Maximum throughput = " +
+                    str(maxThroughput) + " Events/s`")
+                send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
+                                             content)
             else:
                 log.warning("notifications not sent"
-                                " because MATTERMOST_HOOK not set")
+                            " because MATTERMOST_HOOK not set")
diff --git a/handlers/TimeLineHandler.py b/handlers/TimeLineHandler.py
index 323102d887f3f4024f4059b1f1bdad16af0f3f02..7078877ca70d68875e7170ce014c7a1dedbe7f73 100755
--- a/handlers/TimeLineHandler.py
+++ b/handlers/TimeLineHandler.py
@@ -7,7 +7,6 @@ from .timing import TimeLineSvcParser
 
 # lb-run --nightly-cvmfs --nightly=lhcb-future Brunel/future gaudirun.py \
 # \${BRUNEL_PROJECT_ROOT}/Rec/Brunel/options/MiniBrunel.py
-
 """ TimeLineHandler will parse a timeline.csv file (via TimeLineSvcParser) and
 extract different metrics out of the resulting json data, such as total
 execution time (end of last algo - start of first algo), the minimum, maximum,
@@ -16,15 +15,13 @@ algorithm. """
 
 
 class TimeLineHandler(BaseHandler):
-
-    def __init__(self, directory=''):
+    def __init__(self, directory=""):
         super(self.__class__, self).__init__()
         self.directory = directory
         if not self.directory:
             self.directory = os.path.realpath(os.curdir)
 
     def collectResults(self, directory):
-
         # collect data from the timeline.csv output file and return a json
         tsp = TimeLineSvcParser.TimeLineSvcParser(directory)
         tsp.collectData()
@@ -32,14 +29,14 @@ class TimeLineHandler(BaseHandler):
 
         evts = {}
         algs = {}
-        psta = int(data[0]['start'])
-        pend = int(data[0]['end'])
+        psta = int(data[0]["start"])
+        pend = int(data[0]["end"])
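+        # psta/pend track the earliest start and latest end over all records,
+        # i.e. the total execution time (end of last algo minus start of first).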
 
         for x in data:
-            evt = int(x['event'])
-            sta = int(x['start'])
-            end = int(x['end'])
-            alg = x['algorithm']
+            evt = int(x["event"])
+            sta = int(x["start"])
+            end = int(x["end"])
+            alg = x["algorithm"]
             psta = min(psta, sta)
             pend = max(pend, end)
 
@@ -59,32 +56,69 @@ class TimeLineHandler(BaseHandler):
             durs.append(dur)
 
         for algo in list(algs.keys()):
-            self.saveInt('min_' + algo, int(round(min(algs[algo]))),
-                         'minimum execution time ' + algo, 'algorithm timing')
-            self.saveInt('max_' + algo, int(round(max(algs[algo]))),
-                         'minimum execution time ' + algo, 'algorithm timing')
-            self.saveInt('mean_' + algo, int(round(numpy.mean(algs[algo]))),
-                         'minimum execution time ' + algo, 'algorithm timing')
-            self.saveInt('sigma_' + algo, int(round(numpy.std(algs[algo]))),
-                         'minimum execution time ' + algo, 'algorithm timing')
-
-        self.saveInt('min_evt_time', int(round(min(durs))),
-                     'minimum event execution time', 'event timing')
-        self.saveInt('max_evt_time', int(round(max(durs))),
-                     'maximum event execution time', 'event timing')
-        self.saveInt('mean_evt_time', int(round(numpy.mean(durs))),
-                     'mean event execution time', 'event timing')
-        self.saveInt('sigma_evt_time', int(round(numpy.std(durs))),
-                     'event execution time standard deviation', 'event timing')
+            self.saveInt(
+                "min_" + algo,
+                int(round(min(algs[algo]))),
+                "minimum execution time " + algo,
+                "algorithm timing",
+            )
+            self.saveInt(
+                "max_" + algo,
+                int(round(max(algs[algo]))),
+                "minimum execution time " + algo,
+                "algorithm timing",
+            )
+            self.saveInt(
+                "mean_" + algo,
+                int(round(numpy.mean(algs[algo]))),
+                "minimum execution time " + algo,
+                "algorithm timing",
+            )
+            self.saveInt(
+                "sigma_" + algo,
+                int(round(numpy.std(algs[algo]))),
+                "minimum execution time " + algo,
+                "algorithm timing",
+            )
+
+        self.saveInt(
+            "min_evt_time",
+            int(round(min(durs))),
+            "minimum event execution time",
+            "event timing",
+        )
+        self.saveInt(
+            "max_evt_time",
+            int(round(max(durs))),
+            "maximum event execution time",
+            "event timing",
+        )
+        self.saveInt(
+            "mean_evt_time",
+            int(round(numpy.mean(durs))),
+            "mean event execution time",
+            "event timing",
+        )
+        self.saveInt(
+            "sigma_evt_time",
+            int(round(numpy.std(durs))),
+            "event execution time standard deviation",
+            "event timing",
+        )
 
         totaltime = pend - psta
-        self.saveFloat('total_execution_time', totaltime,
-                       'the total execution time', 'total timing')
+        self.saveFloat(
+            "total_execution_time",
+            totaltime,
+            "the total execution time",
+            "total timing",
+        )
 
     def run(self):
         self.collectResults(self.directory)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     TimeLineHandler().run()
 
 # fieldnames = ("FirstName","LastName","IDNumber","Message")
diff --git a/handlers/TimingHandler.py b/handlers/TimingHandler.py
index 62e20a818e8546110ae2a404dce299149c970378..d812af7468b98173988e8250f0c2441bc863d6b8 100644
--- a/handlers/TimingHandler.py
+++ b/handlers/TimingHandler.py
@@ -10,26 +10,29 @@ class TimingHandler(BaseHandler):
 
     def collectResults(self, directory):
         from .timing.TimingParser import TimingParser
-        tp = TimingParser(os.path.join(directory, 'run.log'))
+
+        tp = TimingParser(os.path.join(directory, "run.log"))
 
         # Now saving all the nodes
         for node in tp.getAllSorted():
-            if node.name == 'Hlt2CharmHadD2HHHKsDD':
-                print("{0} - {1} - {2} - {3}"
-                      .format(node.id, node.name, node.value, node.entries))
-
-            self.saveFloat(node.name, node.value,
-                           "Time per Ev. [ms]", "Timing")
-            self.saveInt(node.name + "_count",
-                         node.entries, group="TimingCount")
+            if node.name == "Hlt2CharmHadD2HHHKsDD":
+                print("{0} - {1} - {2} - {3}".format(node.id, node.name,
+                                                     node.value, node.entries))
+
+            self.saveFloat(node.name, node.value, "Time per Ev. [ms]",
+                           "Timing")
+            self.saveInt(
+                node.name + "_count", node.entries, group="TimingCount")
             self.saveInt(node.name + "_rank", node.rank, group="TimingRank")
 
             if node.parent != None:
-                self.saveString(node.name + "_parent",
-                                node.parent.name, group="TimingTree")
+                self.saveString(
+                    node.name + "_parent",
+                    node.parent.name,
+                    group="TimingTree")
             else:
-                self.saveString(node.name + "_parent",
-                                "None", group="TimingTree")
+                self.saveString(
+                    node.name + "_parent", "None", group="TimingTree")
             self.saveInt(node.name + "_id", node.id, group="TimingID")
 
         # save also overall timing
@@ -40,5 +43,9 @@ class TimingHandler(BaseHandler):
                 tp.overall_timing = round(el.value / 1000 * el.entries / 60, 1)
             except:
                 pass
-        self.saveFloat("overall_timing", tp.overall_timing,
-                       "Time User from ChronoStatSvc in minutes", "Timing")
+        self.saveFloat(
+            "overall_timing",
+            tp.overall_timing,
+            "Time User from ChronoStatSvc in minutes",
+            "Timing",
+        )
diff --git a/handlers/TimingHandlerXml.py b/handlers/TimingHandlerXml.py
index e5a5941e60efbbbec4a99bb1ef14a5508631a9b5..199e9bd69a785b2e46985f3ccb1e6da7ea8dfa40 100644
--- a/handlers/TimingHandlerXml.py
+++ b/handlers/TimingHandlerXml.py
@@ -6,28 +6,31 @@ from xml.parsers.expat import ExpatError
 
 ##must update this class
 class TimingHandler_2(BaseHandler):
-	
-	def __init__(self):
-		super(self.__class__, self).__init__()
+    def __init__(self):
+        super(self.__class__, self).__init__()
 
-	def collectResults(self,directory):
-		try:
-			os.chdir(directory)
-		except OSError:
-			return False
-		
-		tree = ElementTree()
-		
-		try:
-			tree.parse('timing.xml')
-		except ExpatError:
-			return False
-		except IOError:
-			return False
-		
-		for parent in tree.getiterator('alg'):
-			for child in parent:
-				if child.tag == 'count':
-					self.saveInt(parent.attrib.get("name")+'_'+child.tag, child.text)
-				else:
-					self.saveFloat(parent.attrib.get("name")+'_'+child.tag, child.text)
\ No newline at end of file
+    def collectResults(self, directory):
+        try:
+            os.chdir(directory)
+        except OSError:
+            return False
+
+        tree = ElementTree()
+
+        try:
+            tree.parse("timing.xml")
+        except ExpatError:
+            return False
+        except IOError:
+            return False
+
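+        # timing.xml holds one <alg name="..."> node per algorithm; every
+        # child tag becomes a metric named "<name>_<tag>" (counts are saved
+        # as ints, everything else as floats).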
+        for parent in tree.getiterator("alg"):
+            for child in parent:
+                if child.tag == "count":
+                    self.saveInt(
+                        parent.attrib.get("name") + "_" + child.tag,
+                        child.text)
+                else:
+                    self.saveFloat(
+                        parent.attrib.get("name") + "_" + child.tag,
+                        child.text)
diff --git a/handlers/UpgradePrCheckerHandler.py b/handlers/UpgradePrCheckerHandler.py
index 94a2d754299b2cf6fb2646bd709be2989c6582e0..c3f7ab0745035191ea6ce37a432480922c0061ba 100644
--- a/handlers/UpgradePrCheckerHandler.py
+++ b/handlers/UpgradePrCheckerHandler.py
@@ -6,109 +6,129 @@ from collectRunResults import send_notification_mattermost
 
 log = logging.getLogger(__name__)
 
-class UpgradePrCheckerHandler(BaseHandler):
 
+class UpgradePrCheckerHandler(BaseHandler):
     def __init__(self):
         super(self.__class__, self).__init__()
 
     def extractPerf(self, infile, trackingtype="Velo", trackingcat="07"):
-        filetoread = open(infile,'r')
+        filetoread = open(infile, "r")
         foundtrackingtype = False
         foundtrackingcategory = False
         foundfakestracks = False
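+        # The PrChecker log contains a "**** <type>" summary line with the
+        # numbers of tracks and ghosts, followed by per-category lines with
+        # efficiency and purity.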
         for line in filetoread:
             if not foundtrackingtype:
-                if line.find('**** '+trackingtype) > -1:
+                if line.find("**** " + trackingtype) > -1:
                     foundtrackingtype = True
                     if line.find("ghosts") > -1 and not foundfakestracks:
-                        tracksFound = line.split('tracks including')[0].split(trackingtype)[1].lstrip(' ')
-                        fakesFound  = line.split('tracks including')[1].split("ghosts")[0].lstrip(' ')
-                        fakeRate    = line.split('tracks including')[1].split("ghosts")[1].split('[')[1].split(']')[0].lstrip(' ').rstrip(' %')
-                        print("Nb. Tracks Found = "+tracksFound)
-                        print("Nb. Fakes  Found = "+fakesFound+ " [ "+fakeRate +" %]")
-                        #print 'Velo tracking found =',line.split('tracks including')[0].split(trackingtype)[1].lstrip(' ')
+                        tracksFound = (line.split("tracks including")[0].split(
+                            trackingtype)[1].lstrip(" "))
+                        fakesFound = (line.split("tracks including")[1].split(
+                            "ghosts")[0].lstrip(" "))
+                        fakeRate = (line.split("tracks including")[1].split(
+                            "ghosts")[1].split("[")[1].split("]")[0].lstrip(
+                                " ").rstrip(" %"))
+                        print("Nb. Tracks Found = " + tracksFound)
+                        print("Nb. Fakes  Found = " + fakesFound + " [ " +
+                              fakeRate + " %]")
+                        # print 'Velo tracking found =',line.split('tracks including')[0].split(trackingtype)[1].lstrip(' ')
                         foundfakestracks = True
             else:
                 if line.find(trackingcat) > -1:
-                    efficiency = float(line.split('[')[1].split(']')[0].lstrip(' ').rstrip(' %'))
-                    clone_rate = 100.0-float(line.split('purity:')[1].split('%')[0].lstrip(' '))
-                    print('efficiency = ', efficiency)
-                    print('clone rate  = ', clone_rate)
+                    efficiency = float(
+                        line.split("[")[1].split("]")[0].lstrip(" ").rstrip(
+                            " %"))
+                    clone_rate = 100.0 - float(
+                        line.split("purity:")[1].split("%")[0].lstrip(" "))
+                    print("efficiency = ", efficiency)
+                    print("clone rate  = ", clone_rate)
                     foundtrackingcategory = True
                     break
 
         if not foundtrackingcategory or not foundfakestracks:
-            raise Exception('Error getting physics performance metrics:'
-                            ' not found tracking category or'
-                            ' not found fakes tracks lines in the log file')
+            raise Exception("Error getting physics performance metrics:"
+                            " not found tracking category or"
+                            " not found fakes tracks lines in the log file")
         else:
             return (efficiency, fakeRate, clone_rate)
 
-    def collectResultsExt(self, directory, project, version, platform, hostname, cpu_info, memoryinfo, startTime, endTime, options):
-
+    def collectResultsExt(
+            self,
+            directory,
+            project,
+            version,
+            platform,
+            hostname,
+            cpu_info,
+            memoryinfo,
+            startTime,
+            endTime,
+            options,
+    ):
         # get efficiency and fake rate from PRChecker log
-        efficiency, fake_rate, clone_rate = self.extractPerf(os.path.join(directory, "run.log"))
+        efficiency, fake_rate, clone_rate = self.extractPerf(
+            os.path.join(directory, "run.log"))
 
         # save floats and string with path to plot
-        self.saveFloat("efficiency",
-                       efficiency,
-                       description="efficiency",
-                       group="performance")
-        self.saveFloat("fake_rate",
-                       fake_rate,
-                       description="fake rate",
-                       group="performance")
-        self.saveFloat("clone_rate",
-                       clone_rate,
-                       description="clone rate",
-                       group="performance")
+        self.saveFloat(
+            "efficiency",
+            efficiency,
+            description="efficiency",
+            group="performance")
+        self.saveFloat(
+            "fake_rate",
+            fake_rate,
+            description="fake rate",
+            group="performance")
+        self.saveFloat(
+            "clone_rate",
+            clone_rate,
+            description="clone rate",
+            group="performance")
 
         # send plot to eos  as html
         wwwDirEos = os.environ.get("LHCBPR_WWW_EOS")
         if wwwDirEos == None:
-            raise Exception("No web dir on EOS defined, will not run extraction")
+            raise Exception(
+                "No web dir on EOS defined, will not run extraction")
         else:
-            dirname = "PrChecker_"\
-                      + str(version)\
-                      + "_"\
-                      + str(platform)\
-                      + "_"\
-                      + startTime.replace(' ', '_')
-            html_code = "<html>"\
-                        "<head></head> "\
-                        "<body> "\
-                        "<p>"\
-                        +str(version)+"<br>"\
-                        +str(platform)+"<br>"\
-                        +str(hostname)+"</p>"\
-                        "<ul>"\
-                        "  <li>Efficiency = "+str(efficiency)+" </li>"\
-                        "  <li>Fake rate = "+str(fake_rate)+"</li>"\
-                        "  <li>Clone rate = "+str(clone_rate)+"</li>"\
-                        "</ul>"\
-                        "</body>"\
-                        "</html>"
+            dirname = ("PrChecker_" + str(version) + "_" + str(platform) + "_"
+                       + startTime.replace(" ", "_"))
+            html_code = ("<html>"
+                         "<head></head> "
+                         "<body> "
+                         "<p>" + str(version) + "<br>" + str(platform) + "<br>"
+                         + str(hostname) + "</p>"
+                         "<ul>"
+                         "  <li>Efficiency = " + str(efficiency) + " </li>"
+                         "  <li>Fake rate = " + str(fake_rate) + "</li>"
+                         "  <li>Clone rate = " + str(clone_rate) + "</li>"
+                         "</ul>"
+                         "</body>"
+                         "</html>")
 
             with open("index.html", "w") as html_file:
                 html_file.write(html_code)
 
             targetRootEosDir = os.path.join(wwwDirEos, dirname)
             try:
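+                # xrdcp is the XRootD copy client; -f overwrites any existing
+                # index.html at the destination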
-                subprocess.call(['xrdcp', '-f', 'index.html', targetRootEosDir + "/index.html"])
+                subprocess.call([
+                    "xrdcp", "-f", "index.html",
+                    targetRootEosDir + "/index.html"
+                ])
             except Exception as ex:
-                log.warning('Error copying html files to eos: %s', ex)
+                log.warning("Error copying html files to eos: %s", ex)
 
             # send notification on mattermost channel
             if "MATTERMOST_HOOK" in os.environ:
-                content="The results of latest PrChecker test `"\
-                    +str(options)\
-                    +"` are available on: https://cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"\
-                    +dirname\
-                    +"\n```"\
-                    +"\nEfficiency = "+str(efficiency)\
-                    +"\nFake rate = "+str(fake_rate)\
-                    +"\nClone rate = "+str(clone_rate)\
-                    +"```"
-                send_notification_mattermost(os.environ['MATTERMOST_HOOK'], content)
+                content = (
+                    "The results of latest PrChecker test `" + str(options) +
+                    "` are available on: https://cern.ch/lhcbpr-hlt/PerfTests/UpgradeVelo/"
+                    + dirname + "\n```" + "\nEfficiency = " + str(efficiency) +
+                    "\nFake rate = " + str(fake_rate) + "\nClone rate = " +
+                    str(clone_rate) + "```")
+                send_notification_mattermost(os.environ["MATTERMOST_HOOK"],
+                                             content)
             else:
-                log.warning("notifications not sent because MATTERMOST_HOOK not set")
+                log.warning(
+                    "notifications not sent because MATTERMOST_HOOK not set")
diff --git a/handlers/VTuneModuleTimingHandler.py b/handlers/VTuneModuleTimingHandler.py
index 07cccd4e4cd122597b68feaa7c7915aadae2bda6..799c01398b23f7cea73cc013ba7a86d0656c6ee5 100644
--- a/handlers/VTuneModuleTimingHandler.py
+++ b/handlers/VTuneModuleTimingHandler.py
@@ -3,19 +3,19 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class VTuneModuleTimingHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-
+    def collectResults(self, directory):
         from .timing.VTuneModuleParser import VTuneModuleParser
-        tp = VTuneModuleParser(os.path.join(directory,'module.log'));
+
+        tp = VTuneModuleParser(os.path.join(directory, "module.log"))
 
         # Now saving all the nodes
         for node in tp.getTimingList():
-            self.saveFloat(node[0], node[1], "Time per Module (library) [s]", "ModuleTiming")
-
+            self.saveFloat(node[0], node[1], "Time per Module (library) [s]",
+                           "ModuleTiming")
diff --git a/handlers/VTuneTaskTimingHandler.py b/handlers/VTuneTaskTimingHandler.py
index 34f3b70ca940d77b19ae7cc532e681add7d38139..b13558af22dba03ef6a7b8f646d3a29d3118fc74 100644
--- a/handlers/VTuneTaskTimingHandler.py
+++ b/handlers/VTuneTaskTimingHandler.py
@@ -3,28 +3,46 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class VTuneTaskTimingHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-
+    def collectResults(self, directory):
         from .timing.VTuneTimingParser import VTuneTimingParser
-        tp = VTuneTimingParser(os.path.join(directory,'run.log'), os.path.join(directory,'task.log'))
+
+        tp = VTuneTimingParser(
+            os.path.join(directory, "run.log"),
+            os.path.join(directory, "task.log"))
 
         # Now saving all the nodes
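+        # each node from VTuneTimingParser is expected to expose .name,
+        # .value, .entries, .rank, .parent and .id, as used below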
         for node in tp.getAllSorted():
             # self.saveFloat(node.name, node.value, "Processing per Event", "TaskTiming")
-            self.saveFloat(node.name, node.value, "Time per Ev. [ms]", "TaskTiming")
-            self.saveInt(node.name + "_count", node.entries, "Events processed", "TaskTimingCount")
-            self.saveInt(node.name + "_rank", node.rank, "Level of Alg. in call stack", "TaskTimingRank")
+            self.saveFloat(node.name, node.value, "Time per Ev. [ms]",
+                           "TaskTiming")
+            self.saveInt(
+                node.name + "_count",
+                node.entries,
+                "Events processed",
+                "TaskTimingCount",
+            )
+            self.saveInt(
+                node.name + "_rank",
+                node.rank,
+                "Level of Alg. in call stack",
+                "TaskTimingRank",
+            )
             if node.parent != None:
-                self.saveString(node.name + "_parent", node.parent.name, "Parent name of Alg.", "TaskTimingTree")
+                self.saveString(
+                    node.name + "_parent",
+                    node.parent.name,
+                    "Parent name of Alg.",
+                    "TaskTimingTree",
+                )
             else:
-                self.saveString(node.name + "_parent", "None", "Root", "TaskTimingTree")
-            self.saveInt(node.name + "_id", node.id, "Id in Alg. list", "TaskTimingID")
-
-
+                self.saveString(node.name + "_parent", "None", "Root",
+                                "TaskTimingTree")
+            self.saveInt(node.name + "_id", node.id, "Id in Alg. list",
+                         "TaskTimingID")
diff --git a/handlers/VTuneTimingHandler.py b/handlers/VTuneTimingHandler.py
index 202b17776ea2464c7d82965afd31a10b0724456f..78dd3a9defdf622e559852abbb5007bf6f07d5d5 100644
--- a/handlers/VTuneTimingHandler.py
+++ b/handlers/VTuneTimingHandler.py
@@ -3,27 +3,37 @@ from .BaseHandler import BaseHandler
 from xml.etree.ElementTree import ElementTree
 from xml.parsers.expat import ExpatError
 
+
 class VTuneTimingHandler(BaseHandler):
-        
     def __init__(self):
         super(self.__class__, self).__init__()
         self.finished = False
         self.results = []
 
-    def collectResults(self,directory):
-
+    def collectResults(self, directory):
         from .timing.VTuneTimingParser import VTuneTimingParser
-        tp = VTuneTimingParser(os.path.join(directory,'run.log'), os.path.join(directory,'task.log'))
+
+        tp = VTuneTimingParser(
+            os.path.join(directory, "run.log"),
+            os.path.join(directory, "task.log"))
 
         # Now saving all the nodes
         for node in tp.getAllSorted():
-            self.saveFloat(node.name, node.value, "Produced by VTune", "Timing")
-            self.saveInt(node.name + "_count", node.entries, "Produced by VTune", "TimingCount")
-            self.saveInt(node.name + "_rank", node.rank, "Produced by VTune", "TimingRank")
+            self.saveFloat(node.name, node.value, "Produced by VTune",
+                           "Timing")
+            self.saveInt(node.name + "_count", node.entries,
+                         "Produced by VTune", "TimingCount")
+            self.saveInt(node.name + "_rank", node.rank, "Produced by VTune",
+                         "TimingRank")
             if node.parent != None:
-                self.saveString(node.name + "_parent", node.parent.name, "Produced by VTune", "TimingTree")
+                self.saveString(
+                    node.name + "_parent",
+                    node.parent.name,
+                    "Produced by VTune",
+                    "TimingTree",
+                )
             else:
-                self.saveString(node.name + "_parent", "None", "Produced by VTune", "TimingTree")
-            self.saveInt(node.name + "_id", node.id, "Produced by VTune", "TimingID")
-
-
+                self.saveString(node.name + "_parent", "None",
+                                "Produced by VTune", "TimingTree")
+            self.saveInt(node.name + "_id", node.id, "Produced by VTune",
+                         "TimingID")
diff --git a/handlers/ValgrindMassifHandler.py b/handlers/ValgrindMassifHandler.py
index b0022fde04340621f2975cc8572e7186467dcc1e..eabe0f7f3271f638359056a657d0d3e749760922 100644
--- a/handlers/ValgrindMassifHandler.py
+++ b/handlers/ValgrindMassifHandler.py
@@ -2,34 +2,32 @@ import os, sys, re
 from .BaseHandler import BaseHandler
 
 
-
 class ValgrindMassifHandler(BaseHandler):
-   """ LHCbPR Handler to Upload Massif output. """
-
-   def __init__(self):
-      super(self.__class__, self).__init__()
-      self.finished = False
-      self.results = []
-      self.basefilename = 'valgrindmassif.output.log'
-      
-   def collectResults(self,directory):
-      """ Collect un results """
-      
-      # First check that we have the log file...
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-      # Then collect the info
-      self.collectLogFile(directory)
-
-
-   def collectLogFile(self,directory):
-      """ Collects the leak summary result from the log file"""
-      
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-      self.saveFile(self.basefilename, filename, "ValgrindMassifOutput", "Valgrind")
-      
+    """LHCbPR Handler to Upload Massif output."""
+
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.finished = False
+        self.results = []
+        self.basefilename = "valgrindmassif.output.log"
+
+    def collectResults(self, directory):
+        """Collect un results"""
+
+        # First check that we have the log file...
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        # Then collect the info
+        self.collectLogFile(directory)
+
+    def collectLogFile(self, directory):
+        """Collects the leak summary result from the log file"""
+
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        self.saveFile(self.basefilename, filename, "ValgrindMassifOutput",
+                      "Valgrind")
diff --git a/handlers/ValgrindMemcheckHandler.py b/handlers/ValgrindMemcheckHandler.py
index f37eb758633a8fad24b5d2678bd6bda1e7626b93..ab0dee8be77d6474638ce1ddb2eab8d04c500ff2 100644
--- a/handlers/ValgrindMemcheckHandler.py
+++ b/handlers/ValgrindMemcheckHandler.py
@@ -2,84 +2,83 @@ import os, sys, re
 from .BaseHandler import BaseHandler
 
 
-
 class ValgrindMemcheckHandler(BaseHandler):
-   """ LHCbPR Handler to parse MemcheckLog files.
-
-   It extracts the follwoing section from the valgrindmemcheck.output.log:
-   
-   ==13877== LEAK SUMMARY:
-   ==13877==    definitely lost: 22,318 bytes in 201 blocks
-   ==13877==    indirectly lost: 5,346,776 bytes in 124,690 blocks
-   ==13877==      possibly lost: 44,734,124 bytes in 150,487 blocks
-   ==13877==    still reachable: 129,650,622 bytes in 558,813 blocks
-   ==13877==         suppressed: 11,872,312 bytes in 130,811 blocks
-   ==13877== 
-   
-   And saves the whole log file as well.
-   """
-   
-   def __init__(self):
-      super(self.__class__, self).__init__()
-      self.finished = False
-      self.results = []
-      self.basefilename = 'valgrindmemcheck.output.log'
-      
-   def collectResults(self,directory):
-      """ Collect un results """
-      
-      # First check that we have the log file...
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-      # Then collect the info
-      self.collectLeakSummaryResults(directory)
-      self.collectLogFile(directory)
-
-      
-   def collectLeakSummaryResults(self,directory):
-      """ Collects the leak summary result from the log file"""
-      
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-
-      file     = open(filename)
-      foundResults = False
-      while True:
-         # Skip until the LEAK SUMMARY section is found
-         line = file.readline()
-         if "LEAK SUMMARY" not in line: continue
-
-         # At this point we MUST have found the LEAD SUMMARY section
-         keywords = ["definitely lost",
-                     "indirectly lost",
-                     "possibly lost",
-                     "still reachable",
-                     "suppressed"]
-
-         for k in keywords:
-            tmp = file.readline()
-            m = re.search("%s: ([\d,]+) bytes" % k, tmp)
-            tmpval = (m.groups(1)[0]).replace(",", "")
-            self.saveFloat(k, int(tmpval), "ValgrindMemcheck (bytes)", "Valgrind")
-
-
-         foundResults = True
-         break
-      
-      if not foundResults:
-         raise Exception("Could not find LEAK SUMMARY in %s" % filename)
-
-
-   def collectLogFile(self,directory):
-      """ Collects the leak summary result from the log file"""
-      
-      filename = os.path.join(directory, self.basefilename)
-      if not os.path.exists(filename):
-         raise Exception("File %s does not exist" % filename)
-
-      self.saveFile(self.basefilename, filename, "ValgrindMemcheckOutput", "Valgrind")
-      
+    """LHCbPR Handler to parse MemcheckLog files.
+
+    It extracts the following section from valgrindmemcheck.output.log:
+
+    ==13877== LEAK SUMMARY:
+    ==13877==    definitely lost: 22,318 bytes in 201 blocks
+    ==13877==    indirectly lost: 5,346,776 bytes in 124,690 blocks
+    ==13877==      possibly lost: 44,734,124 bytes in 150,487 blocks
+    ==13877==    still reachable: 129,650,622 bytes in 558,813 blocks
+    ==13877==         suppressed: 11,872,312 bytes in 130,811 blocks
+    ==13877==
+
+    And saves the whole log file as well.
+    """
+
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.finished = False
+        self.results = []
+        self.basefilename = "valgrindmemcheck.output.log"
+
+    def collectResults(self, directory):
+        """Collect un results"""
+
+        # First check that we have the log file...
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        # Then collect the info
+        self.collectLeakSummaryResults(directory)
+        self.collectLogFile(directory)
+
+    def collectLeakSummaryResults(self, directory):
+        """Collects the leak summary result from the log file"""
+
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        file = open(filename)
+        foundResults = False
+        while True:
+            # Skip until the LEAK SUMMARY section is found
+            line = file.readline()
+            if "LEAK SUMMARY" not in line:
+                continue
+
+            # At this point we MUST have found the LEAK SUMMARY section
+            keywords = [
+                "definitely lost",
+                "indirectly lost",
+                "possibly lost",
+                "still reachable",
+                "suppressed",
+            ]
+
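+            # the summary lines are expected in this order right after the
+            # header, e.g. "==13877==    definitely lost: 22,318 bytes in 201 blocks"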
+            for k in keywords:
+                tmp = file.readline()
+                m = re.search("%s: ([\d,]+) bytes" % k, tmp)
+                tmpval = m.group(1).replace(",", "")
+                self.saveFloat(k, int(tmpval), "ValgrindMemcheck (bytes)",
+                               "Valgrind")
+
+            foundResults = True
+            break
+
+        if not foundResults:
+            raise Exception("Could not find LEAK SUMMARY in %s" % filename)
+
+    def collectLogFile(self, directory):
+        """Collects the leak summary result from the log file"""
+
+        filename = os.path.join(directory, self.basefilename)
+        if not os.path.exists(filename):
+            raise Exception("File %s does not exist" % filename)
+
+        self.saveFile(self.basefilename, filename, "ValgrindMemcheckOutput",
+                      "Valgrind")
diff --git a/handlers/booleValidation.py b/handlers/booleValidation.py
index 3ff5fa8d98093e5449c1f8343761250ba850a9a7..1c263d67b32123617e125cc83815ef0ddb7ab835 100644
--- a/handlers/booleValidation.py
+++ b/handlers/booleValidation.py
@@ -7,41 +7,45 @@ import lxml.etree as etree
 global DEBUG
 DEBUG = True
 
+
 #################################################################################
 # search "pattern" on "line", if not found return "default"
 # if DEBUG and "name" are define print: "name: value"
-def grepPattern(pattern, line, default = None, name = ""):
-  result = default
-  resultobject = re.search( pattern, line )
-  if ( resultobject != None ):
-    tmp = resultobject.groups()
-    if ( len(tmp) == 1 ):
-      result = tmp[0]
+def grepPattern(pattern, line, default=None, name=""):
+    result = default
+    resultobject = re.search(pattern, line)
+    if resultobject is not None:
+        tmp = resultobject.groups()
+        if len(tmp) == 1:
+            result = tmp[0]
+        else:
+            result = tmp
+        if DEBUG and name:
+            print("[grepPattern] %s: %s" % (name, result))
     else:
-      result = tmp
-    if (DEBUG and name):
-      print("[grepPattern] %s: %s" % (name, result))
-  else:
-    print("WARNING: attribute %s was not found!" % name)
-  return result
+        print("WARNING: attribute %s was not found!" % name)
+    return result
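+
+# Usage (illustrative): grepPattern(r"version (\S+)", "Welcome to version 1.2")
+# returns "1.2"; with several capture groups the whole tuple of groups is
+# returned; if nothing matches, the default is returned and a warning printed.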
+
 
 #################################################################################
 
 
 class booleValidation(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
-
-    def collectResults(self,directory):
-
-        logfile = os.path.join(directory, 'run.log')
-        rootfile = grepPattern('RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)' , open(logfile, 'r', encoding='ISO-8859-1').read() )
-        rootfullname = os.path.join(directory,rootfile)
+    def collectResults(self, directory):
+        logfile = os.path.join(directory, "run.log")
+        rootfile = grepPattern(
+            "RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
+        rootfullname = os.path.join(directory, rootfile)
 
         if os.path.isfile(rootfullname) == 0:
-            raise Exception("Could not locate histo file: %s in the given directory"%rootfile)
+            raise Exception(
+                "Could not locate histo file: %s in the given directory" %
+                rootfile)
 
         fileName, fileExtension = os.path.splitext(rootfile)
         self.saveFile(fileName, rootfullname)
diff --git a/handlers/brunelValidation.py b/handlers/brunelValidation.py
index b5d044422bbad79e7359e1af8b732e3f4bafbc5e..0e75d73bb17ba6c84b4b6baaab3fe63c2624732c 100644
--- a/handlers/brunelValidation.py
+++ b/handlers/brunelValidation.py
@@ -7,42 +7,45 @@ import lxml.etree as etree
 global DEBUG
 DEBUG = True
 
+
 #################################################################################
 # search "pattern" on "line", if not found return "default"
 # if DEBUG and "name" are define print: "name: value"
-def grepPattern(pattern, line, default = None, name = ""):
-  result = default
-  resultobject = re.search( pattern, line )
-  if ( resultobject != None ):
-    tmp = resultobject.groups()
-    if ( len(tmp) == 1 ):
-      result = tmp[0]
+def grepPattern(pattern, line, default=None, name=""):
+    result = default
+    resultobject = re.search(pattern, line)
+    if resultobject is not None:
+        tmp = resultobject.groups()
+        if len(tmp) == 1:
+            result = tmp[0]
+        else:
+            result = tmp
+        if DEBUG and name:
+            print("[grepPattern] %s: %s" % (name, result))
     else:
-      result = tmp
-    if (DEBUG and name):
-      print("[grepPattern] %s: %s" % (name, result))
-  else:
-    print("WARNING: attribute %s was not found!" % name)
-  return result
+        print("WARNING: attribute %s was not found!" % name)
+    return result
 
-#################################################################################
 
+#################################################################################
 
 
 class brunelValidation(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
-
-    def collectResults(self,directory):
-
-        logfile = os.path.join(directory, 'run.log')
-        rootfile = grepPattern('RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)' , open(logfile, 'r', encoding='ISO-8859-1').read() )
-        rootfullname = os.path.join(directory,rootfile)
+    def collectResults(self, directory):
+        logfile = os.path.join(directory, "run.log")
+        rootfile = grepPattern(
+            "RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
+        rootfullname = os.path.join(directory, rootfile)
 
         if os.path.isfile(rootfullname) == 0:
-            raise Exception("Could not locate histo file: %s in the given directory"%rootfile)
+            raise Exception(
+                "Could not locate histo file: %s in the given directory" %
+                rootfile)
 
         fileName, fileExtension = os.path.splitext(rootfile)
         self.saveFile(fileName, rootfullname)
diff --git a/handlers/gaussValidation.py b/handlers/gaussValidation.py
index 36e132083f512b1fb76c61a26a67a42700975aea..d80231ee4fa3cb473dd6a206ff751a1f8fd29e1e 100644
--- a/handlers/gaussValidation.py
+++ b/handlers/gaussValidation.py
@@ -11,352 +11,707 @@ import string
 global DEBUG
 DEBUG = True
 
+
 #################################################################################
 # search "pattern" on "line", if not found return "default"
 # if DEBUG and "name" are define print: "name: value"
-def grepPattern(pattern, line, default = None, name = ""):
-  result = default
-  resultobject = re.search( pattern, line )
-  if ( resultobject != None ):
-    tmp = resultobject.groups()
-    if ( len(tmp) == 1 ):
-      result = tmp[0]
+def grepPattern(pattern, line, default=None, name=""):
+    result = default
+    resultobject = re.search(pattern, line)
+    if resultobject is not None:
+        tmp = resultobject.groups()
+        if len(tmp) == 1:
+            result = tmp[0]
+        else:
+            result = tmp
+        if DEBUG and name:
+            print("[grepPattern] %s: %s" % (name, result))
     else:
-      result = tmp
-    if (DEBUG and name):
-      print("[grepPattern] %s: %s" % (name, result))
-  else:
-    print("WARNING: attribute %s was not found!" % name)
-  return result
+        print("WARNING: attribute %s was not found!" % name)
+    return result
 
-#################################################################################
 
-class GaussLogFile:
-  def __init__(self,N):
-    self.fileName = N
-
-
-	# parse the xml log file and returns a dictionary with INT and FLOAT variables
-  def parseXmlLog(self, filename):
-    # the result dictionary initialization
-    result = {}
-    result["Int"] = {} # int variables
-    result["Float"] = {} # float variables
-    result["String_fraction"] = {} # variables in format number (value +/- err)
-    result["String_efficiency"] = {} # variables in format number (value +/- err)
-    result["String_gen"] = {} # general informations
-
-    try:
-      fd = open(filename, "r", encoding="ISO-8859-1")
-      parser = etree.XMLParser(recover=True)
-      tree   = etree.parse(fd, parser)
-    except IOError:
-      print("WARNING! File GeneratorLog.xml was not set!")
-      return False
-
-    if DEBUG:
-      print("Parsing GeneratorLog.xml...")
-
-    root = tree.getroot()
-
-    # To sanitise unicode chars which cause troubles
-    printable_chars = set(string.printable)
-
-    # first save all the couples name = value
-    for counter in root.findall('counter'):
-      value = counter.find('value').text.strip()
-      name = counter.get('name')
-      if DEBUG:
-        print(name, value)
-
-      # save all values in the dictionary
-      result["Int"][name] = value
-
-    # look at the crosssection part
-    for crosssection in root.findall('crosssection'):
-      description = ''.join([x for x in crosssection.find('description').text if x in printable_chars])
-      generated = crosssection.find('generated').text.strip()
-      value = crosssection.find('value').text.strip()
-      id = crosssection.get('id')
-      result["Float"][description] = value
-      if DEBUG:
-        print("id", id, description, value)
-
-    # look at the fraction part
-    for fraction in root.findall('fraction'):
-      name = fraction.get('name')
-      number = fraction.find('number').text.strip()
-      value = fraction.find('value').text.strip()
-      error = fraction.find('error').text.strip()
-      result["String_fraction"][name]= number + "(" + value + "+/-" + error + ")"
-
-      if DEBUG:
-          print(name, number, value, error, result["String_fraction"][name])
-
-    # efficiencies
-    for efficiency in root.findall('efficiency'):
-      name = efficiency.get('name')
-      before = efficiency.find('before').text.strip()
-      after = efficiency.find('after').text.strip()
-      value = efficiency.find('value').text.strip()
-      error = efficiency.find('error').text.strip()
-
-      result["String_efficiency"][name]= before + "/" + after + "(" + value + "+/-" + error + ")"
-
-      if DEBUG:
-          print(name, before, after, value, error, result["String_efficiency"][name])
-
-    ## general information
-    if root.find('generator') is not None:
-      result["String_gen"]['generator'] = root.find('generator').text.strip()
-    else:
-      result["String_gen"]['generator'] = "N/A"
-
-    if root.find('eventType') is not None:
-      result["String_gen"]['eventType'] = root.find('eventType').text.strip()
-    else:
-     result["String_gen"]['eventType']  = 0
+#################################################################################
 
-    if root.find('method') is not None:
-      result["String_gen"]['method'] = root.find('method').text.strip()
-    else:
-      result["String_gen"]['method'] = "N/A/"
 
-    return result
+class GaussLogFile:
+    def __init__(self, N):
+        self.fileName = N
+
+    # parse the XML log file and return a dictionary with INT and FLOAT variables
+    def parseXmlLog(self, filename):
+        # the result dictionary initialization
+        result = {}
+        result["Int"] = {}  # int variables
+        result["Float"] = {}  # float variables
+        result["String_fraction"] = {
+        }  # variables in format number (value +/- err)
+        result["String_efficiency"] = {
+        }  # variables in format number (value +/- err)
+        result["String_gen"] = {}  # general informations
+
+        try:
+            fd = open(filename, "r", encoding="ISO-8859-1")
+            parser = etree.XMLParser(recover=True)
+            tree = etree.parse(fd, parser)
+        except IOError:
+            print("WARNING! File GeneratorLog.xml was not set!")
+            return False
+
+        if DEBUG:
+            print("Parsing GeneratorLog.xml...")
+
+        root = tree.getroot()
+
+        # To sanitise unicode chars which cause trouble
+        printable_chars = set(string.printable)
+
+        # first save all the couples name = value
+        for counter in root.findall("counter"):
+            value = counter.find("value").text.strip()
+            name = counter.get("name")
+            if DEBUG:
+                print(name, value)
+
+            # save all values in the dictionary
+            result["Int"][name] = value
+
+        # look at the crosssection part
+        for crosssection in root.findall("crosssection"):
+            description = "".join([
+                x for x in crosssection.find("description").text
+                if x in printable_chars
+            ])
+            generated = crosssection.find("generated").text.strip()
+            value = crosssection.find("value").text.strip()
+            id = crosssection.get("id")
+            result["Float"][description] = value
+            if DEBUG:
+                print("id", id, description, value)
+
+        # look at the fraction part
+        for fraction in root.findall("fraction"):
+            name = fraction.get("name")
+            number = fraction.find("number").text.strip()
+            value = fraction.find("value").text.strip()
+            error = fraction.find("error").text.strip()
+            result["String_fraction"][
+                name] = number + "(" + value + "+/-" + error + ")"
+
+            if DEBUG:
+                print(name, number, value, error,
+                      result["String_fraction"][name])
+
+        # efficiencies
+        for efficiency in root.findall("efficiency"):
+            name = efficiency.get("name")
+            before = efficiency.find("before").text.strip()
+            after = efficiency.find("after").text.strip()
+            value = efficiency.find("value").text.strip()
+            error = efficiency.find("error").text.strip()
+
+            result["String_efficiency"][name] = (
+                before + "/" + after + "(" + value + "+/-" + error + ")")
+
+            if DEBUG:
+                print(name, before, after, value, error,
+                      result["String_efficiency"][name])
+
+        ## general information
+        if root.find("generator") is not None:
+            result["String_gen"]["generator"] = root.find(
+                "generator").text.strip()
+        else:
+            result["String_gen"]["generator"] = "N/A"
+
+        if root.find("eventType") is not None:
+            result["String_gen"]["eventType"] = root.find(
+                "eventType").text.strip()
+        else:
+            result["String_gen"]["eventType"] = 0
+
+        if root.find("method") is not None:
+            result["String_gen"]["method"] = root.find("method").text.strip()
+        else:
+            result["String_gen"]["method"] = "N/A/"
+
+        return result
+
+    def computeQuantities(self):
+        if DEBUG:
+            print("Log file name = ", self.fileName)
+
+        # read the logfile in one shot
+        f = open(self.fileName, "r", encoding="ISO-8859-1")
+        logfile = f.read()
+        f.close()
+
+        #################################################
+        # Version information                            #
+        #################################################
+
+        self.EventType = grepPattern("Requested to generate EventType (\d+)",
+                                     logfile, 0, "EventType")
+        self.GaussVersion = grepPattern("Welcome to Gauss version (\S+)",
+                                        logfile, "", "GaussVersion")
+        self.PythiaVersion = grepPattern("This is PYTHIA version (\S+)",
+                                         logfile, "", "PythiaVersion")
+        self.GeantVersion = grepPattern("Geant4 version Name: *(\S+)  *\S+",
+                                        logfile, "", "GeantVersion")
+        self.DDDBVersion = grepPattern("DDDB *INFO Using TAG (\S+)", logfile,
+                                       "", "DDDBVersion")
+        if not self.DDDBVersion:
+            self.DDDBVersion = grepPattern(
+                "GitDDDB *INFO using commit (\S+) corresponding",
+                logfile,
+                "",
+                "DDDBVersion",
+            )
+        self.SIMCONDVersion = grepPattern("SIMCOND *INFO Using TAG (\S+)",
+                                          logfile, "", "SIMCONDVersion")
+        if not self.SIMCONDVersion:
+            self.SIMCONDVersion = grepPattern(
+                "GitSIMCOND *INFO using commit (\S+) corresponding",
+                logfile,
+                "",
+                "SIMCONDVersion",
+            )
+
+        #################################################
+        # VeloGaussMoni               INFO               #
+        #################################################
+
+        self.MCHits = grepPattern(
+            "VeloGaussMoni *INFO \| Number of MCHits\/Event: *(\S+)",
+            logfile,
+            "",
+            "MCHits",
+        )
+        self.PileUpMCHits = grepPattern(
+            "VeloGaussMoni *INFO \| Number of PileUpMCHits\/Event: *(\S+)",
+            logfile,
+            "",
+            "PileUpMCHits",
+        )
+
+        #################################################
+        # TTHitMonitor               INFO *** Summary ***#
+        #################################################
+
+        self.TTHit_Hits = grepPattern(
+            "TTHitMonitor *INFO #hits per event: (\S+)", logfile, "",
+            "TTHit_Hits")
+        self.TTHit_BetaGamma = grepPattern(
+            "TTHitMonitor *INFO Mean beta \* gamma: (\S+)",
+            logfile,
+            "",
+            "TTHit_BetaGamma",
+        )
+        self.TTHit_DepCharge = grepPattern(
+            "TTHitMonitor *INFO Most Probable deposited charge: (\S+)",
+            logfile,
+            "",
+            "TTHit_DepCharge",
+        )
+        self.TTHit_HalfSampleWidth = grepPattern(
+            "TTHitMonitor *INFO Half Sample width (\S+)",
+            logfile,
+            "",
+            "TTHit_HalfSampleWidth",
+        )
+
+        #################################################
+        # ITHitMonitor               INFO *** Summary ***#
+        #################################################
+
+        self.ITHit_Hits = grepPattern(
+            "ITHitMonitor *INFO #hits per event: (\S+)", logfile, "",
+            "ITHit_Hits")
+        self.ITHit_BetaGamma = grepPattern(
+            "ITHitMonitor *INFO Mean beta \* gamma: (\S+)",
+            logfile,
+            "",
+            "ITHit_BetaGamma",
+        )
+        self.ITHit_DepCharge = grepPattern(
+            "ITHitMonitor  *INFO Most Probable deposited charge: (\S+)",
+            logfile,
+            "",
+            "ITHit_DepCharge",
+        )
+        self.ITHit_HalfSampleWidth = grepPattern(
+            "ITHitMonitor *INFO Half Sample width (\S+)",
+            logfile,
+            "",
+            "ITHit_HalfSampleWidth",
+        )
+
+        #################################################
+        # OTHitMonitor               INFO *** Summary ***#
+        #################################################
+
+        self.OTHit_Hits = grepPattern(
+            "OTHitMonitor *INFO #hits per event: (\S+)", logfile, "",
+            "OTHit_Hits")
+        self.OTHit_BetaGamma = grepPattern(
+            "OTHitMonitor  *INFO Mean beta \* gamma: (\S+)",
+            logfile,
+            "",
+            "OTHit_BetaGamma",
+        )
+        self.OTHit_DepCharge = grepPattern(
+            "OTHitMonitor *INFO Most Probable deposited charge: (\S+)",
+            logfile,
+            "",
+            "OTHit_DepCharge",
+        )
+        self.OTHit_HalfSampleWidth = grepPattern(
+            "OTHitMonitor *INFO Half Sample width (\S+)",
+            logfile,
+            "",
+            "OTHit_HalfSampleWidth",
+        )
+
+        #################################################################
+        # ******Stat******           INFO  The Final stat Table (ordered)#
+        ################################################################
+
+        # the sum is the second value
+
+        self.MCRichTracks = grepPattern(
+            '\**Stat.*INFO *"#MCRichTracks" \| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "MCRichTracks",
+        )
+        self.MCRichSegment = grepPattern(
+            '\**Stat.*INFO *"#MCRichSegment \| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "MCRichSegment",
+        )
+        self.Muon_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Muon MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Muon_MCHits",
+        )
+        self.IT_MCHits = grepPattern(
+            '\**Stat.*INFO *"#IT MCHits" *\| *\d+ \| *(\d+)', logfile, 0,
+            "IT_MCHits")
+        self.TT_MCHits = grepPattern(
+            '\**Stat.*INFO *"#TT MCHits" *\| *\d+ \| *(\d+)', logfile, 0,
+            "TT_MCHits")
+        self.Hcal_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Hcal MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Hcal_MCHits",
+        )
+        self.OT_MCHits = grepPattern(
+            '\**Stat.*INFO *"#OT MCHits" *\| *\d+ \| *(\d+)', logfile, 0,
+            "OT_MCHits")
+        self.Velo_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Velo MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Velo_MCHits",
+        )
+        self.Rich2_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Rich2 MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Rich2_MCHits",
+        )
+        self.Spd_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Spd MCHits" *\| *\d+ \| *(\d+)', logfile, 0,
+            "Spd_MCHits")
+        self.Rich1_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Rich1 MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Rich1_MCHits",
+        )
+        self.MCParticles = grepPattern(
+            '\**Stat.*INFO *"#MCParticles" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "MCParticles",
+        )
+        self.MCVertices = grepPattern(
+            '\**Stat.*INFO *"#MCVertices" *\| *\d+ \| *(\d+)', logfile, 0,
+            "MCVertices")
+        self.Prs_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Prs MCHits" *\| *\d+ \| *(\d+)', logfile, 0,
+            "Prs_MCHits")
+        self.MCRichOpPhoto = grepPattern(
+            '\**Stat.*INFO *"#MCRichOpPhoto *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "MCRichOpPhoto",
+        )
+        self.Rich_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Rich MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Rich_MCHits",
+        )
+        self.Ecal_MCHits = grepPattern(
+            '\**Stat.*INFO *"#Ecal MCHits" *\| *\d+ \| *(\d+)',
+            logfile,
+            0,
+            "Ecal_MCHits",
+        )
+
+        #################################################################
+        # Muon Monitoring Table                                         #
+        #################################################################
+
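+        # each pattern below captures the five station columns (M1..M5) of one
+        # region row; on a miss grepPattern returns the (0, 0, 0, 0, 0) default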
+        (
+            self.R1_M1, self.R1_M2, self.R1_M3, self.R1_M4, self.R1_M5
+        ) = grepPattern(
+            "MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R1",
+            logfile,
+            (0, 0, 0, 0, 0),
+            "R1",
+        )
+        (
+            self.R2_M1, self.R2_M2, self.R2_M3, self.R2_M4, self.R2_M5
+        ) = grepPattern(
+            "MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R2",
+            logfile,
+            (0, 0, 0, 0, 0),
+            "R2",
+        )
+        (
+            self.R3_M1, self.R3_M2, self.R3_M3, self.R3_M4, self.R3_M5
+        ) = grepPattern(
+            "MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R3",
+            logfile,
+            (0, 0, 0, 0, 0),
+            "R3",
+        )
+        (
+            self.R4_M1, self.R4_M2, self.R4_M3, self.R4_M4, self.R4_M5
+        ) = grepPattern(
+            "MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R4",
+            logfile,
+            (0, 0, 0, 0, 0),
+            "R4",
+        )
+        (self.InvRichFlags, self.InvRichFlagsErr) = grepPattern(
+            "GetRichHits *INFO.*Invalid RICH flags *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0),
+            "InvRichFlags",
+        )
+        (
+            self.MCRichHitsR1,
+            self.MCRichHitsR1Err,
+            self.MCRichHitsR2,
+            self.MCRichHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*MCRichHits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 = *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "MCRichHits",
+        )
+        (
+            self.InvRadHitsR1,
+            self.InvRadHitsR1Err,
+            self.InvRadHitsR2,
+            self.InvRadHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Invalid radiator hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "InvRadHits",
+        )
+        (
+            self.SignalHitsR1,
+            self.SignalHitsR1Err,
+            self.SignalHitsR2,
+            self.SignalHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Signal Hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "SignalHits",
+        )
+        (
+            self.GasQuartzCKHitsR1,
+            self.GasQuartzCKHitsR1Err,
+            self.GasQuartzCKHitsR2,
+            self.GasQuartzCKHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Gas Quartz CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "GasQuartzCKHits",
+        )
+        (
+            self.HPDQuartzCKHitsR1,
+            self.HPDQuartzCKHitsR1Err,
+            self.HPDQuartzCKHitsR2,
+            self.HPDQuartzCKHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*HPD Quartz CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "HPDQuartzCKHits",
+        )
+        (
+            self.NitrogenCKHitsR1,
+            self.NitrogenCKHitsR1Err,
+            self.NitrogenCKHitsR2,
+            self.NitrogenCKHitsR2Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Nitrogen CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0),
+            "NitrogenCKHits",
+        )
+        (
+            self.SignalCKAero,
+            self.SignalCKAeroErr,
+            self.SignalCKC4F10,
+            self.SignalCKC4F10Err,
+            self.SignalCKCF4,
+            self.SignalCKCF4Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Signal CK MCRichHits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0, 0, 0),
+            "SignalCK",
+        )
+        (
+            self.ScatteredHitsAero,
+            self.ScatteredHitsAeroErr,
+            self.ScatteredHitsC4F10,
+            self.ScatteredHitsC4F10Err,
+            self.ScatteredHitsCF4,
+            self.ScatteredHitsCF4Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*Rayleigh scattered hits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0, 0, 0),
+            "ScatteredHits",
+        )
+        (
+            self.MCParticleLessHitsAero,
+            self.MCParticleLessHitsAeroErr,
+            self.MCParticleLessHitsC4F10,
+            self.MCParticleLessHitsC4F10Err,
+            self.MCParticleLessHitsCF4,
+            self.MCParticleLessHitsCF4Err,
+        ) = grepPattern(
+            "GetRichHits *INFO.*MCParticle-less hits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)",
+            logfile,
+            (0, 0, 0, 0, 0, 0),
+            "MCParticleLessHits",
+        )
 
-  def computeQuantities(self):
-    if DEBUG:
-      print("Log file name = ", self.fileName)
-
-    # read logfile in one shoot
-    f = open(self.fileName, "r", encoding="ISO-8859-1")
-    logfile = f.read()
-    f.close()
-
-#################################################
-#Version information                            #
-#################################################
-
-    self.EventType = grepPattern('Requested to generate EventType (\d+)', logfile, 0, 'EventType')
-    self.GaussVersion = grepPattern( 'Welcome to Gauss version (\S+)', logfile, "", 'GaussVersion')
-    self.PythiaVersion = grepPattern( 'This is PYTHIA version (\S+)', logfile, "", 'PythiaVersion')
-    self.GeantVersion = grepPattern( 'Geant4 version Name: *(\S+)  *\S+', logfile, "", 'GeantVersion')
-    self.DDDBVersion = grepPattern( 'DDDB *INFO Using TAG (\S+)', logfile, "", 'DDDBVersion')
-    if not self.DDDBVersion:
-        self.DDDBVersion = grepPattern( 'GitDDDB *INFO using commit (\S+) corresponding', logfile, "", 'DDDBVersion')
-    self.SIMCONDVersion = grepPattern( 'SIMCOND *INFO Using TAG (\S+)', logfile, "", 'SIMCONDVersion')
-    if not self.SIMCONDVersion:
-        self.SIMCONDVersion = grepPattern( 'GitSIMCOND *INFO using commit (\S+) corresponding', logfile, "", 'SIMCONDVersion')
-
-#################################################
-#VeloGaussMoni               INFO               #
-#################################################
-
-    self.MCHits = grepPattern( 'VeloGaussMoni *INFO \| Number of MCHits\/Event: *(\S+)', logfile, "", 'MCHits')
-    self.PileUpMCHits = grepPattern( 'VeloGaussMoni *INFO \| Number of PileUpMCHits\/Event: *(\S+)', logfile, "", 'PileUpMCHits')
-
-#################################################
-#TTHitMonitor               INFO *** Summary ***#
-#################################################
-
-    self.TTHit_Hits = grepPattern( 'TTHitMonitor *INFO #hits per event: (\S+)', logfile, "", 'TTHit_Hits')
-    self.TTHit_BetaGamma = grepPattern( 'TTHitMonitor *INFO Mean beta \* gamma: (\S+)', logfile, "", 'TTHit_BetaGamma')
-    self.TTHit_DepCharge = grepPattern( 'TTHitMonitor *INFO Most Probable deposited charge: (\S+)', logfile, "", 'TTHit_DepCharge')
-    self.TTHit_HalfSampleWidth = grepPattern( 'TTHitMonitor *INFO Half Sample width (\S+)', logfile, "", 'TTHit_HalfSampleWidth')
-
-#################################################
-#ITHitMonitor               INFO *** Summary ***#
-#################################################
-
-    self.ITHit_Hits = grepPattern( 'ITHitMonitor *INFO #hits per event: (\S+)', logfile, "", 'ITHit_Hits')
-    self.ITHit_BetaGamma = grepPattern( 'ITHitMonitor *INFO Mean beta \* gamma: (\S+)', logfile, "", 'ITHit_BetaGamma')
-    self.ITHit_DepCharge = grepPattern( 'ITHitMonitor  *INFO Most Probable deposited charge: (\S+)', logfile, "", 'ITHit_DepCharge')
-    self.ITHit_HalfSampleWidth = grepPattern( 'ITHitMonitor *INFO Half Sample width (\S+)', logfile, "", 'ITHit_HalfSampleWidth')
-
-#################################################
-#OTHitMonitor               INFO *** Summary ***#
-#################################################
-
-    self.OTHit_Hits = grepPattern( 'OTHitMonitor *INFO #hits per event: (\S+)', logfile, "", 'OTHit_Hits')
-    self.OTHit_BetaGamma = grepPattern( 'OTHitMonitor  *INFO Mean beta \* gamma: (\S+)', logfile, "", 'OTHit_BetaGamma')
-    self.OTHit_DepCharge = grepPattern( 'OTHitMonitor *INFO Most Probable deposited charge: (\S+)', logfile, "", 'OTHit_DepCharge')
-    self.OTHit_HalfSampleWidth = grepPattern( 'OTHitMonitor *INFO Half Sample width (\S+)', logfile, "", 'OTHit_HalfSampleWidth')
-
-#################################################################
-#******Stat******           INFO  The Final stat Table (ordered)#
-################################################################
-
-    # the sum is the second value
-
-    self.MCRichTracks = grepPattern( '\**Stat.*INFO *"#MCRichTracks" \| *\d+ \| *(\d+)', logfile, 0, 'MCRichTracks')
-    self.MCRichSegment = grepPattern( '\**Stat.*INFO *"#MCRichSegment \| *\d+ \| *(\d+)', logfile, 0, 'MCRichSegment')
-    self.Muon_MCHits = grepPattern( '\**Stat.*INFO *"#Muon MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Muon_MCHits')
-    self.IT_MCHits = grepPattern( '\**Stat.*INFO *"#IT MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'IT_MCHits')
-    self.TT_MCHits = grepPattern( '\**Stat.*INFO *"#TT MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'TT_MCHits')
-    self.Hcal_MCHits = grepPattern( '\**Stat.*INFO *"#Hcal MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Hcal_MCHits')
-    self.OT_MCHits = grepPattern( '\**Stat.*INFO *"#OT MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'OT_MCHits')
-    self.Velo_MCHits = grepPattern( '\**Stat.*INFO *"#Velo MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Velo_MCHits')
-    self.Rich2_MCHits = grepPattern( '\**Stat.*INFO *"#Rich2 MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Rich2_MCHits')
-    self.Spd_MCHits = grepPattern( '\**Stat.*INFO *"#Spd MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Spd_MCHits')
-    self.Rich1_MCHits = grepPattern( '\**Stat.*INFO *"#Rich1 MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Rich1_MCHits')
-    self.MCParticles = grepPattern( '\**Stat.*INFO *"#MCParticles" *\| *\d+ \| *(\d+)', logfile, 0, 'MCParticles')
-    self.MCVertices = grepPattern( '\**Stat.*INFO *"#MCVertices" *\| *\d+ \| *(\d+)', logfile, 0, 'MCVertices')
-    self.Prs_MCHits = grepPattern( '\**Stat.*INFO *"#Prs MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Prs_MCHits')
-    self.MCRichOpPhoto = grepPattern( '\**Stat.*INFO *"#MCRichOpPhoto *\| *\d+ \| *(\d+)', logfile, 0, 'MCRichOpPhoto')
-    self.Rich_MCHits = grepPattern( '\**Stat.*INFO *"#Rich MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Rich_MCHits')
-    self.Ecal_MCHits = grepPattern( '\**Stat.*INFO *"#Ecal MCHits" *\| *\d+ \| *(\d+)', logfile, 0, 'Ecal_MCHits')
-
-#################################################################
-# Muon Monitoring Table                                         #
-#################################################################
-
-    (self.R1_M1, self.R1_M2, self.R1_M3, self.R1_M4, self.R1_M5) = grepPattern( 'MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R1', logfile, (0,0,0,0,0), 'R1')
-    (self.R2_M1, self.R2_M2, self.R2_M3, self.R2_M4, self.R2_M5) = grepPattern( 'MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R2', logfile, (0,0,0,0,0), 'R2')
-    (self.R3_M1, self.R3_M2, self.R3_M3, self.R3_M4, self.R3_M5) = grepPattern( 'MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R3', logfile, (0,0,0,0,0), 'R3')
-    (self.R4_M1, self.R4_M2, self.R4_M3, self.R4_M4, self.R4_M5) = grepPattern( 'MuonHitChecker             INFO (\S+) * (\S+) * (\S+) * (\S+) * (\S+) * R4', logfile, (0,0,0,0,0), 'R4')
-    (self.InvRichFlags, self.InvRichFlagsErr) = grepPattern( 'GetRichHits *INFO.*Invalid RICH flags *= *(\S+) *\+\- *(\S+)', logfile, (0, 0), 'InvRichFlags')
-    (self.MCRichHitsR1, self.MCRichHitsR1Err, self.MCRichHitsR2, self.MCRichHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*MCRichHits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 = *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'MCRichHits')
-    (self.InvRadHitsR1, self.InvRadHitsR1Err, self.InvRadHitsR2, self.InvRadHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*Invalid radiator hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'InvRadHits')
-    (self.SignalHitsR1, self.SignalHitsR1Err, self.SignalHitsR2, self.SignalHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*Signal Hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'SignalHits')
-    (self.GasQuartzCKHitsR1, self.GasQuartzCKHitsR1Err, self.GasQuartzCKHitsR2, self.GasQuartzCKHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*Gas Quartz CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'GasQuartzCKHits')
-    (self.HPDQuartzCKHitsR1, self.HPDQuartzCKHitsR1Err, self.HPDQuartzCKHitsR2, self.HPDQuartzCKHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*HPD Quartz CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'HPDQuartzCKHits')
-    (self.NitrogenCKHitsR1, self.NitrogenCKHitsR1Err, self.NitrogenCKHitsR2, self.NitrogenCKHitsR2Err) = \
-      grepPattern( 'GetRichHits *INFO.*Nitrogen CK hits *: Rich1 *= *(\S+) \+\- *(\S+).*Rich2 *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0), 'NitrogenCKHits')
-    (self.SignalCKAero, self.SignalCKAeroErr, self.SignalCKC4F10, self.SignalCKC4F10Err, self.SignalCKCF4, self.SignalCKCF4Err) = \
-      grepPattern( 'GetRichHits *INFO.*Signal CK MCRichHits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0,0,0), 'SignalCK')
-    (self.ScatteredHitsAero, self.ScatteredHitsAeroErr, self.ScatteredHitsC4F10, self.ScatteredHitsC4F10Err, self.ScatteredHitsCF4, self.ScatteredHitsCF4Err) = \
-      grepPattern( 'GetRichHits *INFO.*Rayleigh scattered hits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0,0,0), 'ScatteredHits')
-    (self.MCParticleLessHitsAero, self.MCParticleLessHitsAeroErr, self.MCParticleLessHitsC4F10, self.MCParticleLessHitsC4F10Err, self.MCParticleLessHitsCF4, self.MCParticleLessHitsCF4Err) = \
-      grepPattern( 'GetRichHits *INFO.*MCParticle-less hits *: Aero *= *(\S+) \+\- *(\S+).*Rich1Gas *= *(\S+) \+\- *(\S+).*Rich2Gas *= *(\S+) *\+\- *(\S+)', logfile, (0,0,0,0,0,0), 'MCParticleLessHits')
 
 class gaussValidation(BaseHandler):
-
     def __init__(self):
         super(self.__class__, self).__init__()
 
-    def collectResults(self,directory):
-
+    def collectResults(self, directory):
        # Information is stored in two files: run.log and GeneratorLog.xml
-        logfile = os.path.join(directory, 'run.log')
-        genfile = os.path.join(directory, 'GeneratorLog.xml')
+        logfile = os.path.join(directory, "run.log")
+        genfile = os.path.join(directory, "GeneratorLog.xml")
 
         # define groups
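+        # grp maps short keys to the group names under which the quantities
+        # below are saved.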
         grp = {}
-        grp['general'] = "Validation_General"
-        grp['version'] = "Validation_Version"
-        grp['time'] = "Validation_Time"
-        grp['generator_count'] = "Validation_Generator_counters"
-        grp['generator_cross'] = "Validation_Generator_crossSection"
-        grp['generator_fraction'] = "Validation_Generator_fraction"
-        grp['generator_efficiency'] = "Validation_Generator_efficiency"
-        grp['it_ot_tt'] = "Validation_IT_OT_TT"
-        grp['velo'] = "Validation_Velo"
-        grp['muon'] = "Validation_Muon_detectors"
-        grp['rich'] = "Validation_Rich"
-        grp['mc_hits'] = "Validation_MC_hits"
+        grp["general"] = "Validation_General"
+        grp["version"] = "Validation_Version"
+        grp["time"] = "Validation_Time"
+        grp["generator_count"] = "Validation_Generator_counters"
+        grp["generator_cross"] = "Validation_Generator_crossSection"
+        grp["generator_fraction"] = "Validation_Generator_fraction"
+        grp["generator_efficiency"] = "Validation_Generator_efficiency"
+        grp["it_ot_tt"] = "Validation_IT_OT_TT"
+        grp["velo"] = "Validation_Velo"
+        grp["muon"] = "Validation_Muon_detectors"
+        grp["rich"] = "Validation_Rich"
+        grp["mc_hits"] = "Validation_MC_hits"
 
         # first look for the "ROOT" file
 
-        rootfile = grepPattern('RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)' , open(logfile, 'r', encoding='ISO-8859-1').read() )
-        rootfullname = os.path.join(directory,rootfile)
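+        # run.log is read as ISO-8859-1 (latin-1), which decodes any byte
+        # sequence, presumably to tolerate non-UTF-8 content in the log.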
+        rootfile = grepPattern(
+            "RootHistSvc\s.*INFO Writing ROOT histograms to: (\S+)",
+            open(logfile, "r", encoding="ISO-8859-1").read(),
+        )
+        rootfullname = os.path.join(directory, rootfile)
 
         if os.path.isfile(rootfullname) == 0:
-            raise Exception("Could not locate histo file: %s in the given directory"%rootfile)
+            raise Exception(
+                "Could not locate histo file: %s in the given directory" %
+                rootfile)
 
         fileName, fileExtension = os.path.splitext(rootfile)
         self.saveFile("GaussROOTOutput", rootfullname)
 
-        TheLog = GaussLogFile( logfile )
+        TheLog = GaussLogFile(logfile)
 
         # Parse and save the values found in the xml file
         xmllog = TheLog.parseXmlLog(genfile)
         if xmllog:
-          for name, value in list(xmllog["Int"].items()):
-            self.saveInt(name, value, "", grp['generator_count'])
-          for name, value in list(xmllog["Float"].items()):
-            self.saveFloat(name, value, "", grp['generator_cross'])
-          for name, value in list(xmllog["String_fraction"].items()):
-            self.saveString(name, value, "", grp['generator_fraction'])
-          for name, value in list(xmllog["String_efficiency"].items()):
-            self.saveString(name, value, "", grp['generator_efficiency'])
-
-          self.saveString('Generator', xmllog["String_gen"]['generator'], grp['general'])
-          self.saveInt('EventType',  xmllog["String_gen"]['eventType'], grp['general'])
-          self.saveString('Method', xmllog["String_gen"]['method'], grp['general'])
+            for name, value in list(xmllog["Int"].items()):
+                self.saveInt(name, value, "", grp["generator_count"])
+            for name, value in list(xmllog["Float"].items()):
+                self.saveFloat(name, value, "", grp["generator_cross"])
+            for name, value in list(xmllog["String_fraction"].items()):
+                self.saveString(name, value, "", grp["generator_fraction"])
+            for name, value in list(xmllog["String_efficiency"].items()):
+                self.saveString(name, value, "", grp["generator_efficiency"])
+
+            self.saveString("Generator", xmllog["String_gen"]["generator"],
+                            grp["general"])
+            self.saveInt("EventType", xmllog["String_gen"]["eventType"],
+                         grp["general"])
+            self.saveString("Method", xmllog["String_gen"]["method"],
+                            grp["general"])
 
         # Parse the run.log file
         TheLog.computeQuantities()
 
         if not xmllog:
-          self.saveInt('EventType', TheLog.EventType, grp['general'])
-
-        self.saveString('GaussVersion', TheLog.GaussVersion, 'Gauss', grp['version'])
-        self.saveString('PythiaVersion', TheLog.PythiaVersion, 'Pythia', grp['version'])
-        self.saveString('DDDBVersion', TheLog.DDDBVersion, 'DDDB', grp['version'])
-        self.saveString('GeantVersion', TheLog.GeantVersion, 'Geant', grp['version'])
-        self.saveString('SIMCONDVersion', TheLog.SIMCONDVersion, 'SIMCOND', grp['version'])
-
-        self.saveString('MCHits',TheLog.MCHits,'Velo MCHits / Event',grp["velo"])
-        self.saveString('PileUpMCHits',TheLog.PileUpMCHits, 'Velo Pile Up MCHits', grp["velo"])
-
-        self.saveFloat('TTHits',TheLog.TTHit_Hits, 'TT hits per event', grp["it_ot_tt"])
-        self.saveFloat('TTHit_BetaGamma',TheLog.TTHit_BetaGamma, 'TT Mean beta * gamma', grp["it_ot_tt"])
-        self.saveFloat('TTHit_DepCharge',TheLog.TTHit_DepCharge, 'TT Most Probable deposited charge', grp["it_ot_tt"])
-        self.saveFloat('TTHit_HalfSampleWidth',TheLog.TTHit_HalfSampleWidth, 'TT Half Sample Width ', grp["it_ot_tt"])
-        self.saveFloat('ITHits',TheLog.TTHit_HalfSampleWidth, 'IT hits per event', grp["it_ot_tt"])
-        self.saveFloat('ITHit_BetaGamma',TheLog.ITHit_BetaGamma, 'IT Mean beta * gamma', grp["it_ot_tt"])
-        self.saveFloat('ITHit_DepCharge',TheLog.ITHit_DepCharge, 'IT Most Probable deposited charge', grp["it_ot_tt"])
-        self.saveFloat('ITHit_HalfSampleWidth',TheLog.ITHit_HalfSampleWidth, 'IT Half Sample Width', grp["it_ot_tt"])
-        self.saveFloat('OTHits',TheLog.OTHit_Hits, 'OT hits per event', grp["it_ot_tt"])
-        self.saveFloat('OTHit_BetaGamma',TheLog.OTHit_BetaGamma, 'OT Mean beta * gamma', grp["it_ot_tt"])
-        self.saveFloat('OTHit_DepCharge',TheLog.OTHit_DepCharge, 'OT Most Probable deposited charge', grp["it_ot_tt"])
-        self.saveFloat('OTHit_HalfSampleWidth',TheLog.OTHit_HalfSampleWidth, 'OT Half Sample Width', grp["it_ot_tt"])
-
-        self.saveInt('MCRichTracks',TheLog.MCRichTracks, 'No MC Rich Tracks', grp["mc_hits"])
-        self.saveInt('MCRichSegment',TheLog.MCRichSegment, 'No MC Rich Segment', grp["mc_hits"])
-        self.saveInt('Muon_MCHits',TheLog.Muon_MCHits, 'No Muon MCHits', grp["mc_hits"])
-        self.saveInt('IT_MCHits',TheLog.IT_MCHits, 'No IT MCHits', grp["mc_hits"])
-        self.saveInt('TT_MCHits',TheLog.TT_MCHits, 'No TT MCHits ', grp["mc_hits"])
-        self.saveInt('Hcal_MCHits',TheLog.Hcal_MCHits, 'No Hcal MCHits', grp["mc_hits"])
-        self.saveInt('OT_MCHits',TheLog.OT_MCHits, 'No OT MCHits', grp["mc_hits"])
-        self.saveInt('Velo_MCHits',TheLog.Velo_MCHits, 'No Velo MCHits', grp["mc_hits"])
-        self.saveInt('Rich2_MCHits',TheLog.Rich2_MCHits, 'No Rich2 MCHits', grp["mc_hits"])
-        self.saveInt('Spd_MCHits',TheLog.Spd_MCHits, 'No Spd MCHits', grp["mc_hits"])
-        self.saveInt('Rich1_MCHits',TheLog.Rich1_MCHits, 'No Rich1 MCHits', grp["mc_hits"])
-        self.saveInt('MCParticles',TheLog.MCParticles, 'No MC Particles', grp["mc_hits"])
-        self.saveInt('MCVertices',TheLog.MCVertices, 'No MC Vertices', grp["mc_hits"])
-        self.saveInt('Prs_MCHits',TheLog.Prs_MCHits, 'No Prs MCHits', grp["mc_hits"])
-        self.saveInt('MCRichOpPhoto',TheLog.MCRichOpPhoto, 'No Rich Op Photo', grp["mc_hits"])
-        self.saveInt('Rich_MCHits',TheLog.Rich_MCHits, 'No Rich MCHits', grp["mc_hits"])
-        self.saveInt('Ecal_MCHits',TheLog.Ecal_MCHits, 'No Ecal MCHits', grp["mc_hits"])
-
-        self.saveFloat('R1_M1', TheLog.R1_M1, '', grp["muon"])
-        self.saveFloat('R1_M2', TheLog.R1_M2, '', grp["muon"])
-        self.saveFloat('R1_M3', TheLog.R1_M3, '', grp["muon"])
-        self.saveFloat('R1_M4', TheLog.R1_M4, '', grp["muon"])
-        self.saveFloat('R1_M5', TheLog.R1_M5, '', grp["muon"])
-        self.saveFloat('R2_M1', TheLog.R2_M1, '', grp["muon"])
-        self.saveFloat('R2_M2', TheLog.R2_M2, '', grp["muon"])
-        self.saveFloat('R2_M3', TheLog.R2_M3, '', grp["muon"])
-        self.saveFloat('R2_M4', TheLog.R2_M4, '', grp["muon"])
-        self.saveFloat('R2_M5', TheLog.R2_M5, '', grp["muon"])
-        self.saveFloat('R3_M1', TheLog.R3_M1, '', grp["muon"])
-        self.saveFloat('R3_M2', TheLog.R3_M2, '', grp["muon"])
-        self.saveFloat('R3_M3', TheLog.R3_M3, '', grp["muon"])
-        self.saveFloat('R3_M4', TheLog.R3_M4, '', grp["muon"])
-        self.saveFloat('R3_M5', TheLog.R3_M5, '', grp["muon"])
-        self.saveFloat('R4_M1', TheLog.R4_M1, '', grp["muon"])
-        self.saveFloat('R4_M2', TheLog.R4_M2, '', grp["muon"])
-        self.saveFloat('R4_M3', TheLog.R4_M3, '', grp["muon"])
-        self.saveFloat('R4_M4', TheLog.R4_M4, '', grp["muon"])
-        self.saveFloat('R4_M5', TheLog.R4_M5, '', grp["muon"])
-
-        '''
+            self.saveInt("EventType", TheLog.EventType, grp["general"])
+
+        self.saveString("GaussVersion", TheLog.GaussVersion, "Gauss",
+                        grp["version"])
+        self.saveString("PythiaVersion", TheLog.PythiaVersion, "Pythia",
+                        grp["version"])
+        self.saveString("DDDBVersion", TheLog.DDDBVersion, "DDDB",
+                        grp["version"])
+        self.saveString("GeantVersion", TheLog.GeantVersion, "Geant",
+                        grp["version"])
+        self.saveString("SIMCONDVersion", TheLog.SIMCONDVersion, "SIMCOND",
+                        grp["version"])
+
+        self.saveString("MCHits", TheLog.MCHits, "Velo MCHits / Event",
+                        grp["velo"])
+        self.saveString("PileUpMCHits", TheLog.PileUpMCHits,
+                        "Velo Pile Up MCHits", grp["velo"])
+
+        self.saveFloat("TTHits", TheLog.TTHit_Hits, "TT hits per event",
+                       grp["it_ot_tt"])
+        self.saveFloat(
+            "TTHit_BetaGamma",
+            TheLog.TTHit_BetaGamma,
+            "TT Mean beta * gamma",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "TTHit_DepCharge",
+            TheLog.TTHit_DepCharge,
+            "TT Most Probable deposited charge",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "TTHit_HalfSampleWidth",
+            TheLog.TTHit_HalfSampleWidth,
+            "TT Half Sample Width ",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat("ITHits", TheLog.TTHit_HalfSampleWidth,
+                       "IT hits per event", grp["it_ot_tt"])
+        self.saveFloat(
+            "ITHit_BetaGamma",
+            TheLog.ITHit_BetaGamma,
+            "IT Mean beta * gamma",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "ITHit_DepCharge",
+            TheLog.ITHit_DepCharge,
+            "IT Most Probable deposited charge",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "ITHit_HalfSampleWidth",
+            TheLog.ITHit_HalfSampleWidth,
+            "IT Half Sample Width",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat("OTHits", TheLog.OTHit_Hits, "OT hits per event",
+                       grp["it_ot_tt"])
+        self.saveFloat(
+            "OTHit_BetaGamma",
+            TheLog.OTHit_BetaGamma,
+            "OT Mean beta * gamma",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "OTHit_DepCharge",
+            TheLog.OTHit_DepCharge,
+            "OT Most Probable deposited charge",
+            grp["it_ot_tt"],
+        )
+        self.saveFloat(
+            "OTHit_HalfSampleWidth",
+            TheLog.OTHit_HalfSampleWidth,
+            "OT Half Sample Width",
+            grp["it_ot_tt"],
+        )
+
+        self.saveInt("MCRichTracks", TheLog.MCRichTracks, "No MC Rich Tracks",
+                     grp["mc_hits"])
+        self.saveInt("MCRichSegment", TheLog.MCRichSegment,
+                     "No MC Rich Segment", grp["mc_hits"])
+        self.saveInt("Muon_MCHits", TheLog.Muon_MCHits, "No Muon MCHits",
+                     grp["mc_hits"])
+        self.saveInt("IT_MCHits", TheLog.IT_MCHits, "No IT MCHits",
+                     grp["mc_hits"])
+        self.saveInt("TT_MCHits", TheLog.TT_MCHits, "No TT MCHits ",
+                     grp["mc_hits"])
+        self.saveInt("Hcal_MCHits", TheLog.Hcal_MCHits, "No Hcal MCHits",
+                     grp["mc_hits"])
+        self.saveInt("OT_MCHits", TheLog.OT_MCHits, "No OT MCHits",
+                     grp["mc_hits"])
+        self.saveInt("Velo_MCHits", TheLog.Velo_MCHits, "No Velo MCHits",
+                     grp["mc_hits"])
+        self.saveInt("Rich2_MCHits", TheLog.Rich2_MCHits, "No Rich2 MCHits",
+                     grp["mc_hits"])
+        self.saveInt("Spd_MCHits", TheLog.Spd_MCHits, "No Spd MCHits",
+                     grp["mc_hits"])
+        self.saveInt("Rich1_MCHits", TheLog.Rich1_MCHits, "No Rich1 MCHits",
+                     grp["mc_hits"])
+        self.saveInt("MCParticles", TheLog.MCParticles, "No MC Particles",
+                     grp["mc_hits"])
+        self.saveInt("MCVertices", TheLog.MCVertices, "No MC Vertices",
+                     grp["mc_hits"])
+        self.saveInt("Prs_MCHits", TheLog.Prs_MCHits, "No Prs MCHits",
+                     grp["mc_hits"])
+        self.saveInt("MCRichOpPhoto", TheLog.MCRichOpPhoto, "No Rich Op Photo",
+                     grp["mc_hits"])
+        self.saveInt("Rich_MCHits", TheLog.Rich_MCHits, "No Rich MCHits",
+                     grp["mc_hits"])
+        self.saveInt("Ecal_MCHits", TheLog.Ecal_MCHits, "No Ecal MCHits",
+                     grp["mc_hits"])
+
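+        # Average muon hit multiplicities per region (R1-R4) and station
+        # (M1-M5), as extracted from the MuonHitChecker summary.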
+        self.saveFloat("R1_M1", TheLog.R1_M1, "", grp["muon"])
+        self.saveFloat("R1_M2", TheLog.R1_M2, "", grp["muon"])
+        self.saveFloat("R1_M3", TheLog.R1_M3, "", grp["muon"])
+        self.saveFloat("R1_M4", TheLog.R1_M4, "", grp["muon"])
+        self.saveFloat("R1_M5", TheLog.R1_M5, "", grp["muon"])
+        self.saveFloat("R2_M1", TheLog.R2_M1, "", grp["muon"])
+        self.saveFloat("R2_M2", TheLog.R2_M2, "", grp["muon"])
+        self.saveFloat("R2_M3", TheLog.R2_M3, "", grp["muon"])
+        self.saveFloat("R2_M4", TheLog.R2_M4, "", grp["muon"])
+        self.saveFloat("R2_M5", TheLog.R2_M5, "", grp["muon"])
+        self.saveFloat("R3_M1", TheLog.R3_M1, "", grp["muon"])
+        self.saveFloat("R3_M2", TheLog.R3_M2, "", grp["muon"])
+        self.saveFloat("R3_M3", TheLog.R3_M3, "", grp["muon"])
+        self.saveFloat("R3_M4", TheLog.R3_M4, "", grp["muon"])
+        self.saveFloat("R3_M5", TheLog.R3_M5, "", grp["muon"])
+        self.saveFloat("R4_M1", TheLog.R4_M1, "", grp["muon"])
+        self.saveFloat("R4_M2", TheLog.R4_M2, "", grp["muon"])
+        self.saveFloat("R4_M3", TheLog.R4_M3, "", grp["muon"])
+        self.saveFloat("R4_M4", TheLog.R4_M4, "", grp["muon"])
+        self.saveFloat("R4_M5", TheLog.R4_M5, "", grp["muon"])
+        """
         self.saveString('InvRichFlags (w/e)', TheLog.InvRichFlags + " +/- " + TheLog.InvRichFlagsErr, 'Av. # Invalid RICH flags', grp["rich"])
         self.saveString('MCRichHitsR1 (w/e)', TheLog.MCRichHitsR1 + " +/- " + TheLog.MCRichHitsR1Err, 'Av. # MC Rich1 Hits', grp["rich"])
         self.saveString('MCRichHitsR2 (w/e)', TheLog.MCRichHitsR2 + " +/- " + TheLog.MCRichHitsR2Err, 'AV. # MC Rich2 Hits', grp["rich"])
@@ -379,49 +734,249 @@ class gaussValidation(BaseHandler):
         self.saveString('MCParticleLessHitsAero', TheLog.MCParticleLessHitsAero + " +/- " + TheLog.MCParticleLessHitsAeroErr, 'Av. # MCParticle-less hits Aero', grp["rich"])
         self.saveString('MCParticleLessHitsC4F10', TheLog.MCParticleLessHitsC4F10 + " +/- " + TheLog.MCParticleLessHitsC4F10Err, 'Av. # MCParticle-less hits Rich1Gas', grp["rich"])
         self.saveString('MCParticleLessHitsCF4', TheLog.MCParticleLessHitsCF4 + " +/- " + TheLog.MCParticleLessHitsCF4Err, 'Av. # MCParticle-less hits Rich2Gas', grp["rich"])
-        '''
-
-        self.saveFloat('InvRichFlags', TheLog.InvRichFlags, 'Av. # Invalid RICH flags', grp["rich"])
-        self.saveFloat('InvRichFlagsErr', TheLog.InvRichFlagsErr, 'Invalid RICH flags error', grp["rich"])
-        self.saveFloat('MCRichHitsR1', TheLog.MCRichHitsR1, 'Av. # MC Rich1 Hits', grp["rich"])
-        self.saveFloat('MCRichHitsR1Err', TheLog.MCRichHitsR1Err, 'MC Rich1 Hits error', grp["rich"])
-        self.saveFloat('MCRichHitsR2', TheLog.MCRichHitsR2, 'AV. # MC Rich2 Hits', grp["rich"])
-        self.saveFloat('MCRichHitsR2Err', TheLog.MCRichHitsR2Err, 'MC Rich2 Hits error', grp["rich"])
-        self.saveFloat('InvRadHitsR1', TheLog.InvRadHitsR1, 'Av. # Invalid radiator Rich1 hits', grp["rich"])
-        self.saveFloat('InvRadHitsR1Err', TheLog.InvRadHitsR1Err, 'Av. # Invalid radiator Rich1 hits error', grp["rich"])
-        self.saveFloat('InvRadHitsR2', TheLog.InvRadHitsR2, 'Av. # Invalid radiator Rich1 hits', grp["rich"])
-        self.saveFloat('InvRadHitsR2Err', TheLog.InvRadHitsR2Err, 'Av. # Invalid radiator Rich1 hits error', grp["rich"])
-        self.saveFloat('SignalHitsR1', TheLog.SignalHitsR1, 'Av. # Signal Hits Rich1', grp["rich"])
-        self.saveFloat('SignalHitsR1Err', TheLog.SignalHitsR1Err, 'Av. # Signal Hits Rich1 error', grp["rich"])
-        self.saveFloat('SignalHitsR2', TheLog.SignalHitsR2, 'Av. # Signal Hits Rich2', grp["rich"])
-        self.saveFloat('SignalHitsR2Err', TheLog.SignalHitsR2Err, 'Av. # Signal Hits Rich2 error', grp["rich"])
-        self.saveFloat('GasQuartzCKHitsR1', TheLog.GasQuartzCKHitsR1, 'Av. # Gas Quartz CK Rich1 hits', grp["rich"])
-        self.saveFloat('GasQuartzCKHitsR1Err', TheLog.GasQuartzCKHitsR1Err, 'Av. # Gas Quartz CK Rich1 hits error', grp["rich"])
-        self.saveFloat('GasQuartzCKHitsR2', TheLog.GasQuartzCKHitsR2, 'Av. # Gas Quartz CK Rich2 hits', grp["rich"])
-        self.saveFloat('GasQuartzCKHitsR2Err', TheLog.GasQuartzCKHitsR2Err, 'Av. # Gas Quartz CK Rich2 hits error', grp["rich"])
-        self.saveFloat('HPDQuartzCKHitsR1', TheLog.HPDQuartzCKHitsR1, 'Av. # HPD Quartz CK Rich1 hits', grp["rich"])
-        self.saveFloat('HPDQuartzCKHitsR1Err', TheLog.HPDQuartzCKHitsR1Err, 'Av. # HPD Quartz CK Rich1 hits error', grp["rich"])
-        self.saveFloat('HPDQuartzCKHitsR2', TheLog.HPDQuartzCKHitsR1, 'Av. # HPD Quartz CK Rich2 hits', grp["rich"])
-        self.saveFloat('HPDQuartzCKHitsR2Err', TheLog.HPDQuartzCKHitsR1Err, 'Av. # HPD Quartz CK Rich2 hits error', grp["rich"])
-        self.saveFloat('NitrogenCKHitsR1', TheLog.NitrogenCKHitsR1, 'Av. # Nitrogen CK Rich1 hits', grp["rich"])
-        self.saveFloat('NitrogenCKHitsR1Err', TheLog.NitrogenCKHitsR1Err, 'Av. # Nitrogen CK Rich1 hits error', grp["rich"])
-        self.saveFloat('NitrogenCKHitsR2', TheLog.NitrogenCKHitsR2, 'Av. # Nitrogen CK Rich2 hits', grp["rich"])
-        self.saveFloat('NitrogenCKHitsR2Err', TheLog.NitrogenCKHitsR2Err, 'Av. # Nitrogen CK Rich2 hits error', grp["rich"])
-        self.saveFloat('SignalCKAero', TheLog.SignalCKAero, 'Av. # Signal CK MCRichHits Aero', grp["rich"])
-        self.saveFloat('SignalCKAeroErr', TheLog.SignalCKAeroErr, 'Av. # Signal CK MCRichHits Aero error', grp["rich"])
-        self.saveFloat('SignalCKC4F10', TheLog.SignalCKC4F10, 'Av. # Signal CK MCRichHits Rich1Gas', grp["rich"])
-        self.saveFloat('SignalCKC4F10Err', TheLog.SignalCKC4F10Err, 'Av. # Signal CK MCRichHits Rich1Gas error', grp["rich"])
-        self.saveFloat('SignalCKCF4', TheLog.SignalCKCF4, 'Av. # Signal CK MCRichHits Rich2Gas', grp["rich"])
-        self.saveFloat('SignalCKCF4Err', TheLog.SignalCKCF4Err, 'Av. # Signal CK MCRichHits Rich2Gas error', grp["rich"])
-        self.saveFloat('ScatteredHitsAero', TheLog.ScatteredHitsAero, 'Av. # Rayleigh scattered hits Aero', grp["rich"])
-        self.saveFloat('ScatteredHitsAeroErr', TheLog.ScatteredHitsAeroErr, 'Av. # Rayleigh scattered hits Aero error', grp["rich"])
-        self.saveFloat('ScatteredHitsC4F10', TheLog.ScatteredHitsC4F10, 'Av. # Rayleigh scattered hits Rich1Gas', grp["rich"])
-        self.saveFloat('ScatteredHitsC4F10Err', TheLog.ScatteredHitsC4F10Err, 'Av. # Rayleigh scattered hits Rich1Gas error', grp["rich"])
-        self.saveFloat('ScatteredHitsCF4', TheLog.ScatteredHitsCF4, 'Av. # Rayleigh scattered hits Rich2Gas', grp["rich"])
-        self.saveFloat('ScatteredHitsCF4Err', TheLog.ScatteredHitsCF4Err, 'Av. # Rayleigh scattered hits Rich2Gas error', grp["rich"])
-        self.saveFloat('MCParticleLessHitsAero', TheLog.MCParticleLessHitsAero, 'Av. # MCParticle-less hits Aero', grp["rich"])
-        self.saveFloat('MCParticleLessHitsAeroErr', TheLog.MCParticleLessHitsAeroErr, 'Av. # MCParticle-less hits Aero error', grp["rich"])
-        self.saveFloat('MCParticleLessHitsC4F10', TheLog.MCParticleLessHitsC4F10, 'Av. # MCParticle-less hits Rich1Gas', grp["rich"])
-        self.saveFloat('MCParticleLessHitsC4F10Err', TheLog.MCParticleLessHitsC4F10Err, 'Av. # MCParticle-less hits Rich1Gas error', grp["rich"])
-        self.saveFloat('MCParticleLessHitsCF4', TheLog.MCParticleLessHitsCF4, 'Av. # MCParticle-less hits Rich2Gas', grp["rich"])
-        self.saveFloat('MCParticleLessHitsCF4Err', TheLog.MCParticleLessHitsCF4Err, 'Av. # MCParticle-less hits Rich2Gas error', grp["rich"])
+        """
+
+        self.saveFloat("InvRichFlags", TheLog.InvRichFlags,
+                       "Av. # Invalid RICH flags", grp["rich"])
+        self.saveFloat(
+            "InvRichFlagsErr",
+            TheLog.InvRichFlagsErr,
+            "Invalid RICH flags error",
+            grp["rich"],
+        )
+        self.saveFloat("MCRichHitsR1", TheLog.MCRichHitsR1,
+                       "Av. # MC Rich1 Hits", grp["rich"])
+        self.saveFloat(
+            "MCRichHitsR1Err",
+            TheLog.MCRichHitsR1Err,
+            "MC Rich1 Hits error",
+            grp["rich"],
+        )
+        self.saveFloat("MCRichHitsR2", TheLog.MCRichHitsR2,
+                       "AV. # MC Rich2 Hits", grp["rich"])
+        self.saveFloat(
+            "MCRichHitsR2Err",
+            TheLog.MCRichHitsR2Err,
+            "MC Rich2 Hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "InvRadHitsR1",
+            TheLog.InvRadHitsR1,
+            "Av. # Invalid radiator Rich1 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "InvRadHitsR1Err",
+            TheLog.InvRadHitsR1Err,
+            "Av. # Invalid radiator Rich1 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "InvRadHitsR2",
+            TheLog.InvRadHitsR2,
+            "Av. # Invalid radiator Rich1 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "InvRadHitsR2Err",
+            TheLog.InvRadHitsR2Err,
+            "Av. # Invalid radiator Rich1 hits error",
+            grp["rich"],
+        )
+        self.saveFloat("SignalHitsR1", TheLog.SignalHitsR1,
+                       "Av. # Signal Hits Rich1", grp["rich"])
+        self.saveFloat(
+            "SignalHitsR1Err",
+            TheLog.SignalHitsR1Err,
+            "Av. # Signal Hits Rich1 error",
+            grp["rich"],
+        )
+        self.saveFloat("SignalHitsR2", TheLog.SignalHitsR2,
+                       "Av. # Signal Hits Rich2", grp["rich"])
+        self.saveFloat(
+            "SignalHitsR2Err",
+            TheLog.SignalHitsR2Err,
+            "Av. # Signal Hits Rich2 error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "GasQuartzCKHitsR1",
+            TheLog.GasQuartzCKHitsR1,
+            "Av. # Gas Quartz CK Rich1 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "GasQuartzCKHitsR1Err",
+            TheLog.GasQuartzCKHitsR1Err,
+            "Av. # Gas Quartz CK Rich1 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "GasQuartzCKHitsR2",
+            TheLog.GasQuartzCKHitsR2,
+            "Av. # Gas Quartz CK Rich2 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "GasQuartzCKHitsR2Err",
+            TheLog.GasQuartzCKHitsR2Err,
+            "Av. # Gas Quartz CK Rich2 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "HPDQuartzCKHitsR1",
+            TheLog.HPDQuartzCKHitsR1,
+            "Av. # HPD Quartz CK Rich1 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "HPDQuartzCKHitsR1Err",
+            TheLog.HPDQuartzCKHitsR1Err,
+            "Av. # HPD Quartz CK Rich1 hits error",
+            grp["rich"],
+        )
+        # NOTE: fixed to use the Rich2 values; previously these re-used the
+        # Rich1 quantities (apparent copy-paste slip).
+        self.saveFloat(
+            "HPDQuartzCKHitsR2",
+            TheLog.HPDQuartzCKHitsR2,
+            "Av. # HPD Quartz CK Rich2 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "HPDQuartzCKHitsR2Err",
+            TheLog.HPDQuartzCKHitsR2Err,
+            "Av. # HPD Quartz CK Rich2 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "NitrogenCKHitsR1",
+            TheLog.NitrogenCKHitsR1,
+            "Av. # Nitrogen CK Rich1 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "NitrogenCKHitsR1Err",
+            TheLog.NitrogenCKHitsR1Err,
+            "Av. # Nitrogen CK Rich1 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "NitrogenCKHitsR2",
+            TheLog.NitrogenCKHitsR2,
+            "Av. # Nitrogen CK Rich2 hits",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "NitrogenCKHitsR2Err",
+            TheLog.NitrogenCKHitsR2Err,
+            "Av. # Nitrogen CK Rich2 hits error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKAero",
+            TheLog.SignalCKAero,
+            "Av. # Signal CK MCRichHits Aero",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKAeroErr",
+            TheLog.SignalCKAeroErr,
+            "Av. # Signal CK MCRichHits Aero error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKC4F10",
+            TheLog.SignalCKC4F10,
+            "Av. # Signal CK MCRichHits Rich1Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKC4F10Err",
+            TheLog.SignalCKC4F10Err,
+            "Av. # Signal CK MCRichHits Rich1Gas error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKCF4",
+            TheLog.SignalCKCF4,
+            "Av. # Signal CK MCRichHits Rich2Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "SignalCKCF4Err",
+            TheLog.SignalCKCF4Err,
+            "Av. # Signal CK MCRichHits Rich2Gas error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsAero",
+            TheLog.ScatteredHitsAero,
+            "Av. # Rayleigh scattered hits Aero",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsAeroErr",
+            TheLog.ScatteredHitsAeroErr,
+            "Av. # Rayleigh scattered hits Aero error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsC4F10",
+            TheLog.ScatteredHitsC4F10,
+            "Av. # Rayleigh scattered hits Rich1Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsC4F10Err",
+            TheLog.ScatteredHitsC4F10Err,
+            "Av. # Rayleigh scattered hits Rich1Gas error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsCF4",
+            TheLog.ScatteredHitsCF4,
+            "Av. # Rayleigh scattered hits Rich2Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "ScatteredHitsCF4Err",
+            TheLog.ScatteredHitsCF4Err,
+            "Av. # Rayleigh scattered hits Rich2Gas error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsAero",
+            TheLog.MCParticleLessHitsAero,
+            "Av. # MCParticle-less hits Aero",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsAeroErr",
+            TheLog.MCParticleLessHitsAeroErr,
+            "Av. # MCParticle-less hits Aero error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsC4F10",
+            TheLog.MCParticleLessHitsC4F10,
+            "Av. # MCParticle-less hits Rich1Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsC4F10Err",
+            TheLog.MCParticleLessHitsC4F10Err,
+            "Av. # MCParticle-less hits Rich1Gas error",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsCF4",
+            TheLog.MCParticleLessHitsCF4,
+            "Av. # MCParticle-less hits Rich2Gas",
+            grp["rich"],
+        )
+        self.saveFloat(
+            "MCParticleLessHitsCF4Err",
+            TheLog.MCParticleLessHitsCF4Err,
+            "Av. # MCParticle-less hits Rich2Gas error",
+            grp["rich"],
+        )
diff --git a/handlers/hlt/EventSizeParser.py b/handlers/hlt/EventSizeParser.py
index 85694d02326a3d3c3841e273e942b22501253520..62a4662b9877081811c32da9a4112db0971c0f5b 100644
--- a/handlers/hlt/EventSizeParser.py
+++ b/handlers/hlt/EventSizeParser.py
@@ -4,12 +4,12 @@ from os import path
 
 
 def eventSizeParser(filename):
-    """ Class for parsing the information for the EventSize table
+    """Class for parsing the information for the EventSize table
     Args:
         folder (str): folder where the input is and where the output will be stored
         filename (str): filename containing the output from the hlt1 step of the ratetests
     """
-    
+
     rootFile = ROOT.TFile(filename)
     treeRB = rootFile.Get("TupleHlt1")
     eventSizes = {}
@@ -18,26 +18,26 @@ def eventSizeParser(filename):
     eventFileName = path.join(folder, "EventSize.root")
     eventFile = ROOT.TFile(eventFileName, "recreate")
 
-    names = {46:"HLT1",
-             87:"Full",
-             88:"Turbo",
-             90:"Turbocalib"}
-
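+    # Routing-bit numbers identifying the output streams whose event sizes
+    # are histogrammed below (assumed to follow the routing-bit convention
+    # used by these rate tests).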
+    names = {46: "HLT1", 87: "Full", 88: "Turbo", 90: "Turbocalib"}
 
     for RB, name in list(names.items()):
-        eventSizes['RB%s' % RB] = {'name' : name}
-        
+        eventSizes["RB%s" % RB] = {"name": name}
+
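+        # For each raw-event flavour, draw the per-event size (bytes -> kB
+        # via the 1.e-3 factor) into a histogram, keeping only events where
+        # this routing bit fired (size > 0).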
         for RawEventType in ["Full", "Turbo", "persistReco"]:
-            c = ROOT.TCanvas('RB%s_%s'  % (RB, RawEventType))
+            c = ROOT.TCanvas("RB%s_%s" % (RB, RawEventType))
             histname = RawEventType + str(RB)
             hist = ROOT.TH1F(histname, "Routing bit " + str(RB), 100, 0, 200)
-            treeRB.Draw(RawEventType + "RB%s*1.e-3>>%s" % (RB, histname), RawEventType + "RB%s>0" % RB)
-            hist.SetXTitle(RawEventType+ "event size (kB)")
+            treeRB.Draw(
+                RawEventType + "RB%s*1.e-3>>%s" % (RB, histname),
+                RawEventType + "RB%s>0" % RB,
+            )
+            hist.SetXTitle(RawEventType + " event size (kB)")
             hist.Draw("HIST")
             hist.SetFillColor(ROOT.kYellow)
-            hist.SetLineColor(ROOT.kYellow+1)
-            eventSizes['RB%s_%s_mean' % (RB, RawEventType) ] = hist.GetMean()
-            eventSizes['RB%s_%s_err' % (RB, RawEventType) ] = hist.GetMeanError()
+            hist.SetLineColor(ROOT.kYellow + 1)
+            eventSizes["RB%s_%s_mean" % (RB, RawEventType)] = hist.GetMean()
+            eventSizes["RB%s_%s_err" % (RB,
+                                        RawEventType)] = hist.GetMeanError()
 
             eventFile.cd()
             c.Write()
@@ -47,13 +47,11 @@ def eventSizeParser(filename):
     eventFile.Close()
 
     results = {}
-    results['eventSizes'] = eventSizes
-    results['eventFileName'] = eventFileName
+    results["eventSizes"] = eventSizes
+    results["eventFileName"] = eventFileName
 
     return results
 
-if __name__ == "__main__":
 
+if __name__ == "__main__":
     eventSizeParser(sys.argv[1])
-
-
diff --git a/handlers/hlt/HLT1TupleParser.py b/handlers/hlt/HLT1TupleParser.py
index b33ae2173967e690daca4a672fa9d4ffafd7069c..bf74a4e8e7b47570223149b1c225282604df3f6e 100755
--- a/handlers/hlt/HLT1TupleParser.py
+++ b/handlers/hlt/HLT1TupleParser.py
@@ -4,60 +4,65 @@ from array import array
 
 from .StreamDefs import _Streams
 
-RegexList = ["Hlt1.*",
-             "Hlt1LowMult.*",
-             "Hlt1(Di|Multi)Muon.*",
-             "Hlt1SingleMuon.*",
-             "Hlt1.*Electron.*",
-             "Hlt1B2.*",
-             "Hlt1IncPhi.*",
-             "Hlt1TrackMuon.*",
-             "Hlt1.*TrackMVA.*",
-             "Hlt1CalibTracking.*",
-             "Hlt1DiProton.*",
-             "OTHER"]
+RegexList = [
+    "Hlt1.*",
+    "Hlt1LowMult.*",
+    "Hlt1(Di|Multi)Muon.*",
+    "Hlt1SingleMuon.*",
+    "Hlt1.*Electron.*",
+    "Hlt1B2.*",
+    "Hlt1IncPhi.*",
+    "Hlt1TrackMuon.*",
+    "Hlt1.*TrackMVA.*",
+    "Hlt1CalibTracking.*",
+    "Hlt1DiProton.*",
+    "OTHER",
+]
 
 
 def populateDicts(ByRegex, Hlt1Lines, treeHlt1):
-
     allHLT1Branches = treeHlt1.GetListOfBranches()
 
     ### POPULATE ByRegex
 
     # Add all regex groups to dict and populate branches for use later
     for RE in RegexList:
-        ByRegex[RE] = {"hlt1processed":treeHlt1.GetEntries(),
-                       "PassEvent":False,
-                       "MatchingLines":[],
-                       ####
-                       "passed_inclusive":0,
-                       "rate_inclusive":0.,
-                       "rate_inclusive_err":0.,
-                       "MBs_inclusive":0,
-                       "kBe_inclusive":0,
-                       ###
-                       "passed_exclusive":0,
-                       "rate_exclusive":0.,
-                       "rate_exclusive_err":0.,
-                       "MBs_exclusive":0,
-                       "kBe_exclusive":0}
+        ByRegex[RE] = {
+            "hlt1processed": treeHlt1.GetEntries(),
+            "PassEvent": False,
+            "MatchingLines": [],
+            ####
+            "passed_inclusive": 0,
+            "rate_inclusive": 0.0,
+            "rate_inclusive_err": 0.0,
+            "MBs_inclusive": 0,
+            "kBe_inclusive": 0,
+            ###
+            "passed_exclusive": 0,
+            "rate_exclusive": 0.0,
+            "rate_exclusive_err": 0.0,
+            "MBs_exclusive": 0,
+            "kBe_exclusive": 0,
+        }
 
     # Find all Lines which match each Regex expression and store their names
     for RE in list(ByRegex.keys()):
         for branch in allHLT1Branches:
             branchName = branch.GetName()
            # Skip branches not containing HLT1 data
-            if not branchName.startswith("Hlt1"): continue
+            if not branchName.startswith("Hlt1"):
+                continue
             # Add branch name to this regex if it matches
             if re.match(RE, branchName, flags=0):
-                 ByRegex[RE]["MatchingLines"].append(branchName)
+                ByRegex[RE]["MatchingLines"].append(branchName)
 
     # Make a set of all lines
     ByRegex["OTHER"]["MatchingLines"] = set(ByRegex["Hlt1.*"]["MatchingLines"])
     # Remove the lines we've seen elsewhere
     for RE in list(ByRegex.keys()):
         if not RE == "Hlt1.*" and not RE == "OTHER":
-            ByRegex["OTHER"]["MatchingLines"] -= set(ByRegex[RE]["MatchingLines"])
+            ByRegex["OTHER"]["MatchingLines"] -= set(
+                ByRegex[RE]["MatchingLines"])
 
     # Create a list of non matched lines
     ByRegex["OTHER"]["MatchingLines"] = list(ByRegex["OTHER"]["MatchingLines"])
@@ -66,19 +71,21 @@ def populateDicts(ByRegex, Hlt1Lines, treeHlt1):
 
     for branch in allHLT1Branches:
         branchName = branch.GetName()
-        if not branchName.startswith("Hlt1"): continue
-        Hlt1Lines[branchName] = {"passed":0,
-                                 "passed_unique":0,
-                                 "hlt1processed":treeHlt1.GetEntries(),
-                                 "Bytes_incl":0.0,
-                                 "rate":0,
-                                 "rate_unique":0,
-                                 "rate_unique_err":0,
-                                 "rate_err":0}
+        if not branchName.startswith("Hlt1"):
+            continue
+        Hlt1Lines[branchName] = {
+            "passed": 0,
+            "passed_unique": 0,
+            "hlt1processed": treeHlt1.GetEntries(),
+            "Bytes_incl": 0.0,
+            "rate": 0,
+            "rate_unique": 0,
+            "rate_unique_err": 0,
+            "rate_err": 0,
+        }
 
 
 def countEvents(ByRegex, Hlt1Lines, treeHlt1):
-
     ##########################################################
     ######### EVENT LOOP #####################################
     ##########################################################
@@ -89,12 +96,12 @@ def countEvents(ByRegex, Hlt1Lines, treeHlt1):
     variables = {}
     # Do this for all HLT1 lines in the tuple
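+    # array("I", [0]) allocates a one-element unsigned-int buffer; ROOT's
+    # SetBranchAddress binds it to the branch so each GetEntry() call fills
+    # it in place.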
     for line in list(Hlt1Lines.keys()):
-        variables[line] = array('I', [0])
+        variables[line] = array("I", [0])
         treeBranchName = line
-        treeHlt1.SetBranchAddress( treeBranchName, variables[line] )
+        treeHlt1.SetBranchAddress(treeBranchName, variables[line])
     # Add special FullEventSize
-    variables['FullEventSize'] = array('f', [0])
-    treeHlt1.SetBranchAddress( 'FullEventSize', variables['FullEventSize'] )
+    variables["FullEventSize"] = array("f", [0])
+    treeHlt1.SetBranchAddress("FullEventSize", variables["FullEventSize"])
 
     # Global counters
     iEntry = 0
@@ -103,10 +110,10 @@ def countEvents(ByRegex, Hlt1Lines, treeHlt1):
 
     # Read the next entry
     while treeHlt1.GetEntry(iEntry):
-        iEntry+=1
+        iEntry += 1
 
         if iEntry % 1000 == 0:
-            print('processed %s/%s' %(iEntry,treeHlt1.GetEntries()))
+            print("processed %s/%s" % (iEntry, treeHlt1.GetEntries()))
 
         this_pass_any = 0
         for RE in list(ByRegex.keys()):
@@ -118,7 +125,8 @@ def countEvents(ByRegex, Hlt1Lines, treeHlt1):
             # If this HLT line triggered
             if decThisLine == 1:
                 # Add to the total event size counter
-                Hlt1Lines[Hlt1Line]["Bytes_incl"] += variables["FullEventSize"][0]
+                Hlt1Lines[Hlt1Line]["Bytes_incl"] += variables[
+                    "FullEventSize"][0]
 
                 # Set PassEvent in the Regex group(s) that this HLT line matches
                 for RE in list(ByRegex.keys()):
@@ -132,7 +140,8 @@ def countEvents(ByRegex, Hlt1Lines, treeHlt1):
                 # Test to see if this event is unique to this HLT line
                 is_unique = 1
                 for other_Hlt1Line in list(Hlt1Lines.keys()):
-                    if not other_Hlt1Line == Hlt1Line and variables[other_Hlt1Line][0] == 1:
+                    if (other_Hlt1Line != Hlt1Line
+                            and variables[other_Hlt1Line][0] == 1):
                         is_unique = 0
                 Hlt1Lines[Hlt1Line]["passed_unique"] += is_unique
 
@@ -152,81 +161,95 @@ def countEvents(ByRegex, Hlt1Lines, treeHlt1):
 
         for RE in list(ByRegex.keys()):
             # is the event exclusive and/or inclusive?
-            opts = {"inclusive" : ByRegex[RE]["PassEvent"] == True,
-                    "exclusive" : ByRegex[RE]["PassEvent"] == True and nRegexFired == 1 and not RE == "Hlt1.*"}
+            opts = {
+                "inclusive": ByRegex[RE]["PassEvent"],
+                "exclusive": (ByRegex[RE]["PassEvent"] and nRegexFired == 1
+                              and RE != "Hlt1.*"),
+            }
 
             if RE == "OTHER":
                 ## inclusive and exclusive are the same *by definition*
-                opts["inclusive"] = nRegexFired == 0 and ByRegex["Hlt1.*"]["PassEvent"]
+                opts["inclusive"] = nRegexFired == 0 and ByRegex["Hlt1.*"][
+                    "PassEvent"]
                 opts["exclusive"] = opts["inclusive"]
-            #if RE == "Hlt1.*":
+            # if RE == "Hlt1.*":
             #    print opts
             for opt in list(opts.keys()):
                 if opts[opt] is True:
-                    ByRegex[RE]["passed_"+opt] += 1
-                    ByRegex[RE]["MBs_"+opt] += variables["FullEventSize"][0]/2.**20
-                    ByRegex[RE]["kBe_"+opt] += variables["FullEventSize"][0]/2.**10
+                    ByRegex[RE]["passed_" + opt] += 1
+                    ByRegex[RE]["MBs_" + opt] += (
+                        variables["FullEventSize"][0] / 2.0**20)
+                    ByRegex[RE]["kBe_" + opt] += (
+                        variables["FullEventSize"][0] / 2.0**10)
 
     ##########################################################
     ######### END EVENT LOOP #####################################
     ##########################################################
 
     stats = {}
-    stats['pass_any'] = pass_any
-    stats['TotalBytes'] = TotalBytes
+    stats["pass_any"] = pass_any
+    stats["TotalBytes"] = TotalBytes
 
     return stats
 
 
-def binomErr(eff,N):
-    return math.sqrt((eff*(1-eff))/N)
+def binomErr(eff, N):
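+    # Binomial standard error on an efficiency estimated from N trials:
+    # sigma = sqrt(eff * (1 - eff) / N).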
+    return math.sqrt((eff * (1 - eff)) / N)
 
 
 def calculateRates(ByRegex, Hlt1Lines, treeHlt1, stats, inputrate):
-
-    pass_any = stats['pass_any']
-    TotalBytes = stats['TotalBytes']
-
-    Hlt1Lines["All"] = {"passed" : pass_any,
-                        "passed_unique" : 0,
-                        "hlt1processed" : treeHlt1.GetEntries(),
-                        "Bytes_incl" : TotalBytes,
-                        "rate" : 0,
-                        "rate_unique" : 0,
-                        "rate_unique_err" : 0,
-                        "rate_err" : 0}
+    pass_any = stats["pass_any"]
+    TotalBytes = stats["TotalBytes"]
+
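+    # "All" is a synthetic entry aggregating every HLT1 line so the total
+    # rate and bandwidth are reported alongside the per-line numbers.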
+    Hlt1Lines["All"] = {
+        "passed": pass_any,
+        "passed_unique": 0,
+        "hlt1processed": treeHlt1.GetEntries(),
+        "Bytes_incl": TotalBytes,
+        "rate": 0,
+        "rate_unique": 0,
+        "rate_unique_err": 0,
+        "rate_err": 0,
+    }
 
     nHlt1 = float(treeHlt1.GetEntries())
 
     for Hlt1LineName, Hlt1Line in list(Hlt1Lines.items()):
-        MegaBytes_total = 1.e-6 * Hlt1Line["Bytes_incl"]
+        MegaBytes_total = 1.0e-6 * Hlt1Line["Bytes_incl"]
         Hlt1Line["MBs_incl"] = MegaBytes_total * inputrate / nHlt1
         Hlt1Line["kBe_incl"] = 0.0
         if Hlt1Line["passed"] > 0:
-            Hlt1Line["kBe_incl"] = 1.e-3*MegaBytes_total * inputrate / float(Hlt1Line["passed"])
+            Hlt1Line["kBe_incl"] = (1.0e-3 * MegaBytes_total * inputrate /
+                                    float(Hlt1Line["passed"]))
         this_rate = Hlt1Line["passed"] / nHlt1
         Hlt1Line["rate"] = inputrate * this_rate
         Hlt1Line["rate_err"] = inputrate * binomErr(this_rate, nHlt1)
         this_unique_rate = Hlt1Line["passed_unique"] / nHlt1
         Hlt1Line["rate_unique"] = inputrate * this_unique_rate
-        Hlt1Line["rate_unique_err"] = inputrate * binomErr(this_unique_rate, nHlt1)
+        Hlt1Line["rate_unique_err"] = inputrate * binomErr(
+            this_unique_rate, nHlt1)
 
     for RE, ThisRegex in list(ByRegex.items()):
-        for opt in ["inclusive","exclusive"]:
-            if ThisRegex["passed_"+opt] > 0:
-                 ThisRegex["kBe_"+opt] = ThisRegex["kBe_"+opt] / float(ThisRegex["passed_"+opt])
-            ThisRegex["MBs_"+opt] = inputrate * ThisRegex["MBs_"+opt] / nHlt1
+        for opt in ["inclusive", "exclusive"]:
+            if ThisRegex["passed_" + opt] > 0:
+                ThisRegex["kBe_" + opt] = ThisRegex["kBe_" + opt] / float(
+                    ThisRegex["passed_" + opt])
+            ThisRegex["MBs_" +
+                      opt] = inputrate * ThisRegex["MBs_" + opt] / nHlt1
 
         this_rate = ThisRegex["passed_inclusive"] / nHlt1
         ThisRegex["rate_inclusive"] = inputrate * this_rate
-        ThisRegex["rate_inclusive_err"] = inputrate * binomErr(this_rate, nHlt1)
+        ThisRegex["rate_inclusive_err"] = inputrate * binomErr(
+            this_rate, nHlt1)
         this_unique_rate = ThisRegex["passed_exclusive"] / nHlt1
         ThisRegex["rate_exclusive"] = inputrate * this_unique_rate
-        ThisRegex["rate_exclusive_err"] = inputrate * binomErr(this_unique_rate, nHlt1)
-
+        ThisRegex["rate_exclusive_err"] = inputrate * binomErr(
+            this_unique_rate, nHlt1)
 
-def ParseHLT1Output(hlt1_filename, inputrate=1.e6):
 
+def ParseHLT1Output(hlt1_filename, inputrate=1.0e6):
     rootFile = ROOT.TFile(hlt1_filename)
     treeHlt1 = rootFile.Get("TupleHlt1")
 
@@ -240,28 +263,27 @@ def ParseHLT1Output(hlt1_filename, inputrate=1.e6):
     calculateRates(ByRegex, Hlt1Lines, treeHlt1, stats, inputrate)
 
     results = {}
-    results['Hlt1Lines'] = Hlt1Lines
-    results['ByRegex'] = ByRegex
+    results["Hlt1Lines"] = Hlt1Lines
+    results["ByRegex"] = ByRegex
 
     return results
 
 
-if __name__ == '__main__':
-
+if __name__ == "__main__":
     if len(sys.argv) < 2:
         print("Usage:   %s fileNameHLT1.root" % sys.argv[0])
         sys.exit()
 
     filename = sys.argv[1]
 
-    results = ParseHLT1Output( filename )
+    results = ParseHLT1Output(filename)
 
-    JSONDIR = os.path.abspath(os.path.dirname( filename ))
+    JSONDIR = os.path.abspath(os.path.dirname(filename))
 
-    with open(os.path.join(JSONDIR, "Hlt1Decisions.json"), 'w') as _file:
-        print('writing json file: ' + _file.name)
-        json.dump(results['Hlt1Lines'], _file)
+    with open(os.path.join(JSONDIR, "Hlt1Decisions.json"), "w") as _file:
+        print("writing json file: " + _file.name)
+        json.dump(results["Hlt1Lines"], _file)
 
-    with open(os.path.join(JSONDIR, "Hlt1ByRegex.json"), 'w') as _file:
-        print('writing json file: ' + _file.name)
-        json.dump(results['ByRegex'], _file)
+    with open(os.path.join(JSONDIR, "Hlt1ByRegex.json"), "w") as _file:
+        print("writing json file: " + _file.name)
+        json.dump(results["ByRegex"], _file)
diff --git a/handlers/hlt/HLT2TupleParser.py b/handlers/hlt/HLT2TupleParser.py
index f516725ae28bf2e982704f682a03cab23f73acd8..5dfd7b09e663202d9597ffd0d75d05fffc4ce17e 100755
--- a/handlers/hlt/HLT2TupleParser.py
+++ b/handlers/hlt/HLT2TupleParser.py
@@ -1,82 +1,105 @@
 #!/bin/env python
-import os,sys,math,re,ROOT,json
+import os, sys, math, re, ROOT, json
 from array import array
 
+
 def rate(scale, numer, denom):
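+    # Convert the pass fraction numer/denom into a rate at the given input
+    # scale, returned together with its binomial error.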
     eff = float(numer) / float(denom)
     err = math.sqrt((eff - eff**2) / denom)
     return [scale * eff, scale * err]
 
+
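+# Stream classifiers: a line counts as Turbo if its name contains "Turbo"
+# but not "TurboCalib", as TurboCalib if it contains "TurboCalib", and as
+# Full if it contains neither "Turbo" nor "Calib".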
 def isTurbo(name):
-    if re.match("Hlt2.(?!.*?TurboCalib).*Turbo", name, flags=0): return 1
-    else: return 0
+    if re.match("Hlt2.(?!.*?TurboCalib).*Turbo", name, flags=0):
+        return 1
+    else:
+        return 0
+
 
 def isTurboCalib(name):
-    if re.match("Hlt2.*TurboCalib", name, flags=0): return 1
-    else: return 0
+    if re.match("Hlt2.*TurboCalib", name, flags=0):
+        return 1
+    else:
+        return 0
+
 
 def isFull(name):
-    if re.match("Hlt2.(?!.*?Turbo).(?!.*?Calib)", name, flags=0): return 1
-    else: return 0
+    if re.match("Hlt2.(?!.*?Turbo).(?!.*?Calib)", name, flags=0):
+        return 1
+    else:
+        return 0
 
 
 from .StreamDefs import _Streams
 
 USED_STREAMS = ["AllStreams", "Modules"]
 
-def populateDicts(Streams, decisions, branches, branch_names, persistRecoLines, nHlt1, nHlt2):
 
+def populateDicts(Streams, decisions, branches, branch_names, persistRecoLines,
+                  nHlt1, nHlt2):
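+    # Initialise the per-stream, per-module counters; the rate fields are
+    # filled in later from these raw counts.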
     for OPT in USED_STREAMS:
         Streams[OPT] = {}
         for Mod in _Streams[OPT]:
-            Streams[OPT][Mod[0]] = {"name" : Mod[1],
-                                    "pass_current" : 0,
-                                    "rate_unique" : 0,
-                                    "passed_unique" : 0,
-                                    "passed_TurboCalib" : 0,
-                                    "passed_Turbo" : 0,
-                                    "passed_Full" : 0,
-                                    "Bytes_incl" : 0,
-                                    "lines" : 0,
-                                    "lines_Turbo" : 0,
-                                    "lines_TurboCalib" : 0,
-                                    "lines_Full" : 0,
-                                    "rate" : 0,
-                                    "rate_err" : 0,
-                                    "rate_Turbo" : 0,
-                                    "rate_Full" : 0,
-                                    "rate_TurboCalib" : 0,
-                                    "passed" : 0}
+            Streams[OPT][Mod[0]] = {
+                "name": Mod[1],
+                "pass_current": 0,
+                "rate_unique": 0,
+                "passed_unique": 0,
+                "passed_TurboCalib": 0,
+                "passed_Turbo": 0,
+                "passed_Full": 0,
+                "Bytes_incl": 0,
+                "lines": 0,
+                "lines_Turbo": 0,
+                "lines_TurboCalib": 0,
+                "lines_Full": 0,
+                "rate": 0,
+                "rate_err": 0,
+                "rate_Turbo": 0,
+                "rate_Full": 0,
+                "rate_TurboCalib": 0,
+                "passed": 0,
+            }
 
     for branch in branches:
         branchName = branch.GetName()
-        if not branchName.startswith("Hlt2"): continue ## since we now have branches other than just HLT2 lines
+        if not branchName.startswith("Hlt2"):
+            continue  ## since we now have branches other than just HLT2 lines
 
-        branchName = branchName.replace("Decision","")
+        branchName = branchName.replace("Decision", "")
 
         branch_names.append(branchName)
-        decisions[branchName] = {"passed" : 0,
-                                 "passed_unique" : 0,
-                                 "hlt1processed" : nHlt1,
-                                 "hlt2processed" : nHlt2,
-                                 "Bytes_incl" : 0.,
-                                 "BytesPR_incl" : 0.,
-                                 "persistReco" : int(branchName in persistRecoLines),
-                                 "rate" : 0,
-                                 "rate_err" : 0,
-                                 "rate_unique" : 0,
-                                 "rate_unique_err" : 0}
+        decisions[branchName] = {
+            "passed": 0,
+            "passed_unique": 0,
+            "hlt1processed": nHlt1,
+            "hlt2processed": nHlt2,
+            "Bytes_incl": 0.0,
+            "BytesPR_incl": 0.0,
+            "persistReco": int(branchName in persistRecoLines),
+            "rate": 0,
+            "rate_err": 0,
+            "rate_unique": 0,
+            "rate_unique_err": 0,
+        }
 
     #### overlap between lines and streams
     for k, v in list(decisions.items()):
-        for Stream in ["Turbo","Full","TurboCalib","TurboAndFull","TurboAndTurboCalib","FullAndTurboCalib","TurboAndNotFull"]:
-            v['passed_'+Stream] = 0.0
-            v['rate_'+Stream] = 0.0
-            v['rate_err_'+Stream] = 0.0
-            v['passed_unique_'+Stream] = 0.0
-            v['rate_unique_'+Stream] = 0.0
-            v['rate_unique_err_'+Stream] = 0.0
-
+        for Stream in [
+                "Turbo",
+                "Full",
+                "TurboCalib",
+                "TurboAndFull",
+                "TurboAndTurboCalib",
+                "FullAndTurboCalib",
+                "TurboAndNotFull",
+        ]:
+            v["passed_" + Stream] = 0.0
+            v["rate_" + Stream] = 0.0
+            v["rate_err_" + Stream] = 0.0
+            v["passed_unique_" + Stream] = 0.0
+            v["rate_unique_" + Stream] = 0.0
+            v["rate_unique_err_" + Stream] = 0.0
 
     ##############################################################
     ## remove turbo duplicates ##
@@ -93,12 +116,12 @@ def populateDicts(Streams, decisions, branches, branch_names, persistRecoLines,
         del decisions[TurboDuplicate]
 
 
-def prepareMatrix(RateMatrix, RateMatrixDict, RateMatrixLines, RateMatrixLineNames):
-
+def prepareMatrix(RateMatrix, RateMatrixDict, RateMatrixLines,
+                  RateMatrixLineNames):
     ##############################################################
     ## prepare the 2D matrix ##
     ##############################################################
-    OPTION="AllStreams"
+    OPTION = "AllStreams"
     for regex in _Streams[OPTION]:
         RateMatrixLines.append(regex[0])
         RateMatrixLineNames.append(regex[1])
@@ -119,12 +142,13 @@ def determineUnaccounted(Streams, decisions):
         accountedLines = []
         for line in list(decisions.keys()):
             accounted = False
-            for Module,Props in list(Streams[S].items()):
+            for Module, Props in list(Streams[S].items()):
                 if re.match(Module, line, flags=0):
                     Streams[S][Module]["lines"] += 1
                     Streams[S][Module]["lines_Turbo"] += isTurbo(line)
                     Streams[S][Module]["lines_Full"] += isFull(line)
-                    Streams[S][Module]["lines_TurboCalib"] += isTurboCalib(line)
+                    Streams[S][Module]["lines_TurboCalib"] += isTurboCalib(
+                        line)
                     if not Props["name"] == "ALL":
                         accounted = True
                         accountedLines.append(line)
@@ -133,8 +157,16 @@ def determineUnaccounted(Streams, decisions):
     return unaccounted
 
 
-def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines, RateMatrixLineNames, treeHlt2, nHlt1):
-
+def countEvents(
+        Streams,
+        decisions,
+        RateMatrix,
+        RateMatrixDict,
+        RateMatrixLines,
+        RateMatrixLineNames,
+        treeHlt2,
+        nHlt1,
+):
     ##########################################################
     ######### EVENT LOOP #####################################
     ##########################################################
@@ -143,19 +175,19 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
     variables = {}
     # Loop over HLT2 Lines
     for line in list(decisions.keys()):
-        variables[line] = array('I', [0])
+        variables[line] = array("I", [0])
         treeBranchName = line
-        treeHlt2.SetBranchAddress( treeBranchName, variables[line] )
-    #variables["TurboEventSize"] = array('f',[0])
-    #variables["FullEventSize"] = array('f',[0])
-    #variables["persistRecoEventSize"] = array('f',[0])
-    for b in ["TurboEventSize", "FullEventSize", 'persistRecoEventSize']:
-        variables[b] = array('f', [0])
-        treeHlt2.SetBranchAddress( b, variables[b] )
+        treeHlt2.SetBranchAddress(treeBranchName, variables[line])
+    # variables["TurboEventSize"] = array('f',[0])
+    # variables["FullEventSize"] = array('f',[0])
+    # variables["persistRecoEventSize"] = array('f',[0])
+    for b in ["TurboEventSize", "FullEventSize", "persistRecoEventSize"]:
+        variables[b] = array("f", [0])
+        treeHlt2.SetBranchAddress(b, variables[b])
 
     # Global counters
     iEntry = 0
-    TotalBytes= 0.0
+    TotalBytes = 0.0
     TotalBytesTurbo = 0.0
     TotalBytesTurboCalib = 0.0
     TotalBytesFull = 0.0
@@ -169,10 +201,9 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
     passedFull = 0
 
     while treeHlt2.GetEntry(iEntry):
-
-        iEntry+=1
+        iEntry += 1
         if iEntry % 1000 == 0:
-            print('processed %s/%s' %(iEntry, treeHlt2.GetEntries()))
+            print("processed %s/%s" % (iEntry, treeHlt2.GetEntries()))
 
         ### work with this
         persistRecoThisEvent = False
@@ -192,15 +223,18 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
         for decision, this_decision in list(decisions.items()):
             decThisLine = variables[decision][0]
             if decThisLine == 1:
-
-                if 'Turbo' in decision:
-                    this_decision["Bytes_incl"] += variables["TurboEventSize"][0]
+                if "Turbo" in decision:
+                    this_decision["Bytes_incl"] += variables["TurboEventSize"][
+                        0]
                     if this_decision["persistReco"]:
                         persistRecoThisEvent = True
-                        this_decision["Bytes_incl"] += variables["persistRecoEventSize"][0]
-                        this_decision["BytesPR_incl"] += variables["persistRecoEventSize"][0]
+                        this_decision["Bytes_incl"] += variables[
+                            "persistRecoEventSize"][0]
+                        this_decision["BytesPR_incl"] += variables[
+                            "persistRecoEventSize"][0]
                 else:
-                    this_decision["Bytes_incl"] += variables["FullEventSize"][0]
+                    this_decision["Bytes_incl"] += variables["FullEventSize"][
+                        0]
                 this_decision["passed"] += 1
                 linesFired += 1
 
@@ -213,7 +247,7 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
                     for key, value in list(Streams[S].items()):
                         if key[1] == "ALL":
                             value["pass_current"] = 1
-                    #matched = False
+                    # matched = False
                     for Module, ThisModule in list(Streams[S].items()):
                         if re.match(Module, decision, flags=0):
                             ThisModule["pass_current"] = 1
@@ -249,7 +283,6 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
                 TotalBytesTurbo += variables["persistRecoEventSize"][0]
                 TotalBytes += variables["persistRecoEventSize"][0]
 
-
         ### overlaps
         if linesFired_Turbo > 0 and linesFired_Full == 0:
             passedTurboAndNotFull += 1
@@ -260,8 +293,6 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
         if linesFired_Full > 0 and linesFired_TurboCalib > 0:
             passedFullAndTurboCalib += 1
 
-
-
         ### count unique decisions for this line
         for decision in decisions:
             decThisLine = variables[decision][0]
@@ -269,38 +300,49 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
                 decisions[decision]["passed_unique"] += 1
 
-        ### count decisisions overlaping with streams
+        ### count decisions overlapping with streams
-        passedStreams = {"Turbo" : linesFired_Turbo > 0,
-                         "TurboCalib" : linesFired_TurboCalib > 0,
-                         "Full" : linesFired_Full > 0,
-                         "TurboAndTurboCalib" : linesFired_Turbo > 0 and linesFired_TurboCalib > 0,
-                         "FullAndTurboCalib" : linesFired_Full > 0 and linesFired_TurboCalib > 0,
-                         "TurboAndFull" : linesFired_Turbo > 0 and linesFired_Full > 0,
-                         "TurboAndNotFull" : linesFired_Turbo > 0 and linesFired_Full == 0}
+        passedStreams = {
+            "Turbo": linesFired_Turbo > 0,
+            "TurboCalib": linesFired_TurboCalib > 0,
+            "Full": linesFired_Full > 0,
+            "TurboAndTurboCalib": (linesFired_Turbo > 0
+                                   and linesFired_TurboCalib > 0),
+            "FullAndTurboCalib": (linesFired_Full > 0
+                                  and linesFired_TurboCalib > 0),
+            "TurboAndFull": linesFired_Turbo > 0 and linesFired_Full > 0,
+            "TurboAndNotFull": linesFired_Turbo > 0 and linesFired_Full == 0,
+        }
 
         for decision in decisions:
             decThisLine = variables[decision][0]
-            for k,v in list(passedStreams.items()):
+            for k, v in list(passedStreams.items()):
-                if decThisLine == 1 and v == True:
+                if decThisLine == 1 and v:
-                    decisions[decision]["passed_"+k] += 1
+                    decisions[decision]["passed_" + k] += 1
 
         for S in USED_STREAMS:
             for Module, ThisModule in list(Streams[S].items()):
                 ThisModule["passed"] += ThisModule["pass_current"]
                 ThisModule["passed_Turbo"] += ThisModule["pass_current_Turbo"]
                 ThisModule["passed_Full"] += ThisModule["pass_current_Full"]
-                ThisModule["passed_TurboCalib"] += ThisModule["pass_current_TurboCalib"]
+                ThisModule["passed_TurboCalib"] += ThisModule[
+                    "pass_current_TurboCalib"]
 
         for S in USED_STREAMS:
             for X in list(Streams[S].keys()):
                 UNIQUE = Streams[S][X]["pass_current"] == 1
                 for Y in list(Streams[S].keys()):
-                    if not Streams[S][Y]["name"] == "ALL" and X != Y and Streams[S][Y]["pass_current"] == 1:
+                    if (Streams[S][Y]["name"] != "ALL" and X != Y
+                            and Streams[S][Y]["pass_current"] == 1):
                         UNIQUE = False
                 if UNIQUE:
                     Streams[S][X]["passed_unique"] += 1
 
-        for S in ["AllStreams"]: ## since the rate matrix is only set up for this
-
+        for S in ["AllStreams"
+                  ]:  ## since the rate matrix is only set up for this
             for X in range(0, len(RateMatrixLines)):
                 name = RateMatrixLineNames[X]
                 line = RateMatrixLines[X]
@@ -310,30 +352,37 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
                     if name not in RateMatrixDict:
                         RateMatrixDict[name] = {}
                     if name2 not in RateMatrixDict[name]:
-                        RateMatrixDict[name][name2] = 0.
-                    if all([ True if Streams[S][_]['pass_current'] == 1 else False for _ in (line, line2)]):
-                        RateMatrixDict[name][name2] += 1.
+                        RateMatrixDict[name][name2] = 0.0
+                    if all(Streams[S][_]["pass_current"] == 1
+                           for _ in (line, line2)):
+                        RateMatrixDict[name][name2] += 1.0
 
-                RateMatrixDict['lines'] = line
+                RateMatrixDict["lines"] = line
 
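+            # Normalise the overlap counts: entry [name][name2] becomes the
+            # percentage of events passing `name` that also pass `name2`; the
+            # 1.0e3 / nHlt1 factors applied to numerator and denominator
+            # cancel, leaving 100 * N(name & name2) / N(name & name).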
             for name in RateMatrixDict:
-                if name in ['All', 'Rate', 'Unique', 'lines']:
+                if name in ["All", "Rate", "Unique", "lines"]:
                     continue
                 for name2 in RateMatrixDict[name]:
-                    if name2 in ['All', 'Rate', 'Unique', 'lines']:
+                    if name2 in ["All", "Rate", "Unique", "lines"]:
                         continue
-                    rate = float(RateMatrixDict[name][name2]) *1.e3/ float(nHlt1)
-                    norm = float(RateMatrixDict[name][name]) *1.e3/ float(nHlt1)
-                    if norm <= 0.:
-                        rate = 0.
+                    rate = float(
+                        RateMatrixDict[name][name2]) * 1.0e3 / float(nHlt1)
+                    norm = float(
+                        RateMatrixDict[name][name]) * 1.0e3 / float(nHlt1)
+                    if norm <= 0.0:
+                        rate = 0.0
                     else:
                         rate /= norm
-                    RateMatrixDict[name][name2] = rate*100.
+                    RateMatrixDict[name][name2] = rate * 100.0
 
-
-            for X in range(0,len(RateMatrixLines)):
-                for Y in range(0,len(RateMatrixLines)):
-                    if Streams[S][RateMatrixLines[X]]["pass_current"] == 1 and Streams[S][RateMatrixLines[Y]]["pass_current"] == 1:
+            for X in range(0, len(RateMatrixLines)):
+                for Y in range(0, len(RateMatrixLines)):
+                    if (Streams[S][RateMatrixLines[X]]["pass_current"] == 1
+                            and Streams[S][
+                                RateMatrixLines[Y]]["pass_current"] == 1):
                         RateMatrix[X][Y] += 1
 
     ##########################################################
@@ -341,101 +390,112 @@ def countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines,
     ##########################################################
 
     stats = {}
-    stats['passedGlobal'] = passedGlobal
-    stats['passedTurbo'] = passedTurbo
-    stats['passedTurboAndNotFull'] = passedTurboAndNotFull
-    stats['passedTurboAndFull'] = passedTurboAndFull
-    stats['passedTurboAndTurboCalib'] = passedTurboAndTurboCalib
-    stats['passedFullAndTurboCalib'] = passedFullAndTurboCalib
-    stats['passedTurboCalib'] = passedTurboCalib
-    stats['passedFull'] = passedFull
-    stats['TotalBytes'] = TotalBytes
-    stats['TotalBytesTurbo'] = TotalBytesTurbo
-    stats['TotalBytesTurboCalib'] = TotalBytesTurboCalib
-    stats['TotalBytesFull'] = TotalBytesFull
+    stats["passedGlobal"] = passedGlobal
+    stats["passedTurbo"] = passedTurbo
+    stats["passedTurboAndNotFull"] = passedTurboAndNotFull
+    stats["passedTurboAndFull"] = passedTurboAndFull
+    stats["passedTurboAndTurboCalib"] = passedTurboAndTurboCalib
+    stats["passedFullAndTurboCalib"] = passedFullAndTurboCalib
+    stats["passedTurboCalib"] = passedTurboCalib
+    stats["passedFull"] = passedFull
+    stats["TotalBytes"] = TotalBytes
+    stats["TotalBytesTurbo"] = TotalBytesTurbo
+    stats["TotalBytesTurboCalib"] = TotalBytesTurboCalib
+    stats["TotalBytesFull"] = TotalBytesFull
 
     return stats
 
 
 def initRateDict(decisions, stats, nHlt1, nHlt2):
-
-    for G in [["Total", stats['passedGlobal']],
-              ["Total_Turbo", stats['passedTurbo']],
-              ["Total_TurboAndNotFull", stats['passedTurboAndNotFull']],
-              ["Total_TurboAndFull", stats['passedTurboAndFull']],
-              ["Total_TurboAndTurboCalib", stats['passedTurboAndTurboCalib']],
-              ["Total_FullAndTurboCalib", stats['passedFullAndTurboCalib']],
-              ["Total_TurboCalib", stats['passedTurboCalib']],
-              ["Total_Full", stats['passedFull']]
-             ]:
-
-        decisions[G[0]] = {"passed" : G[1],
-                           "passed_unique" : 0,
-                           "hlt1processed" : nHlt1,
-                           "hlt2processed" : nHlt2,
-                           "Bytes_incl" : 0.0,
-                           "BytesPR_incl" : 0.0,
-                           "rate" : 0,
-                           "rate_err" : 0,
-                           "rate_unique" : 0,
-                           "rate_unique_err" : 0}
+    for G in [
+        ["Total", stats["passedGlobal"]],
+        ["Total_Turbo", stats["passedTurbo"]],
+        ["Total_TurboAndNotFull", stats["passedTurboAndNotFull"]],
+        ["Total_TurboAndFull", stats["passedTurboAndFull"]],
+        ["Total_TurboAndTurboCalib", stats["passedTurboAndTurboCalib"]],
+        ["Total_FullAndTurboCalib", stats["passedFullAndTurboCalib"]],
+        ["Total_TurboCalib", stats["passedTurboCalib"]],
+        ["Total_Full", stats["passedFull"]],
+    ]:
+        decisions[G[0]] = {
+            "passed": G[1],
+            "passed_unique": 0,
+            "hlt1processed": nHlt1,
+            "hlt2processed": nHlt2,
+            "Bytes_incl": 0.0,
+            "BytesPR_incl": 0.0,
+            "rate": 0,
+            "rate_err": 0,
+            "rate_unique": 0,
+            "rate_unique_err": 0,
+        }
 
         this_decision = decisions[G[0]]
 
-        for S in ["Turbo", "Full", "TurboCalib", "TurboAndFull",
-                  "TurboAndTurboCalib", "FullAndTurboCalib", "TurboAndNotFull"]:
-            this_decision['passed_'+S] = 0.0
-            this_decision['rate_'+S] = 0.0
-            this_decision['rate_err_'+S] = 0.0
-            this_decision['passed_unique_'+S] = 0.0
-            this_decision['rate_unique_'+S] = 0.0
-            this_decision['rate_err_unique_'+S] = 0.0
+        for S in [
+                "Turbo",
+                "Full",
+                "TurboCalib",
+                "TurboAndFull",
+                "TurboAndTurboCalib",
+                "FullAndTurboCalib",
+                "TurboAndNotFull",
+        ]:
+            this_decision["passed_" + S] = 0.0
+            this_decision["rate_" + S] = 0.0
+            this_decision["rate_err_" + S] = 0.0
+            this_decision["passed_unique_" + S] = 0.0
+            this_decision["rate_unique_" + S] = 0.0
+            this_decision["rate_err_unique_" + S] = 0.0
 
 
 def calculateRates(Streams, decisions, stats, nHlt1, nHlt2, inputrate):
-
     initRateDict(decisions, stats, nHlt1, nHlt2)
 
-    decisions['Total']['Bytes_incl'] = stats['TotalBytes']
-    decisions['Total_Turbo']['Bytes_incl'] = stats['TotalBytesTurbo']
-    decisions['Total_TurboCalib']['Bytes_incl'] = stats['TotalBytesTurboCalib']
-    decisions['Total_Full']['Bytes_incl'] = stats['TotalBytesFull']
+    decisions["Total"]["Bytes_incl"] = stats["TotalBytes"]
+    decisions["Total_Turbo"]["Bytes_incl"] = stats["TotalBytesTurbo"]
+    decisions["Total_TurboCalib"]["Bytes_incl"] = stats["TotalBytesTurboCalib"]
+    decisions["Total_Full"]["Bytes_incl"] = stats["TotalBytesFull"]
 
     nHlt1 = float(nHlt1)
     nHlt2 = float(nHlt2)
 
     for key, value in list(decisions.items()):
+        nPassed = float(value["passed"])
 
-        nPassed = float(value['passed'])
-
-        MegaBytes_total = 1.e-6 * value["Bytes_incl"]
-        MegaBytesPR_total = 1.e-6 * value["BytesPR_incl"] #PersistReco part
+        MegaBytes_total = 1.0e-6 * value["Bytes_incl"]
+        MegaBytesPR_total = 1.0e-6 * value["BytesPR_incl"]  # PersistReco part
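+        # MBs_incl is a bandwidth estimate: MB per HLT1-processed event times
+        # the assumed input rate (Hz) gives MB/s; kBe_incl below is instead
+        # the mean size in kB of the events this line accepted.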
         value["MBs_incl"] = inputrate * MegaBytes_total / nHlt1
-        value["kBe_incl"] = 0.0 ## average kB/event
-        value["kBePR_incl"] = 0.0 ## average kB/event (persistReco)
+        value["kBe_incl"] = 0.0  ## average kB/event
+        value["kBePR_incl"] = 0.0  ## average kB/event (persistReco)
         if nPassed > 0.0:
-            value["kBe_incl"] = 1.e3 * MegaBytes_total / nPassed
-            value["kBePR_incl"] = 1.e3 * MegaBytesPR_total / nPassed
-
-
-        for S in ["Turbo", "Full", "TurboCalib", "TurboAndFull",
-                  "TurboAndTurboCalib", "FullAndTurboCalib", "TurboAndNotFull"]:
-            eff = rate(inputrate, float(value["passed_"+S]), nHlt1)
-            value["rate_"+S] = eff[0]
-            value["rate_err_"+S] = eff[1]
+            value["kBe_incl"] = 1.0e3 * MegaBytes_total / nPassed
+            value["kBePR_incl"] = 1.0e3 * MegaBytesPR_total / nPassed
+
+        for S in [
+                "Turbo",
+                "Full",
+                "TurboCalib",
+                "TurboAndFull",
+                "TurboAndTurboCalib",
+                "FullAndTurboCalib",
+                "TurboAndNotFull",
+        ]:
+            eff = rate(inputrate, float(value["passed_" + S]), nHlt1)
+            value["rate_" + S] = eff[0]
+            value["rate_err_" + S] = eff[1]
 
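+        # Binomial counting error: with k passed out of n = nHlt1 processed
+        # events, eff = k / n and eff_err = sqrt(eff * (1 - eff) / n); both
+        # are scaled by the assumed input rate to give a rate and its error.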
         eff = nPassed / nHlt1
-        eff_err = math.sqrt(eff * (1-eff) / nHlt1)
+        eff_err = math.sqrt(eff * (1 - eff) / nHlt1)
         value["rate"] = inputrate * eff
         value["rate_err"] = inputrate * eff_err
         eff = float(value["passed_unique"]) / nHlt1
-        eff_err = math.sqrt(eff * (1-eff) / nHlt1)
+        eff_err = math.sqrt(eff * (1 - eff) / nHlt1)
         value["rate_unique"] = inputrate * eff
         value["rate_unique_err"] = inputrate * eff_err
 
     for S in USED_STREAMS:
         for key, value in list(Streams[S].items()):
-
             rate_incl = rate(inputrate, value["passed"], nHlt1)
             rate_excl = rate(inputrate, value["passed_unique"], nHlt1)
 
@@ -450,51 +510,77 @@ def calculateRates(Streams, decisions, stats, nHlt1, nHlt2, inputrate):
                 value["rate_%s_err" % PhysicalStream] = R[1]
 
 
-def ParseHLT2Output(filename, filename2, inputrate=1.e6):
-
+def ParseHLT2Output(filename, filename2, inputrate=1.0e6):
     rootFile1 = ROOT.TFile(filename)
     rootFile2 = ROOT.TFile(filename2)
 
     treeHlt1 = rootFile1.Get("TupleHlt1")
     treeHlt2 = rootFile2.Get("TupleHlt2")
     branches = treeHlt2.GetListOfBranches()
-    persistRecoLines = [l.GetName() for l in rootFile1.Get("TuplePR").GetListOfBranches()]
+    persistRecoLines = [
+        l.GetName() for l in rootFile1.Get("TuplePR").GetListOfBranches()
+    ]
     decisions = {}
     branch_names = []
 
     Streams = {}
 
-    populateDicts(Streams, decisions, branches, branch_names, persistRecoLines, treeHlt1.GetEntries(), treeHlt2.GetEntries())
+    populateDicts(
+        Streams,
+        decisions,
+        branches,
+        branch_names,
+        persistRecoLines,
+        treeHlt1.GetEntries(),
+        treeHlt2.GetEntries(),
+    )
 
     RateMatrix = []
     RateMatrixDict = {}
     RateMatrixLines = []
     RateMatrixLineNames = []
-    prepareMatrix(RateMatrix, RateMatrixDict, RateMatrixLines, RateMatrixLineNames)
+    prepareMatrix(RateMatrix, RateMatrixDict, RateMatrixLines,
+                  RateMatrixLineNames)
 
     unaccounted = determineUnaccounted(Streams, decisions)
 
-    stats = countEvents(Streams, decisions, RateMatrix, RateMatrixDict, RateMatrixLines, RateMatrixLineNames, treeHlt2, treeHlt1.GetEntries())
-
-    calculateRates(Streams, decisions, stats, treeHlt1.GetEntries(), treeHlt2.GetEntries(), inputrate)
+    stats = countEvents(
+        Streams,
+        decisions,
+        RateMatrix,
+        RateMatrixDict,
+        RateMatrixLines,
+        RateMatrixLineNames,
+        treeHlt2,
+        treeHlt1.GetEntries(),
+    )
+
+    calculateRates(
+        Streams,
+        decisions,
+        stats,
+        treeHlt1.GetEntries(),
+        treeHlt2.GetEntries(),
+        inputrate,
+    )
 
     results = {}
-    results['unaccounted'] = unaccounted
-    results['decisions'] = decisions
-    results['persistRecoLines'] = persistRecoLines
-    results['Streams'] = Streams
-    results['RateMatrix'] = RateMatrix
-    results['RateMatrixDict'] = RateMatrixDict
-    results['RateMatrixLines'] = RateMatrixLines
-    results['RateMatrixLineNames'] = RateMatrixLineNames
+    results["unaccounted"] = unaccounted
+    results["decisions"] = decisions
+    results["persistRecoLines"] = persistRecoLines
+    results["Streams"] = Streams
+    results["RateMatrix"] = RateMatrix
+    results["RateMatrixDict"] = RateMatrixDict
+    results["RateMatrixLines"] = RateMatrixLines
+    results["RateMatrixLineNames"] = RateMatrixLineNames
 
     return results
 
 
 if __name__ == "__main__":
-
     if len(sys.argv) < 2:
-        print("Usage:   %s fileNameHLT1.root (fileNameHLT2.root)" % sys.argv[0])
+        print(
+            "Usage:   %s fileNameHLT1.root (fileNameHLT2.root)" % sys.argv[0])
         sys.exit()
 
     filename = sys.argv[1]
@@ -506,50 +592,55 @@ if __name__ == "__main__":
         filename2 = sys.argv[2]
         results = ParseHLT2Output(filename, filename2)
 
-    for k, v in list(results['unaccounted'].items()):
+    for k, v in list(results["unaccounted"].items()):
         filename = os.path.join(JSONDIR, "%s.unaccounted.list" % k)
-        with open(filename, 'w') as _file:
+        with open(filename, "w") as _file:
             print("Writing: " + filename)
             for line in v:
-                _file.write(line+"\n")
+                _file.write(line + "\n")
 
     ##### write all line rates to file
-    with open(os.path.join(JSONDIR, "rates.list"), 'w') as output:
+    with open(os.path.join(JSONDIR, "rates.list"), "w") as output:
         print("writing: " + output.name)
         AllRates = {}
-        decisions = results['decisions']
+        decisions = results["decisions"]
         for key, value in list(decisions.items()):
             AllRates[key] = value["rate"]
 
-        for key, value in sorted(iter(list(AllRates.items())), key=lambda v_k: (v_k[1],v_k[0]), reverse=True):
-            output.write('%s\t%.0f+-%.0f\n' % (key, value, decisions[key]["rate_err"])) #["rate"]))
+        for key, value in sorted(
+                AllRates.items(),
+                key=lambda v_k: (v_k[1], v_k[0]),
+                reverse=True):
+            output.write(
+                "%s\t%.0f+-%.0f\n" %
+                (key, value, decisions[key]["rate_err"]))  # ["rate"]))
 
     ############ finally, write out everything to json files
-    with open(os.path.join(JSONDIR, "persistRecoLines.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "persistRecoLines.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['persistRecoLines'], _file)
+        json.dump(results["persistRecoLines"], _file)
 
-    with open(os.path.join(JSONDIR, "decisions.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "decisions.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['decisions'], _file)
+        json.dump(results["decisions"], _file)
 
-    with open(os.path.join(JSONDIR, "Streams.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "Streams.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['Streams'], _file)
+        json.dump(results["Streams"], _file)
 
-    with open(os.path.join(JSONDIR, "RateMatrix.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "RateMatrix.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['RateMatrix'], _file)
+        json.dump(results["RateMatrix"], _file)
 
-    with open(os.path.join(JSONDIR, "RateMatrixDict.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "RateMatrixDict.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['RateMatrixDict'], _file)
+        json.dump(results["RateMatrixDict"], _file)
 
-    with open(os.path.join(JSONDIR, "RateMatrixLines.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "RateMatrixLines.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['RateMatrixLines'], _file)
+        json.dump(results["RateMatrixLines"], _file)
 
-    with open(os.path.join(JSONDIR, "RateMatrixLineNames.json"), 'w') as _file:
+    with open(os.path.join(JSONDIR, "RateMatrixLineNames.json"), "w") as _file:
         print("writing: " + _file.name)
-        json.dump(results['RateMatrixLineNames'], _file)
+        json.dump(results["RateMatrixLineNames"], _file)
     ############# the end
diff --git a/handlers/hlt/HLTCorrelations.py b/handlers/hlt/HLTCorrelations.py
index 74daccbba5adf5777b5ac701e35b0dc40dd4e855..9fee466185e0fcbd68cd38f11ac3ff67ad17d802 100755
--- a/handlers/hlt/HLTCorrelations.py
+++ b/handlers/hlt/HLTCorrelations.py
@@ -1,23 +1,26 @@
 #!/usr/bin/python
-import os,sys,math,re,ROOT
+import os, sys, math, re, ROOT
 import random
-sys.argv.append( '-b' )
+
+sys.argv.append("-b")
 from array import array
-sys.path.insert(0, 'scripts')
-sys.path.insert(1, 'scripts')
+
+sys.path.insert(0, "scripts")
+sys.path.insert(1, "scripts")
 ######### SET ME ##########
 maxEvt = -1
 filename = sys.argv[1]
 ######### SET ME ##########
 
-from .StreamDefs import (exclude,_Streams,prescales)
+from .StreamDefs import exclude, _Streams, prescales
+
 print(exclude)
-ChosenStream  = _Streams["AllStreams"]
+ChosenStream = _Streams["AllStreams"]
 
-rootFile = ROOT.TFile( filename )
+rootFile = ROOT.TFile(filename)
 rootFile.ls()
-treeHlt1 = rootFile.Get( "TupleHlt1/HltDecReportsTuple" )
-treeHlt2 = rootFile.Get( "TupleHlt2/HltDecReportsTuple" )
+treeHlt1 = rootFile.Get("TupleHlt1/HltDecReportsTuple")
+treeHlt2 = rootFile.Get("TupleHlt2/HltDecReportsTuple")
 branches = treeHlt2.GetListOfBranches()
 decisions = {}
 lineName = None
@@ -26,8 +29,9 @@ branch_names = []
 
 for branch in branches:
     branchName = branch.GetName()
-    if "Decision" in branchName and branchName.replace("Decision","") not in exclude:
-        branchName = branchName.replace("Decision","")
+    if "Decision" in branchName and branchName.replace("Decision",
+                                                       "") not in exclude:
+        branchName = branchName.replace("Decision", "")
         print(branchName)
         branch_names.append(branchName)
         decisions[branchName] = {}
@@ -41,18 +45,20 @@ for branch in branches:
 
 variables = {}
 for line in list(decisions.keys()):
-    variables[line] = array('I',[0])
-    treeHlt2.SetBranchAddress( line+"Decision", variables[line] )
+    variables[line] = array("I", [0])
+    treeHlt2.SetBranchAddress(line + "Decision", variables[line])
 iEntry = 0
 
 while treeHlt2.GetEntry(iEntry):
-    iEntry+=1
-    if maxEvt > 0 and iEntry > maxEvt: break
-    if iEntry % 10000 == 0:print('processed %s/%s' %(iEntry,treeHlt2.GetEntries()))
+    iEntry += 1
+    if maxEvt > 0 and iEntry > maxEvt:
+        break
+    if iEntry % 10000 == 0:
+        print("processed %s/%s" % (iEntry, treeHlt2.GetEntries()))
     ## set prescales
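+    # A prescale p keeps a decision with probability p: draw u uniform in
+    # (0, 1) and zero out the stored decision whenever u > p.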
     for decision in decisions:
         if decision in list(prescales.keys()):
-            if random.uniform(0,1) > prescales[decision]:
+            if random.uniform(0, 1) > prescales[decision]:
                 variables[decision][0] = 0
     ## by stream
     StreamDecs = {}
@@ -68,11 +74,11 @@ while treeHlt2.GetEntry(iEntry):
     for decision in decisions:
         decThisLine = variables[decision][0]
         for Stream in ChosenStream:
-            #print "------"
-            #print 'line = %s' %decision
-            #print 'line dec = %s' %decThisLine
-            #print "StreamDecs:"
-            #print StreamDecs
+            # print "------"
+            # print 'line = %s' %decision
+            # print 'line dec = %s' %decThisLine
+            # print "StreamDecs:"
+            # print StreamDecs
             if decThisLine == 1 and StreamDecs[Stream[0]] == 1:
                 decisions[decision][Stream[1]] += 1
 
@@ -81,11 +87,13 @@ while treeHlt2.GetEntry(iEntry):
 ##########################################################
 
 # convert from events to rate
-for Hlt2LineName,Hlt2LineStats in list(decisions.items()):
-    for key,value in list(Hlt2LineStats.items()):
-        Hlt2LineStats[key] = 1.e6*float(value)/float(treeHlt1.GetEntries()) ## rate
+for Hlt2LineName, Hlt2LineStats in list(decisions.items()):
+    for key, value in list(Hlt2LineStats.items()):
+        Hlt2LineStats[key] = (
+            1.0e6 * float(value) / float(treeHlt1.GetEntries()))  ## rate
 
 ############ finally, write out everything to json files
 import json
-print('writing json files')
-json.dump(decisions, open("decisions.json",'w'))
+
+print("writing json files")
+json.dump(decisions, open("decisions.json", "w"))
diff --git a/handlers/hlt/HLTIndependenceParser.py b/handlers/hlt/HLTIndependenceParser.py
index d6e85a3a331fb3708c107d758dc0ea5f06776a4a..4678f161b55f6da0346cb6db2fd62ccbaf6961a8 100644
--- a/handlers/hlt/HLTIndependenceParser.py
+++ b/handlers/hlt/HLTIndependenceParser.py
@@ -7,18 +7,19 @@ from functools import reduce
 # Common literals and words
 NL = Suppress(LineEnd())
 slit = lambda x: Suppress(Literal(x))
-plus  = slit("+")
+plus = slit("+")
 minus = slit("-")
-pipe  = slit("|")
-kHz   = slit("kHz")
-lb    = slit("(")
-rb    = slit(")")
-eq    = slit("=")
-sc    = slit(":")
-nbwitherr = Word(nums + '.') + plus + minus + Word(nums + '.')
+pipe = slit("|")
+kHz = slit("kHz")
+lb = slit("(")
+rb = slit(")")
+eq = slit("=")
+sc = slit(":")
+nbwitherr = Word(nums + ".") + plus + minus + Word(nums + ".")
+
 
 def getPropertyMatcher(name, valType=None):
-    """ Returns pyparsing structure to match a line:
+    """Returns pyparsing structure to match a line:
     Turbo rate = 0.01 kHz
     or
     Hlt2Global rate = (30.0+-17.0587221092)kHz
@@ -26,62 +27,59 @@ def getPropertyMatcher(name, valType=None):
     expr = literalMatcher(name)
     return expr + valType + Suppress(restOfLine)
 
+
 def getNbMatcher(name):
-    """ Return a a pyparsing structure matching a number,
+    """Return a a pyparsing structure matching a number,
     with the name associated
     """
     return Word(nums + ".").setResultsName(name)
 
+
 def literalMatcher(sentence):
-    ltokens =  [Suppress(Literal(l))
-                for l in sentence.split(" ")
-                if l != ""]
-    return reduce(operator.add, ltokens,Empty())
+    ltokens = [Suppress(Literal(l)) for l in sentence.split(" ") if l != ""]
+    return reduce(operator.add, ltokens, Empty())
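+
+# Example: literalMatcher("all jobs completed") expands to
+# Suppress(Literal("all")) + Suppress(Literal("jobs")) +
+# Suppress(Literal("completed")): the phrase must appear in the input but
+# contributes nothing to the parse results.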
 
-def getParser():
 
+def getParser():
     # Preparing the header
     ###############################################################
     commentL = LineStart() + Suppress(Literal("-")) + restOfLine
-    gpm = lambda x: getPropertyMatcher("%s = " %x, getNbMatcher(x))
+    gpm = lambda x: getPropertyMatcher("%s = " % x, getNbMatcher(x))
     headerL = literalMatcher("all jobs completed")
     completedL = gpm("completed")
     requestedL = gpm("requested")
-    processedL = getPropertyMatcher("processed ",  \
-                                    getNbMatcher("processed") \
-                                    + Suppress(Literal("events")))
-    nomismatchL = getPropertyMatcher("No mismatches found in ",  \
-                                    getNbMatcher("nomismatch"))
-
-    header = [ Suppress(headerL),
-               completedL,
-               requestedL,
-               processedL,
-               nomismatchL]
+    processedL = getPropertyMatcher(
+        "processed ",
+        getNbMatcher("processed") + Suppress(Literal("events")))
+    nomismatchL = getPropertyMatcher("No mismatches found in ",
+                                     getNbMatcher("nomismatch"))
+
+    header = [
+        Suppress(headerL), completedL, requestedL, processedL, nomismatchL
+    ]
 
     # Parser for the table
     ###############################################################
 
     tHeadL = Group(Literal("Line:") + restOfLine).suppress()
-    tRowL = OneOrMore(Group(Word(nums) + sc
-                            + Word(alphanums + "_:&")
-                            + pipe + Word(nums) + Word(nums)
-                            + Word(nums) + Word(nums)))
+    tRowL = OneOrMore(
+        Group(
+            Word(nums) + sc + Word(alphanums + "_:&") + pipe + Word(nums) +
+            Word(nums) + Word(nums) + Word(nums)))
 
-    table = [ commentL
-              + tHeadL
-              + commentL
-              + tRowL.setResultsName("HLT1LineStats")
-              + commentL]
+    table = [
+        commentL + tHeadL + commentL + tRowL.setResultsName("HLT1LineStats") +
+        commentL
+    ]
 
     # Assemble grammar
     ###############################################################
-    bnf = reduce(operator.add, header + table , Empty())
+    bnf = reduce(operator.add, header + table, Empty())
     return bnf
 
 
 def parseHLTIndependenceTable(logtxt):
-    """ Tool to parse the HLT rates table"""
+    """Tool to parse the HLT rates table"""
     grammar = getParser()
     result = grammar.parseString(logtxt)
     return dict(result)
diff --git a/handlers/hlt/HLTInputStats.py b/handlers/hlt/HLTInputStats.py
index 43110069a1d8313437bcf460c1a94156d148928e..b7959b5d420acc53d52b78a737bb3b46b74cec24 100755
--- a/handlers/hlt/HLTInputStats.py
+++ b/handlers/hlt/HLTInputStats.py
@@ -4,8 +4,7 @@ from array import array
 
 
 def studyInputStats(filename):
-
-    rootFile = ROOT.TFile( filename )
+    rootFile = ROOT.TFile(filename)
     if rootFile.IsZombie():
         print("ROOT file is Zombie")
         sys.exit()
@@ -16,21 +15,19 @@ def studyInputStats(filename):
 
     results = []
     for data in tree:
-        results.append( {'inputrate' : data.inputrate,
-                         'bunches' : data.bunches} )
+        results.append({"inputrate": data.inputrate, "bunches": data.bunches})
 
     return results
 
 
 if __name__ == "__main__":
-
     filename = sys.argv[1]
 
     results = studyInputStats(filename)
 
     JSONDIR = os.path.abspath(os.path.dirname(filename))
 
-    for data in results: #only one entry
-        with open(os.path.join(JSONDIR, "InputStats.json"), 'w') as _file:
+    for data in results:  # only one entry
+        with open(os.path.join(JSONDIR, "InputStats.json"), "w") as _file:
             print("writing: " % _file.name)
             json.dump(data, _file)
diff --git a/handlers/hlt/HLTPerfParser.py b/handlers/hlt/HLTPerfParser.py
index bbff68f88b2f0890966c316e389136bb18c1d87d..ba55bf4ef44c8ead812d543feaae8fdb483dafe8 100755
--- a/handlers/hlt/HLTPerfParser.py
+++ b/handlers/hlt/HLTPerfParser.py
@@ -9,18 +9,18 @@ from math import sqrt
 import re
 import json
 
-def _parse_log_file(this_log):
 
+def _parse_log_file(this_log):
     output_dict = {}
     timing_list = []
 
     re_timing = re.compile(r"(?:\|?\s*(\d+(?:\.\d+)?)\s*)")
     re_name = re.compile(r"TIMER\.TIMER:(\s*)([\w0-9]+(?:\s\w+)?)\s*\|")
-    with open(this_log, 'r') as _logFile:
+    with open(this_log, "r") as _logFile:
         for _line in _logFile:
             if _line.find("TIMER.TIMER:") == -1:
                 continue
-            number_line = ''.join(_line.split('|')[1:])
+            number_line = "".join(_line.split("|")[1:])
             numbers = [float(n) for n in re_timing.findall(number_line)]
 
             r = re_name.search(_line)
@@ -30,29 +30,38 @@ def _parse_log_file(this_log):
 
                 timing_list.append((alg_name, depth))
 
-                output_dict[alg_name] = {'depth': depth, '<user>': numbers[0], '<clock>': numbers[1],
-                                         'min': numbers[2], 'max': numbers[3], 'entries': numbers[4],
-                                         'total (s)': numbers[5], 'total2': numbers[6]}
-
-    i=0
-    for (name, depth) in timing_list:
-        if 'parent' not in output_dict[name]:
-            output_dict[name]['parent'] = ''
-        if 'children' not in output_dict[name]:
-            output_dict[name]['children'] = []
+                output_dict[alg_name] = {
+                    "depth": depth,
+                    "<user>": numbers[0],
+                    "<clock>": numbers[1],
+                    "min": numbers[2],
+                    "max": numbers[3],
+                    "entries": numbers[4],
+                    "total (s)": numbers[5],
+                    "total2": numbers[6],
+                }
+
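+    # Rebuild the timer-table hierarchy: for each algorithm, walk backwards
+    # through timing_list to the nearest earlier entry at depth - 1 and link
+    # it as the parent (registering this entry among its children).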
+    i = 0
+    for name, depth in timing_list:
+        if "parent" not in output_dict[name]:
+            output_dict[name]["parent"] = ""
+        if "children" not in output_dict[name]:
+            output_dict[name]["children"] = []
         j = i
         while j >= 0:
             parent_depth = timing_list[j][1]
             if parent_depth == (depth - 1):
-                output_dict[name]['parent'] = timing_list[j][0]
-                output_dict[timing_list[j][0]]['children'].append(name)
+                output_dict[name]["parent"] = timing_list[j][0]
+                output_dict[timing_list[j][0]]["children"].append(name)
                 break
             j -= 1
         i += 1
 
     return output_dict
 
-def correct_times(total_events, total_time, norm_rate, events, time, n_cpu, n_workers):
+
+def correct_times(total_events, total_time, norm_rate, events, time, n_cpu,
+                  n_workers):
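+    # Normalise the per-event time across machines: norm compares this job's
+    # measured event rate with the per-worker reference rate (norm_rate); the
+    # final n_cpu / n_workers factor appears to extrapolate from the workers
+    # actually run to a fully occupied node.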
     rate = total_events / total_time
-    if norm_rate == None:
+    if norm_rate is None:
         norm_rate = rate
@@ -60,20 +69,22 @@ def correct_times(total_events, total_time, norm_rate, events, time, n_cpu, n_wo
         norm_rate /= float(n_workers)
     norm = rate / norm_rate
 
-    tpe = time / events if time != 0.0 else 0.
+    tpe = time / events if events != 0.0 else 0.0
     tpe /= norm
     return tpe * float(n_cpu) / float(n_workers)
 
+
 def filter_logs(log_file):
-    if not hasattr(filter_logs, 'regex'):
-        filter_logs.regex = re.compile(r'^%s_(\d(\d+)).*' % 'moores')
+    if not hasattr(filter_logs, "regex"):
+        filter_logs.regex = re.compile(r"^%s_(\d(\d+)).*" % "moores")
     r = filter_logs.regex.match(log_file)
     return int(r.group(2)) if r else r
 
 
 def process_directory(log_dir):
-
-    files = [path.join(log_dir, lf) for lf in listdir(log_dir) if filter_logs(lf)]
+    files = [
+        path.join(log_dir, lf) for lf in listdir(log_dir) if filter_logs(lf)
+    ]
 
     total_input = {}
 
@@ -81,40 +92,43 @@ def process_directory(log_dir):
         total_input[_file] = _parse_log_file(_file)
 
     moore_metadata = None
-    with open(path.join(log_dir, 'output.json'), 'r') as _file:
+    with open(path.join(log_dir, "output.json"), "r") as _file:
         moore_metadata = json.load(_file)
 
     timing_results = calc_times(total_input, moore_metadata)
 
-    throughput = moore_metadata['av_inst']
-    throughput_err = moore_metadata['av_inst_s']
+    throughput = moore_metadata["av_inst"]
+    throughput_err = moore_metadata["av_inst_s"]
     throughput_farm = scale_to_farm(throughput)
     throughput_farm_err = scale_to_farm(throughput_err)
 
-    return {'timing_results': timing_results,
-            'throughput': throughput,
-            'throughput_err': throughput_err,
-            'throughput_farm': throughput_farm,
-            'throughput_farm_err': throughput_farm_err,
-            }
+    return {
+        "timing_results": timing_results,
+        "throughput": throughput,
+        "throughput_err": throughput_err,
+        "throughput_farm": throughput_farm,
+        "throughput_farm_err": throughput_farm_err,
+    }
+
 
 def scale_to_farm(throughput):
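+    # Weighted extrapolation to the full farm: "faster" nodes count with
+    # weight 1.0, "fast" with 0.8, "medium" and "slow" with 0.5; the final
+    # division by 1000.0 presumably converts the result to kHz.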
     nFaster = 296
     nFast = 792
     nMedium = 96
     nSlow = 372
-    return throughput * (nFaster + 0.8 * nFast + 0.5 *(nMedium + nSlow)) / 1000.
+    return throughput * (nFaster + 0.8 * nFast + 0.5 *
+                         (nMedium + nSlow)) / 1000.0
 
-def calc_times(total_input, moore_metadata):
 
+def calc_times(total_input, moore_metadata):
     temp_result = {}
 
-    n_cpu = moore_metadata['cpu_count']
-    n_workers = moore_metadata['n_nodes'] * moore_metadata['n_slaves']
+    n_cpu = moore_metadata["cpu_count"]
+    n_workers = moore_metadata["n_nodes"] * moore_metadata["n_slaves"]
 
     for k, v in list(total_input.items()):
-        total_events = v['EVENT LOOP']['entries']
-        total_time = v['EVENT LOOP']['total (s)']
+        total_events = v["EVENT LOOP"]["entries"]
+        total_time = v["EVENT LOOP"]["total (s)"]
         for AlgKey, Alg in list(v.items()):
             for attr_key, attr_val in list(Alg.items()):
                 if AlgKey not in temp_result:
@@ -126,58 +140,64 @@ def calc_times(total_input, moore_metadata):
                     continue
                 if attr_key not in temp_result[AlgKey]:
                     temp_result[AlgKey][attr_key] = {}
-                    temp_result[AlgKey][attr_key]['values'] = []
-
-                if attr_key == 'events':
-                    true_value = 1000 * correct_times(total_events, total_time, moore_metadata['av_inst'],
-                                               attr_val, Alg['events'], n_cpu, n_workers )
+                    temp_result[AlgKey][attr_key]["values"] = []
+
+                if attr_key == "events":
+                    true_value = 1000 * correct_times(
+                        total_events,
+                        total_time,
+                        moore_metadata["av_inst"],
+                        attr_val,
+                        Alg["events"],
+                        n_cpu,
+                        n_workers,
+                    )
                 else:
                     true_value = attr_val
 
-                temp_result[AlgKey][attr_key]['values'].append(attr_val)
-
+                temp_result[AlgKey][attr_key]["values"].append(attr_val)
 
     for AlgKey, Alg in list(temp_result.items()):
         for attr_key, attr_val in list(Alg.items()):
             if not isinstance(attr_val, dict):
                 continue
-            values = attr_val['values']
-            attr_val['min'] = min(values)
-            attr_val['max'] = max(values)
-            attr_val['total'] = sum(values)
-            total2 = sum([_*_ for _ in values])
-            attr_val['total_sq'] = total2
+            values = attr_val["values"]
+            attr_val["min"] = min(values)
+            attr_val["max"] = max(values)
+            attr_val["total"] = sum(values)
+            total2 = sum([_ * _ for _ in values])
+            attr_val["total_sq"] = total2
             num = float(len(values))
-            avg = attr_val['total'] / num
-            attr_val['average'] = avg
+            avg = attr_val["total"] / num
+            attr_val["average"] = avg
             try:
-                attr_val['error'] = sqrt((total2 / num) - avg*avg)
+                attr_val["error"] = sqrt((total2 / num) - avg * avg)
             except ValueError:
-                attr_val['error'] = 0.
-
+                attr_val["error"] = 0.0
 
     for AlgKey, Alg in list(temp_result.items()):
         for attr_key, attr_val in list(Alg.items()):
             if not isinstance(attr_val, dict):
                 continue
-            for i in 'values', 'total', 'total_sq':
+            for i in "values", "total", "total_sq":
                 del attr_val[i]
 
     return temp_result
 
 
 def main():
-
     import sys
 
     if len(sys.argv) == 2:
         _dir = sys.argv[1]
     else:
-        _dir = '.'
+        _dir = "."
 
     output = process_directory(_dir)
 
-    print("throughput = %s +/- %s" % (output['throughput'], output['throughput_err']))
+    print("throughput = %s +/- %s" % (output["throughput"],
+                                      output["throughput_err"]))
+
 
 if __name__ == "__main__":
     main()
diff --git a/handlers/hlt/HLTRateParser.py b/handlers/hlt/HLTRateParser.py
index 1403b6dea6f3444285bd8f1118018501cd6e48a0..334d42d724e50dd99d75123334a940d92bd33fe1 100644
--- a/handlers/hlt/HLTRateParser.py
+++ b/handlers/hlt/HLTRateParser.py
@@ -1,21 +1,31 @@
 #!/usr/bin/env python
 
-from pyparsing import (LineEnd, Literal, Word, Suppress, LineStart, Optional,
-                       ZeroOrMore, OneOrMore, Group)
+from pyparsing import (
+    LineEnd,
+    Literal,
+    Word,
+    Suppress,
+    LineStart,
+    Optional,
+    ZeroOrMore,
+    OneOrMore,
+    Group,
+    alphanums,
+    nums,
+    restOfLine,
+)
 
 # Common literals and words
 NL = LineEnd().suppress()
-plus  = Literal("+").suppress()
+plus = Literal("+").suppress()
 minus = Literal("-").suppress()
-pipe  = Literal("|").suppress()
+pipe = Literal("|").suppress()
 kHz = Literal("kHz").suppress()
 lb = Literal("(").suppress()
 rb = Literal(")").suppress()
 eq = Literal("=").suppress()
-nbwitherr = Word(nums + '.') + plus + minus + Word(nums + '.')
+nbwitherr = Word(nums + ".") + plus + minus + Word(nums + ".")
+
 
 def getPropertyMatcher(name, valType=None):
-    """ Returns pyparsing structure to match a line:
+    """Returns pyparsing structure to match a line:
     Turbo rate = 0.01 kHz
     or
     Hlt2Global rate = (30.0+-17.0587221092)kHz
@@ -23,9 +33,10 @@ def getPropertyMatcher(name, valType=None):
     expr = literalMatcher(name)
     return expr + valType + Suppress(restOfLine)
 
+
 def literalMatcher(sentence):
     tokens = sentence.split(" ")
-    ltokens =  [Suppress(Literal(l)) for l in tokens if l != ""]
+    ltokens = [Suppress(Literal(l)) for l in tokens if l != ""]
     expr = None
     for l in ltokens:
-        if expr == None:
+        if expr is None:
@@ -34,65 +45,69 @@ def literalMatcher(sentence):
             expr += l
     return expr
 
-def getHLTRateParser():
 
+def getHLTRateParser():
     # Preparing the header
     commentLine = LineStart() + Suppress(Literal("-")) + restOfLine
     headerLine = restOfLine
-    header = Optional(ZeroOrMore("=")) \
-             + literalMatcher("HLT rate summary starts here") \
-             + Optional(ZeroOrMore("="))
-    trailer = Optional(ZeroOrMore("=")) \
-             + literalMatcher("HLT rate summary ends here") \
-             + Optional(ZeroOrMore("="))
-    removedLines = Literal("removed").suppress() \
-                   + Literal("lines").suppress() + restOfLine.suppress()
-    nbevents = Literal("processed:").suppress() \
-                + Word(nums).setResultsName("nbevents") + Literal("events").suppress()
-    ratesAssume = Literal("rates assume").suppress() + Word(nums + ".").setResultsName("ratesAssume") + Literal("Hz from level-0").suppress()
+    header = (Optional(ZeroOrMore("=")) +
+              literalMatcher("HLT rate summary starts here") + Optional(
+                  ZeroOrMore("=")))
+    trailer = (Optional(ZeroOrMore("=")) +
+               literalMatcher("HLT rate summary ends here") + Optional(
+                   ZeroOrMore("=")))
+    removedLines = (Literal("removed").suppress() +
+                    Literal("lines").suppress() + restOfLine.suppress())
+    nbevents = (
+        Literal("processed:").suppress() +
+        Word(nums).setResultsName("nbevents") + Literal("events").suppress())
+    ratesAssume = (Literal("rates assume").suppress() +
+                   Word(nums + ".").setResultsName("ratesAssume") +
+                   Literal("Hz from level-0").suppress())
 
-    getLinesnbMatcher = lambda x: Word(nums).setResultsName(x) \
-                        + Literal(x).suppress()
+    def getLinesnbMatcher(x):
+        return Word(nums).setResultsName(x) + Literal(x).suppress()
     hlt1Lines = getLinesnbMatcher("Hlt1Lines")
     hlt2Lines = getLinesnbMatcher("Hlt2Lines")
 
     linesSep = Group(Literal("|***|") + restOfLine).suppress()
-    lineslist = OneOrMore(Group(pipe + Word(nums,min=3) + pipe + Word(alphanums + "_:&.*()|?!") \
-                                 + pipe + nbwitherr + pipe + nbwitherr + pipe))
-    linesHlt1Global = (Group(Literal("|---|Hlt1Global") \
-                                 + pipe + nbwitherr + pipe + Literal("--") + pipe))
-    linesHlt2Global = (Group(Literal("|---|Hlt2Global") \
-                                 + pipe + nbwitherr + pipe + Literal("--") + pipe))
-    linesHlt2Turbo = (Group(Literal("|---|Hlt2.*Turbo") \
-                                 + pipe + nbwitherr + pipe + Literal("--") + pipe))
-    linesHlt2Full = (Group(Literal("|---|Hlt2.*Full") \
-                                 + pipe + nbwitherr + pipe + Literal("--") + pipe))
-    linesHlt2Turcal = (Group(Literal("|---|Hlt2.*Turcal") \
-                                 + pipe + nbwitherr + pipe + Literal("--") + pipe))
-
+    lineslist = OneOrMore(
+        Group(pipe + Word(nums, min=3) + pipe +
+              Word(alphanums + "_:&.*()|?!") + pipe + nbwitherr + pipe +
+              nbwitherr + pipe))
+    linesHlt1Global = Group(
+        Literal("|---|Hlt1Global") + pipe + nbwitherr + pipe + Literal("--") +
+        pipe)
+    linesHlt2Global = Group(
+        Literal("|---|Hlt2Global") + pipe + nbwitherr + pipe + Literal("--") +
+        pipe)
+    linesHlt2Turbo = Group(
+        Literal("|---|Hlt2.*Turbo") + pipe + nbwitherr + pipe + Literal("--") +
+        pipe)
+    linesHlt2Full = Group(
+        Literal("|---|Hlt2.*Full") + pipe + nbwitherr + pipe + Literal("--") +
+        pipe)
+    linesHlt2Turcal = Group(
+        Literal("|---|Hlt2.*Turcal") + pipe + nbwitherr + pipe +
+        Literal("--") + pipe)
 
     # Assemble grammar
-    bnf = Suppress(header) + hlt1Lines + hlt2Lines + removedLines + commentLine \
-          + nbevents + ratesAssume + linesSep \
-          + linesHlt1Global.setResultsName("Hlt1GlobalRate") \
-          + linesHlt2Global.setResultsName("Hlt2GlobalRate") \
-          + linesHlt2Turbo.setResultsName("TurboRate") \
-          + linesHlt2Full.setResultsName("FullRate") \
-          + linesHlt2Turcal.setResultsName("TurcalRate") \
-          + commentLine + linesSep \
-          + lineslist.setResultsName("Hlt1RegexStats") \
-          + commentLine + linesSep \
-          + lineslist.setResultsName("Hlt2RegexStats") \
-          + commentLine + linesSep \
-          + lineslist.setResultsName("Hlt1Stats") \
-          + linesSep \
-          + lineslist.setResultsName("Hlt2Stats") \
-          + trailer
+    bnf = (Suppress(header) + hlt1Lines + hlt2Lines + removedLines +
+           commentLine + nbevents + ratesAssume + linesSep +
+           linesHlt1Global.setResultsName("Hlt1GlobalRate") +
+           linesHlt2Global.setResultsName("Hlt2GlobalRate") +
+           linesHlt2Turbo.setResultsName("TurboRate") +
+           linesHlt2Full.setResultsName("FullRate") +
+           linesHlt2Turcal.setResultsName("TurcalRate") + commentLine +
+           linesSep + lineslist.setResultsName("Hlt1RegexStats") + commentLine
+           + linesSep + lineslist.setResultsName("Hlt2RegexStats") +
+           commentLine + linesSep + lineslist.setResultsName("Hlt1Stats") +
+           linesSep + lineslist.setResultsName("Hlt2Stats") + trailer)
 
     return bnf
 
+
 def parseHLTRateList(logtxt):
-    """ Tool to parse the HLT rates table"""
+    """Tool to parse the HLT rates table"""
     grammar = getHLTRateParser()
     result = grammar.parseString(logtxt)
     return dict(result)
diff --git a/handlers/hlt/StreamDefs.py b/handlers/hlt/StreamDefs.py
index f791537cf01e606c9d03d3fae29dfc098541c1a8..f48204a5a98e657e5d14b6757d41caaf393dd27a 100644
--- a/handlers/hlt/StreamDefs.py
+++ b/handlers/hlt/StreamDefs.py
@@ -1,7 +1,7 @@
 #!/bin/env python
 _Streams = {}
 
-#_PersistRecoList = ['Hlt2DiMuonJPsiTurbo',
+# _PersistRecoList = ['Hlt2DiMuonJPsiTurbo',
 #                    'Hlt2CharmHadDstp2D0Pip_D02KmPipTurbo',
 #                    'Hlt2Dstp2D0Pip_D02KmPimPipPipTurbo',
 #                    'Hlt2D02KmPipTurbo',
@@ -11,140 +11,176 @@ _Streams = {}
 #                    'Hlt2Xic0ToPpKmKmPipTurbo',
 #                    'Hlt2XicpToPpKmPipTurbo']
 
-_Streams["AllStreams"] = [["Hlt2.*","ALL"],
-                          ["Hlt2CharmHad.(?!.*?Turbo)","CharmFull"],
-                          ["Hlt2CharmHad.*Turbo","CharmTurbo"],
-                          ["Hlt2Topo.*","Topo"],
-                          ["Hlt2(DiMuon|SingleMuon|TriMuon|DiElectron|LFV).*","Leptons"],
-                          ["Hlt2.*(TrackEff|TurboCalib)","TurboCalib"],
-                          ["Hlt2(EW|Exotica|Jets).*","EW"],
-                          ["Hlt2LowMult.*","LowMult"],
-                          #["Hlt2(Bc2JpsiX|Bottomonium|CcDiHadron||XcMuX|RareCharm|B2|Lb2|Bd2|Bs2|Bu2|DisplVert|DiPhi|DiProt|Radiative|Phi2KsKs|RareStrange|IncPhi|DPS|BHad).*","Other"],
-                          ["Hlt2(B2Kpi0|B2HH|Majorana|Bc2JpsiX|Bottomonium|CcDiHadron|XcMuX|RareCharm|DisplVert|Radiative|RareStrange|Strange|Phi|DPS|BHad).*","Other"],
-                          ["Hlt2(Lumi|ErrorEvent|PassThrough|Forward|DebugEvent|Transparent|NoBiasNonBeamBeam)","Hlt2Technical"]]
-
-_Streams["Modules"] = [["Hlt2.*","ALL"],
-                       ["Hlt2CcDiHadron.*","Hlt2CcDiHadron"],
-                       ["Hlt2DPS.*","Hlt2DPS"],
-                       ["Hlt2Bc2JpsiX.*","Hlt2Bc2JpsiX"],
-                       ["Hlt2DiMuon.*","Hlt2DiMuon"],
-                       ["Hlt2Majorana.*","Hlt2Majorana"],
-                       ["Hlt2Bottomonium.*","Hlt2Bottomonium"],
-                       ["Hlt2Strange.*","Hlt2Strange"],
-                       ["Hlt2DisplVertices.*","Hlt2DisplVertices"],
-                       ["Hlt2TrackEff_D0.*","Hlt2TrackEff"],
-                       ["Hlt2TrackEffDiMuon.*","Hlt2TrackEffDiMuon"],
-                       ["Hlt2(Lumi|ErrorEvent|PassThrough|Forward|DebugEvent|Transparent|NoBiasNonBeamBeam).*","Hlt2Technical"],
-                       ["Hlt2RareStrange.*","Hlt2RareStrange"],
-                       ["Hlt2TriMuon.*","Hlt2TriMuon"],
-                       #["Hlt2(Phi2|Bs2PhiPhi|IncPhi|DiPhi)","Hlt2Phi"],
-                       ["Hlt2Phi.*","Hlt2Phi"],
-                       ["Hlt2XcMuXForTau.*","Hlt2XcMuXForTau"],
-                       ["Hlt2SingleMuon.*","Hlt2SingleMuon"],
-                       ["Hlt2B2HH.*","Hlt2B2HH"],
-                       ["Hlt2B2Kpi0.*","Hlt2B2Kpi0"],
-                       ["Hlt2BHad.*","Hlt2BHad"],
-                       ["Hlt2CharmHad.*","Hlt2CharmHad"],
-                       ["Hlt2DiElectron.*","Hlt2DiElectron"],
-                       ["Hlt2Exotica.*","Hlt2Exotica"],
-                       ["Hlt2Jets.*","Hlt2Jets"],
-                       ["Hlt2LFV.*","Hlt2LFV"],
-                       ["Hlt2LowMult.*","Hlt2LowMult"],
-                       ["Hlt2PID.*","Hlt2PID"],
-                       ["Hlt2Radiative.*","Hlt2Radiative"],
-                       ["Hlt2RareCharm.*","Hlt2RareCharm"],
-                       ["Hlt2EW.*","Hlt2EW"],
-                       ["Hlt2Topo.*","Hlt2Topo"]]
-                       
-
-
-_Streams["Spectroscopy"] = [["Hlt2.*","ALL"],
-                          ["Hlt2(DiMuonJPsiTurbo|CharmHadInclDst2PiD02HHXBDT|CharmHadSpec_D0ToKPi_PiTurbo|CharmHadDpToKmPipPipTurbo|CharmHadDspToKpPimPipTurbo|CharmHadLcpToPpKmPipTurbo|CharmHadXicpToPpKmPip).*","Spec"]]
-
-
-
-_Streams["Paper"] = [["Hlt2.*","ALL"],
-                     ["Hlt2CharmHadIncl.*","Incl_D"],
-                     ["Hlt2Topo.*","Incl_B"],
-                     #["Hlt2.*SingleMuon.*","SingleMuon"],
-                     #["Hlt2.*(SingleMuon.*","SingleMuon"],
-                     ["Hlt2(?!.*?LowMult).*Muon.*","Muon"],
-                     ["Hlt2(DiProtonLowMult|LowMult).*","LowMult"],
-                     ["Hlt2.*TurboCalib","Calibration"],
-                     ["Hlt2(?!.*?CharmHadIncl)(?!.*?Topo)(?!.*?SingleMuon)(?!.*?Muon)(?!.*?LowMult)(?!.*?TurboCalib)","Exclusives"]]
-
-_Streams["Bandwidth"] = [["Hlt2.*","ALL"],
-                         ["Hlt2(?!.*?LowMult).(?!.*?Turbo).(?!.*?Calib)","Full (rest)"],
-                         ["Hlt2LowMult.*","Full (LowMult)"],
-                         ["Hlt2.(?!.*?TurboCalib).*Turbo","Turbo"],
-                         ["Hlt2.*TurboCalib","TurboCalib"]]
-
-_Streams["CharmFull"] = [[[x for x in _Streams["AllStreams"] if x[1] == "CharmFull"][0][0],"ALL"],
-                         ["Hlt2CharmHad.*(Eta|Pi0|ee|gamma)(?!.*?Turbo)","Neutrals"],
-                         ["Hlt2CharmHad.*HHXBDT","InclDstar"],
-                         ["Hlt2CharmHad(Omega|Xi).(?!.*?Turbo)","Hyperons"],
-                         ["Hlt2CharmHadDstD02Ksh.(?!.*?Turbo)","NeutralD_KsHH_Tagged"],
-                         ["Hlt2CharmHad.*ForKPiAsym","ChargedD_HHH_AKpi"]]
-
-_Streams["CharmTurbo"] = [[[x for x in _Streams["AllStreams"] if x[1] == "CharmTurbo"][0][0],"ALL"],
-                          ### D0/D*
-                          ["Hlt2CharmHadDst_2D0.(?!.*?LTUNB)","D0_HH_Tagged_LTBiased"],
-                          ["Hlt2CharmHadDst_2D0.*LTUNB","D0_HH_Tagged_LTUB"],
-                          ["Hlt2CharmHadDst_2D0Pi_D02(KK|PiPi|KPi).*","NeutralD_HH_Tagged"],
-                          ["Hlt2CharmHadD02(KPi|KK|PiPi)Turbo","NeutralD_HH_Untagged"],
-                          ["Hlt2CharmHad(?!.*?Spec).*D02.*(PiPiPiPi|KPiPiPi|KKPiPi|KKKPi).*","NeutralD_HHHH_Tagged"],
-                          ["Hlt2CharmHadD2KS0KS0.*","NeutralD_KsKs_Untagged"],
-                          #### D+
-                          ["Hlt2CharmHadD2KS0(K_|Pi_).*","ChargedD_KsH"],
-                          ["Hlt2CharmHad(DspTo|DpTo).*","ChargedD_HHHandHHHHH"],
-                          ["Hlt2CharmHadD(p2|s2|2)(KKPi|PiPiPi|KPiPi|KKK)Ks.*","ChargedD_HHHKs"],
-                          #### other
-                          ["Hlt2CharmHadSpec.*","CharmHadSpec"],
-                          ["Hlt2CharmHadLc2Lambda.*","Lambdac_LambdaH"],
-                          ["Hlt2CharmHadLcpTo.*","Lambdac_phh"]]
-                          
-
-
-_Streams["Leptons"] = [[[x for x in _Streams["AllStreams"] if x[1] == "Leptons"][0][0],"ALL"],
-                     ["Hlt2DiMuonSoft","DiMuonSoft"],
-                     ["Hlt2DiMuon(?!.*?Soft).*","DiMuon"],
-                     ["Hlt2TriMuon.*","TriMuon"],
-                     ["Hlt2SingleMuon.*","SingleMuon"]]
-
-_Streams["Other"] = [[[x for x in _Streams["AllStreams"] if x[1] == "Other"][0][0],"ALL"],
-                     ["Hlt2(B2|Lb2|Bd2|Bs2|Bu2).*","B_HH"],
-                     ["Hlt2RareCharm.*","RareCharm"],
-                     ["Hlt2DisplVert.*","DisplVert"],
-                     ["Hlt2.*(DiPhi|DiProt).*","DiPhiDiProt"],
-                     ["Hlt2IncPhi.*","IncPhi"],
-                     ["Hlt2Radiative.*","Radiative"],
-                     ["Hlt2Phi2KsKs.*","PhiToKsKs"],
-                     ["Hlt2DPS.*","DPS"],
-                     #["Hlt2LowMult.*","LowMult"],
-                     ["Hlt2RareStrange.*","RareStrange"]]
-
-_Streams["EW"] = [[[x for x in _Streams["AllStreams"] if x[1] == "EW"][0][0],"ALL"],
-                  ["Hlt2EWSingleTau.*","HighpT_tau"],
-                  ["Hlt2EWSingleMuon.*","Single_mu"],
-                  ["Hlt2EWSingle.*Electron.*","Single_e"],
-                  ["Hlt2EWDiMuon.*","DiMuon"],
-                  ["Hlt2EWDiElectron.*","DiEle"]]
-
-_Streams["Topo"] = [[[x for x in _Streams["AllStreams"] if x[1] == "Topo"][0][0],"ALL"],
-                    ["Hlt2Topo2Body.*","2Body"],
-                    ["Hlt2Topo3Body.*","3Body"],
-                    ["Hlt2Topo4Body.*","4Body"],
-                    ["Hlt2TopoMu2Body.*","Mu2Body"],
-                    ["Hlt2TopoMu3Body.*","Mu3Body"],
-                    ["Hlt2TopoMu4Body.*","Mu4Body"]]
-
-_Streams["TurboCalib"] = [[[x for x in _Streams["AllStreams"] if x[1] == "TurboCalib"][0][0],"ALL"],
-                          ["Hlt2PID.*","PID"],
-                          ["Hlt2TrackEffDiMuon","TrackEff(psi)"],
-                          ["Hlt2TrackEff_D0","TrackEff(D*)"]]
+_Streams["AllStreams"] = [
+    ["Hlt2.*", "ALL"],
+    ["Hlt2CharmHad.(?!.*?Turbo)", "CharmFull"],
+    ["Hlt2CharmHad.*Turbo", "CharmTurbo"],
+    ["Hlt2Topo.*", "Topo"],
+    ["Hlt2(DiMuon|SingleMuon|TriMuon|DiElectron|LFV).*", "Leptons"],
+    ["Hlt2.*(TrackEff|TurboCalib)", "TurboCalib"],
+    ["Hlt2(EW|Exotica|Jets).*", "EW"],
+    ["Hlt2LowMult.*", "LowMult"],
+    # ["Hlt2(Bc2JpsiX|Bottomonium|CcDiHadron||XcMuX|RareCharm|B2|Lb2|Bd2|Bs2|Bu2|DisplVert|DiPhi|DiProt|Radiative|Phi2KsKs|RareStrange|IncPhi|DPS|BHad).*","Other"],
+    [
+        "Hlt2(B2Kpi0|B2HH|Majorana|Bc2JpsiX|Bottomonium|CcDiHadron|XcMuX|RareCharm|DisplVert|Radiative|RareStrange|Strange|Phi|DPS|BHad).*",
+        "Other",
+    ],
+    [
+        "Hlt2(Lumi|ErrorEvent|PassThrough|Forward|DebugEvent|Transparent|NoBiasNonBeamBeam)",
+        "Hlt2Technical",
+    ],
+]
+
+_Streams["Modules"] = [
+    ["Hlt2.*", "ALL"],
+    ["Hlt2CcDiHadron.*", "Hlt2CcDiHadron"],
+    ["Hlt2DPS.*", "Hlt2DPS"],
+    ["Hlt2Bc2JpsiX.*", "Hlt2Bc2JpsiX"],
+    ["Hlt2DiMuon.*", "Hlt2DiMuon"],
+    ["Hlt2Majorana.*", "Hlt2Majorana"],
+    ["Hlt2Bottomonium.*", "Hlt2Bottomonium"],
+    ["Hlt2Strange.*", "Hlt2Strange"],
+    ["Hlt2DisplVertices.*", "Hlt2DisplVertices"],
+    ["Hlt2TrackEff_D0.*", "Hlt2TrackEff"],
+    ["Hlt2TrackEffDiMuon.*", "Hlt2TrackEffDiMuon"],
+    [
+        "Hlt2(Lumi|ErrorEvent|PassThrough|Forward|DebugEvent|Transparent|NoBiasNonBeamBeam).*",
+        "Hlt2Technical",
+    ],
+    ["Hlt2RareStrange.*", "Hlt2RareStrange"],
+    ["Hlt2TriMuon.*", "Hlt2TriMuon"],
+    # ["Hlt2(Phi2|Bs2PhiPhi|IncPhi|DiPhi)","Hlt2Phi"],
+    ["Hlt2Phi.*", "Hlt2Phi"],
+    ["Hlt2XcMuXForTau.*", "Hlt2XcMuXForTau"],
+    ["Hlt2SingleMuon.*", "Hlt2SingleMuon"],
+    ["Hlt2B2HH.*", "Hlt2B2HH"],
+    ["Hlt2B2Kpi0.*", "Hlt2B2Kpi0"],
+    ["Hlt2BHad.*", "Hlt2BHad"],
+    ["Hlt2CharmHad.*", "Hlt2CharmHad"],
+    ["Hlt2DiElectron.*", "Hlt2DiElectron"],
+    ["Hlt2Exotica.*", "Hlt2Exotica"],
+    ["Hlt2Jets.*", "Hlt2Jets"],
+    ["Hlt2LFV.*", "Hlt2LFV"],
+    ["Hlt2LowMult.*", "Hlt2LowMult"],
+    ["Hlt2PID.*", "Hlt2PID"],
+    ["Hlt2Radiative.*", "Hlt2Radiative"],
+    ["Hlt2RareCharm.*", "Hlt2RareCharm"],
+    ["Hlt2EW.*", "Hlt2EW"],
+    ["Hlt2Topo.*", "Hlt2Topo"],
+]
+
+_Streams["Spectroscopy"] = [
+    ["Hlt2.*", "ALL"],
+    [
+        "Hlt2(DiMuonJPsiTurbo|CharmHadInclDst2PiD02HHXBDT|CharmHadSpec_D0ToKPi_PiTurbo|CharmHadDpToKmPipPipTurbo|CharmHadDspToKpPimPipTurbo|CharmHadLcpToPpKmPipTurbo|CharmHadXicpToPpKmPip).*",
+        "Spec",
+    ],
+]
+
+_Streams["Paper"] = [
+    ["Hlt2.*", "ALL"],
+    ["Hlt2CharmHadIncl.*", "Incl_D"],
+    ["Hlt2Topo.*", "Incl_B"],
+    # ["Hlt2.*SingleMuon.*","SingleMuon"],
+    # ["Hlt2.*(SingleMuon.*","SingleMuon"],
+    ["Hlt2(?!.*?LowMult).*Muon.*", "Muon"],
+    ["Hlt2(DiProtonLowMult|LowMult).*", "LowMult"],
+    ["Hlt2.*TurboCalib", "Calibration"],
+    [
+        "Hlt2(?!.*?CharmHadIncl)(?!.*?Topo)(?!.*?SingleMuon)(?!.*?Muon)(?!.*?LowMult)(?!.*?TurboCalib)",
+        "Exclusives",
+    ],
+]
+
+_Streams["Bandwidth"] = [
+    ["Hlt2.*", "ALL"],
+    ["Hlt2(?!.*?LowMult).(?!.*?Turbo).(?!.*?Calib)", "Full (rest)"],
+    ["Hlt2LowMult.*", "Full (LowMult)"],
+    ["Hlt2.(?!.*?TurboCalib).*Turbo", "Turbo"],
+    ["Hlt2.*TurboCalib", "TurboCalib"],
+]
+
+_Streams["CharmFull"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "CharmFull"][0][0], "ALL"],
+    ["Hlt2CharmHad.*(Eta|Pi0|ee|gamma)(?!.*?Turbo)", "Neutrals"],
+    ["Hlt2CharmHad.*HHXBDT", "InclDstar"],
+    ["Hlt2CharmHad(Omega|Xi).(?!.*?Turbo)", "Hyperons"],
+    ["Hlt2CharmHadDstD02Ksh.(?!.*?Turbo)", "NeutralD_KsHH_Tagged"],
+    ["Hlt2CharmHad.*ForKPiAsym", "ChargedD_HHH_AKpi"],
+]
+
+_Streams["CharmTurbo"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "CharmTurbo"][0][0], "ALL"],
+    ### D0/D*
+    ["Hlt2CharmHadDst_2D0.(?!.*?LTUNB)", "D0_HH_Tagged_LTBiased"],
+    ["Hlt2CharmHadDst_2D0.*LTUNB", "D0_HH_Tagged_LTUB"],
+    ["Hlt2CharmHadDst_2D0Pi_D02(KK|PiPi|KPi).*", "NeutralD_HH_Tagged"],
+    ["Hlt2CharmHadD02(KPi|KK|PiPi)Turbo", "NeutralD_HH_Untagged"],
+    [
+        "Hlt2CharmHad(?!.*?Spec).*D02.*(PiPiPiPi|KPiPiPi|KKPiPi|KKKPi).*",
+        "NeutralD_HHHH_Tagged",
+    ],
+    ["Hlt2CharmHadD2KS0KS0.*", "NeutralD_KsKs_Untagged"],
+    #### D+
+    ["Hlt2CharmHadD2KS0(K_|Pi_).*", "ChargedD_KsH"],
+    ["Hlt2CharmHad(DspTo|DpTo).*", "ChargedD_HHHandHHHHH"],
+    ["Hlt2CharmHadD(p2|s2|2)(KKPi|PiPiPi|KPiPi|KKK)Ks.*", "ChargedD_HHHKs"],
+    #### other
+    ["Hlt2CharmHadSpec.*", "CharmHadSpec"],
+    ["Hlt2CharmHadLc2Lambda.*", "Lambdac_LambdaH"],
+    ["Hlt2CharmHadLcpTo.*", "Lambdac_phh"],
+]
+
+_Streams["Leptons"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "Leptons"][0][0], "ALL"],
+    ["Hlt2DiMuonSoft", "DiMuonSoft"],
+    ["Hlt2DiMuon(?!.*?Soft).*", "DiMuon"],
+    ["Hlt2TriMuon.*", "TriMuon"],
+    ["Hlt2SingleMuon.*", "SingleMuon"],
+]
+
+_Streams["Other"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "Other"][0][0], "ALL"],
+    ["Hlt2(B2|Lb2|Bd2|Bs2|Bu2).*", "B_HH"],
+    ["Hlt2RareCharm.*", "RareCharm"],
+    ["Hlt2DisplVert.*", "DisplVert"],
+    ["Hlt2.*(DiPhi|DiProt).*", "DiPhiDiProt"],
+    ["Hlt2IncPhi.*", "IncPhi"],
+    ["Hlt2Radiative.*", "Radiative"],
+    ["Hlt2Phi2KsKs.*", "PhiToKsKs"],
+    ["Hlt2DPS.*", "DPS"],
+    # ["Hlt2LowMult.*","LowMult"],
+    ["Hlt2RareStrange.*", "RareStrange"],
+]
+
+_Streams["EW"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "EW"][0][0], "ALL"],
+    ["Hlt2EWSingleTau.*", "HighpT_tau"],
+    ["Hlt2EWSingleMuon.*", "Single_mu"],
+    ["Hlt2EWSingle.*Electron.*", "Single_e"],
+    ["Hlt2EWDiMuon.*", "DiMuon"],
+    ["Hlt2EWDiElectron.*", "DiEle"],
+]
+
+_Streams["Topo"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "Topo"][0][0], "ALL"],
+    ["Hlt2Topo2Body.*", "2Body"],
+    ["Hlt2Topo3Body.*", "3Body"],
+    ["Hlt2Topo4Body.*", "4Body"],
+    ["Hlt2TopoMu2Body.*", "Mu2Body"],
+    ["Hlt2TopoMu3Body.*", "Mu3Body"],
+    ["Hlt2TopoMu4Body.*", "Mu4Body"],
+]
+
+_Streams["TurboCalib"] = [
+    [[x for x in _Streams["AllStreams"] if x[1] == "TurboCalib"][0][0], "ALL"],
+    ["Hlt2PID.*", "PID"],
+    ["Hlt2TrackEffDiMuon", "TrackEff(psi)"],
+    ["Hlt2TrackEff_D0", "TrackEff(D*)"],
+]
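+
+# Illustrative only (assumed usage, not part of the handlers): a trigger line
+# name is assigned to every stream whose pattern matches it, e.g.
+#   import re
+#   [label for pat, label in _Streams["AllStreams"]
+#    if re.match(pat, "Hlt2Topo2Body")]
+#   # -> ["ALL", "Topo"]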
 
 ############# obsolete
-#_Streams["Regex"] = [["Hlt2.*","ALL"],
+# _Streams["Regex"] = [["Hlt2.*","ALL"],
 #                     ["Hlt2CharmHad.(?!.*?HHX).*","CharmExcl"],
 #                     ["Hlt2CharmHad.*HHXBDT","CharmIncl"],
 #                     ["Hlt2Topo.*","Topo"],
diff --git a/handlers/parser/GaudiSequenceParser.py b/handlers/parser/GaudiSequenceParser.py
index d9af7d9f9ef49d444f9fc20d30040d258aa2dd83..de2c96ef6736b9d5bee701569a433474ef55dc5a 100644
--- a/handlers/parser/GaudiSequenceParser.py
+++ b/handlers/parser/GaudiSequenceParser.py
@@ -1,5 +1,4 @@
 import os
-
 """
 The GaudiSequenceParser parses a Gaudi log file generated with the
 '--printsequence' option. The data structure contains the function class,
@@ -10,13 +9,13 @@ returns those algorithms which are 'below' those of the arguments passed.
 
 
 class GaudiSequenceParser:
-
-    def __init__(self,
-                 dir,
-                 file='run.log',
-                 startpattern='*' * 30 + ' Algorithm Sequence ' + '*' * 28,
-                 endpattern='*' * 78,
-                 ):
+    def __init__(
+            self,
+            dir,
+            file="run.log",
+            startpattern="*" * 30 + " Algorithm Sequence " + "*" * 28,
+            endpattern="*" * 78,
+    ):
         self.directory = dir
         self.file = file
         self.startpattern = startpattern
@@ -32,12 +31,15 @@ class GaudiSequenceParser:
             if startsequence:
                 if line.find(self.endpattern) != -1:
                     break
-                algo = line.split('SUCCESS')[-1]
-                indent = (algo.count(' ') - 1) / 5
+                algo = line.split("SUCCESS")[-1]
+                # five spaces of printsequence indentation correspond to one
+                # nesting level; integer division keeps the level an int,
+                # matching the Python 2 behaviour of '/'
+                indent = (algo.count(" ") - 1) // 5
                 algo = algo.strip()
-                (algocl, algoname) = algo.split('/')
-                self.sequence.append({'indent': indent, 'algo_class': algocl,
-                                      'algo_name': algoname})
+                (algocl, algoname) = algo.split("/")
+                self.sequence.append({
+                    "indent": indent,
+                    "algo_class": algocl,
+                    "algo_name": algoname
+                })
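+                # illustrative: a line ending in "GaudiSequencer/Hlt2" yields
+                # {"indent": <depth>, "algo_class": "GaudiSequencer",
+                #  "algo_name": "Hlt2"}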
             elif line.find(self.startpattern) != -1:
                 startsequence = True
 
@@ -48,14 +50,14 @@ class GaudiSequenceParser:
         indent = 0
         startselect = False
         for algo in self.sequence:
-            algofull = '%s/%s' % (algo['algo_class'], algo['algo_name'])
+            algofull = "%s/%s" % (algo["algo_class"], algo["algo_name"])
             if startselect:
-                if algo['indent'] <= indent:
+                if algo["indent"] <= indent:
                     startselect = False
                 else:
                     self.algorithms.append(algo)
             if not startselect and algofull in algoselect:
-                indent = algo['indent']
+                indent = algo["indent"]
                 startselect = True
         return self.algorithms
 
@@ -70,5 +72,5 @@ class GaudiSequenceParser:
         print(self.sequence)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
-    GaudiSequenceParser().run()
+    # the constructor requires a directory; default to the current one
+    GaudiSequenceParser(os.curdir).run()
diff --git a/handlers/timing/CallgrindLogParser.py b/handlers/timing/CallgrindLogParser.py
index 4e1662e69bfd4aff3421f6bc80b869eec6178dfd..06c31ae6dfe6028b68a7178246de7ff1d00f076d 100644
--- a/handlers/timing/CallgrindLogParser.py
+++ b/handlers/timing/CallgrindLogParser.py
@@ -1,7 +1,5 @@
 import re
 import os
-
-
 """
 The CallgrindLogParser parses a callgrind logfile which was generated with
 
@@ -16,75 +14,85 @@ are calculated such as cache misses, ratios etc.
 """
 
 
-class CallgrindLogParser():
-
+class CallgrindLogParser:
     def __init__(self,
                  dir,
                  algos=[],
-                 filepat=r'callgrind.out.[0-9]+.[0-9].anno'):
+                 filepat=r"callgrind.out.[0-9]+.[0-9].anno"):
         self.directory = dir
         self.algos = algos
         self.callgrindfilepat = filepat
-        self.callgrindvalues = [['ir', 'instruction fetch'],
-                                ['dr', 'data read access'],
-                                ['dw', 'data write access'],
-                                ['i1mr', 'l1 instruction fetch miss'],
-                                ['d1mr', 'l1 data read miss'],
-                                ['d1mw', 'l1 data write miss'],
-                                ['ilmr', 'last level data instr. fetch miss'],
-                                ['dlmr', 'last level data read miss'],
-                                ['dlmw', 'last level data write miss'],
-                                ['bc', 'conditional branch'],
-                                ['bcm', 'mispredicted conditional branch'],
-                                ['bi', 'indirect branch'],
-                                ['bim', 'mispredicted indirect branch'],
-                                ['ifp32x1', 'scalar f32 instructon'],
-                                ['ifp64x1', 'scalar f64 instructon'],
-                                ['ifp32x2', 'simdx2 f32 instructon'],
-                                ['ifp64x2', 'simdx2 f64 instructon'],
-                                ['ifp32x4', 'simdx4 f32 instructon'],
-                                ['ifp64x4', 'simdx4 f64 instructon'],
-                                ['ifp32x8', 'simdx8 f32 instructon']]
-        self.callgrindfields = [x[0] for x in self.callgrindvalues] + \
-            ['file', 'funall', 'funname']
+        self.callgrindvalues = [
+            ["ir", "instruction fetch"],
+            ["dr", "data read access"],
+            ["dw", "data write access"],
+            ["i1mr", "l1 instruction fetch miss"],
+            ["d1mr", "l1 data read miss"],
+            ["d1mw", "l1 data write miss"],
+            ["ilmr", "last level data instr. fetch miss"],
+            ["dlmr", "last level data read miss"],
+            ["dlmw", "last level data write miss"],
+            ["bc", "conditional branch"],
+            ["bcm", "mispredicted conditional branch"],
+            ["bi", "indirect branch"],
+            ["bim", "mispredicted indirect branch"],
+            ["ifp32x1", "scalar f32 instructon"],
+            ["ifp64x1", "scalar f64 instructon"],
+            ["ifp32x2", "simdx2 f32 instructon"],
+            ["ifp64x2", "simdx2 f64 instructon"],
+            ["ifp32x4", "simdx4 f32 instructon"],
+            ["ifp64x4", "simdx4 f64 instructon"],
+            ["ifp32x8", "simdx8 f32 instructon"],
+        ]
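+        # the keys above follow the column order of the callgrind_annotate
+        # output; they are zipped against the first 20 columns of each row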
+        self.callgrindfields = [x[0] for x in self.callgrindvalues] + [
+            "file",
+            "funall",
+            "funname",
+        ]
         self.callgrindmetrics = {}
 
     def extractCallgrindMetrics(self):
         pat = re.compile(self.callgrindfilepat)
         callgrindfile = list(filter(pat.match, os.listdir(self.directory)))
         if len(callgrindfile) == 0:
-            print('Error: cannot find callgrind log file in %s' %\
-                  str(os.path.realpath(os.curdir)))
+            print("Error: cannot find callgrind log file in %s" % str(
+                os.path.realpath(os.curdir)))
         elif len(callgrindfile) > 1:
-            print('Error: found multiple log files matching pattern %s in %s' %\
+            print("Error: found multiple log files matching pattern %s in %s" %
                   (self.callgrindfilepat, str(os.listdir(os.curdir))))
 
-        searchops = ['%s::operator()' % x['algo_class'] for x in self.algos]
-        oppatstr = r'[A-Z,a-z,0-9]+::operator\(\)'
-        oppat = re.compile(r':' + oppatstr)
-        oppatbeg = re.compile(r'^' + oppatstr)
-        algpat = re.compile(r'[A-Z,a-z,0-9]+')
-        fh = open(os.path.join(self.directory,callgrindfile[0]))
+        searchops = ["%s::operator()" % x["algo_class"] for x in self.algos]
+        oppatstr = r"[A-Z,a-z,0-9]+::operator\(\)"
+        oppat = re.compile(r":" + oppatstr)
+        oppatbeg = re.compile(r"^" + oppatstr)
+        algpat = re.compile(r"[A-Z,a-z,0-9]+")
+        fh = open(os.path.join(self.directory, callgrindfile[0]))
         for line in fh.readlines():
             match = oppat.findall(line)
             if len(match) and match[0][1:] in searchops:
                 lines = line.split()
                 linesvals = lines[:20]
-                linesvals = [int(x.replace(',', '').
-                                replace('.', '0')) for x in linesvals]
-                metricvals = linesvals + ''.join(lines[19:]).split(':', 1)
+                linesvals = [
+                    int(x.replace(",", "").replace(".", "0"))
+                    for x in linesvals
+                ]
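+                # callgrind_annotate prints ',' as a thousands separator and
+                # '.' for counters with no events, hence the replacements above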
+                metricvals = linesvals + "".join(lines[19:]).split(":", 1)
                 metricvals.append(oppatbeg.findall(metricvals[-1])[0])
                 algname = algpat.findall(metricvals[-1])[0]
                 metricdict = dict(list(zip(self.callgrindfields, metricvals)))
                 if algname in self.callgrindmetrics:
                     for metr in self.callgrindvalues:
                         value = metr[0]
-                        if self.callgrindmetrics[algname][value] != \
-                           metricdict[value]:
+                        if self.callgrindmetrics[algname][value] != metricdict[
+                                value]:
                             print("""Error: for algorithm %s two different
                                   values found for callgrind metric %s: %d, %d
-                                  """ % (algname, value, metricdict[value],
-                                         self.callgrindmtrics[algname][value]))
+                                  """ % (
+                                algname,
+                                value,
+                                metricdict[value],
+                                self.callgrindmetrics[algname][value],
+                            ))
                 else:
                     self.callgrindmetrics[algname] = metricdict
         fh.close()
@@ -93,7 +101,8 @@ class CallgrindLogParser():
     def run(self):
         self.extractCallgrindMetrics()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
-    CallgrindLogParser().run()
+    # the constructor requires a directory; default to the current one
+    CallgrindLogParser(os.curdir).run()
 
 # ir        Instruction Fetch
diff --git a/handlers/timing/MemoryParser.py b/handlers/timing/MemoryParser.py
index b51ab6a5ccd0e01db5391b8bb88d97347fd72f62..b2112f19f39fd8eb1dadb6ff3bcbeaa31c4bbfb1 100644
--- a/handlers/timing/MemoryParser.py
+++ b/handlers/timing/MemoryParser.py
@@ -10,8 +10,9 @@ import re
 
 
 class MemoryParser:
-    """ Class responsible for parsing the MemoryAuditor log from the
-    Gaudi run log files """
+    """Class responsible for parsing the MemoryAuditor log from the
+    Gaudi run log files"""
+
     Nb_of_events = -1
 
     def __init__(self, filename):
@@ -19,7 +20,7 @@ class MemoryParser:
         self.parse(filename)
 
     def parse(self, logfilename):
-        """ Parse the log file"""
+        """Parse the log file"""
         # Now iterating on the input and looking for the MemoryAuditor lines
         regxp = "^MemoryAuditor.*\s(after|before)\s([a-zA-Z0-9_]+)\s(Initialize|Execute|Finalize).*\s\=\s([\d\.]+).*\s\=\s([\d\.]+)"
         regxp_event_loop = "(TIMER|TimingAuditor).(TIMER|T...)\s+INFO EVENT LOOP\s*\|([\d\s\.]+?)\|([\d\s\.]+?)\|([\d\s\.]+?)\|([\d\s\.]+?)\|.*"
@@ -28,17 +29,27 @@ class MemoryParser:
             for l in logf.readlines():
                 m = re.match(regxp, l)
                 if m != None:
-                    elem = MemNode(m.group(2), m.group(3), m.group(1), float(m.group(4)), float(m.group(5)))
+                    elem = MemNode(
+                        m.group(2),
+                        m.group(3),
+                        m.group(1),
+                        float(m.group(4)),
+                        float(m.group(5)),
+                    )
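+                    # illustrative MemoryAuditor line (format assumed from the
+                    # regex above): "MemoryAuditor ... after MyAlg Execute ...",
+                    # the two trailing "=" values being the memory sizes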
                 m = re.match(regxp_event_loop, l)
                 if m != None:
                     MemoryParser.Nb_of_events = int(m.group(6))
 
             logf.close()
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__) +
-                            ": Data file not found, please consider the correct name of the analysed log file.' ")
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, please consider the correct name of the analysed log file.' "
+            )
 
 
 class MemNode:
@@ -84,8 +95,9 @@ class MemNode:
                     memory_vm += node_tmp.vsmem
                     node_tmp = node_tmp.node
                 node_tmp = MemNode.NodeList[name]
-                print("Name: {0}, Period: {1}, AB: {2}, Virt.: {3}, Res.: {4}".format(
-                    name, node_tmp.period, node_tmp.ab, memory_vm, memory_re))
+                print("Name: {0}, Period: {1}, AB: {2}, Virt.: {3}, Res.: {4}".
+                      format(name, node_tmp.period, node_tmp.ab, memory_vm,
+                             memory_re))
             except KeyError:
                 print("Nothing Found!")
 
@@ -137,7 +149,7 @@ class MemNode:
                     max_memory_vm = node.vsmem
             except KeyError:
                 print("Nothing Found!")
-        return max_memory_re-max_init_re, max_memory_vm-max_init_vm
+        return max_memory_re - max_init_re, max_memory_vm - max_init_vm
 
     @staticmethod
     def getFinalizationMemory():
@@ -156,11 +168,15 @@ class MemNode:
                     max_memory_vm = node.vsmem
             except KeyError:
                 print("Nothing Found!")
-        return max_memory_re-(max_init_re+max_exec_re), max_memory_vm-(max_init_vm+max_exec_vm)
+        return max_memory_re - (max_init_re + max_exec_re), max_memory_vm - (
+            max_init_vm + max_exec_vm)
 
     @staticmethod
     def getMemPerEvent():
-        return float(MemNode.getExecutionMemory()[0]/MemoryParser.Nb_of_events), float(MemNode.getExecutionMemory()[1]/MemoryParser.Nb_of_events)
+        return float(
+            MemNode.getExecutionMemory()[0] /
+            MemoryParser.Nb_of_events), float(
+                MemNode.getExecutionMemory()[1] / MemoryParser.Nb_of_events)
 
 
 #
@@ -169,6 +185,7 @@ class MemNode:
 ################################################################################
 if __name__ == "__main__":
     import sys
+
     if len(sys.argv) < 2:
         print("Please specify log filename")
         sys.exit(1)
diff --git a/handlers/timing/SimpleTimingParser.py b/handlers/timing/SimpleTimingParser.py
index 1ad23f0d2333d68992594b96d057e1b86d2d66e2..487a89ea4264eae4de46c8280c4ab38f35e46148 100644
--- a/handlers/timing/SimpleTimingParser.py
+++ b/handlers/timing/SimpleTimingParser.py
@@ -4,20 +4,22 @@ import sys
 import os
 import re
 
+
 #
 # Parser for the TimingAuditor logfile
 #
 ################################################################################
 class SimpleTimingParser:
-    """ Class responsible for parsing the TimingAuditor log from the
-    Gaudi run  log files """
+    """Class responsible for parsing the TimingAuditor log from the
+    Gaudi run log files"""
+
     def __init__(self, filename):
         self.root = None
-    	self.timingTable = []
-	self.parse(filename)
+        self.timingTable = []
+        self.parse(filename)
 
     def parse(self, logfilename):
-        """ Parse the log file"""
+        """Parse the log file"""
         # Now iterating on the input and looking for the TimingAuditor lines
        # The hierarchy of Algos and sequences is rebuilt based on the order
         # in the text file.
@@ -30,12 +32,18 @@ class SimpleTimingParser:
                     self.timingTable.append([m.group(1), float(m.group(2))])
             logf.close()
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__)+": Data file not found, please consider the correct name of the analysed log file.' ")
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, please consider the correct name of the analysed log file.' "
+            )
 
     def getTimingList(self):
-	return self.timingTable
+        return self.timingTable
+
 
 #
 # Main
@@ -43,6 +51,7 @@ class SimpleTimingParser:
 ################################################################################
 if __name__ == "__main__":
     import sys
+
     if len(sys.argv) < 2:
         print("Please specify log filename")
         sys.exit(1)
diff --git a/handlers/timing/TimeLineSvcParser.py b/handlers/timing/TimeLineSvcParser.py
index 1035cd0c899da5f8aaeb7f62e2d0151e82bf15c9..ada9ed23d7c94a825b645eff31131449529c5459 100644
--- a/handlers/timing/TimeLineSvcParser.py
+++ b/handlers/timing/TimeLineSvcParser.py
@@ -3,30 +3,28 @@
 import csv
 import json
 import os
-
 """TimeLineSvcParser will read a timeline.csv file and return a json object out
 of the resulting data. """
 
 
-class TimeLineSvcParser():
-
+class TimeLineSvcParser:
     def __init__(self, directory):
-        csv.register_dialect('timelinesvc', delimiter=' ')
+        csv.register_dialect("timelinesvc", delimiter=" ")
         self.directory = directory
         self.data = []
 
     def collectData(self):
-        csvfile = self.directory + os.sep + 'timeline.csv'
+        csvfile = self.directory + os.sep + "timeline.csv"
         csvh = open(csvfile)
         firstline = csvh.readlines()[0]
-        while firstline[0] == '#':
+        while firstline[0] == "#":
             firstline = firstline[1:]
-        firstline.strip()
+        # str.strip() returns a new string; reassign it so the whitespace is
+        # actually removed before splitting into field names
+        firstline = firstline.strip()
         fieldnames = tuple(firstline.split())
         csvh.close()
 
         csvh = open(csvfile)
-        reader = csv.DictReader(csvh, fieldnames, dialect='timelinesvc')
+        reader = csv.DictReader(csvh, fieldnames, dialect="timelinesvc")
         for row in reader:
             if list(row.keys())[0] != row[list(row.keys())[0]]:
                 self.data.append(row)
diff --git a/handlers/timing/TimingParser.py b/handlers/timing/TimingParser.py
index e72acfb805b4be8de292bc46676d02371a3cede7..817ceecbb231d398868a8c46c12779b61288342c 100644
--- a/handlers/timing/TimingParser.py
+++ b/handlers/timing/TimingParser.py
@@ -10,8 +10,8 @@ import re
 
 
 class TimingParser:
-    """ Class responsible for parsing the TimingAuditor log from the
-    Gaudi run  log files """
+    """Class responsible for parsing the TimingAuditor log from the
+    Gaudi run log files"""
 
     def __init__(self, filename):
         self.root = None
@@ -19,7 +19,7 @@ class TimingParser:
         self.parse(filename)
 
     def parse(self, logfilename):
-        """ Parse the log file"""
+        """Parse the log file"""
 
         # Now iterating on the input and looking for the TimingAuditor lines
        # The hierarchy of Algos and sequences is rebuilt based on the order
@@ -46,7 +46,14 @@ class TimingParser:
                     name = m.group(3).strip()
                     id = id + 1
                     # print "Id: ", id, "Name: ", name, "Value: ", float(m.group(4)), "Level: ", level, "Entries: ", m.group(7).strip()
-                    node = Node(id, level, name, float(m.group(4).strip()), int(m.group(7).strip()), parent)
+                    node = Node(
+                        id,
+                        level,
+                        name,
+                        float(m.group(4).strip()),
+                        int(m.group(7).strip()),
+                        parent,
+                    )
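+                    # lastparent keeps the most recent node seen at each depth,
+                    # so the next deeper line attaches to it as a child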
                     try:
                         lastparent[level] = node
                     except IndexError as e:
@@ -56,10 +63,14 @@ class TimingParser:
                     self.overall_timing = m_time.group(1)
             logf.close()
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__) +
-                            ": Data file not found, this handler excepts a 'run.log' in the results directory' ")
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, this handler excepts a 'run.log' in the results directory' "
+            )
 
         # Getting the actual root "EVENT LOOP"
         root = lastparent[0]
@@ -68,7 +79,7 @@ class TimingParser:
         self.root = root
 
     def getRoot(self):
-        """ returns the root of the tree """
+        """returns the root of the tree"""
         return self.root
 
     def getHierarchicalJSON(self):
@@ -89,11 +100,11 @@ class TimingParser:
         return self.root.findByName(name)
 
     def getTopN(self, n):
-        """ Get the top N ranked algorithms"""
+        """Get the top N ranked algorithms"""
         return sorted(self.root.getAllChildren(), key=Node.getRank)[:n]
 
     def getAllSorted(self):
-        """ Get the top N ranked algorithms"""
+        """Get the top N ranked algorithms"""
         return sorted(self.root.getAllChildren(), key=Node.getRank)
 
 
@@ -102,7 +113,7 @@ class TimingParser:
 #
 ################################################################################
 class Node:
-    """ Representation of an algorithm or sequence """
+    """Representation of an algorithm or sequence"""
 
     @classmethod
     def getActualTimeUsed(cls, o):
@@ -113,7 +124,7 @@ class Node:
         return o.rank
 
     def __init__(self, id, level, name, value, entries, parent=None):
-        """ Constructor """
+        """Constructor"""
         self.id = id
         self.level = level
         self.name = name.replace(" ", "_")
@@ -128,7 +139,7 @@ class Node:
             parent.children.append(self)
 
     def findByName(self, name):
-        """ Find an algorithm in the subtree related to the Node  """
+        """Find an algorithm in the subtree related to the Node"""
         if self.name == name:
             return self
 
@@ -139,12 +150,12 @@ class Node:
         return None
 
     def actualTimeUsed(self):
-        """ returns the CPU time actually used in the sequence,
-        excluding time used by the children """
+        """returns the CPU time actually used in the sequence,
+        excluding time used by the children"""
         return self.total - self.getSumChildrenTime()
 
     def getAllChildren(self):
-        """ Navigate the tree to rturn all the children"""
+        """Navigate the tree to rturn all the children"""
         cdren = []
         cdren.append(self)
         for c in self.children:
@@ -152,7 +163,7 @@ class Node:
         return cdren
 
     def getNodesMatching(self, namepattern, foundnodes=None):
-        """ Find all children matching a given name """
+        """Find all children matching a given name"""
         if foundnodes == None:
             foundnodes = set()
 
@@ -165,7 +176,7 @@ class Node:
         return foundnodes
 
     def getParentNodes(self):
-        """ Find all children matching a given name """
+        """Find all children matching a given name"""
 
         parents = set()
         if self.parent != None:
@@ -175,7 +186,7 @@ class Node:
         return parents
 
     def getMinChildrenRank(self):
-        """ Get the lowest rank in all the children """
+        """Get the lowest rank in all the children"""
         m = self.rank
         for c in self.children:
             if c.getMinChildrenRank() < m:
@@ -183,22 +194,22 @@ class Node:
         return m
 
     def getSumChildrenTime(self):
-        """ Get the sum of CPU time spent by the children """
+        """Get the sum of CPU time spent by the children"""
         tmptotal = 0.0
         for c in self.children:
             tmptotal += c.total
         return tmptotal
 
     def perLevel(self):
-        """ Percentage of time spent in this algo/seq over the
-        time used by the parent """
+        """Percentage of time spent in this algo/seq over the
+        time used by the parent"""
         if self.parent != None:
-            return round((self.total * 100.0)/self.parent.total, 2)
+            return round((self.total * 100.0) / self.parent.total, 2)
         else:
             return 100.0
 
     def getEventTotal(self):
-        """ Get the total time spent in the EVENT LOOP """
+        """Get the total time spent in the EVENT LOOP"""
         if self.eventTotal != None:
             return self.eventTotal
 
@@ -210,38 +221,48 @@ class Node:
             return self.eventTotal
 
     def perTotal(self):
-        """ percentage time spent in this algorithm vs the TOTAL time"""
+        """percentage time spent in this algorithm vs the TOTAL time"""
         return round(self.total * 100.0 / self.getEventTotal(), 2)
 
     def getfullname(self):
-        """ Returns the complete path flatened joined by '-' """
+        """Returns the complete path flatened joined by '-'"""
         if self.parent != None:
             return self.parent.getfullname() + "-" + self.name
         else:
             return self.name
 
     def getJSON(self, hierarchical=True):
-        """ Returns teh JSON representation of thios node """
+        """Returns teh JSON representation of thios node"""
         cjson = ""
 
         if hierarchical and len(self.children) > 0:
             cjson = ', "children":[%s]' % self._childrenjson()
 
         tmpl = '{"code":%d, "name":"%s", "rank":%d, "mrank":%d, "childrenTotal":%.2f, "perTotal":%.2f, "perLevel":%.2f, "avgtime":%.2f, "total":%.2f, "entries":%d '
-        vals = [self.id, self.name, self.rank, self.getMinChildrenRank(), self.getSumChildrenTime(), self.perTotal(),
-                self.perLevel(), self.value, self.total, self.entries]
+        vals = [
+            self.id,
+            self.name,
+            self.rank,
+            self.getMinChildrenRank(),
+            self.getSumChildrenTime(),
+            self.perTotal(),
+            self.perLevel(),
+            self.value,
+            self.total,
+            self.entries,
+        ]
         if self.parent != None:
             tmpl += ', "_parentCode":%d %s}'
             vals.append(self.parent.id)
             vals.append(cjson)
         else:
-            tmpl += ' %s}'
+            tmpl += " %s}"
             vals.append(cjson)
 
         return tmpl % tuple(vals)
 
     def printChildrenList(self, maxLevel=-1, thisLevel=0):
-        """ Prints the list of children down to a level """
+        """Prints the list of children down to a level"""
 
         # print ">>>> %d\t%s" % (thisLevel, self.name)
         if thisLevel < maxLevel:
@@ -249,7 +270,7 @@ class Node:
                 c.printChildrenList(maxLevel, thisLevel + 1)
 
     def _childrenjson(self):
-        """ Util function to return the JSON reprentation of the children of the node """
+        """Util function to return the JSON reprentation of the children of the node"""
         ct = 1
         json = ""
         for c in self.children:
@@ -260,9 +281,10 @@ class Node:
         return json
 
     def rankChildren(self):
-        """ Actually sort of the children of this node and set their rank.
+        """Actually sort of the children of this node and set their rank.
         This MUST be called on the tree before using teh rank value"""
-        l = sorted(self.getAllChildren(), key=Node.getActualTimeUsed, reverse=True)
+        l = sorted(
+            self.getAllChildren(), key=Node.getActualTimeUsed, reverse=True)
         for i, n in enumerate(l):
             n.rank = i + 1
 
@@ -273,6 +295,7 @@ class Node:
 ################################################################################
 if __name__ == "__main__":
     import sys
+
     if len(sys.argv) < 2:
         print("Please specify log filename")
         sys.exit(1)
@@ -283,21 +306,22 @@ if __name__ == "__main__":
 
         # nodelist.append(eventLoop)
 
-        #dvUserSeq = eventLoop.findByName("DaVinciUserSequence")
+        # dvUserSeq = eventLoop.findByName("DaVinciUserSequence")
         # nodelist.append(dvUserSeq)
         # for c in dvUserSeq.children:
         # nodelist.append(c)
 
-        #stripGlobal = dvUserSeq.findByName("StrippingGlobal")
+        # stripGlobal = dvUserSeq.findByName("StrippingGlobal")
         # nodelist.append(stripGlobal)
         # for c in stripGlobal.children:
         # nodelist.append(c)
 
-        #StrippingProtectedSequenceALL = stripGlobal.findByName("StrippingProtectedSequenceALL")
+        # StrippingProtectedSequenceALL = stripGlobal.findByName("StrippingProtectedSequenceALL")
         # nodelist.append(StrippingProtectedSequenceALL)
         # for c in StrippingProtectedSequenceALL.children:
         # nodelist.append(c)
 
         for node in t.getAllSorted():
-            if node.name == 'Hlt2CharmHadD2HHHKsDD':
-                print("{0} - {1} - {2} - {3}".format(node.id, node.name, node.value, node.entries))
+            if node.name == "Hlt2CharmHadD2HHHKsDD":
+                print("{0} - {1} - {2} - {3}".format(node.id, node.name,
+                                                     node.value, node.entries))
diff --git a/handlers/timing/VTuneModuleParser.py b/handlers/timing/VTuneModuleParser.py
index c46645d4a2ca8afdd6045af6b40991efc621727b..9cff746f0c6ba17ee06e35e441eeb2ed3f70df52 100644
--- a/handlers/timing/VTuneModuleParser.py
+++ b/handlers/timing/VTuneModuleParser.py
@@ -4,19 +4,21 @@ import sys
 import os
 import re
 
+
 #
 # Parser for the VTune logfile
 #
 ################################################################################
 class VTuneModuleParser:
-    """ Class responsible for parsing the IntelAuditor log for modules (libraries)."""
+    """Class responsible for parsing the IntelAuditor log for modules (libraries)."""
+
     def __init__(self, filename):
         self.root = None
-    	self.timingTable = []
-	self.parse(filename)
+        self.timingTable = []
+        self.parse(filename)
 
     def parse(self, logfilename):
-        """ Parse the log file"""
+        """Parse the log file"""
         # Now iterating on the input and looking for the TimingAuditor lines
        # The hierarchy of Algos and sequences is rebuilt based on the order
         # in the text file.
@@ -29,12 +31,18 @@ class VTuneModuleParser:
                     self.timingTable.append([m.group(1), float(m.group(2))])
             logf.close()
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__)+": Data file not found, please consider the correct name of the analysed log file.' ")
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, please consider the correct name of the analysed log file.' "
+            )
 
     def getTimingList(self):
-	return self.timingTable
+        return self.timingTable
+
 
 #
 # Main
@@ -42,6 +50,7 @@ class VTuneModuleParser:
 ################################################################################
 if __name__ == "__main__":
     import sys
+
     if len(sys.argv) < 2:
         print("Please specify log filename")
         sys.exit(1)
diff --git a/handlers/timing/VTuneTimingParser.py b/handlers/timing/VTuneTimingParser.py
index f2e5649a31304dc57b2a32e9e1ec80f8f85fb6ba..9dee4acd5f4e8b6db7e4a2ab393a8d7e9747d4da 100644
--- a/handlers/timing/VTuneTimingParser.py
+++ b/handlers/timing/VTuneTimingParser.py
@@ -3,23 +3,25 @@
 import sys
 import re
 
+
 #
 # Parser for the IntelAuditor logfile
 #
 ################################################################################
 class VTuneTimingParser:
-    """ Class responsible for parsing the TimingAuditor log from the
-    Gaudi run log files """
+    """Class responsible for parsing the TimingAuditor log from the
+    Gaudi run log files"""
+
     def __init__(self, filename_run, filename_task):
         self.root = None
         self.parse(filename_run, filename_task)
 
     def parse(self, rfname, tfname):
-        """ Parse the log file"""
+        """Parse the log file"""
 
         regxp = "(TIMER|TimingAuditor).(TIMER|T...)\s+INFO ([\s\w]+?)\s*\|([\d\s\.]+?)\|([\d\s\.]+?)\|([\d\s\.]+?)\|([\d\s\.]+?)\|.*"
         nb_of_evts_per_alg = []
-        event_loop         = .0
+        event_loop = 0.0
         try:
             log = open(rfname, "r")
             for l in log.readlines():
@@ -27,16 +29,24 @@ class VTuneTimingParser:
                 if m != None:
                     if "EVENT LOOP" == m.group(3).strip():
                         event_loop = float(m.group(7).strip())
-                    nb_of_evts_per_alg.append([m.group(3).strip(), float(m.group(7).strip())])
+                    nb_of_evts_per_alg.append(
+                        [m.group(3).strip(),
+                         float(m.group(7).strip())])
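+                    # keep (name, entry count) pairs from the run log so the
+                    # VTune task names parsed below can be matched to event counts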
             log.close()
-            nb_of_evts_per_alg[0][0] = re.sub("EVENT LOOP", "EVENT_LOOP", nb_of_evts_per_alg[0][0])
-            #print nb_of_evts_per_alg
+            nb_of_evts_per_alg[0][0] = re.sub("EVENT LOOP", "EVENT_LOOP",
+                                              nb_of_evts_per_alg[0][0])
+            # print nb_of_evts_per_alg
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__)+": Data file not found, this handler excepts a 'run.log' in the results directory' ")
-        parent       = None
-        lastparent   = [None]
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, this handler excepts a 'run.log' in the results directory' "
+            )
+        parent = None
+        lastparent = [None]
         id = 0
         regxp = "^\s*([\[\]\w_ ]+)\s{5,}([\d\.]+)\s+([\d\.]+)"
         try:
@@ -47,39 +57,51 @@ class VTuneTimingParser:
                     full_name = m.group(1).rstrip()
                     if str(full_name) == "[Outside any task]":
                         full_name = "EVENT_LOOP"
-                    final_digit = re.search('\s{3,}\d+', full_name)
+                    final_digit = re.search(r"\s{3,}\d+", full_name)
                     if final_digit != None:
                         full_name = full_name[:final_digit.start(0)]
                     names = full_name.split()
                     if full_name == "EVENT_LOOP":
-                       level = 0
+                        level = 0
                     else:
-                       level = len(names)
+                        level = len(names)
                     parent = None
                     if level > 0:
-                       parent = lastparent[level-1]
+                        parent = lastparent[level - 1]
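+                    # the nesting level equals the number of whitespace-separated
+                    # name tokens VTune prints, except for the EVENT_LOOP root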
                     nb_of_evts = -1
-                    mai        =  0
-                    idx        = -1
+                    mai = 0
+                    idx = -1
                     for i in nb_of_evts_per_alg:
-                        search_str = '^' + i[0]
-                        n = re.search(search_str, names[len(names)-1])
+                        search_str = "^" + i[0]
+                        n = re.search(search_str, names[len(names) - 1])
                         if n != None:
                             if mai < len(n.group(0)):
                                 mai = len(n.group(0))
                                 idx = i
                     nb_of_evts = int(idx[1])
                     id = id + 1
-                    node = Node(id, level, names[len(names)-1], float(m.group(2).strip()), int(nb_of_evts), parent)
+                    node = Node(
+                        id,
+                        level,
+                        names[len(names) - 1],
+                        float(m.group(2).strip()),
+                        int(nb_of_evts),
+                        parent,
+                    )
                     try:
                         lastparent[level] = node
                     except IndexError as e:
                         lastparent.append(node)
             logf.close()
         except OSError:
-            raise Exception(str(self.__class__)+": No result directory, check the given result directory")
+            raise Exception(
+                str(self.__class__) +
+                ": No result directory, check the given result directory")
         except IOError:
-            raise Exception(str(self.__class__)+": Data file not found, this handler excepts a 'run.log' in the results directory' ")
+            raise Exception(
+                str(self.__class__) +
+                ": Data file not found, this handler excepts a 'run.log' in the results directory' "
+            )
 
         # Getting the actual root "EVENT LOOP"
         root = lastparent[0]
@@ -91,7 +113,7 @@ class VTuneTimingParser:
         self.root = root
 
     def getRoot(self):
-        """ returns the root of the tree """
+        """returns the root of the tree"""
         return self.root
 
     def getHierarchicalJSON(self):
@@ -99,7 +121,7 @@ class VTuneTimingParser:
 
     def getFlatJSON(self):
         ct = 1
-        json="["
+        json = "["
         for c in self.root.getAllChildren():
             if ct > 1:
                 json += ",\n"
@@ -112,11 +134,11 @@ class VTuneTimingParser:
         return self.root.findByName(name)
 
     def getTopN(self, n):
-        """ Get the top N ranked algorithms"""
+        """Get the top N ranked algorithms"""
         return sorted(self.root.getAllChildren(), key=Node.getRank)[:n]
 
     def getAllSorted(self):
-        """ Get the top N ranked algorithms"""
+        """Get the top N ranked algorithms"""
         return sorted(self.root.getAllChildren(), key=Node.getRank)
 
 
@@ -125,7 +147,7 @@ class VTuneTimingParser:
 #
 ################################################################################
 class Node:
-    """ Representation of an algorithm or sequence """
+    """Representation of an algorithm or sequence"""
 
     @classmethod
     def getActualTimeUsed(cls, o):
@@ -140,9 +162,9 @@ class Node:
         self.level = level
         self.name = name.replace(" ", "_")
         self.rank = 0
-        self.value = float(value) # in [ms]
+        self.value = float(value)  # in [ms]
         self.entries = int(entries)
-        self.total = float(self.value) # in [s]
+        self.total = float(self.value)  # in [s]
         self.children = []
         self.parent = parent
         self.eventTotal = None
@@ -159,15 +181,25 @@ class Node:
     def finalize2(self):
         for n in self.children:
             n.finalize2()
-        self.value = (self.value/self.entries)*1000
+        self.value = (self.value / self.entries) * 1000
 
     def printTime(self):
-        print(self.name, ", ", self.value, ", ", self.level, ", ", self.total , ", ", self.entries)
+        print(
+            self.name,
+            ", ",
+            self.value,
+            ", ",
+            self.level,
+            ", ",
+            self.total,
+            ", ",
+            self.entries,
+        )
         for n in self.children:
             n.printTime()
 
     def findByName(self, name):
-        """ Find an algorithm in the subtree related to the Node  """
+        """Find an algorithm in the subtree related to the Node"""
         if self.name == name:
             return self
 
@@ -177,14 +209,13 @@ class Node:
                 return tmp
         return None
 
-
     def actualTimeUsed(self):
-        """ returns the CPU time actually used in the sequence,
-        excluding time used by the children """
+        """returns the CPU time actually used in the sequence,
+        excluding time used by the children"""
         return self.total - self.getSumChildrenTime()
 
     def getAllChildren(self):
-        """ Navigate the tree to rturn all the children"""
+        """Navigate the tree to rturn all the children"""
         cdren = []
         cdren.append(self)
         for c in self.children:
@@ -192,31 +223,30 @@ class Node:
         return cdren
 
     def getMinChildrenRank(self):
-        """ Get the lowest rank in all the children """
+        """Get the lowest rank in all the children"""
         m = self.rank
-	for c in self.children:
+        for c in self.children:
             if c.getMinChildrenRank() < m:
-                 m = c.getMinChildrenRank()
+                m = c.getMinChildrenRank()
         return m
 
-
     def getSumChildrenTime(self):
-        """ Get the sum of CPU time spent by the children """
+        """Get the sum of CPU time spent by the children"""
         tmptotal = 0.0
         for c in self.children:
             tmptotal += c.total
         return tmptotal
 
     def perLevel(self):
-        """ Percentage of time spent in this algo/seq over the
-        time used by the parent """
+        """Percentage of time spent in this algo/seq over the
+        time used by the parent"""
         if self.parent != None:
-            return round((self.total * 100.0)/self.parent.total,2)
+            return round((self.total * 100.0) / self.parent.total, 2)
         else:
             return 100.0
 
     def getEventTotal(self):
-        """ Get the total time spent in the EVENT LOOP """
+        """Get the total time spent in the EVENT LOOP"""
         if self.eventTotal != None:
             return self.eventTotal
 
@@ -228,41 +258,50 @@ class Node:
             return self.eventTotal
 
     def perTotal(self):
-        """ percentage time spent in this algorithm vs the TOTAL time"""
-        return round(self.total * 100.0 / self.getEventTotal(),2)
+        """percentage time spent in this algorithm vs the TOTAL time"""
+        return round(self.total * 100.0 / self.getEventTotal(), 2)
 
     def getfullname(self):
-        """ Returns the complete path flatened joined by '-' """
+        """Returns the complete path flatened joined by '-'"""
         if self.parent != None:
             return self.parent.getfullname() + "-" + self.name
         else:
             return self.name
 
     def getJSON(self, hierarchical=True):
-        """ Returns teh JSON representation of thios node """
+        """Returns teh JSON representation of thios node"""
         cjson = ""
 
         if hierarchical and len(self.children) > 0:
             cjson = ', "children":[%s]' % self._childrenjson()
 
         tmpl = '{"code":%d, "name":"%s", "rank":%d, "mrank":%d, "childrenTotal":%.2f, "perTotal":%.2f, "perLevel":%.2f, "avgtime":%.2f, "total":%.2f, "entries":%d '
-        vals =  [ self.id, self.name, self.rank, self.getMinChildrenRank(), self.getSumChildrenTime(), self.perTotal(),
-                  self.perLevel(), self.value, self.total, self.entries ]
+        vals = [
+            self.id,
+            self.name,
+            self.rank,
+            self.getMinChildrenRank(),
+            self.getSumChildrenTime(),
+            self.perTotal(),
+            self.perLevel(),
+            self.value,
+            self.total,
+            self.entries,
+        ]
         if self.parent != None:
             tmpl += ', "_parentCode":%d %s}'
             vals.append(self.parent.id)
             vals.append(cjson)
         else:
-            tmpl += ' %s}'
-            vals.append( cjson)
+            tmpl += " %s}"
+            vals.append(cjson)
 
         return tmpl % tuple(vals)
 
-
     def _childrenjson(self):
-        """ Util function to return the JSON reprentation of the children of the node """
+        """Util function to return the JSON reprentation of the children of the node"""
         ct = 1
-        json=""
+        json = ""
         for c in self.children:
             if ct > 1:
                 json += ",\n"
@@ -271,26 +310,29 @@ class Node:
         return json
 
     def rankChildren(self):
-        """ Actually sort of the children of this node and set their rank.
+        """Actually sort of the children of this node and set their rank.
         This MUST be called on the tree before using teh rank value"""
-        l = sorted(self.getAllChildren(), key=Node.getActualTimeUsed, reverse=True)
+        l = sorted(
+            self.getAllChildren(), key=Node.getActualTimeUsed, reverse=True)
         for i, n in enumerate(l):
             n.rank = i + 1
 
+
 #
 # Main
 #
 ################################################################################
 if __name__ == "__main__":
     import sys
+
     if len(sys.argv) < 3:
         print("Please specify log filenames")
         sys.exit(1)
     else:
-        filename_run  = sys.argv[1]
+        filename_run = sys.argv[1]
         filename_task = sys.argv[2]
         print("Processing ... ")
         t = VTuneTimingParser(filename_run, filename_task)
 
-        #for n in t.getTopN(10):
-            #print n.name, " - ", n.perTotal()
+        # for n in t.getTopN(10):
+        # print n.name, " - ", n.perTotal()
diff --git a/handlers/utils/ConfigHistos.py b/handlers/utils/ConfigHistos.py
index c07ca6a798be274b6d0bf825f6219130261b81c4..12afa2fe6d177c1b5ae5e03737b7b2f566da0429 100644
--- a/handlers/utils/ConfigHistos.py
+++ b/handlers/utils/ConfigHistos.py
@@ -1,14 +1,9 @@
 from collections import defaultdict
 
-def efficiencyHistoDict() :
-    basedict = {
-        "eta" : {},
-        "p" : {},
-        "pt" : {},
-        "phi" : {},
-        "nPV" : {}
-        }
-    
+
+def efficiencyHistoDict():
+    basedict = {"eta": {}, "p": {}, "pt": {}, "phi": {}, "nPV": {}}
+
     basedict["eta"]["xTitle"] = "#eta"
     basedict["eta"]["variable"] = "Eta"
 
@@ -23,69 +18,89 @@ def efficiencyHistoDict() :
 
     basedict["nPV"]["xTitle"] = "# of PVs"
     basedict["nPV"]["variable"] = "nPV"
-    
+
     return basedict
 
-def ghostHistoDict() :
-    basedict = {
-        "eta" : {},
-        "nPV" : {}
-        }
+
+def ghostHistoDict():
+    basedict = {"eta": {}, "nPV": {}}
 
     basedict["eta"]["xTitle"] = "#eta"
     basedict["eta"]["variable"] = "Eta"
 
     basedict["nPV"]["xTitle"] = "# of PVs"
     basedict["nPV"]["variable"] = "nPV"
-    
+
+    return basedict
+
+
+def getCuts():
+    basedict = {"Velo": {}, "Upstream": {}, "Forward": {}}
+
+    basedict["Velo"] = [
+        "VeloTracks",
+        "VeloTracks_eta25",
+        "LongFromB_eta25",
+        "LongFromD_eta25",
+        "LongStrange_eta25",
+    ]
+    basedict["Upstream"] = [
+        "VeloUTTracks_eta25",
+        "LongFromB_eta25",
+        "LongFromD_eta25",
+        "LongStrange_eta25",
+    ]
+    basedict["Forward"] = [
+        "Long_eta25",
+        "Long_eta25_triggerNumbers",
+        "LongFromB_eta25",
+        "LongFromD_eta25",
+        "LongStrange_eta25",
+    ]
+
     return basedict
 
-def getCuts() :
-     basedict = {
-        "Velo" : {},
-        "Upstream" : {},
-        "Forward" : {} 
-        }
-    
-     basedict["Velo"] = ["VeloTracks", "VeloTracks_eta25", "LongFromB_eta25", "LongFromD_eta25", "LongStrange_eta25"]
-     basedict["Upstream"] = ["VeloUTTracks_eta25", "LongFromB_eta25", "LongFromD_eta25", "LongStrange_eta25"]
-     basedict["Forward"] = ["Long_eta25", "Long_eta25_triggerNumbers", "LongFromB_eta25", "LongFromD_eta25", "LongStrange_eta25"]
-    
-     return basedict
-
-def categoriesDict() :
-    basedict = defaultdict(lambda : defaultdict(dict))
-
-    basedict["Velo"]["VeloTracks"]["title"]        = "Velo"
-    basedict["Velo"]["VeloTracks_eta25"]["title"]  = "Velo, 2 < eta < 5"
-    basedict["Velo"]["LongFromB_eta25"]["title"]   = "Long from B, 2 < eta < 5"
-    basedict["Velo"]["LongFromD_eta25"]["title"]   = "Long from D, 2 < eta < 5"
-    basedict["Velo"]["LongStrange_eta25"]["title"] = "Long strange, 2 < eta < 5"
-    basedict["Velo"]["VeloTracks"]["plotElectrons"]        = False
-    basedict["Velo"]["VeloTracks_eta25"]["plotElectrons"]  = True
-    basedict["Velo"]["LongFromB_eta25"]["plotElectrons"]   = False
-    basedict["Velo"]["LongFromD_eta25"]["plotElectrons"]   = False
+
+def categoriesDict():
+    basedict = defaultdict(lambda: defaultdict(dict))
+
+    basedict["Velo"]["VeloTracks"]["title"] = "Velo"
+    basedict["Velo"]["VeloTracks_eta25"]["title"] = "Velo, 2 < eta < 5"
+    basedict["Velo"]["LongFromB_eta25"]["title"] = "Long from B, 2 < eta < 5"
+    basedict["Velo"]["LongFromD_eta25"]["title"] = "Long from D, 2 < eta < 5"
+    basedict["Velo"]["LongStrange_eta25"][
+        "title"] = "Long strange, 2 < eta < 5"
+    basedict["Velo"]["VeloTracks"]["plotElectrons"] = False
+    basedict["Velo"]["VeloTracks_eta25"]["plotElectrons"] = True
+    basedict["Velo"]["LongFromB_eta25"]["plotElectrons"] = False
+    basedict["Velo"]["LongFromD_eta25"]["plotElectrons"] = False
     basedict["Velo"]["LongStrange_eta25"]["plotElectrons"] = False
 
     basedict["Upstream"]["VeloUTTracks_eta25"]["title"] = "veloUT, 2 < eta < 5"
-    basedict["Upstream"]["LongFromB_eta25"]["title"]    = "Long from B, 2 < eta < 5"
-    basedict["Upstream"]["LongFromD_eta25"]["title"]    = "Long from D, 2 < eta < 5"
-    basedict["Upstream"]["LongStrange_eta25"]["title"]  = "Long strange, 2 < eta < 5"
+    basedict["Upstream"]["LongFromB_eta25"][
+        "title"] = "Long from B, 2 < eta < 5"
+    basedict["Upstream"]["LongFromD_eta25"][
+        "title"] = "Long from D, 2 < eta < 5"
+    basedict["Upstream"]["LongStrange_eta25"][
+        "title"] = "Long strange, 2 < eta < 5"
     basedict["Upstream"]["VeloUTTracks_eta25"]["plotElectrons"] = False
-    basedict["Upstream"]["LongFromB_eta25"]["plotElectrons"]    = False
-    basedict["Upstream"]["LongFromD_eta25"]["plotElectrons"]    = False
-    basedict["Upstream"]["LongStrange_eta25"]["plotElectrons"]  = False
-
-    basedict["Forward"]["Long_eta25"]["title"]        = "Long, 2 < eta < 5"
-    basedict["Forward"]["Long_eta25_triggerNumbers"]["title"] = "Long, 2 < eta < 5, p > 3 GeV, pt > 500 MeV"
-    basedict["Forward"]["LongFromB_eta25"]["title"]   = "Long from B, 2 < eta < 5"
-    basedict["Forward"]["LongFromD_eta25"]["title"]   = "Long from D, 2 < eta < 5"
-    basedict["Forward"]["LongStrange_eta25"]["title"] = "Long strange, 2 < eta < 5"
-    basedict["Forward"]["Long_eta25"]["plotElectrons"]        = True
+    basedict["Upstream"]["LongFromB_eta25"]["plotElectrons"] = False
+    basedict["Upstream"]["LongFromD_eta25"]["plotElectrons"] = False
+    basedict["Upstream"]["LongStrange_eta25"]["plotElectrons"] = False
+
+    basedict["Forward"]["Long_eta25"]["title"] = "Long, 2 < eta < 5"
+    basedict["Forward"]["Long_eta25_triggerNumbers"][
+        "title"] = "Long, 2 < eta < 5, p > 3 GeV, pt > 500 MeV"
+    basedict["Forward"]["LongFromB_eta25"][
+        "title"] = "Long from B, 2 < eta < 5"
+    basedict["Forward"]["LongFromD_eta25"][
+        "title"] = "Long from D, 2 < eta < 5"
+    basedict["Forward"]["LongStrange_eta25"][
+        "title"] = "Long strange, 2 < eta < 5"
+    basedict["Forward"]["Long_eta25"]["plotElectrons"] = True
     basedict["Forward"]["Long_eta25_triggerNumbers"]["plotElectrons"] = True
-    basedict["Forward"]["LongFromB_eta25"]["plotElectrons"]           = False
-    basedict["Forward"]["LongFromD_eta25"]["plotElectrons"]           = False
-    basedict["Forward"]["LongStrange_eta25"]["plotElectrons"]         = False
-    
-    
+    basedict["Forward"]["LongFromB_eta25"]["plotElectrons"] = False
+    basedict["Forward"]["LongFromD_eta25"]["plotElectrons"] = False
+    basedict["Forward"]["LongStrange_eta25"]["plotElectrons"] = False
+
     return basedict
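
The three helpers above drive the PrChecker efficiency plots: getCuts() lists the selections per reconstruction stage, categoriesDict() carries the per-selection plot metadata, and efficiencyHistoDict() the per-variable axis settings. A minimal sketch of how a consumer might walk them; the print body is illustrative only, and the import path assumes the repository root is on sys.path:

    # Sketch: iterate the ConfigHistos dictionaries the way a plot handler might.
    from handlers.utils.ConfigHistos import (categoriesDict,
                                             efficiencyHistoDict, getCuts)

    cuts = getCuts()
    categories = categoriesDict()
    histos = efficiencyHistoDict()

    for stage, selections in cuts.items():  # "Velo", "Upstream", "Forward"
        for sel in selections:  # e.g. "LongFromB_eta25"
            meta = categories[stage][sel]
            for var, cfg in histos.items():  # "eta", "p", "pt", "phi", "nPV"
                # A real handler would look up histograms via cfg["variable"];
                # here we only show the metadata that is available.
                print(stage, meta["title"], cfg["xTitle"], meta["plotElectrons"])
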
diff --git a/handlers/utils/ConfigHistos_HLT2.py b/handlers/utils/ConfigHistos_HLT2.py
index cdc4e9053b94d1ccecceaed261f8932f8d86bc6b..31d06ac9ca9a56251cfa9fe6720c76c5e70da300 100644
--- a/handlers/utils/ConfigHistos_HLT2.py
+++ b/handlers/utils/ConfigHistos_HLT2.py
@@ -1,54 +1,64 @@
 from collections import defaultdict
 
-Velo_sel_raw = { 
-    "Velo_eta25" : "velo track, 2 < eta < 5",
-    "Velo_hasnoUTSeed_eta25" : "velo track, not UT, Scifi hits, 2 < eta < 5",
-    "Velo_isLong_eta25" : "Velo track, has Scifi hits, 2 < eta < 5", 
-    "Velo_hasVeloUTSeed_eta25" : "Velo track, has UT and Scifi hits, 2 < eta < 5", 
-    "Velo_hasVeloSeed_eta25" : "Velo track, has Scifi hits, has no UT hits, 2 < eta < 5", 
-    "Velo" : "velo track",
-    "Velo_hasnoUTSeed" : "velo track, not UT, Scifi hits",
-    "Velo_isLong" : "Velo track, has Scifi hits", 
-    "Velo_hasVeloUTSeed" : "Velo track, has UT and Scifi hits", 
-    "Velo_hasVeloSeed" : "Velo track, has Scifi hits, has no UT hits", 
+Velo_sel_raw = {
+    "Velo_eta25":
+    "velo track, 2 < eta < 5",
+    "Velo_hasnoUTSeed_eta25":
+    "velo track, not UT, Scifi hits, 2 < eta < 5",
+    "Velo_isLong_eta25":
+    "Velo track, has Scifi hits, 2 < eta < 5",
+    "Velo_hasVeloUTSeed_eta25":
+    "Velo track, has UT and Scifi hits, 2 < eta < 5",
+    "Velo_hasVeloSeed_eta25":
+    "Velo track, has Scifi hits, has no UT hits, 2 < eta < 5",
+    "Velo":
+    "velo track",
+    "Velo_hasnoUTSeed":
+    "velo track, not UT, Scifi hits",
+    "Velo_isLong":
+    "Velo track, has Scifi hits",
+    "Velo_hasVeloUTSeed":
+    "Velo track, has UT and Scifi hits",
+    "Velo_hasVeloSeed":
+    "Velo track, has Scifi hits, has no UT hits",
 }
 
-Upstream_sel_raw = { 
-    "Upstream_eta25" : "Upstream track, 2 < eta < 5", 
-    "Upstream_hasSeed_eta25" : "Upstream track, has UT hits, 2 < eta < 5",
-    "Upstream_hasnoSeed_eta25" : "Upstream track, has no UT hits, 2 < eta < 5",
+Upstream_sel_raw = {
+    "Upstream_eta25": "Upstream track, 2 < eta < 5",
+    "Upstream_hasSeed_eta25": "Upstream track, has UT hits, 2 < eta < 5",
+    "Upstream_hasnoSeed_eta25": "Upstream track, has no UT hits, 2 < eta < 5",
 }
 
-Ttrack_sel_raw = { 
-    "Seed_eta25" : "Ttrack, 2 < eta < 5",
-    "Seed_hasnoVeloUT_eta25" : "Ttrack, without Velo or UT hits, 2 < eta < 5",
-    "Seed_isLong_eta25" : "Ttrack, with Velo hits, 2 < eta < 5",
-    "Seed_isDown_eta25" : "Ttrack, with UT hits, 2 < eta < 5"
+Ttrack_sel_raw = {
+    "Seed_eta25": "Ttrack, 2 < eta < 5",
+    "Seed_hasnoVeloUT_eta25": "Ttrack, without Velo or UT hits, 2 < eta < 5",
+    "Seed_isLong_eta25": "Ttrack, with Velo hits, 2 < eta < 5",
+    "Seed_isDown_eta25": "Ttrack, with UT hits, 2 < eta < 5",
 }
 
-Down_sel_raw = { 
-    "Down_eta25" : "Downstream track, 2 < eta < 5", 
-    "Down_hasnoVelo_eta25" : "Downstream track, without Velo hits, 2 < eta < 5",
-    "Down_hasVelo_eta25" : "Downstream track, with Velo hits, 2 < eta < 5"
+Down_sel_raw = {
+    "Down_eta25": "Downstream track, 2 < eta < 5",
+    "Down_hasnoVelo_eta25": "Downstream track, without Velo hits, 2 < eta < 5",
+    "Down_hasVelo_eta25": "Downstream track, with Velo hits, 2 < eta < 5",
 }
 
-Forward_sel_raw = { 
-    "Long_eta25" : "Long track, 2 < eta < 5", 
-    "Long_hasUT_eta25" : "Long track, with UT hits, 2 < eta < 5", 
-    "Long_hasnoUT_eta25" : "Long track, without UT hits, 2 < eta < 5", 
+Forward_sel_raw = {
+    "Long_eta25": "Long track, 2 < eta < 5",
+    "Long_hasUT_eta25": "Long track, with UT hits, 2 < eta < 5",
+    "Long_hasnoUT_eta25": "Long track, without UT hits, 2 < eta < 5",
 }
 
 
-def efficiencyHistoDict() :
+def efficiencyHistoDict():
     basedict = {
-        "eta" : {},
-        "p" : {},
-        "pt" : {},
-        "phi" : {},
-        "nPV" : {},
-        "docaz" : {},
-        }
-    
+        "eta": {},
+        "p": {},
+        "pt": {},
+        "phi": {},
+        "nPV": {},
+        "docaz": {},
+    }
+
     basedict["eta"]["xTitle"] = "#eta"
     basedict["eta"]["variable"] = "Eta"
 
@@ -66,16 +76,20 @@ def efficiencyHistoDict() :
 
     basedict["docaz"]["xTitle"] = "docaz [mm]"
     basedict["docaz"]["variable"] = "docaz"
-    
+
     return basedict
 
 
-def enrich_selections(mysel_raw): #Give a dict as input, return a dict with enriched selections. The same function in PrCheckerEfficiencyPlots_HLT2.py.
+def enrich_selections(mysel_raw):
+    """Take a dict of selections and return it enriched with extra labels.
+    The same function exists in PrCheckerEfficiencyPlots_HLT2.py."""
     sel_raw = mysel_raw
     if not type(sel_raw) == dict:
-        print("ERROR! The function \'enrich_selections\' requires a dict as input.")
+        print(
+            "ERROR! The function 'enrich_selections' requires a dict as input."
+        )
         exit()
-    #Add the "FromB, FromD, Strange" labels before "_electrons" or "_notElectrons".
+    # Add the "FromB, FromD, Strange" labels before "_electrons" or "_notElectrons".
     for sel_label in list(sel_raw.keys()):
         sel_content = sel_raw[sel_label]
         sel_label_FromB = sel_label + "_FromB"
@@ -87,7 +101,7 @@ def enrich_selections(mysel_raw): #Give a dict as input, return a dict with enri
         sel_raw[sel_label_FromB] = sel_content_FromB
         sel_raw[sel_label_FromD] = sel_content_FromD
         sel_raw[sel_label_Strange] = sel_content_Strange
-    #Add the "isDecay, PairProd, fromHI" labels before "_electrons" or "_notElectrons"
+    # Add the "isDecay, PairProd, fromHI" labels before "_electrons" or "_notElectrons"
     for sel_label in list(sel_raw.keys()):
         sel_content = sel_raw[sel_label]
         sel_label_isDecay = sel_label + "_isDecay"
@@ -101,24 +115,26 @@ def enrich_selections(mysel_raw): #Give a dict as input, return a dict with enri
         sel_raw[sel_label_fromHI] = sel_content_fromHI
     return sel_raw
 
+
 Velo_sels = enrich_selections(Velo_sel_raw)
 Upstream_sels = enrich_selections(Upstream_sel_raw)
 Forward_sels = enrich_selections(Forward_sel_raw)
 Down_sels = enrich_selections(Down_sel_raw)
 Ttrack_sels = enrich_selections(Ttrack_sel_raw)
 
-def getCuts() :
+
+def getCuts():
     basedict = {
-       "Velo" : {},
-       "Upstream" : {},
-       "Forward" : {},
-       "TTrack" : {},
-       "Downstream" : {},
-       "Match" : {},
-       "Best" : {},
-       "BestLong" : {},
-       "BestDown" : {},
-       }
+        "Velo": {},
+        "Upstream": {},
+        "Forward": {},
+        "TTrack": {},
+        "Downstream": {},
+        "Match": {},
+        "Best": {},
+        "BestLong": {},
+        "BestDown": {},
+    }
 
     basedict["Velo"] = list(Velo_sels.keys())
 
@@ -132,43 +148,44 @@ def getCuts() :
     basedict["Downstream"] = list(Down_sels.keys())
     basedict["BestDown"] = list(Down_sels.keys())
 
-
     basedict["TTrack"] = list(Ttrack_sels.keys())
-    
+
     return basedict
 
-def categoriesDict() :
-    basedict = defaultdict(lambda : defaultdict(dict))
-#Velo tracks
+
+def categoriesDict():
+    basedict = defaultdict(lambda: defaultdict(dict))
+    # Velo tracks
     Velo_list = ["Velo"]
     for Velo_algo in Velo_list:
         for Velo_sel in list(Velo_sels.keys()):
             basedict[Velo_algo][Velo_sel]["title"] = Velo_sels[Velo_sel]
 
-#Up tracks
+    # Up tracks
     Up_list = ["Upstream"]
     for Up_algo in Up_list:
         for Upstream_sel in list(Upstream_sels.keys()):
-            basedict[Up_algo][Upstream_sel]["title"] = Upstream_sels[Upstream_sel]
+            basedict[Up_algo][Upstream_sel]["title"] = Upstream_sels[
+                Upstream_sel]
 
-#Forward tracks
+    # Forward tracks
     Forward_list = ["Forward", "Match", "Best", "BestLong"]
     for Forward_algo in Forward_list:
         for Forward_sel in list(Forward_sels.keys()):
-            basedict[Forward_algo][Forward_sel]["title"] = Forward_sels[Forward_sel]
+            basedict[Forward_algo][Forward_sel]["title"] = Forward_sels[
+                Forward_sel]
 
-#Down tracks
-    Down_list = ["Downstream","BestDown"]
+    # Down tracks
+    Down_list = ["Downstream", "BestDown"]
     for Down_algo in Down_list:
         for Down_sel in list(Down_sels.keys()):
             basedict[Down_algo][Down_sel]["title"] = Down_sels[Down_sel]
 
-#TTrack
+    # TTrack
     TTrack_list = ["TTrack"]
     for Ttrack_algo in TTrack_list:
         for Ttrack_sel in list(Ttrack_sels.keys()):
-            basedict[Ttrack_algo][Ttrack_sel]["title"] = Ttrack_sels[Ttrack_sel]
-
+            basedict[Ttrack_algo][Ttrack_sel]["title"] = Ttrack_sels[
+                Ttrack_sel]
 
-    
     return basedict
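
Note that enrich_selections grows the dict in place and returns the same object: each input key first gains _FromB/_FromD/_Strange variants, then every key (original and new) gains _isDecay/_PairProd/_fromHI variants, so one selection becomes 16. A toy illustration (import path assumed as above):

    # Toy illustration of the enrich_selections fan-out: 1 key -> 4 -> 16.
    from handlers.utils.ConfigHistos_HLT2 import enrich_selections

    raw = {"Long_eta25": "Long track, 2 < eta < 5"}
    sels = enrich_selections(raw)
    print(len(sels))    # 16
    print(sels is raw)  # True: the input dict was enriched in place
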
diff --git a/handlers/utils/HashRemover.py b/handlers/utils/HashRemover.py
index 1bc406a7cbf5092a25d0fbb181d0b3ee8b7145ca..4b44d417dcf2d99c3edd1479bd3041bfc13c604c 100644
--- a/handlers/utils/HashRemover.py
+++ b/handlers/utils/HashRemover.py
@@ -4,27 +4,34 @@ import tempfile
 import os
 import shutil
 
-class HashRemover:
 
-    """This class is used to remove the hash code at the end of the directory names of a rootfile. 
-    Hash code is judged by an underline and 8 random characters e.g."_(\w{8})$". 
+class HashRemover:
+    """This class is used to remove the hash code at the end of the directory names of a rootfile.
+    Hash code is judged by an underline and 8 random characters e.g."_(\w{8})$".
     If both input_file_path and output_file_path are provided, modified file will be saved in output_file_path;
     If only input_file_path is provided, modified file will replace the old file"""
+
     # Example usage
     # modifier = HashRemover("old_file.root", "new_file.root") or modifier = HashRemover("old_file.root")
     # modifier.modify_root_file()
-        
+
     def __init__(self, input_file_path, output_file_path=None):
         self.input_file_path = input_file_path
         self.output_file_path = output_file_path
 
-    def process_directory(self, old_dir, new_parent_dir, subdir_counts=None, parent_path=""):
+    def process_directory(self,
+                          old_dir,
+                          new_parent_dir,
+                          subdir_counts=None,
+                          parent_path=""):
         if subdir_counts is None:
             subdir_counts = {}
-        directory_path = "/".join(filter(None, [parent_path, old_dir.GetName()]))
+        directory_path = "/".join(
+            filter(None, [parent_path, old_dir.GetName()]))
         new_dir_name = self.remove_hashcode(old_dir.GetName())
         complete_dir_name = self.remove_hashcode(directory_path)
-        subdir_counts[complete_dir_name] = subdir_counts.get(complete_dir_name, 0) + 1
+        subdir_counts[complete_dir_name] = subdir_counts.get(
+            complete_dir_name, 0) + 1
         variant = subdir_counts[complete_dir_name]
         if variant > 1:
             new_dir_name += "_variant" + str(variant)
@@ -36,7 +43,8 @@ class HashRemover:
         for key in old_dir.GetListOfKeys():
             obj = key.ReadObj()
             if isinstance(obj, ROOT.TDirectory):
-                self.process_directory(obj, new_dir, subdir_counts, parent_path=directory_path)
+                self.process_directory(
+                    obj, new_dir, subdir_counts, parent_path=directory_path)
             elif isinstance(obj, ROOT.TH1):
                 new_dir.cd()
                 histogram = obj.Clone()
@@ -56,19 +64,22 @@ class HashRemover:
         # Open the original ROOT file
         input_file = ROOT.TFile.Open(self.input_file_path)
         if input_file is None or input_file.IsZombie():
-            print("Error: Failed to open the original file '{}'".format(self.input_file_path))
+            print("Error: Failed to open the original file '{}'".format(
+                self.input_file_path))
             return
 
         if self.output_file_path is None:
             # If the output_file_path is not provided, create a temporary file
-            tmp_output_file_path = os.path.join(tempfile.gettempdir(), "tmp_modified_file.root")
+            tmp_output_file_path = os.path.join(tempfile.gettempdir(),
+                                                "tmp_modified_file.root")
             output_file = ROOT.TFile(tmp_output_file_path, "RECREATE")
         else:
             # If the output_file_path is provided, create a new ROOT file
             output_file = ROOT.TFile(self.output_file_path, "RECREATE")
 
         if output_file.IsZombie():
-            print("Error: Failed to create the new file '{}'".format(self.output_file_path))
+            print("Error: Failed to create the new file '{}'".format(
+                self.output_file_path))
             return
 
         # Process the directories recursively
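
The hash convention documented in the class docstring is a trailing underscore plus eight word characters. remove_hashcode itself is outside the hunks shown here, so the following is an assumed equivalent of that rule, not the code from this patch:

    # Assumed equivalent of the documented hash-stripping rule.
    import re

    def strip_hash(name):
        return re.sub(r"_(\w{8})$", "", name)

    print(strip_hash("PrChecker_4f3a9b2c"))  # -> "PrChecker"
    print(strip_hash("PrChecker"))           # unchanged: no hash suffix
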
diff --git a/handlers/utils/LHCbStyle.py b/handlers/utils/LHCbStyle.py
index e57b3fe3876b456dc79701776ae106967f21ec67..5d36d4b99c464650f31322aec86b4ae3b59db9f8 100644
--- a/handlers/utils/LHCbStyle.py
+++ b/handlers/utils/LHCbStyle.py
@@ -2,16 +2,17 @@ from ROOT import gStyle
 from ROOT import gROOT
 from ROOT import TStyle
 
-def setLHCbStyle() :
+
+def setLHCbStyle():
     global lhcbStyle
 
-    lhcbFont     = 132
-    lhcbTSize    = 0.06
-    lhcbWidth    = 2
-    
-    lhcbStyle= TStyle("lhcbStyle","LHCb plots style");
+    lhcbFont = 132
+    lhcbTSize = 0.06
+    lhcbWidth = 2
+
+    lhcbStyle = TStyle("lhcbStyle", "LHCb plots style")
     lhcbStyle.SetFillColor(1)
-    lhcbStyle.SetFillStyle(1001)   # solid
+    lhcbStyle.SetFillStyle(1001)  # solid
     lhcbStyle.SetFrameFillColor(0)
     lhcbStyle.SetFrameBorderMode(0)
     lhcbStyle.SetPadBorderMode(0)
@@ -26,62 +27,63 @@ def setLHCbStyle() :
     lhcbStyle.SetTextFont(lhcbFont)
     lhcbStyle.SetTitleFont(lhcbFont)
     lhcbStyle.SetTextSize(lhcbTSize)
-    lhcbStyle.SetLabelFont(lhcbFont,"x")
-    lhcbStyle.SetLabelFont(lhcbFont,"y")
-    lhcbStyle.SetLabelFont(lhcbFont,"z")
-    lhcbStyle.SetLabelSize(lhcbTSize,"x")
-    lhcbStyle.SetLabelSize(lhcbTSize,"y")
-    lhcbStyle.SetLabelSize(lhcbTSize,"z")
+    lhcbStyle.SetLabelFont(lhcbFont, "x")
+    lhcbStyle.SetLabelFont(lhcbFont, "y")
+    lhcbStyle.SetLabelFont(lhcbFont, "z")
+    lhcbStyle.SetLabelSize(lhcbTSize, "x")
+    lhcbStyle.SetLabelSize(lhcbTSize, "y")
+    lhcbStyle.SetLabelSize(lhcbTSize, "z")
     lhcbStyle.SetTitleFont(lhcbFont)
-    lhcbStyle.SetTitleFont(lhcbFont,"x")
-    lhcbStyle.SetTitleFont(lhcbFont,"y")
-    lhcbStyle.SetTitleFont(lhcbFont,"z")
-    lhcbStyle.SetTitleSize(1.2*lhcbTSize,"x")
-    lhcbStyle.SetTitleSize(1.2*lhcbTSize,"y")
-    lhcbStyle.SetTitleSize(1.2*lhcbTSize,"z")
+    lhcbStyle.SetTitleFont(lhcbFont, "x")
+    lhcbStyle.SetTitleFont(lhcbFont, "y")
+    lhcbStyle.SetTitleFont(lhcbFont, "z")
+    lhcbStyle.SetTitleSize(1.2 * lhcbTSize, "x")
+    lhcbStyle.SetTitleSize(1.2 * lhcbTSize, "y")
+    lhcbStyle.SetTitleSize(1.2 * lhcbTSize, "z")
 
     # set the paper & margin sizes
-    lhcbStyle.SetPaperSize(20,26)
+    lhcbStyle.SetPaperSize(20, 26)
     lhcbStyle.SetPadTopMargin(0.05)
-    lhcbStyle.SetPadRightMargin(0.05) # increase for colz plots
+    lhcbStyle.SetPadRightMargin(0.05)  # increase for colz plots
     lhcbStyle.SetPadBottomMargin(0.16)
     lhcbStyle.SetPadLeftMargin(0.14)
 
     # use medium bold lines and thick markers
-    lhcbStyle.SetLineWidth(lhcbWidth);
-    lhcbStyle.SetFrameLineWidth(lhcbWidth);
-    lhcbStyle.SetHistLineWidth(lhcbWidth);
-    lhcbStyle.SetFuncWidth(lhcbWidth);
-    lhcbStyle.SetGridWidth(lhcbWidth);
-    lhcbStyle.SetLineStyleString(2,"[12 12]"); # postscript dashes
-    lhcbStyle.SetMarkerStyle(20);
-    lhcbStyle.SetMarkerSize(1.0);
-    
+    lhcbStyle.SetLineWidth(lhcbWidth)
+    lhcbStyle.SetFrameLineWidth(lhcbWidth)
+    lhcbStyle.SetHistLineWidth(lhcbWidth)
+    lhcbStyle.SetFuncWidth(lhcbWidth)
+    lhcbStyle.SetGridWidth(lhcbWidth)
+    lhcbStyle.SetLineStyleString(2, "[12 12]")
+    # postscript dashes
+    lhcbStyle.SetMarkerStyle(20)
+    lhcbStyle.SetMarkerSize(1.0)
+
     # label offsets
-    lhcbStyle.SetLabelOffset(0.010,"X");
-    lhcbStyle.SetLabelOffset(0.010,"Y");
-    
+    lhcbStyle.SetLabelOffset(0.010, "X")
+    lhcbStyle.SetLabelOffset(0.010, "Y")
+
     # by default, do not display histogram decorations:
-    lhcbStyle.SetOptStat(0)  
-    #lhcbStyle.SetOptStat("emr")  # show only nent -e , mean - m , rms -r
+    lhcbStyle.SetOptStat(0)
+    # lhcbStyle.SetOptStat("emr")  # show only nent -e , mean - m , rms -r
     # full opts at http:#root.cern.ch/root/html/TStyle.html#TStyle:SetOptStat
-    lhcbStyle.SetStatFormat("6.3g") # specified as c printf options
+    lhcbStyle.SetStatFormat("6.3g")  # specified as c printf options
     lhcbStyle.SetOptTitle(0)
     lhcbStyle.SetOptFit(0)
-    #lhcbStyle.SetOptFit(1011) # order is probability, Chi2, errors, parameters
-    #titles
-    lhcbStyle.SetTitleOffset(0.85,"X")
-    lhcbStyle.SetTitleOffset(0.85,"Y")
-    lhcbStyle.SetTitleOffset(1.2,"Z")
+    # lhcbStyle.SetOptFit(1011) # order is probability, Chi2, errors, parameters
+    # titles
+    lhcbStyle.SetTitleOffset(0.85, "X")
+    lhcbStyle.SetTitleOffset(0.85, "Y")
+    lhcbStyle.SetTitleOffset(1.2, "Z")
     lhcbStyle.SetTitleFillColor(0)
     lhcbStyle.SetTitleStyle(0)
     lhcbStyle.SetTitleBorderSize(0)
-    lhcbStyle.SetTitleFont(lhcbFont,"title")
+    lhcbStyle.SetTitleFont(lhcbFont, "title")
     lhcbStyle.SetTitleX(0.0)
-    lhcbStyle.SetTitleY(1.0) 
+    lhcbStyle.SetTitleY(1.0)
     lhcbStyle.SetTitleW(1.0)
     lhcbStyle.SetTitleH(0.05)
-  
+
     # look of the statistics box:
     lhcbStyle.SetStatBorderSize(0)
     lhcbStyle.SetStatFont(lhcbFont)
@@ -90,15 +92,14 @@ def setLHCbStyle() :
     lhcbStyle.SetStatY(0.9)
     lhcbStyle.SetStatW(0.25)
     lhcbStyle.SetStatH(0.15)
-    
+
     # put tick marks on top and RHS of plots
     lhcbStyle.SetPadTickX(1)
     lhcbStyle.SetPadTickY(1)
-    
+
     # histogram divisions: only 5 in x to avoid label overlaps
-    lhcbStyle.SetNdivisions(505,"x")
-    lhcbStyle.SetNdivisions(510,"y")
-    
+    lhcbStyle.SetNdivisions(505, "x")
+    lhcbStyle.SetNdivisions(510, "y")
+
     gROOT.SetStyle("lhcbStyle")
     return
- 
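
setLHCbStyle() registers the global "lhcbStyle" TStyle and activates it via gROOT.SetStyle, so it should be called once before any canvases or histograms are drawn. A minimal usage sketch, assuming a working PyROOT installation:

    import ROOT
    from handlers.utils.LHCbStyle import setLHCbStyle

    setLHCbStyle()  # installs and activates "lhcbStyle"
    canvas = ROOT.TCanvas("c", "style demo")
    hist = ROOT.TH1F("h", ";x;entries", 50, -3.0, 3.0)
    hist.FillRandom("gaus", 10000)
    hist.Draw()
    canvas.SaveAs("style_demo.pdf")
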
diff --git a/handlers/utils/Legend.py b/handlers/utils/Legend.py
index 6366403950605fae2ed603dfd5f55dfa602c2ea8..7739f0b73887fa21e9c6317a5682ddabdd96de7d 100644
--- a/handlers/utils/Legend.py
+++ b/handlers/utils/Legend.py
@@ -3,20 +3,20 @@ import itertools
 
 # Some convenience function to easily iterate over the parts of the collections
 
-
 # Needed if importing this script from another script in case TMultiGraphs are used
-#ROOT.SetMemoryPolicy(ROOT.kMemoryStrict)
-
+# ROOT.SetMemoryPolicy(ROOT.kMemoryStrict)
 
 # Start a bit right of the Yaxis and above the Xaxis to not overlap with the ticks
 start, stop = 0.18, 0.89
 x_width, y_width = 0.3, 0.2
-PLACES = [(start, stop - y_width, start + x_width, stop),  # top left opt
-          (start, start, start + x_width, start + y_width),  # bottom left opt
-          (stop - x_width, stop - y_width, stop, stop),  # top right opt
-          (stop - x_width, start, stop, start + y_width),  # bottom right opt
-          (stop - x_width, 0.5 - y_width / 2, stop, 0.5 + y_width / 2),  # right
-          (start, 0.5 - y_width / 2, start + x_width, 0.5 + y_width / 2)]  # left
+PLACES = [
+    (start, stop - y_width, start + x_width, stop),  # top left opt
+    (start, start, start + x_width, start + y_width),  # bottom left opt
+    (stop - x_width, stop - y_width, stop, stop),  # top right opt
+    (stop - x_width, start, stop, start + y_width),  # bottom right opt
+    (stop - x_width, 0.5 - y_width / 2, stop, 0.5 + y_width / 2),  # right
+    (start, 0.5 - y_width / 2, start + x_width, 0.5 + y_width / 2),  # left
+]
 
 
 def transform_to_user(canvas, x1, y1, x2, y2):
@@ -53,7 +53,8 @@ def overlap_h(hist, x1, y1, x2, y2):
         if y1 <= val <= y2:
             return True
         # Errors
-        if val + hist.GetBinErrorUp(i) > y1 and val - hist.GetBinErrorLow(i) < y2:
+        if val + hist.GetBinErrorUp(i) > y1 and val - hist.GetBinErrorLow(
+                i) < y2:
             # print "Overlap with histo", hist.GetName(), "at bin", i
             return True
     return False
@@ -67,6 +68,7 @@ def overlap_rect(rect1, rect2):
         return False
     return True
 
+
 def overlap_g(graph, x1, y1, x2, y2):
     x_values = list(graph.GetX())
     y_values = list(graph.GetY())
@@ -80,7 +82,14 @@ def overlap_g(graph, x1, y1, x2, y2):
             return True
     return False
 
-def place_legend(canvas, x1=None, y1=None, x2=None, y2=None, header="", option="LP"):
+
+def place_legend(canvas,
+                 x1=None,
+                 y1=None,
+                 x2=None,
+                 y2=None,
+                 header="",
+                 option="LP"):
     # If position is specified, use that
     if all(x is not None for x in (x1, x2, y1, y2)):
         return canvas.BuildLegend(x1, y1, x2, y2, header, option)
@@ -101,10 +110,12 @@ def place_legend(canvas, x1=None, y1=None, x2=None, y2=None, header="", option="
         # Make sure there are no overlaps
         if any(obj.Overlap(*place_user) for obj in objects):
             continue
-        return canvas.BuildLegend(place[0], place[1], place[2], place[3], header, option)
+        return canvas.BuildLegend(place[0], place[1], place[2], place[3],
+                                  header, option)
     # As a fallback, use the default values, taken from TCanvas::BuildLegend
     return canvas.BuildLegend(0.5, 0.67, 0.88, 0.88, header, option)
 
+
 # Monkey patch ROOT objects to make it all work
 ROOT.THStack.__iter__ = lambda self: iter(self.GetHists())
 ROOT.TMultiGraph.__iter__ = lambda self: iter(self.GetListOfGraphs())
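
place_legend tries each rectangle in PLACES and returns the first whose user-coordinate box overlaps none of the drawn histograms or graphs, falling back to the TCanvas::BuildLegend defaults otherwise. A usage sketch (PyROOT required; the histogram contents are arbitrary):

    import ROOT
    from handlers.utils.Legend import place_legend

    canvas = ROOT.TCanvas("c", "legend demo")
    h1 = ROOT.TH1F("h1", ";x;entries", 50, -3.0, 3.0)
    h2 = ROOT.TH1F("h2", ";x;entries", 50, -3.0, 3.0)
    h1.FillRandom("gaus", 5000)
    h2.FillRandom("expo", 5000)
    h1.Draw()
    h2.Draw("same")
    legend = place_legend(canvas)  # picks the first free spot from PLACES
    legend.Draw()
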
diff --git a/handlers/utils/dashboard.py b/handlers/utils/dashboard.py
index 6b63ce2417e36502bf54c7f4bc5bc5d3df952fcc..e44a8c56c39cd23ae2015e24820a7893294bcb62 100644
--- a/handlers/utils/dashboard.py
+++ b/handlers/utils/dashboard.py
@@ -5,6 +5,7 @@ from couchdb.http import ResourceNotFound
 
 log = logging.getLogger(__name__)
 
+
 @lru_cache(maxsize=None)
 def get_nightly_dashboard():
     return Dashboard()
@@ -18,8 +19,7 @@ def get_periodic_dashboard():
 def get_test_doc(version, options):
     """Get the document for a test from the periodic test dashboard."""
     past_tests = [
-        row.doc
-        for row in get_periodic_dashboard().db.iterview(
+        row.doc for row in get_periodic_dashboard().db.iterview(
             "nightlies_summaries/by_app_version",
             batch=10,
             key=version,
@@ -33,16 +33,14 @@ def get_test_doc(version, options):
         docs.sort(key=lambda x: x["time_start"], reverse=True)
         log.warning(
             f"Multiple ({len(docs)}) tests found for test {options} for slot {version}. "
-            "Taking the latest."
-        )
+            "Taking the latest.")
     return docs[0]
 
 
 def get_mr_slots_by_ref_slot(slot, build_id):
     """Return *-mr slots corresponding to a *-ref slot."""
     return [
-        doc.doc
-        for doc in get_nightly_dashboard().db.iterview(
+        doc.doc for doc in get_nightly_dashboard().db.iterview(
             "merge_requests/mr_slots_by_ref_slot",
             batch=100,
             include_docs=True,
@@ -77,23 +75,20 @@ def get_ci_test_pairs(slot, build_id):
             raise ValueError(f"{slot}.{build_id} is not from a ci-test")
         ref_slot, ref_build_id = metadata["reference"]
         log.info(f"Determined ref build to be {ref_slot}.{ref_build_id}")
-        return [((ref_slot, ref_build_id), (slot, build_id), metadata["trigger"])]
+        return [((ref_slot, ref_build_id), (slot, build_id),
+                 metadata["trigger"])]
 
     # it's a reference slot
     else:
-        mr_slots = [
-            (
-                doc["slot"],
-                doc["build_id"],
-                doc["config"]["metadata"]["ci_test"]["trigger"],
-            )
-            for doc in get_mr_slots_by_ref_slot(slot, build_id)
-        ]
+        mr_slots = [(
+            doc["slot"],
+            doc["build_id"],
+            doc["config"]["metadata"]["ci_test"]["trigger"],
+        ) for doc in get_mr_slots_by_ref_slot(slot, build_id)]
 
         if not mr_slots:
-            log.warning(f"Found no corresponding test builds for {slot}.{build_id}")
+            log.warning(
+                f"Found no corresponding test builds for {slot}.{build_id}")
 
-        return [
-            ((slot, build_id), (test_slot, test_build_id), trigger)
-            for test_slot, test_build_id, trigger in mr_slots
-        ]
+        return [((slot, build_id), (test_slot, test_build_id), trigger)
+                for test_slot, test_build_id, trigger in mr_slots]
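
Whichever side of a ci-test it is given, get_ci_test_pairs returns a list of ((ref_slot, ref_build_id), (test_slot, test_build_id), trigger) tuples. A consumer sketch; the slot name and build id are hypothetical, and the call needs access to the nightlies CouchDB:

    from handlers.utils.dashboard import get_ci_test_pairs

    for ref, test, trigger in get_ci_test_pairs("lhcb-master-ref", 1234):
        ref_slot, ref_build = ref
        test_slot, test_build = test
        print(f"compare {ref_slot}.{ref_build} against "
              f"{test_slot}.{test_build} (triggered by {trigger})")
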
diff --git a/handlers/utils/lhcbpr.py b/handlers/utils/lhcbpr.py
index ed2615447cce4a656c9d116e8ce28385ac10d048..5d85a28249a5dcf85fbb6472044c8a9650948db1 100644
--- a/handlers/utils/lhcbpr.py
+++ b/handlers/utils/lhcbpr.py
@@ -3,6 +3,7 @@ import requests
 
 log = logging.getLogger(__name__)
 
+
 class JobNotFound(Exception):
     pass
 
@@ -13,8 +14,9 @@ def get_lhcbpr_application_id(name):
     r.raise_for_status()
     data = r.json()
     assert data["count"] == len(data["results"])
-    (app_id,) = [
-        app["id"] for app in data["results"] if app["name"].upper() == name.upper()
+    (app_id, ) = [
+        app["id"] for app in data["results"]
+        if app["name"].upper() == name.upper()
     ]
     return app_id
 
@@ -22,22 +24,21 @@ def get_lhcbpr_application_id(name):
 def get_job_ids(slot, build_id, application, options):
     """Return job ids for an application/options/version."""
     results = []
-    url = (
-        f"https://lblhcbpr.cern.ch/metrics/jobs?"
-        f"app={application}&versions={slot}.{build_id}&options={options}&"
-        f"format=json&sortby=id&exact=true"
-    )
+    url = (f"https://lblhcbpr.cern.ch/metrics/jobs?"
+           f"app={application}&versions={slot}.{build_id}&options={options}&"
+           f"format=json&sortby=id&exact=true")
     # In the unlikely case there are multiple pages of results, fetch all.
     while url:
         r = requests.get(url)
         r.raise_for_status()
         data = r.json()
-        results += (data["results"] or [])
+        results += data["results"] or []
         url = "https://" + data["next"] if "next" in data else None
 
     assert all(x["version"] == f"{slot}.{build_id}" for x in results)
     ids = [x["id"] for x in results]
-    log.debug(f"Found jobs {ids} for {slot}/{build_id} {application} {options}")
+    log.debug(
+        f"Found jobs {ids} for {slot}/{build_id} {application} {options}")
     return ids
 
 
@@ -56,7 +57,8 @@ def get_latest_job_id(slot, build_id, application, options):
 
 def get_job_results(job_id):
     """Return the registered results for a job."""
-    r = requests.get(f"https://lblhcbpr.cern.ch/api/jobs/{job_id}/results/?format=json")
+    r = requests.get(
+        f"https://lblhcbpr.cern.ch/api/jobs/{job_id}/results/?format=json")
     r.raise_for_status()
 
     data = r.json()
@@ -72,7 +74,10 @@ def get_job_results(job_id):
 
     # arrange results per name and parse value strings
     results = {
-        res["attr"]["name"]: {"value": value(res), "attr": res["attr"]}
+        res["attr"]["name"]: {
+            "value": value(res),
+            "attr": res["attr"]
+        }
         for res in data["results"]
     }
     if len(results) < len(data["results"]):
@@ -82,4 +87,5 @@ def get_job_results(job_id):
 
 def get_latest_job_results(slot, build_id, application, options):
     """Return the results for the latest matching job or raise."""
-    return get_job_results(get_latest_job_id(slot, build_id, application, options))
+    return get_job_results(
+        get_latest_job_id(slot, build_id, application, options))
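
get_latest_job_results chains the helpers above: list the matching job ids, take the newest, and fetch its results as a name -> {"value", "attr"} mapping. A sketch, assuming get_latest_job_id raises the JobNotFound defined above when nothing matches; the slot, build id, application, and option names are hypothetical:

    from handlers.utils.lhcbpr import JobNotFound, get_latest_job_results

    try:
        results = get_latest_job_results("lhcb-master", 2000, "Moore",
                                         "hlt1_pp_default")
        for name, res in results.items():
            print(name, res["value"])
    except JobNotFound:
        print("no matching job registered in LHCbPR")
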
diff --git a/sendToDB.py b/sendToDB.py
index 5117595b71c10619db0a4477f734cee8a7556921..d287a28c9dee423eeadf37caab133d373301217a 100755
--- a/sendToDB.py
+++ b/sendToDB.py
@@ -2,33 +2,29 @@
 
 import sys
 import os
-
 import json
 import logging
 import zipfile
 import subprocess
 import time
 import shutil
-
 from optparse import OptionParser
 
-
-diracStorageElementName = 'StatSE'
-
-diracStorageElementFolder = '/lhcb/prdata/results'
-
-logger = logging.getLogger('sendToDB.py')
+diracStorageElementName = "StatSE"
+diracStorageElementFolder = "/lhcb/prdata/results"
+logger = logging.getLogger("sendToDB.py")
 
 
 def storeBackupOnEOS(zipFile):
     eos_path = os.path.join(
-        '/eos/lhcb/storage/lhcbpr/lhcbprdata/zips_backup',
+        "/eos/lhcb/storage/lhcbpr/lhcbprdata/zips_backup",
         time.strftime("%Y"),
-        time.strftime("%b-%Y")
+        time.strftime("%b-%Y"),
     )
 
     if not os.path.exists(eos_path):
-        logger.info("EOS backup path does not exist, creating: {0}".format(eos_path))
+        logger.info(
+            "EOS backup path does not exist, creating: {0}".format(eos_path))
         try:
             os.makedirs(eos_path)
         except:
@@ -51,13 +47,13 @@ def sendViaDiracStorageElement(zipFile):
     # log = statSE.putFile(
     #     {os.path.join(diracStorageElementFolder, tailzipFile): zipFile})
     # logger.info('{0}'.format(log))
-    lfn = os.path.join(diracStorageElementFolder,
-                    time.strftime("%b-%Y"),
-                    tailzipFile)
+    lfn = os.path.join(diracStorageElementFolder, time.strftime("%b-%Y"),
+                       tailzipFile)
 
     try:
-        result = subprocess.check_output(["dirac-dms-add-file", "-ddd", lfn,
-                                        zipFile, diracStorageElementName])
+        result = subprocess.check_output([
+            "dirac-dms-add-file", "-ddd", lfn, zipFile, diracStorageElementName
+        ])
         if result.decode().count("Successfully uploaded ") == 1:
             logger.info("Uploaded {0}".format(lfn))
         else:
@@ -67,11 +63,10 @@ def sendViaDiracStorageElement(zipFile):
 
 
 def run(zipFile, ssss, delzip):
-
     ch = logging.StreamHandler()
     ch.setLevel(level=logging.WARNING)
-    if os.path.exists('output/collect.log'):
-        fh = logging.FileHandler(os.path.join('output', 'collect.log'))
+    if os.path.exists("output/collect.log"):
+        fh = logging.FileHandler(os.path.join("output", "collect.log"))
         fh.setLevel(level=logging.WARNING)
 
     if not ssss:
@@ -81,7 +76,7 @@ def run(zipFile, ssss, delzip):
             fh.setLevel(logging.INFO)
 
     formatter = logging.Formatter(
-                        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
     ch.setFormatter(formatter)
     if fh:
         fh.setFormatter(formatter)
@@ -91,26 +86,25 @@ def run(zipFile, ssss, delzip):
         logger.addHandler(fh)
 
     if not zipfile.is_zipfile(zipFile):
-        logger.error(
-            'Given object is not a valid zip file,'
-            ' please give a valid one, aborting...')
+        logger.error("Given object is not a valid zip file,"
+                     " please give a valid one, aborting...")
         return
 
     # checking if the zip contains what it should contains
     try:
         unzipper = zipfile.ZipFile(zipFile)
-        dataDict = json.loads(unzipper.read('json_results'))
+        dataDict = json.loads(unzipper.read("json_results"))
 
-        for atr in dataDict['JobAttributes']:
-            if atr['type'] == 'File':
-                unzipper.read(atr['filename'])
+        for atr in dataDict["JobAttributes"]:
+            if atr["type"] == "File":
+                unzipper.read(atr["filename"])
 
     except Exception as e:
         logger.error(e)
-        logger.error('Aborting...')
+        logger.error("Aborting...")
         return
 
-    logger.info('Given zip file is valid, sending to database...')
+    logger.info("Given zip file is valid, sending to database...")
 
     sendViaDiracStorageElement(zipFile)
     storeBackupOnEOS(zipFile)
@@ -133,24 +127,35 @@ def main():
     description = """The program needs all the input arguments
                   (options in order to run properly)"""
 
-    parser = OptionParser(usage='usage: %prog [options]',
-                          description=description)
-    parser.add_option('-s', '--send-results',
-                      action='store',
-                      type='string',
-                      dest='zipFile',
-                      help='Zip file with results to be pushed to database')
-    parser.add_option("-q", "--quiet", action="store_true",
-                      dest="ssss", default=False,
-                      help="do not print info from logger, optional")
-    parser.add_option('-d', '--delete',
-                      action='store_true',
-                      dest='delzip',
-                      default=False,
-                      help='Delete the ZIP file with results after processing, optional')
+    parser = OptionParser(
+        usage="usage: %prog [options]", description=description)
+    parser.add_option(
+        "-s",
+        "--send-results",
+        action="store",
+        type="string",
+        dest="zipFile",
+        help="Zip file with results to be pushed to database",
+    )
+    parser.add_option(
+        "-q",
+        "--quiet",
+        action="store_true",
+        dest="ssss",
+        default=False,
+        help="do not print info from logger, optional",
+    )
+    parser.add_option(
+        "-d",
+        "--delete",
+        action="store_true",
+        dest="delzip",
+        default=False,
+        help="Delete the ZIP file with results after processing, optional",
+    )
 
     if len(sys.argv) < needed_options:
-        parser.parse_args(['--help'])
+        parser.parse_args(["--help"])
         return
 
     options, args = parser.parse_args()
@@ -158,5 +163,5 @@ def main():
     run(options.zipFile, options.ssss, options.delzip)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
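
run() first validates the archive: it must be a real zip containing a "json_results" entry, and every JobAttributes entry of type "File" must be readable from it; only then is it uploaded via DIRAC and backed up to EOS. Driving the same flow from Python, equivalent to "python sendToDB.py -s results.zip -d"; the file name is hypothetical, and the dirac-dms-add-file CLI plus EOS access are required:

    from sendToDB import run

    # A log FileHandler is added only if output/collect.log already exists.
    run("results.zip", ssss=False, delzip=True)
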
diff --git a/testHandlers.py b/testHandlers.py
index 7a9b69953626bba7a3d221d5e3ed58f87519764e..7e1b73864b24cf5c4b76c3796692994a8234e4af 100755
--- a/testHandlers.py
+++ b/testHandlers.py
@@ -6,71 +6,107 @@ import argparse
 from datetime import datetime, timedelta
 import copy
 
-def main(args):
-
 
+def main(args):
     description = """Test handlers: you need to set a directory with job results and a list of handlers"""
     parser = argparse.ArgumentParser(description=description)
-    parser.add_argument('-r', '--results', default=".",
-                        help='Directory which contains results, default is the current directory')
-    parser.add_argument("-l", "--list-handlers",
-                        dest="handlers", help="The list of handlers (comma separated.)",
-                        required=True)
-    parser.add_argument('--app-name', dest='app_name', default='DummyApp',
-                        help='Application name')
-    parser.add_argument('--app-version', dest='app_version', default='v1r0',
-                        help='Application version')
-    parser.add_argument('--platform', dest='job_platform', default='x86_64-centos7-gcc62-opt',
-                        help='Job Platform')
-    parser.add_argument('--host', dest='job_host', default='dummy-host',
-                        help='Job Machine hostname')
-    parser.add_argument('--opt', dest='job_opt', default='dummy-opt',
-                        help='Job Opt Name')
-    parser.add_argument('--opt-cont', dest='job_opt_cont', default='dummy-exec-content',
-                        help='Job Opt Content')
+    parser.add_argument(
+        "-r",
+        "--results",
+        default=".",
+        help=
+        "Directory which contains results, default is the current directory",
+    )
+    parser.add_argument(
+        "-l",
+        "--list-handlers",
+        dest="handlers",
+        help="The list of handlers (comma separated.)",
+        required=True,
+    )
+    parser.add_argument(
+        "--app-name",
+        dest="app_name",
+        default="DummyApp",
+        help="Application name")
+    parser.add_argument(
+        "--app-version",
+        dest="app_version",
+        default="v1r0",
+        help="Application version")
+    parser.add_argument(
+        "--platform",
+        dest="job_platform",
+        default="x86_64-centos7-gcc62-opt",
+        help="Job Platform",
+    )
+    parser.add_argument(
+        "--host",
+        dest="job_host",
+        default="dummy-host",
+        help="Job Machine hostname")
+    parser.add_argument(
+        "--opt", dest="job_opt", default="dummy-opt", help="Job Opt Name")
+    parser.add_argument(
+        "--opt-cont",
+        dest="job_opt_cont",
+        default="dummy-exec-content",
+        help="Job Opt Content",
+    )
 
     job_time = datetime.now()
-    time_fmt = '%Y-%m-%d %H:%M:%S +0200'
-    job_start_str = job_time.strftime( time_fmt )
+    time_fmt = "%Y-%m-%d %H:%M:%S +0200"
+    job_start_str = job_time.strftime(time_fmt)
 
-    parser.add_argument('--start-time', dest='job_begin', default=job_start_str,
-                        help='Job start time in "%%Y-%%m-%%d %%H:%%M:%%S +0200", e.g. {0}'.format(job_start_str))
-    parser.add_argument('--run-time', dest='run_time', default=2,
-                        help='Job run time (hr)')
+    parser.add_argument(
+        "--start-time",
+        dest="job_begin",
+        default=job_start_str,
+        help='Job start time in "%%Y-%%m-%%d %%H:%%M:%%S +0200", e.g. {0}'.
+        format(job_start_str),
+    )
+    parser.add_argument(
+        "--run-time",
+        dest="run_time",
+        default=2,
+        type=int,  # without this, CLI values arrive as str and break timedelta(hours=...)
+        help="Job run time (hr)")
 
-    parser.add_argument('--build_week', dest='build_week', default=False, action="store_true",
-                        help='Should we build a week of data (1 data point per day) duplicating this test?')
+    parser.add_argument(
+        "--build_week",
+        dest="build_week",
+        default=False,
+        action="store_true",
+        help=
+        "Should we build a week of data (1 data point per day) duplicating this test?",
+    )
 
     options = parser.parse_args(args)
 
     job_time = datetime.strptime(job_start_str, time_fmt)
     job_time = job_time + timedelta(hours=options.run_time)
-    job_end_str = job_time.strftime( time_fmt )
+    job_end_str = job_time.strftime(time_fmt)
 
-    params =  ['myapp']
-    params += ['--app-name', str(options.app_name).upper() ]
-    params += ['--app-version', options.app_version ]
-    params += ['--app-version-datetime', job_start_str]
-    params += ['--exec-name', 'dummy-exec']
-    params += ['--exec-content', 'dummy-exec-content']
-    params += ['--opt-name', options.job_opt]
-    params += ['--opt-content', options.job_opt_cont]
-    params += ['-s', job_start_str]
-    params += ['-e', job_end_str]
-    params += ['-p', options.job_host]
-    params += ['-c', options.job_platform]
-    params += ['-l', options.handlers]
-    params += ['-r', options.results]
-    params += ['-u','']
-    params += ['-m','']
-    params += ['--debug']
+    params = ["myapp"]
+    params += ["--app-name", str(options.app_name).upper()]
+    params += ["--app-version", options.app_version]
+    params += ["--app-version-datetime", job_start_str]
+    params += ["--exec-name", "dummy-exec"]
+    params += ["--exec-content", "dummy-exec-content"]
+    params += ["--opt-name", options.job_opt]
+    params += ["--opt-content", options.job_opt_cont]
+    params += ["-s", job_start_str]
+    params += ["-e", job_end_str]
+    params += ["-p", options.job_host]
+    params += ["-c", options.job_platform]
+    params += ["-l", options.handlers]
+    params += ["-r", options.results]
+    params += ["-u", ""]
+    params += ["-m", ""]
+    params += ["--debug"]
 
     if not options.build_week:
         sys.argv = params
         print(sys.argv)
         collectRunResults.main()
     else:
-        i=0
+        i = 0
         arg_time = datetime.strptime(job_start_str, time_fmt)
         start_time = arg_time - timedelta(days=7)
         while i < 7:
@@ -79,15 +115,16 @@ def main(args):
             new_end_time = new_start_time + timedelta(hours=options.run_time)
             new_params = copy.deepcopy(params)
             # Change the app version id so it's unique
-            new_params[4] = new_params[4] + '.' + str(i)
+            new_params[4] = new_params[4] + "." + str(i)
             # Change the start time
-            new_params[16] = new_start_time.strftime( time_fmt )
+            new_params[16] = new_start_time.strftime(time_fmt)
             # Change the end time
-            new_params[18] = new_end_time.strftime( time_fmt )
+            new_params[18] = new_end_time.strftime(time_fmt)
 
             sys.argv = new_params
             print(sys.argv)
             collectRunResults.main()
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main(sys.argv[1:])
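
testHandlers builds a synthetic collectRunResults command line around a results directory and a handler list; with --build_week it replays the same job seven times with shifted start and end times, one data point per day. Equivalent invocation from Python; the handler name and results path are hypothetical:

    import testHandlers

    testHandlers.main(["-r", "./output", "-l", "MyHandler", "--build_week"])
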