diff --git a/Trigger/TrigValidation/TrigP1Test/CMakeLists.txt b/Trigger/TrigValidation/TrigP1Test/CMakeLists.txt
index d9362c2971ae62a27134d42aba5ccd621e4f5715..4ff15c2cfe91e2bb98e97c4c4e6ca0293e353697 100644
--- a/Trigger/TrigValidation/TrigP1Test/CMakeLists.txt
+++ b/Trigger/TrigValidation/TrigP1Test/CMakeLists.txt
@@ -15,7 +15,7 @@ atlas_depends_on_subdirs( PUBLIC
 atlas_install_python_modules( python/*.py )
 atlas_install_joboptions( share/*.py )
 
-atlas_install_runtime( test/TrigP1Test_TestConfiguration.xml Testing/*.trans Testing/*.conf python/RunMsgFinder.py python/RttScriptRunner.py python/RootComp.py python/RegTest.py python/CheckLog.py python/CheckStatusCode.py python/LogTail.py python/DeleteRaw.py python/TruncateLog.py python/releaseFromSMK.py)
+atlas_install_runtime( test/TrigP1Test_TestConfiguration.xml Testing/*.trans Testing/*.conf python/RttScriptRunner.py python/RootComp.py python/RegTest.py python/CheckLog.py python/CheckStatusCode.py python/LogTail.py python/DeleteRaw.py python/TruncateLog.py python/releaseFromSMK.py)
 atlas_install_scripts( bin/*py share/*.sh share/testMonHistOH.py share/part_lhl2ef_fix.py share/setMagFieldCurrents.py share/part_get_runnumber.py share/trigp1test_athenaHLT.py share/trigp1test_prescaleForROSsim.py Testing/*.sh Testing/splitlog.py test/exec*.sh test/test*.sh test/test*.py)
 
 # Unit tests:
diff --git a/Trigger/TrigValidation/TrigP1Test/python/RunMsgFinder.py b/Trigger/TrigValidation/TrigP1Test/python/RunMsgFinder.py
deleted file mode 100755
index de0634b33a3b706ef042de4a1c0ea7fcb05b88b6..0000000000000000000000000000000000000000
--- a/Trigger/TrigValidation/TrigP1Test/python/RunMsgFinder.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
-
-# @file: RunMsgFinder.py
-# @purpose: Find messages printed during running by algorithms
-# @author: Frank Winklmeier (fwinkl)
-# $Id: RunMsgFinder.py,v 1.6 2008-10-23 12:11:49 fwinkl Exp $
-
-import sys, os, fnmatch, re
-
-class RunMsgFinder:
-    
-    def __init__(self, paramDict={}):
-
-        from Logger import Logger
-        
-        jDescriptor = paramDict.get('JobDescriptor',None)
-        if jDescriptor:
-            self.joblog = jDescriptor.log
-        else:
-            self.joblog = "test.log"
-            
-        self.success   =  0
-        self.error     = -1
-        self.logger    = Logger()
-
-        self.logfile = paramDict.get('logfile', 'test.RunMsgFinder.log')
-
-        # Ignore list
-        ignore = ['.*EventLoopMgr\s*INFO\s*Change of HLT prescales requested.*',
-                  '.*EventLoopMgr\s*INFO.*calling HLTConfigSvc::updatePrescaleSets.*',
-                  'State is.*<f>orward, <b>ackward, <u>ser_command.*',
-                  '.*WARNING Item .* required: 1 of:.*',
-                  ]
-        self.ignoreList = [re.compile(s) for s in ignore]
-        self.ignoreFirstNEvents = 1
-        
-    def ignore(self, msg):
-        """Return true if this message should be ignored"""
-        
-        for regexp in self.ignoreList:
-            if regexp.match(msg): return True
-        return False
-    
-    def extractRunMsg(self, file):
-        """Extract the messages during running"""
-
-        running = False
-        event = 0
-        runMsg = []
-        ignoreMsg = []
-        
-        # Regular expression to extract event number
-        evtRE = re.compile("\s*PerfMonSvc:\s*INFO \[evt:\s*([0-9]*)\].*")
-        
-        for line in file:
-            if line.find('state is "PREPARED"')>-1:
-                running = True
-                event = 1
-                continue
-            
-            if line.find('Psc stopRun started')>-1:
-                running = False
-                break
-            
-            if running:
-                m = evtRE.match(line)
-                if m and len(m.groups())==1: event = int(m.group(1)) + 1
-                    
-                if line.find('PerfMonSvc')==-1 and \
-                       line.find('Py:athena')==-1:
-
-                    if self.ignore(line) or event<=self.ignoreFirstNEvents:
-                        ignoreMsg += [ "%-11s %s" % ("[evt #%i]" % event ,line) ]
-                    else:
-                        runMsg += [ "%-11s %s" % ("[evt #%i]" % event ,line) ]
-                    continue
-                            
-        return (runMsg,ignoreMsg)
-            
-    def run(self):
-        self.logger.debug('Running RunMsgFinder')
-
-        self.logger.debug('Analyzing %s' % self.joblog)
-        runMsg,ignoreMsg = self.extractRunMsg(open(self.joblog,'r'))
-
-        if len(runMsg)>0: status = self.error
-        else: status = self.success
-
-        try:
-            log = open(self.logfile,"w")
-        except:
-            self.logger.error("Cannot open %s" % self.logfile)
-            
-        log.write('='*80+'\n')
-        log.write('RunMsgFinder: Found %d messages during RUNNING state\n' % len(runMsg))
-        log.write('='*80+'\n')
-        log.writelines(runMsg)
-
-        log.write('\n'+'='*80+'\n')
-        log.write('RunMsgFinder: Ignored %d messages\n' % len(ignoreMsg))
-        log.write('='*80+'\n')
-        log.writelines(ignoreMsg)
-        log.close()
-        
-        return status
-
-    
-if __name__ == "__main__":
-
-    sys.path.append('/afs/cern.ch/atlas/project/RTT/prod/Tools/RunTimeTester/src')
-    sys.path.append('/afs/cern.ch/atlas/project/RTT/prod/Tools/RunTimeTester/share')    
-
-    params = {}
-    r = RunMsgFinder(params)
-    status = r.run()
-    
-    for l in r.logger.all_lines: print l
-
-    sys.exit(status)
-    
diff --git a/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py b/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py
new file mode 100755
index 0000000000000000000000000000000000000000..d67c9f8c99d74c5551bffe01eeff8e4c07bc72cf
--- /dev/null
+++ b/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+#
+
+'''
+Script that lists and counts Athena messages between two points in a log file,
+typically between the start and end of the event loop.
+'''
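+
+# Example invocation (the log file name is only illustrative):
+#   messageCounter.py -s 'Starting loop on events' -e 'All events processed' -p athena.log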
+
+from __future__ import print_function
+import sys
+import os
+import re
+import logging
+import argparse
+import json
+from collections import OrderedDict
+
+
+default_ignore_patterns = [
+    r'TrigCOOLUpd.*INFO.*Reload',
+    r'TrigCOOLUpd.*INFO.*Invalidated',
+]
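+# Lines matching the ignore patterns are not counted by severity;
+# they are reported separately under 'ignored'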
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(usage='%(prog)s [options] LogFiles',
+                                     description=__doc__)
+    parser.add_argument('logfiles',
+                        metavar='LogFiles',
+                        nargs='+',
+                        help='Log file(s) to analyse. Multiple space-separated file names may be given.')
+    parser.add_argument('-s', '--startPattern',
+                        metavar='PATTERN',
+                        help='Pattern to match the start point for log analysis. If not given, log files are analysed from the first line.')
+    parser.add_argument('-e', '--endPattern',
+                        metavar='PATTERN',
+                        help='Pattern to match the end point for log analysis. If not given, log files are analysed until the last line.')
+    parser.add_argument('-i', '--ignore',
+                        metavar='PATTERN',
+                        action='append',
+                        default=[],
+                        help='Add an ignore pattern to the default ones. ' +
+                             'The option can be specified multiple times. Defaults are: {}'.format(default_ignore_patterns))
+    parser.add_argument('-p', '--printMessages',
+                        action='store_true',
+                        help='Print the messages found in analysed files')
+    parser.add_argument('-v', '--verbose',
+                        action='store_true',
+                        help='Increase output verbosity')
+    return parser
+
+
+def extract_messages(lines, start, end, ignore):
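+    '''Collect log lines between the start and end patterns, returning an
+    OrderedDict mapping each severity (and 'other'/'ignored') to the matching lines'''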
+    patterns = [re.compile(p) for p in ['FATAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'VERBOSE']]
+    result = OrderedDict()
+    for p in patterns:
+        result[p.pattern] = []
+    result['other'] = []
+    result['ignored'] = []
+
+    running = False if start else True
+    for line in lines:
+        if not running and start and start.search(line):
+            running = True
+            continue
+        if running and end and end.search(line):
+            running = False
+            break
+        if running:
+            matched = False
+            ignored = False
+            for p in ignore:
+                if p.search(line):
+                    result['ignored'].append(line)
+                    ignored = True
+                    break
+            if ignored:
+                continue
+            for p in patterns:
+                if p.search(line):
+                    result[p.pattern].append(line)
+                    matched = True
+                    break
+            if not matched:
+                result['other'].append(line)
+
+    return result
+
+
+def make_summary(result):
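+    '''Reduce the per-severity line lists to counts, with the total under "all"'''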
+    summary = OrderedDict()
+    for p in result.keys():
+        summary[p] = len(result[p])
+    total = sum(summary.values())
+    summary['all'] = total
+    return summary
+
+
+def print_result(summary, full_result, print_messages=False):
+    summary_str = 'Found the following number of messages:\n'
+    for p, n in summary.items():
+        summary_str += '{:8d} {:s} messages\n'.format(n, p)
+    logging.info(summary_str)
+    if print_messages:
+        for p, lines in full_result.items():
+            logging.info('##### The following %s messages were found #####', p)
+            for line in lines:
+                print(line, end='')  # noqa: ATL901
+
+
+def save_to_json(result, filename):
+    logging.info('Saving results to %s', filename)
+    with open(filename, 'w') as f:
+        json.dump(result, f, indent=4)
+
+
+def main():
+    args = get_parser().parse_args()
+    logging.basicConfig(stream=sys.stdout,
+                        format='messageCounter %(levelname)-8s %(message)s',
+                        level=logging.DEBUG if args.verbose else logging.INFO)
+
+    start = re.compile(args.startPattern) if args.startPattern else None
+    end = re.compile(args.endPattern) if args.endPattern else None
+    args.ignore.extend(default_ignore_patterns)
+    ignore = [re.compile(p) for p in args.ignore]
+
+    for fname in args.logfiles:
+        if not os.path.isfile(fname):
+            logging.error('Cannot open file %s, skipping', fname)
+            continue
+        logging.info('Analysing file %s', fname)
+        with open(fname) as f:
+            messages = extract_messages(f, start, end, ignore)
+        summary = make_summary(messages)
+        print_result(summary, messages, args.printMessages)
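+        # One JSON summary per analysed log file; the MessageCount.<log>.json naming
+        # is what TrigValSteering's MessageCountStep looks for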
+        out_file_name = 'MessageCount.{:s}.json'.format(fname)
+        save_to_json(summary, out_file_name)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
index 78480234817600b8fe027ba2e94c3be2a31ba9b2..1800ee5c0c47c571e6a30de0c647c95ca9ce3a7b 100644
--- a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
+++ b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
@@ -10,6 +10,7 @@ import sys
 import os
 import re
 import subprocess
+import json
 
 from TrigValTools.TrigValSteering.Step import Step
 from TrigValTools.TrigValSteering.Common import art_input_eos, art_input_cvmfs
@@ -514,6 +515,85 @@ class ZeroCountsStep(Step):
         return self.result, cmd
 
 
+class MessageCountStep(Step):
+    '''Count messages printed inside the event loop'''
+
+    def __init__(self, name='MessageCount'):
+        super(MessageCountStep, self).__init__(name)
+        self.executable = 'messageCounter.py'
+        self.log_regex = r'(athena\..*log$|athenaHLT:.*\.out$|^log\..*to.*)'
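+        # Regex selecting which log files in the current directory are analysed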
+        self.start_pattern = r'(HltEventLoopMgr|AthenaHiveEventLoopMgr).*INFO Starting loop on events'
+        self.end_pattern = r'(HltEventLoopMgr.*INFO All events processed|AthenaHiveEventLoopMgr.*INFO.*Loop Finished)'
+        self.info_threshold = None
+        self.debug_threshold = None
+        self.verbose_threshold = None
+        self.other_threshold = None
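+        # Thresholds left as None are set in configure(): INFO and "other" default
+        # to the first exec step's max_events, DEBUG and VERBOSE default to zero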
+        self.auto_report_result = True
+
+    def configure(self, test):
+        self.args += ' -s "{:s}"'.format(self.start_pattern)
+        self.args += ' -e "{:s}"'.format(self.end_pattern)
+        if self.info_threshold is None:
+            self.info_threshold = test.exec_steps[0].max_events
+        if self.debug_threshold is None:
+            self.debug_threshold = 0
+        if self.verbose_threshold is None:
+            self.verbose_threshold = 0
+        if self.other_threshold is None:
+            self.other_threshold = test.exec_steps[0].max_events
+        super(MessageCountStep, self).configure(test)
+
+    def run(self, dry_run=False):
+        files = os.listdir('.')
+        r = re.compile(self.log_regex)
+        log_files = [f for f in files if r.match(f)]
+        self.args += ' ' + ' '.join(log_files)
+        auto_report = self.auto_report_result
+        self.auto_report_result = False
+        ret, cmd = super(MessageCountStep, self).run(dry_run)
+        self.auto_report_result = auto_report
+        if ret != 0:
+            self.log.error('%s failed', self.name)
+            self.result = 1
+            if self.auto_report_result:
+                self.report_result()
+            return self.result, cmd
+        (num_info, num_debug, num_verbose, num_other) = (0, 0, 0, 0)
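+        # Sum the counts from the per-log-file JSON summaries written by messageCounter.py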
+        for log_file in log_files:
+            json_file = 'MessageCount.{:s}.json'.format(log_file)
+            if not os.path.isfile(json_file):
+                self.log.warning('%s cannot open file %s', self.name, json_file)
+                continue
+            with open(json_file) as f:
+                summary = json.load(f)
+                num_info += summary['INFO']
+                num_debug += summary['DEBUG']
+                num_verbose += summary['VERBOSE']
+                num_other += summary['other']
+        if num_info > self.info_threshold:
+            self.log.info(
+                '%s Number of INFO messages %s is higher than threshold %s',
+                self.name, num_info, self.info_threshold)
+            self.result += 1
+        if num_debug > self.debug_threshold:
+            self.log.info(
+                '%s Number of DEBUG messages %s is higher than threshold %s',
+                self.name, num_debug, self.debug_threshold)
+            self.result += 1
+        if num_verbose > self.verbose_threshold:
+            self.log.info(
+                '%s Number of VERBOSE messages %s is higher than threshold %s',
+                self.name, num_verbose, self.verbose_threshold)
+            self.result += 1
+        if num_other > self.other_threshold:
+            self.log.info(
+                '%s Number of "other" messages %s is higher than threshold %s',
+                self.name, num_other, self.other_threshold)
+            self.result += 1
+        if self.auto_report_result:
+            self.report_result()
+        return self.result, cmd
+
+
 def default_check_steps(test):
     '''
     Create the default list of check steps for a test. The configuration
@@ -588,6 +668,10 @@ def default_check_steps(test):
         checkwarn.log_file = log_to_check
     check_steps.append(checkwarn)
 
+    # MessageCount
+    msgcount = MessageCountStep('MessageCount')
+    check_steps.append(msgcount)
+
     # RegTest
     regtest = RegTestStep()
     if log_to_check is not None: