diff --git a/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py b/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py
new file mode 100755
index 0000000000000000000000000000000000000000..d67500164cd63a7194d8929a8eb552215c55a909
--- /dev/null
+++ b/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration
+#
+
+# This script parses outputs of trigger nightly test post-processing steps and creates a JSON file with extra data
+# other than result codes (which are handled by ART).
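+#
+# An illustrative example of the resulting extra-results.json (values are
+# hypothetical; 'n/a' is stored for any quantity that could not be extracted):
+#
+# {
+#     "num-errors": "0",
+#     "num-warnings": "12",
+#     "num-histograms": "514",
+#     "memory-usage": {
+#         "vmem": "1234.567",
+#         "delta-vmem": "12.345",
+#         "rss": "987.654",
+#         "delta-rss": "1.234"
+#     }
+# }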
+
+import json
+import re
+import sys
+import logging
+import os.path
+from collections import OrderedDict
+
+
+class LastUpdatedOrderedDict(OrderedDict):
+    'Store items in the order the keys were last added'
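+    # Minimal usage sketch: re-inserting an existing key moves it to the end:
+    #   d = LastUpdatedOrderedDict()
+    #   d['a'] = 1; d['b'] = 2; d['a'] = 3
+    #   list(d.keys())  # -> ['b', 'a']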
+
+    def __setitem__(self, key, value):
+        if key in self:
+            del self[key]
+        OrderedDict.__setitem__(self, key, value)
+
+
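+# Return the first match of <pattern> in <filename>, extended to the end of
+# its line, or None if the file is missing or the pattern is not found.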
+def find_line_in_file(pattern, filename):
+    if not os.path.isfile(filename):
+        logging.warning("Cannot open file {}".format(filename))
+        return None
+    with open(filename) as logfile:
+        lines = re.findall("{}.*$".format(pattern), logfile.read(), re.MULTILINE)
+        if len(lines) == 0:
+            logging.warning("Could not find pattern \"{}\" in file {}".format(pattern, filename))
+            return None
+        return lines[0]
+
+
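+# Extract the message count from check_log.pl output. Assumes a summary line
+# of the form (illustrative): "Found messages in <logfile> (<N>):"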
+def get_num_from_checklog(filename):
+    line = find_line_in_file('Found messages in', filename)
+    if line is None:
+        logging.warning("Cannot extract number of messages from {}".format(filename))
+        return None
+    logging.debug("line: {}".format(line))
+    m = re.search(r'\((.+?)\):', line)
+    return m.group(1) if m is not None else None
+
+
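+# Extract the histogram count from histSizes.py output. Assumes a summary
+# line of the form (illustrative): "Total histograms: <N>"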
+def get_num_histos(filename):
+    line = find_line_in_file('Total histograms:', filename)
+    if line is None:
+        logging.warning("Cannot extract number of histograms from {}".format(filename))
+        return None
+    logging.debug("line: {}".format(line))
+    return line.split()[-1]
+
+
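+# Convert a memory value to MB, e.g. convert_to_megabytes(512, 'kB') == 0.5.
+# Returns None if the unit is not recognised.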
+def convert_to_megabytes(number, unit):
+    multipliers = {
+        'B': 1.0/(1024**2),
+        'kB': 1.0/1024,
+        'MB': 1,
+        "GB": 1024,
+        'TB': 1024**2
+    }
+    if unit in multipliers:
+        return float(number)*multipliers[unit]
+    logging.error("Unit conversion failed from {} to MB".format(unit))
+    return None
+
+
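+# Parse the end-of-job value and the delta from a PerfMon summary line given
+# as lines[0]. The word positions (5:7 for value+unit, 8:10 for delta+unit)
+# are assumed to match the perfmon.py summary table layout.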
+def extract_mem(lines):
+    words = lines[0].split()
+    mem_end = words[5:7]
+    logging.debug("mem_end = {}".format(mem_end))
+    mem_delta = words[8:10]
+    logging.debug("mem_delta = {}".format(mem_delta))
+    mem_mb = convert_to_megabytes(mem_end[0], mem_end[1])
+    logging.debug("mem_mb = {}".format(mem_mb))
+    delta_mb = convert_to_megabytes(mem_delta[0], mem_delta[1])
+    logging.debug("delta_mb = {}".format(delta_mb))
+    return mem_mb, delta_mb
+
+
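+# Extract VMem/RSS usage from the perfmon.py text summary: only the section
+# between the "=== [evt - slice] ===" and "=== [fin - slice] ===" headers is
+# scanned for lines starting with "VMem" and "RSS".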
+def analyse_perfmon(filename):
+    if not os.path.isfile(filename):
+        logging.warning("Cannot open file {}".format(filename))
+        return None
+    with open(filename) as logfile:
+        first_line = -1
+        last_line = -1
+        all_lines = logfile.readlines()
+        for i, line in enumerate(all_lines):
+            if first_line >= 0 and last_line >= 0:
+                break
+            if "=== [evt - slice] ===" in line:
+                first_line = i
+            elif "=== [fin - slice] ===" in line:
+                last_line = i
+        if first_line < 0 or last_line < 0:
+            logging.warning("Cannot extract memory usage information from {}".format(filename))
+            return None
+        evt_mon_lines = all_lines[first_line:last_line]
+        evt_mon_text = '\n'.join(evt_mon_lines)
+        vmem_line = re.findall("^VMem.*$", evt_mon_text, re.MULTILINE)
+        rss_line = re.findall("^RSS.*$", evt_mon_text, re.MULTILINE)
+        logging.debug("vmem_line = {}".format(vmem_line))
+        logging.debug("rss_line = {}".format(rss_line))
+        if len(vmem_line) == 0:
+            logging.warning("Cannot extract VMem information from {}".format(filename))
+            return None
+        if len(rss_line) == 0:
+            logging.warning("Cannot extract RSS information from {}".format(filename))
+            return None
+        vmem, dvmem = extract_mem(vmem_line)
+        rss, drss = extract_mem(rss_line)
+        data = LastUpdatedOrderedDict()
+        data['vmem'] = "{0:.3f}".format(vmem)
+        data['delta-vmem'] = "{0:.3f}".format(dvmem)
+        data['rss'] = "{0:.3f}".format(rss)
+        data['delta-rss'] = "{0:.3f}".format(drss)
+        return data
+
+
+def main():
+    logging.basicConfig(stream=sys.stdout,
+                        format='%(levelname)-8s %(message)s',
+                        level=logging.INFO)
+
+    data = LastUpdatedOrderedDict()
+
+    # Get number of errors
+    ne = get_num_from_checklog('checklog.log')
+    logging.debug("ne: {}".format(ne))
+    if ne is None:
+        logging.warning("Failed to read number of errors from the log")
+        data['num-errors'] = 'n/a'
+    else:
+        data['num-errors'] = ne
+
+    # Get number of warnings
+    nw = get_num_from_checklog('warnings.log')
+    logging.debug("nw: {}".format(nw))
+    if nw is None:
+        logging.warning("Failed to read number of warnings from the log")
+        data['num-warnings'] = 'n/a'
+    else:
+        data['num-warnings'] = nw
+
+    # Get number of histograms
+    nh = get_num_histos('histSizes.log')
+    logging.debug("nh: {}".format(nh))
+    if nh is None:
+        logging.warning("Failed to read number of histograms from the log")
+        data['num-histograms'] = 'n/a'
+    else:
+        data['num-histograms'] = nh
+
+    # Get memory usage information
+    perfmon_data = analyse_perfmon("ntuple.perfmon.summary.txt")
+    if perfmon_data is None:
+        logging.warning("Failed to read memory usage information from the log")
+        data['memory-usage'] = 'n/a'
+    else:
+        data['memory-usage'] = perfmon_data
+
+    # Save data to JSON file
+    with open('extra-results.json', 'w') as outfile:
+        json.dump(data, outfile, indent=4)
+
+
+if "__main__" in __name__:
+    sys.exit(main())
diff --git a/Trigger/TrigValidation/TriggerTest/share/TriggerTestCommon.py b/Trigger/TrigValidation/TriggerTest/share/TriggerTestCommon.py
index 9da516b98b966833880d8b69a9b1741877f2afac..8623d1be67e4233ed5b2e2528fe2ccea954e3864 100644
--- a/Trigger/TrigValidation/TriggerTest/share/TriggerTestCommon.py
+++ b/Trigger/TrigValidation/TriggerTest/share/TriggerTestCommon.py
@@ -71,7 +71,12 @@ if not ('checkLeak' in dir()):
     if not ('doPerfMon' in dir()):
         rec.doPerfMon = True
     if rec.doPerfMon:
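+        # Keep only basic PerfMon monitoring; detailed, semi-detailed and
+        # fast-mon modes are explicitly disabled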
+        rec.doDetailedPerfMon = False
+        rec.doSemiDetailedPerfMon = False
         jobproperties.PerfMonFlags.doMonitoring = True
+        jobproperties.PerfMonFlags.doDetailedMonitoring = False
+        jobproperties.PerfMonFlags.doSemiDetailedMonitoring = False
+        jobproperties.PerfMonFlags.doFastMon = False
         jobproperties.PerfMonFlags.OutputFile = "ntuple.root"
 
 
diff --git a/Trigger/TrigValidation/TriggerTest/test/exec_art_triggertest_post.sh b/Trigger/TrigValidation/TriggerTest/test/exec_art_triggertest_post.sh
index 1aacb10be503c238861a9a2c65b1bc6d4c8e0cd2..b78ca98d689623ee00294fbd50677f83c9af4f63 100755
--- a/Trigger/TrigValidation/TriggerTest/test/exec_art_triggertest_post.sh
+++ b/Trigger/TrigValidation/TriggerTest/test/exec_art_triggertest_post.sh
@@ -38,15 +38,22 @@ if [ "${ATH_RETURN}" -ne "0" ] && [ -n "${gitlabTargetBranch}" ]; then
   cat ${JOB_LOG}
 fi
 
-echo $(date "+%FT%H:%M %Z")"     Running checklog"
+echo $(date "+%FT%H:%M %Z")"     Running checklog for errors"
 timeout 5m check_log.pl --config checklogTriggerTest.conf --showexcludestats ${JOB_LOG} 2>&1 | tee checklog.log
-
 echo "art-result: ${PIPESTATUS[0]} CheckLog"
 
+echo $(date "+%FT%H:%M %Z")"     Running checklog for warnings"
+timeout 5m check_log.pl --config checklogTriggerTest.conf --noerrors --warnings --showexcludestats ${JOB_LOG} >warnings.log 2>&1
+
 ### PERFMON
 
+echo $(date "+%FT%H:%M %Z")"     Running perfmon"
 timeout 5m perfmon.py -f 0.90 ntuple.pmon.gz
-timeout 5m convert -density 300 -trim ntuple.perfmon.pdf -quality 100 -resize 50% ntuple.perfmon.png
+
+### HISTOGRAM COUNT
+
+echo $(date "+%FT%H:%M %Z")"     Running histSizes"
+timeout 5m histSizes.py -t expert-monitoring.root >histSizes.log 2>&1
 
 ### CHAINDUMP
 
@@ -75,7 +82,7 @@ mv athena.regtest athena.regtest.new
 
 if [ -f ${REF_FOLDER}/expert-monitoring.root ]; then
   echo $(date "+%FT%H:%M %Z")"     Running rootcomp"
-  timeout 10m rootcomp.py ${REF_FOLDER}/expert-monitoring.root expert-monitoring.root 2>&1 | tee rootcompout.log
+  timeout 10m rootcomp.py ${REF_FOLDER}/expert-monitoring.root expert-monitoring.root >rootcompout.log 2>&1
   echo "art-result: ${PIPESTATUS[0]} RootComp"
   echo $(date "+%FT%H:%M %Z")"     Running checkcounts"
   timeout 10m trigtest_checkcounts.sh 0 expert-monitoring.root ${REF_FOLDER}/expert-monitoring.root HLT 2>&1 | tee checkcountout.log
@@ -143,6 +150,12 @@ else
   echo $(date "+%FT%H:%M %Z")"     No AOD.pool.root to check"
 fi
 
+### GENERATE JSON WITH POST-PROCESSING INFORMATION
+
+echo $(date "+%FT%H:%M %Z")"     Running trig-test-json.py"
+timeout 5m trig-test-json.py
+cat extra-results.json && echo
+
 ### SUMMARY
 
 echo  $(date "+%FT%H:%M %Z")"     Files in directory:"
diff --git a/Trigger/TrigValidation/TriggerTest/test/exec_athena_art_trigger_validation.sh b/Trigger/TrigValidation/TriggerTest/test/exec_athena_art_trigger_validation.sh
index 3dc9c4be6e2ad7c766d8e5df74efe2be6624422d..7ae05d9993d41401707645bcea1de8273c3f3c43 100755
--- a/Trigger/TrigValidation/TriggerTest/test/exec_athena_art_trigger_validation.sh
+++ b/Trigger/TrigValidation/TriggerTest/test/exec_athena_art_trigger_validation.sh
@@ -58,6 +58,11 @@ else
 fi
 
 
+######################################
+
+# Generate an empty PoolFileCatalog.xml - this prevents incorrect handling of crashes on the grid
+art.py createpoolfile
+
 ######################################
 
 echo "Running athena command:"