diff --git a/Trigger/TrigValidation/TrigValTools/bin/check_log.py b/Trigger/TrigValidation/TrigValTools/bin/check_log.py
index 75e21bf84d8bbbe2ced1d34782396805173a55e5..7d720c44e5a88ee0a3b527e7098309b3c64ea5f2 100755
--- a/Trigger/TrigValidation/TrigValTools/bin/check_log.py
+++ b/Trigger/TrigValidation/TrigValTools/bin/check_log.py
@@ -6,7 +6,6 @@ import re
 import argparse
 import sys
 import os
-import six
 
 desc = 'Tool to check for error messages in a log file. By default ERROR, FATAL \
   and CRITICAL messages are considered. The config file may be used to \
@@ -131,8 +130,7 @@ def scanLogfile():
     msgLevels = re.compile('|'.join(pattern))
     igLevels = re.compile('|'.join(ignorePattern))
     logFileAddress = args.logfile
-    encargs = {} if six.PY2 else {'encoding' : 'utf-8'}
-    with open(logFileAddress,'r',**encargs) as logFile:
+    with open(logFileAddress, 'r', encoding='utf-8') as logFile:
         tracing = False
         for line in logFile:
             #Tracing only makes sense for errors
diff --git a/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py b/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py
index 44148ce9c9ce665e281d299c014c2df1892bc808..69a28999024bcb9ce2c4889bd0eafd97f2d001cf 100755
--- a/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py
+++ b/Trigger/TrigValidation/TrigValTools/bin/messageCounter.py
@@ -16,7 +16,6 @@ import logging
 import argparse
 import json
 from collections import OrderedDict
-import six
 
 
 default_ignore_patterns = [
@@ -106,11 +105,11 @@ def make_summary(result):
 
 def print_result(summary, full_result, print_messages=False):
     summary_str = 'Found the following number of messages:\n'
-    for p, n in six.iteritems(summary):
+    for p, n in summary.items():
         summary_str += '{:8d} {:s} messages\n'.format(n, p)
     logging.info(summary_str)
     if print_messages:
-        for p, lines in six.iteritems(full_result):
+        for p, lines in full_result.items():
             logging.info('##### The following %s messages were found #####', p)
             for line in lines:
                 print(line, end='')  # noqa: ATL901
@@ -144,8 +143,7 @@ def main():
             logging.error('Cannot open file %s, skipping', fname)
             continue
         logging.info('Analysing file %s', fname)
-        encargs = {} if six.PY2 else {'encoding' : 'utf-8'}
-        with open(fname, **encargs) as f:
+        with open(fname, encoding='utf-8') as f:
             messages = extract_messages(f, start, end, ignore)
         summary = make_summary(messages)
         print_result(summary, messages, args.printMessages)
diff --git a/Trigger/TrigValidation/TrigValTools/bin/runTrigART.py b/Trigger/TrigValidation/TrigValTools/bin/runTrigART.py
index 69b6d38dfc0ddacc169cb07ce6924a675eef4cad..7a630cf809658981763fff7f25c7873cb7e46b62 100755
--- a/Trigger/TrigValidation/TrigValTools/bin/runTrigART.py
+++ b/Trigger/TrigValidation/TrigValTools/bin/runTrigART.py
@@ -10,7 +10,6 @@ import argparse
 import shutil
 import subprocess
 import json
-import six
 from TrigValTools.TrigARTUtils import package_prefix, find_scripts, remember_cwd
 
 
@@ -121,7 +120,7 @@ def analyse_results(all_test_results):
     max_len_col1 = len(max(table.keys(), key=len))
     max_len_col2 = len(max(table.values(), key=len))
     logging.info('-'*(max_len_col1+max_len_col2+7))
-    for k, v in six.iteritems(table):
+    for k, v in table.items():
         logging.info('| {col1:<{width1}} | {col2:<{width2}} |'.format(
             col1=k, width1=max_len_col1,
             col2=v, width2=max_len_col2))
diff --git a/Trigger/TrigValidation/TrigValTools/bin/runTrigCI.py b/Trigger/TrigValidation/TrigValTools/bin/runTrigCI.py
index ddb93bbcdbb2b3019d5ff6928e561bd0280b8b9a..fb8ac6167d20df45fcd419abe76ac9fe8120f04c 100755
--- a/Trigger/TrigValidation/TrigValTools/bin/runTrigCI.py
+++ b/Trigger/TrigValidation/TrigValTools/bin/runTrigCI.py
@@ -11,7 +11,6 @@ import logging
 import argparse
 import subprocess
 import errno
-from six import iteritems
 from collections import OrderedDict
 from TrigValTools.TrigARTUtils import find_scripts, remember_cwd
 
@@ -117,7 +116,7 @@ def main():
     logging.info('RESULTS SUMMARY:')
     logging.info('='*(max_name_len+11))
     final_code = 0
-    for script, result in iteritems(results):
+    for script, result in results.items():
         logging.info('| %s : %4d |', '{:{width}s}'.format(script, width=max_name_len), result)
         if abs(result) > final_code:
             final_code = abs(result)
diff --git a/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py b/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py
index ec1b3e6a85313113f6f696db573deac1a496b1f5..75d2d79cd4d01efacde122a59727ddf76d19e0fe 100755
--- a/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py
+++ b/Trigger/TrigValidation/TrigValTools/bin/trig-test-json.py
@@ -12,7 +12,6 @@ import re
 import sys
 import logging
 import os
-import six
 from collections import OrderedDict
 from TrigValTools.TrigARTUtils import first_existing_file, newest_file
 
@@ -84,7 +83,7 @@ def convert_to_megabytes(number, unit):
         "GB": 1024,
         'TB': 1024**2
     }
-    for unit_name, mult in six.iteritems(multipliers):
+    for unit_name, mult in multipliers.items():
         if unit_name == unit:
             return float(number)*mult
     logging.error("Unit conversion failed from {} to MB".format(unit))
diff --git a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
index 4b7b19a736ec2ec48eee83706294181cede99f10..02b2a8137303e2300a5933fc0e1a72854abb3a60 100644
--- a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
+++ b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/CheckSteps.py
@@ -10,7 +10,6 @@ import os
 import re
 import subprocess
 import json
-import six
 import glob
 
 from TrigValTools.TrigValSteering.Step import Step, get_step_from_list
@@ -147,9 +146,8 @@ class LogMergeStep(Step):
                 self.log_files.append(f)
 
     def merge_logs(self):
-        encargs = {} if six.PY2 else {'encoding' : 'utf-8'}
         try:
-            with open(self.merged_name, 'w', **encargs) as merged_file:
+            with open(self.merged_name, 'w', encoding='utf-8') as merged_file:
                 for log_name in self.log_files:
                     if not os.path.isfile(log_name):
                         if self.warn_if_missing:
@@ -157,7 +155,7 @@ class LogMergeStep(Step):
                             merged_file.write(
                                 '### WARNING Missing {} ###\n'.format(log_name))
                         continue
-                    with open(log_name, **encargs) as log_file:
+                    with open(log_name, encoding='utf-8') as log_file:
                         merged_file.write('### {} ###\n'.format(log_name))
                         for line in log_file:
                             merged_file.write(line)
@@ -305,11 +303,10 @@ class RegTestStep(RefComparisonStep):
         if not os.path.isfile(log_file):
             self.log.error('%s input file %s is missing', self.name, log_file)
             return False
-        encargs = {} if six.PY2 else {'encoding' : 'utf-8'}
-        with open(log_file, **encargs) as f_in:
+        with open(log_file, encoding='utf-8') as f_in:
             matches = re.findall('({}.*).*$'.format(self.regex),
                                  f_in.read(), re.MULTILINE)
-            with open(self.input_file, 'w', **encargs) as f_out:
+            with open(self.input_file, 'w', encoding='utf-8') as f_out:
                 for line in matches:
                     linestr = str(line[0]) if type(line) is tuple else line
                     f_out.write(linestr+'\n')
@@ -563,8 +560,7 @@ class ZeroCountsStep(Step):
                 self.name, input_file)
             return -1
         lines_checked = 0
-        encargs = {} if six.PY2 else {'encoding' : 'utf-8'}
-        with open(input_file, **encargs) as f_in:
+        with open(input_file, encoding='utf-8') as f_in:
             for line in f_in.readlines():
                 split_line = line.split()
                 lines_checked += 1
@@ -648,7 +644,7 @@ class MessageCountStep(Step):
                 self.log.warning('%s cannot open file %s', self.name, json_file)
             with open(json_file) as f:
                 summary = json.load(f)
-                for level, threshold in six.iteritems(self.thresholds):
+                for level, threshold in self.thresholds.items():
                     if summary[level] > threshold:
                         self.result += 1
                         self.log.info(
diff --git a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/Step.py b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/Step.py
index 58bcaf906843d30d39bef4365ec1a50ee7984bb9..bdb5bf6a2f78a960fb8e9079ea2a154447dce482 100644
--- a/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/Step.py
+++ b/Trigger/TrigValidation/TrigValTools/python/TrigValSteering/Step.py
@@ -12,6 +12,7 @@ import signal
 import subprocess
 import time
 import re
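+# psutil is used below to inspect the process tree when generating timeout backtraces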
+import psutil
 from enum import Enum
 from threading import Timer
 from TrigValTools.TrigValSteering.Common import get_logger, art_result, running_in_CI
@@ -86,24 +87,24 @@ class Step(object):
         where the first is filled with the backtrace by this function
         (it has to be a list to be mutable).
         '''
-        # Produce backtrace for the parent and all children
-
         try:
-            import psutil
+            # Produce backtrace for the parent and all children
             parent = psutil.Process(pid)
             backtrace = ''
             for proc in [parent] + parent.children(recursive=True):
                 backtrace += '\nTraceback for {} PID {}:\n'.format(proc.name(), proc.pid)
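+                # check_output returns bytes in Python 3, so decode before appending to the str backtrace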
                 backtrace += subprocess.check_output('$ROOTSYS/etc/gdb-backtrace.sh {}'.format(proc.pid),
-                                                     stderr=subprocess.STDOUT, shell=True)
-        except ImportError:
-            # psutil is missing in LCG_96 python3
-            backtrace = 'psutil not available; no backtrace generated'
+                                                     stderr=subprocess.STDOUT, shell=True).decode('utf-8')
+            backtrace_list[0] = backtrace
 
-        backtrace_list[0] = backtrace
+            # Kill the whole process group with the requested signal
+            os.killpg(pid, signal)
 
-        # Kill the process
-        os.killpg(pid, signal)
+        except Exception as e:
+            # This may happen e.g. if one of the processes finishes before we generate its backtrace
+            msg = 'Caught exception while generating backtrace: ' + str(e)
+            backtrace_list[0] = msg
+            self.log.error(msg)
 
     def __execute_with_timeout(self, cmd, timeout_sec):
         '''