diff --git a/cmt/requirements b/cmt/requirements
index 3b767ce32c5ffa4354ceb343c696ad5326b6b28f..0ba6869b2f883bcdbed2b71cb4b90f49ff4e1d6f 100644
--- a/cmt/requirements
+++ b/cmt/requirements
@@ -3,7 +3,7 @@
 # Maintainer : Ben Couturier
 #============================================================================
 package           PRConfig
-version           v1r63
+version           v1r65
 
 #============================================================================
 # Structure, i.e. directories to process.
diff --git a/doc/release.notes b/doc/release.notes
index 9e125e5df5df5ac59fbfdb9ff5961b2c1cb9d76a..945e94cfb508e7aabe7f47ac620765422945ed7f 100644
--- a/doc/release.notes
+++ b/doc/release.notes
@@ -4,6 +4,125 @@
 ! Purpose     : App Configuration for performance and regression tests
 !-----------------------------------------------------------------------------
 
+========================= PRConfig v1r65 2023-12-07 =========================
+
+! 2023-12-05 - commit 2fd9c22
+
+ - Merge branch 'lugrazet-BW-hlt1-fix-loggingerror' into 'master'
+
+   [RTA BW Tests] quick fix for Hlt1 BW test. len(inputs) < 2 breaks logging
+   string
+
+   See merge request lhcb-datapkg/PRConfig!363
+
+! 2023-12-05 - commit ee5e840
+
+ - Merge branch 'spruce-bw-input-Dec23' into 'master'
+
+   Add new Dec2023 samples for Sprucing PR tests
+
+   See merge request lhcb-datapkg/PRConfig!364
+
+! 2023-11-29 - commit f4c873b
+
+ - Merge branch 'rjhunter-bwtest-cleanup-copies' into 'master'
+
+   [RTA BW tests] Small cleanups after !359
+
+   See merge request lhcb-datapkg/PRConfig!362
+
+! 2023-11-28 - commit 6f6c963
+
+ - Merge branch 'rjhunter-reduce-moore-threads-in-bw-test' into 'master'
+
+   [Bandwidth tests] Use LBN_BUILD_JOBS to properly set n_threads on Moore in BW
+   test
+
+   See merge request lhcb-datapkg/PRConfig!356
+
+! 2023-11-16 - commit 11208b4
+
+ - Merge branch 'rjhunter-chained-test-feasibility' into 'master'
+
+   [RTA BW tests] Test feasibility of copying HLT2 output to read into sprucing
+   test
+
+   See merge request lhcb-datapkg/PRConfig!359
+
+! 2023-11-08 - commit ac9460c
+
+ - Merge branch 'lugrazet-BW-hlt1testpage-cleanup' into 'master'
+
+   [RTA BW Tests] BW test page clean-ups
+
+   See merge request lhcb-datapkg/PRConfig!355
+
+========================= PRConfig v1r64 2023-11-01 =========================
+
+! 2023-11-01 - commit f7f0f10
+
+ - Merge branch 'lugrazet-BW-initialhlt1test' into 'master'
+
+   [RTA BW Tests] Introducing an Hlt1-bandwidth test via Moore_in_Allen
+
+   See merge request lhcb-datapkg/PRConfig!330
+
+! 2023-10-26 - commit e4b282f
+
+ - Merge branch 'bw-test-minor-update' into 'master'
+
+   Minor update to BW test page
+
+   See merge request lhcb-datapkg/PRConfig!353
+
+! 2023-10-17 - commit 2a05e36
+
+ - Merge branch 'audurier-ift-run3' into 'master'
+
+   Update Ion sequence and MC datasets
+
+   See merge request lhcb-datapkg/PRConfig!328
+
+! 2023-10-17 - commit 9749563
+
+ - Merge branch 'rm-2023_raw_hlt1_269939' into 'master'
+
+   Add 2023_raw_hlt1_269939 TestFileDB sample
+
+   See merge request lhcb-datapkg/PRConfig!352
+
+! 2023-10-11 - commit 81970da
+
+ - Merge branch 'sponce_newFileUT' into 'master'
+
+   Added new file to be used for Boole tests of the UT
+
+   See merge request lhcb-datapkg/PRConfig!335
+
+! 2023-10-09 - commit 51ab80c
+
+ - Merge branch 'xueting_addSMOG2jobs_update' into 'master'
+
+   Add_2_SMOG_jobs
+
+   See merge request lhcb-datapkg/PRConfig!351
+
+! 2023-10-03 - commit dfd6b02
+
+ - Merge branch 'dd4hep_future_upgrades_refactor' into 'master'
+
+   Fix errors caused by DD4Hep future upgrades refactor
+
+   See merge request lhcb-datapkg/PRConfig!350
+
+! 2023-09-28 - commit aab79d9
+
+ - Merge branch 'rjhunter-trim-fat-from-BW-tests' into 'master'
+
+   Refactor and speed-up the periodic BW tests
+
+   See merge request lhcb-datapkg/PRConfig!349
+
 ========================= PRConfig v1r63 2023-09-25 =========================
 
 ! 2023-09-13 - commit 1fd8aa7
diff --git a/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_pAr_Nu0.37.py b/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_pAr_Nu0.37.py
new file mode 100644
index 0000000000000000000000000000000000000000..b539eb8868a215c5ac30a2a64fec989f88e51520
--- /dev/null
+++ b/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_pAr_Nu0.37.py
@@ -0,0 +1,20 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from Moore import options
+
+from PRConfig.FilesFromDirac import get_access_urls_mc
+
+options.input_files = get_access_urls_mc(
+    "/MC/2022/pAr-Beam6800GeV-0GeV-2022-MagDown-SMOG2-Nu0.37-EPOS/Sim10b/Digi16",
+    "30000000", ["DIGI"])
+
+options.input_type = "ROOT"
+options.simulation = True
diff --git a/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_ppAr_Nu2.1.py b/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_ppAr_Nu2.1.py
new file mode 100644
index 0000000000000000000000000000000000000000..506e671267c269361a9a75a57c8c2ce9c0a0526c
--- /dev/null
+++ b/options/Moore/DataChallenges/DC_Sim10b_Digi16_SMOG2_ppAr_Nu2.1.py
@@ -0,0 +1,20 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from Moore import options
+
+from PRConfig.FilesFromDirac import get_access_urls_mc
+
+options.input_files = get_access_urls_mc(
+    "/MC/2022/ppAr-Beam6800GeV-2022-andSMOG2-Nu2.1andNu0.37-Pythia8andEPOS/Sim10b/Digi16",
+    "30000000", ["DIGI"])
+
+options.input_type = "ROOT"
+options.simulation = True
diff --git a/python/MooreTests/calculate_stream_overlap.py b/python/MooreTests/calculate_stream_overlap.py
index 4ea51d2b29e7b0da285521c180f6ef1fc183336c..e624123a8d743ba07b1d1c40207b11e0407a0ea1 100755
--- a/python/MooreTests/calculate_stream_overlap.py
+++ b/python/MooreTests/calculate_stream_overlap.py
@@ -30,7 +30,7 @@ def get_all_event_numbers(args):
     return ret
 
 
-def calculate_similarity_matrix(event_numbers_by_stream):
+def get_event_number_matrix(event_numbers_by_stream):
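+    """Return a boolean DataFrame with one column per stream and one row per
+    event number; an entry is True if that event appears in that stream."""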
 
     all_event_numbers = set([
         evt_no for evt_no_list in event_numbers_by_stream.values()
@@ -48,6 +48,11 @@ def calculate_similarity_matrix(event_numbers_by_stream):
         for evt_no in evt_no_list:
             df[stream][evt_no] = True
 
+    return df
+
+
+def calculate_similarity_matrix(df):
+
     jaccard = 1 - pairwise_distances(
         df.T.to_numpy(), metric='jaccard'
     )  # .T because pairwise_distances expects the items being compared to be rows rather than columns
@@ -57,6 +62,18 @@ def calculate_similarity_matrix(event_numbers_by_stream):
     return jaccard_sim_matrix_df
 
 
+def calculate_overlap_matrix(df):
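+    """Compute conditional overlaps between streams: the entry at (row B,
+    column A) is |A n B| / |A|, i.e. the fraction of events in target
+    stream A that also appear in comparison stream B.
+
+    Toy illustration (assumed numbers, not from any test file): for
+    stream A = {1, 2} and stream B = {2, 3}, column "A", row "B" holds
+    1/2, while the Jaccard index of A and B would be 1/3.
+    """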
+    cond_prob_per_stream = {stream: [] for stream in df.columns}
+    for target_stream in df.columns:
+        for comparison_stream in df.columns:
+            cond_prob_per_stream[target_stream].append(
+                sum(df[comparison_stream] * df[target_stream]) / sum(
+                    df[target_stream]))
+    overlap_matrix_df = pd.DataFrame(
+        cond_prob_per_stream, columns=df.columns, index=df.columns)
+    return overlap_matrix_df
+
+
 def save(df, htmlpath):
     # Generate HTML table for similarity matrix
     html = df.to_html(float_format=lambda x: f"{x:.1%}")
@@ -68,12 +85,18 @@ def main():
 
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        '-p', '--process', type=str, required=True, choices=['hlt2', 'spruce'])
+        '-p',
+        '--process',
+        type=str,
+        help='Compute for Hlt2 or Sprucing lines',
+        choices=['hlt2', 'spruce'],
+        required=True)
     parser.add_argument(
         '--stream-config',
         type=str,
-        required=True,
-        choices=["wg", "production"])
+        help='Choose production or per-WG stream configuration',
+        choices=['production', 'wg'],
+        required=True)
     parser.add_argument('--streams', nargs='+', type=str, required=True)
     args = parser.parse_args()
     fname_helper = FileNameHelper(args.process)
@@ -83,14 +106,23 @@ def main():
         print(
             f"Found {len(event_numbers[stream])} events for {stream} stream.")
 
+    df = get_event_number_matrix(event_numbers)
+
     ofile = fname_helper.jaccard_similarities_path(args.stream_config)
-    sim_matrix = calculate_similarity_matrix(event_numbers)
+    sim_matrix = calculate_similarity_matrix(df)
     print(
         f"Calculated similarity matrix. Printing and saving to html at {ofile}."
     )
     print(sim_matrix)
     save(sim_matrix, ofile)
 
+    ofile = fname_helper.overlap_matrix_path(args.stream_config)
+    overlap_matrix = calculate_overlap_matrix(df)
+    print(
+        f"Calculated overlap matrix. Printing and saving to html at {ofile}.")
+    print(overlap_matrix)
+    save(overlap_matrix, ofile)
+
 
 if __name__ == "__main__":
     main()
diff --git a/python/MooreTests/combine_rate_output.py b/python/MooreTests/combine_rate_output.py
index 282921980d1029b5c51a0e4255222f1df2267b22..8497bbc10fb44632eef68d0ceccb3444fe3978cd 100755
--- a/python/MooreTests/combine_rate_output.py
+++ b/python/MooreTests/combine_rate_output.py
@@ -25,19 +25,24 @@ COLUMNS_PER_STREAM = [
 ]
 
 
-def _columns_per_line():
-    # Possibility is here (add an arg) to make the thresholds change based on hlt2/spruce
+def _columns_per_line(process):
+    tols = {
+        # Tolerances per process.
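+        # Tuple order follows the flagged columns below: (Rate (kHz),
+        # Avg Total Event Size (kB), Total Bandwidth (GB/s),
+        # Avg DstData Size (kB), DstData Bandwidth (GB/s)).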
+        'hlt1': (1e3, None, 150, 0, 0),
+        'hlt2': (1, 1e3, 0.2, 1e3, 0.2),
+        'spruce': (1, 1e3, 0.2, 1e3, 0.2),
+    }[process]
     return {
         # col_name, threshold for turning it red to catch the reader's eye
         'Line': None,
         'Total Retention (%)': None,
-        'Rate (kHz)': 1,
+        'Rate (kHz)': tols[0],
         'Exclusive Retention(%)': None,
         'Exclusive Rate (kHz)': None,
-        'Avg Total Event Size (kB)': 1e3,
-        'Total Bandwidth (GB/s)': 0.2,
-        'Avg DstData Size (kB)': 1e3,
-        'DstData Bandwidth (GB/s)': 0.2
+        'Avg Total Event Size (kB)': tols[1],
+        'Total Bandwidth (GB/s)': tols[2],
+        'Avg DstData Size (kB)': tols[3],
+        'DstData Bandwidth (GB/s)': tols[4]
     }
 
 
@@ -46,7 +51,7 @@ def _sorted_df_by_retention(df):
         by=['Total Retention (%)'], ascending=False).reset_index(drop=True)
 
 
-def rates_all_lines(stream_config, fname_helper):
+def rates_all_lines(stream_config, fname_helper, process):
     """Make 1 enormous table with rate/bw info per line for all lines in all streams (i.e. n_rows = n_lines).
        Saves to .csv and .html.
        stream_config is either "production" or "wg"
@@ -60,7 +65,7 @@ def rates_all_lines(stream_config, fname_helper):
         frames.append(df)
 
     df = pd.concat(frames)
-    df.columns = _columns_per_line().keys()
+    df.columns = _columns_per_line(process).keys()
 
     df = _sorted_df_by_retention(df)
     df.to_csv(fname_helper.final_rate_table_all_lines_path("csv"))
@@ -69,7 +74,7 @@ def rates_all_lines(stream_config, fname_helper):
         return f'background-color: {color}' if val > threshold else ''
 
     styler = None
-    for column, threshold in _columns_per_line().items():
+    for column, threshold in _columns_per_line(process).items():
         # Make cell red if column value greater than threshold
         if threshold:
             if styler:
@@ -79,14 +84,16 @@ def rates_all_lines(stream_config, fname_helper):
                 styler = df.style.applymap(
                     highlight_vals, subset=[column], threshold=threshold)
 
-    html = styler.set_table_attributes("border=1").to_html()
+    html = styler.format(
+        '{:.3g}', subset=df.columns[
+            df.columns != 'Line']).set_table_attributes("border=1").to_html()
     with open(fname_helper.final_rate_table_all_lines_path("html"), 'w') as f:
         f.write(html)
 
     return
 
 
-def make_rate_table_row_per_line(stream_config, fname_helper):
+def make_rate_table_row_per_line(stream_config, fname_helper, process):
     """ Makes (1 table with rate/bw info per line in the streamed mdf) for all <stream_config> streams (i.e. n_tables = n_streams).
         Puts them all on 1 html page, adds hyperlinks to jump to the different streams on the page.
         Saves to .html page only.
@@ -113,23 +120,25 @@ def make_rate_table_row_per_line(stream_config, fname_helper):
             f.write(f'<head>{stream.upper()}</head>')
             f.write(f'<a id="{stream}_label">')
             df = pd.read_csv(file, header=None)
-            df.columns = _columns_per_line().keys()
+            df.columns = _columns_per_line(process).keys()
             df = _sorted_df_by_retention(df)
-            f.write(df.to_html())
+            f.write(
+                df.style.format(
+                    '{:.3g}', subset=df.columns[df.columns != 'Line']).
+                set_table_attributes("border=1").to_html())
             f.write('</a>')
             f.write('<br/><br/>')
 
     return
 
 
-def make_rate_table_row_per_stream(stream_config, fname_helper):
+def make_rate_table_row_per_stream(stream_config, fname_helper, process):
     """ Makes 1 table with rate/bw info integrated over the whole streamed mdf for all <stream_config> streams (i.e. a table with n_rows = n_streams).
         Saves to .html and .csv.
         stream_config is either "production" or "wg"
     """
 
     frames = []
-
     for file in glob.glob(
             fname_helper.tmp_rate_table_per_stream_path(stream_config, "*")):
         df = pd.read_csv(file, header=None)
@@ -143,7 +152,10 @@ def make_rate_table_row_per_stream(stream_config, fname_helper):
         fname_helper.final_rate_table_all_streams_path(
             stream_config, ext="csv"))
 
-    html = df.to_html()
+    html = df.style.format(
+        '{:.3g}',
+        subset=df.columns[df.columns != 'Stream']).set_table_attributes(
+            "border=1").to_html()
     with open(
             fname_helper.final_rate_table_all_streams_path(
                 stream_config, ext="html"), 'w') as f:
@@ -155,14 +167,23 @@ def make_rate_table_row_per_stream(stream_config, fname_helper):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument(
-        '--process', type=str, required=True, choices=["hlt2", "spruce"])
+        '-p',
+        '--process',
+        type=str,
+        help='Compute for Hlt1, Hlt2 or Sprucing lines',
+        choices=['hlt1', 'hlt2', 'spruce'],
+        required=True)
     args = parser.parse_args()
 
     fname_helper = FileNameHelper(args.process)
-    stream_configs = ["production", "wg"] if args.process == "hlt2" else ["wg"]
+    stream_configs, main_stream_config = {
+        "hlt1": (["streamless"], "streamless"),
+        "hlt2": (["production", "wg"], "production"),
+        "spruce": (["wg"], "wg")
+    }[args.process]
 
-    rates_all_lines("production" if args.process == "hlt2" else "wg",
-                    fname_helper)
+    rates_all_lines(main_stream_config, fname_helper, args.process)
     for stream_config in stream_configs:
-        make_rate_table_row_per_stream(stream_config, fname_helper)
-        make_rate_table_row_per_line(stream_config, fname_helper)
+        make_rate_table_row_per_stream(stream_config, fname_helper,
+                                       args.process)
+        make_rate_table_row_per_line(stream_config, fname_helper, args.process)
diff --git a/python/MooreTests/download_hlt2_output.py b/python/MooreTests/download_hlt2_output.py
new file mode 100644
index 0000000000000000000000000000000000000000..95b24d0fa123a5e78e00ea3d5dd4dce2d9e5bc44
--- /dev/null
+++ b/python/MooreTests/download_hlt2_output.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+
+import socket
+import os
+import atexit
+import tempfile
+import logging
+from datetime import datetime
+import shutil
+from Moore.qmtest.context import download_mdf_inputs_locally
+
+# Default cache dir is the current working directory as this is most convenient for the machine
+# that the test runs on periodically. It assumes the working directory is not cleaned up often,
+# and so the files remain available for subsequent jobs.
+DEFAULT_CACHE_DIRS = {'default': '.'}
+
+# prefer XDG_RUNTIME_DIR which should be on tmpfs
+FALLBACK_CACHE_DIR = os.getenv('XDG_RUNTIME_DIR', tempfile.gettempdir())
+
+FILE_TO_COPY = "mdf:root://eoslhcb.cern.ch//eos/lhcb/storage/lhcbpr/www/UpgradeRateTest/current_hlt2_output/hlt2_bw_testing__production__full.mdf"
+
+
+def default_cache_dirs():
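+    """Return the cache directory for this host: looked up by FQDN in
+    DEFAULT_CACHE_DIRS, falling back to the 'default' entry (the CWD)."""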
+    hostname = socket.getfqdn()
+    dirs = DEFAULT_CACHE_DIRS.get(hostname, DEFAULT_CACHE_DIRS['default'])
+    return dirs
+
+
+def main():
+
+    logging.basicConfig(
+        format='%(levelname)-7s %(message)s', level=logging.INFO)
+
+    cache_dir = default_cache_dirs()
+    if not os.path.isdir(cache_dir):
+        fallback_dir = tempfile.mkdtemp(
+            prefix='bandwidth-', dir=FALLBACK_CACHE_DIR)
+        logging.warning("default cache dir {!r} doesn't exist, using {}".format(
+            cache_dir, fallback_dir))
+        cache_dir = fallback_dir
+        # if we use the fallback directory, clean up after ourselves
+        atexit.register(shutil.rmtree, fallback_dir)
+
+    # Now download file
+    logging.info(f'Downloading input file {FILE_TO_COPY}')
+    # download_mdf_inputs_locally only downloads if files
+    # are not already available locally on the machine
+    logging.info(f'Downloading inputs for bandwidth job to {cache_dir}')
+    before_copy = datetime.now()
+    kB_to_GB = 1e3
+    downloaded_path = download_mdf_inputs_locally(
+        [FILE_TO_COPY], cache_dir, max_size=300 * kB_to_GB * 2e4
+    )  # Guesses as to output size and n_events in the FULL stream; TODO: improve
+    logging.info(
+        f"Downloaded {downloaded_path}. This took: {datetime.now() - before_copy}"
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/python/MooreTests/line-and-stream-rates.py b/python/MooreTests/line-and-stream-rates.py
index e764d8f23289162e17af009261cad28a1ba2368b..1a78a79d93c08ed69bca3e5db4adaf9233e1b2f0 100644
--- a/python/MooreTests/line-and-stream-rates.py
+++ b/python/MooreTests/line-and-stream-rates.py
@@ -12,7 +12,8 @@
 import GaudiPython as GP
 from GaudiConf.reading import decoder, unpack_rawevent, hlt_decisions
 from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
-                           EventSelector, createODIN)
+                           EventSelector, createODIN, LHCb__UnpackRawEvent,
+                           HltDecReportsDecoder)
 from GaudiConf import IOHelper
 from PyConf.application import configured_ann_svc
 import operator
@@ -46,6 +47,7 @@ from PRConfig.bandwidth_helpers import FileNameHelper
             6. Bandwidth
 
     When running wg-stream config, returns same figures as above (both per line and per stream)
+    When running streamless-stream config, returns just the per-line information.
 
 '''
 
@@ -247,14 +249,14 @@ if __name__ == '__main__':
         '-p',
         '--process',
         type=str,
-        help='Compute for Hlt2 or Sprucing lines',
-        choices=['hlt2', 'spruce'],
+        help='Compute for Hlt1, Hlt2 or Sprucing lines',
+        choices=['hlt1', 'hlt2', 'spruce'],
         required=True)
     parser.add_argument(
         '--stream-config',
         type=str,
-        help='Choose production or per-WG stream configuration',
-        choices=['production', 'wg'],
+        help='Choose production, per-WG or streamless stream configuration',
+        choices=['streamless', 'production', 'wg'],
         required=True)
     args = parser.parse_args()
 
@@ -264,17 +266,15 @@ if __name__ == '__main__':
 
     input_config = parse_yaml(args.config)
 
-    if args.process == "spruce" and args.stream_config == "production":
+    if args.process == "spruce" and args.stream_config != "wg":
         raise RuntimeError(
-            '"production" stream config not defined for sprucing. Please use "wg".'
+            '"production" and "streamless" stream configs are not defined for sprucing. Please use "wg".'
         )
-
-    LHCbApp(
-        DataType="Upgrade",
-        Simulation=True,
-        DDDBtag="dddb-20171126",
-        CondDBtag="sim-20171127-vc-md100",
-        EvtMax=n_events)
+    if args.process == "hlt1" and args.stream_config != "streamless":
+        raise RuntimeError(
+            '"production" and "wg" stream configs are not defined for hlt1. Please use "streamless".'
+        )
+    LHCbApp(DataType="Upgrade", Simulation=True, EvtMax=n_events)
     EventSelector().PrintFreq = 10000
     IODataManager(DisablePFNWarning=True)
 
@@ -282,43 +282,61 @@ if __name__ == '__main__':
     # because we need to set `input_process='Hlt2'` in `unpack_rawevent`
     # to read MDF output from Sprucing
     algs = []
-    unpack = unpack_rawevent(
-        bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
-        configurables=True)
-    hlt2 = [hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")]
-    if args.process == 'spruce':
-        spruce = [
-            hlt_decisions(
-                source="Spruce", output_loc="/Event/Spruce/DecReports")
-        ]
-    else:
-        spruce = []
-    decoder = decoder(input_process=args.process.capitalize())
-    algs = [unpack] + hlt2 + spruce + [decoder] + [createODIN(ODIN='myODIN')]
-
-    appMgr = ApplicationMgr(TopAlg=algs)
-    appMgr.ExtSvc += [
-        configured_ann_svc(json_file=fname_helper.tck(args.stream_config))
-    ]
+    with open(fname_helper.stream_config_json_path(args.stream_config)) as f:
+        lines = json.load(f)[args.stream]
 
     IOHelper("MDF").inputFiles(
         [fname_helper.mdf_fname_for_reading(args.stream_config, args.stream)])
 
-    with open(fname_helper.stream_config_json_path(args.stream_config)) as f:
-        lines = json.load(f)[args.stream]
-
+    # Hlt1 requires different unpacking than hlt2/sprucing.
+    if args.process == "hlt1":
+        unpacker = LHCb__UnpackRawEvent(
+            "UnpackRawEvent",
+            RawBankLocations=["DAQ/RawBanks/HltDecReports"],
+            BankTypes=["HltDecReports"])
+        decDec = HltDecReportsDecoder(
+            "HltDecReportsDecoder/Hlt1DecReportsDecoder",
+            OutputHltDecReportsLocation="/Event/Hlt1/DecReports",
+            SourceID="Hlt1",
+            DecoderMapping="TCKANNSvc",
+            RawBanks=unpacker.RawBankLocations[0])
+        appMgr = ApplicationMgr(TopAlg=[unpacker, decDec])
+        appMgr.ExtSvc += [configured_ann_svc(name='TCKANNSvc')]
+    else:
+        unpack = unpack_rawevent(
+            bank_types=['ODIN', 'HltDecReports', 'DstData', 'HltRoutingBits'],
+            configurables=True)
+        hlt2 = [
+            hlt_decisions(source="Hlt2", output_loc="/Event/Hlt2/DecReports")
+        ]
+        if args.process == 'spruce':
+            spruce = [
+                hlt_decisions(
+                    source="Spruce", output_loc="/Event/Spruce/DecReports")
+            ]
+        else:
+            spruce = []
+        decoder = decoder(input_process=args.process.capitalize())
+        algs = [unpack] + hlt2 + spruce + [decoder
+                                           ] + [createODIN(ODIN='myODIN')]
+        appMgr = ApplicationMgr(TopAlg=algs)
+        appMgr.ExtSvc += [
+            configured_ann_svc(json_file=fname_helper.tck(args.stream_config))
+        ]
     appMgr = GP.AppMgr()
     evt = appMgr.evtsvc()
 
-    # Calculates retention, rate and bandwidth per line and stream (file)
     evts_all, rawbanks_all, dst_all, event_stats, exclusive, raw, dst = processing_events_per_line_and_stream(
         LHCbApp().EvtMax, lines, args.process)
-    rates_per_line(
-        event_stats, exclusive, raw, dst, input_config['input_rate'],
-        fname_helper.tmp_rate_table_per_line_path(args.stream_config,
-                                                  args.stream))
+
+    # Calculate key quantities per stream
     rates_per_stream(
         evts_all, rawbanks_all, dst_all, args.stream,
         input_config['input_rate'],
         fname_helper.tmp_rate_table_per_stream_path(args.stream_config,
                                                     args.stream))
+    # Calculate key quantities per line
+    rates_per_line(
+        event_stats, exclusive, raw, dst, input_config['input_rate'],
+        fname_helper.tmp_rate_table_per_line_path(args.stream_config,
+                                                  args.stream))
diff --git a/python/MooreTests/line-descriptives.py b/python/MooreTests/line-descriptives.py
index a7899246717091aab4aca4e6d6d2a09771569583..8a17ef83a435b775ccf5160ab2a4f3c4ae2fac89 100644
--- a/python/MooreTests/line-descriptives.py
+++ b/python/MooreTests/line-descriptives.py
@@ -38,6 +38,11 @@ def _descriptives(lines, process):
     return
 
 
+if options.input_process == "Hlt1":
+    raise RuntimeError(
+        "line-descriptives only makes sense for options.input_process = Hlt2/Sprucing"
+    )
+
 options.input_type = 'MDF'
 options.simulation = True
 options.dddb_tag = 'dddb-20171010'
diff --git a/python/MooreTests/make_bandwidth_test_page.py b/python/MooreTests/make_bandwidth_test_page.py
index 75f01db7d26991d1e3d2850c309923d549174488..e771a6b49aeb824c47789e63d80181088991af7d 100644
--- a/python/MooreTests/make_bandwidth_test_page.py
+++ b/python/MooreTests/make_bandwidth_test_page.py
@@ -28,6 +28,8 @@ REPORT_TEMPLATE = jinja2.Template("""
 <body>
 <p>
     slot.build_id: $$version$$<br>
+    start time: $$start_time$$<br>
+    end time: $$end_time$$<br>
     platform: $$platform$$<br>
     hostname: $$hostname$$<br>
     cpu_info: $$cpu_info$$<br>
@@ -44,6 +46,7 @@ REPORT_TEMPLATE = jinja2.Template("""
     <ul>
     <li>Inclusive retention and rate</li>
     <li>(Jaccard) similarity matrix</li>
+    <li>(Conditional) overlap matrix</li>
     <li>Average DstData size and bandwidth</li>
     <li>Average event size and bandwidth</li>
     </ul>
@@ -65,17 +68,11 @@ REPORT_TEMPLATE = jinja2.Template("""
     <li>Radius of VELO opening: {{INPUT_VELO_RADIUS}} mm</li>
     </ul>
 </p>
-{{HLT2_OR_SPRUCE_TEMPLATE}}
+{{TEMPLATE}}
 <p>
     Other results are shown by plots or tables (in the links) below. <br>
 </p>
-<object type="image/png" data="lines_per_wg.png"></object>
-<p>
-    The number of selection lines per working group. <br>
-    "Other" category contains those lines with a parsed name that doesn't belong to any known WG. <br>
-    To make lines properly categorized, one should follow the naming convention,
-    name of lines should start with `Hlt2/Spruce[WG]_`.
-</p>
+{{LINES_PER_WG}}
 <object type="image/png" data="hist__rate.png"></object>
 <p>
     Distribution of rate of selection lines. <br>
@@ -85,12 +82,9 @@ REPORT_TEMPLATE = jinja2.Template("""
     or larger than 1 kHz, which requires some attention. <br>
     The rates of all lines are listed in a html page attached below. <br>
 </p>
-<object type="image/png" data="hist__dst_data_size.png"></object>
-<p>
-    Distribution of DstData RawBank size of selection lines. <br>
-    The total distribution is shown as a stacked histogram, split into several histograms of WGs. <br>
-    The distributions per WG is attached in the html page below.
-</p>
+
+{{DST_DATA_HIST}}
+
 <object type="image/png" data="hist__total_size.png"></object>
 <p>
     Distribution of total event size of selection lines. <br>
@@ -100,12 +94,9 @@ REPORT_TEMPLATE = jinja2.Template("""
     is larger than 1 MB, which requires some attention. <br>
     The event sizes of all lines are listed in a html page attached below. <br>
 </p>
-<object type="image/png" data="hist__dst_bandwidth.png"></object>
-<p>
-    Distribution of bandwidth computed from DstData RawBank size. <br>
-    The total distribution is shown as a stacked histogram, split into several histograms of WGs. <br>
-    The distributions per WG is attached in the html page below.
-</p>
+
+{{DST_BW_HIST}}
+
 <object type="image/png" data="hist__tot_bandwidth.png"></object>
 <p>
     Distribution of bandwidth computed from total event size. <br>
@@ -126,25 +117,26 @@ REPORT_TEMPLATE = jinja2.Template("""
     The maximum resident set size usage is $$max_rss$$ GB. <br>
     The maximum proportional set size usage is $$max_pss$$ GB. <br>
 </p>
-<ul>
-    <li><a href="{{BASE_PATH}}/other_lines.html">Show list of lines in "Other" category</a></li>
-    <li><a href="{{BASE_PATH}}/plots_per_wg.html">Show plots split by WGs</a></li>
-    <li><a href="{{BASE_PATH}}/all_rates.html">Show rates, event sizes and bandwidths of all lines</a></li>
-    <li><a href="{{BASE_PATH}}/similarities_jaccards.html"> Show similarities Jaccards of different stream configurations</a></li>
-    <li><a href="{{BASE_PATH}}/rates_streaming.html"> Show rates of streams under different configurations</a></li>
-    <li><a href="{{BASE_PATH}}/{{line_descr}}"> PersistReco and ExtraOutput for selection lines</a></li>
-    <li><a href="{{BASE_PATH}}/{{rate_table_split_by_wg_stream}}"> Split by working group: rates, event sizes and bandwidths of all lines</a></li>
-    $$comparison$$
-    </b></b>
-</ul>
-<p> Additional results for HLT2 Bandwidth test (not available for Sprucing test) </p>
-<ul>
-    <li><a href="{{BASE_PATH}}/{{rate_table_split_by_prod_stream}}"> Split by production stream: rates, event sizes and bandwidths of all lines</a></li>
-</ul>
+{{ALL_RESULTS}}
 </body>
 </html>
 """)
 
+HLT1_REPORT_TEMPLATE = jinja2.Template("""<p>
+    The bandwidth test was run under a single streamless configuration. <br>
+    The definition of the configuration can be found below.
+</p>
+<ul>
+    <li><a href="{{BASE_PATH}}/{{stream_config_json_wg}}">Streamless configuration</a></li>
+</ul>
+<p>
+    The streamless configuration is representative of data taking. <br>
+    The rates, event sizes and bandwidth results from the streamless configuration are: <br>
+</p>
+<p>
+</p>
+{{table_streamless_rates}}""")
+
 HLT2_REPORT_TEMPLATE = jinja2.Template("""<p>
     The bandwidth test was run under 3 streaming configurations: streamless (all lines written to the same output file), production-stream and wg-stream. <br>
     The definition of the production streaming and working-group streaming can be found below.
@@ -172,6 +164,73 @@ SPRUCE_REPORT_TEMPLATE = jinja2.Template("""<p>
 </p>
 {{table_wgstream_rates}}""")
 
+HLT1_ALL_RESULTS = jinja2.Template("""
+<ul>
+    <li><a href="{{BASE_PATH}}/all_rates.html">Show rates, event sizes and bandwidths of all lines</a></li>
+</ul>
+""")
+
+HLT2_ALL_RESULTS = jinja2.Template("""
+<ul>
+    <li><a href="{{BASE_PATH}}/other_lines.html">Show list of lines in "Other" category</a></li>
+    <li><a href="{{BASE_PATH}}/plots_per_wg.html">Show plots split by WGs</a></li>
+    <li><a href="{{BASE_PATH}}/all_rates.html">Show rates, event sizes and bandwidths of all lines</a></li>
+    <li><a href="{{BASE_PATH}}/similarity_matrices.html"> Show similarity Jaccards and overlap matrices between streams for different stream configurations</a></li>
+    <li><a href="{{BASE_PATH}}/rates_streaming.html"> Show rates of streams under different configurations</a></li>
+    <li><a href="{{BASE_PATH}}/{{line_descr}}"> PersistReco and ExtraOutput for selection lines</a></li>
+    <li><a href="{{BASE_PATH}}/{{rate_table_split_by_wg_stream}}"> Split by working group: rates, event sizes and bandwidths of all lines</a></li>
+    $$comparison$$
+    <li><a href="{{BASE_PATH}}/{{rate_table_split_by_prod_stream}}"> Split by production stream: rates, event sizes and bandwidths of all lines</a></li>
+    </b></b>
+</ul>
+""")
+
+SPRUCING_ALL_RESULTS = jinja2.Template("""
+<ul>
+    <li><a href="{{BASE_PATH}}/other_lines.html">Show list of lines in "Other" category</a></li>
+    <li><a href="{{BASE_PATH}}/plots_per_wg.html">Show plots split by WGs</a></li>
+    <li><a href="{{BASE_PATH}}/all_rates.html">Show rates, event sizes and bandwidths of all lines</a></li>
+    <li><a href="{{BASE_PATH}}/similarity_matrices.html"> Show similarity Jaccards and overlap matrices between streams for different stream configurations</a></li>
+    <li><a href="{{BASE_PATH}}/rates_streaming.html"> Show rates of streams under different configurations</a></li>
+    <li><a href="{{BASE_PATH}}/{{line_descr}}"> PersistReco and ExtraOutput for selection lines</a></li>
+    <li><a href="{{BASE_PATH}}/{{rate_table_split_by_wg_stream}}"> Split by working group: rates, event sizes and bandwidths of all lines</a></li>
+    $$comparison$$
+    </b></b>
+</ul>
+""")
+
+HLT1_LINES_PER_WG = jinja2.Template("""""")
+HLT1_DST_DATA_HIST = jinja2.Template("""""")
+HLT1_DST_BW_HIST = jinja2.Template("""""")
+
+HLT2_OR_SPRUCING_LINES_PER_WG = jinja2.Template("""
+<object type="image/png" data="lines_per_wg.png"></object>
+<p>
+    The number of selection lines per working group. <br>
+    "Other" category contains those lines with a parsed name that doesn't belong to any known WG. <br>
+    To make lines properly categorized, one should follow the naming convention,
+    name of lines should start with `Hlt1/Hlt2/Spruce[WG]_`.
+</p>
+""")
+
+HLT2_OR_SPRUCING_DST_DATA_HIST = jinja2.Template("""
+<object type="image/png" data="hist__dst_data_size.png"></object>
+<p>
+    Distribution of DstData RawBank size of selection lines. <br>
+    The total distribution is shown as a stacked histogram, split into several histograms of WGs. <br>
+    The distributions per WG are attached in the html page below.
+</p>
+""")
+
+HLT2_OR_SPRUCING_DST_BW_HIST = jinja2.Template("""
+<object type="image/png" data="hist__dst_bandwidth.png"></object>
+<p>
+    Distribution of bandwidth computed from DstData RawBank size. <br>
+    The total distribution is shown as a stacked histogram, split into several histograms of WGs. <br>
+    The distributions per WG are attached in the html page below.
+</p>
+""")
+
 TABLE_OTHER_LINE_TEMPLATE = jinja2.Template("""
 <p>
     List of line names that categorized to "Others".
@@ -239,12 +298,13 @@ def histo_maker(entry_list,
                 nbins=100,
                 range=None,
                 take_log=False,
+                log_th=-4,
                 stacked=False,
                 labels=[],
                 legend=False):
     if take_log:
-        safe_log = lambda rate: log10(max(rate, 0.1))
-        title = f"{title} (all values <= log10(0.1) are in the first bin)"
+        safe_log = lambda rate: log10(rate) if rate > float(f'1e{log_th}') else log_th - 1
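+        # e.g. with the default log_th=-4: safe_log(5e-3) = log10(5e-3) ~ -2.3,
+        # while safe_log(1e-5) = log_th - 1 = -5, i.e. the underflow (first) bin.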
+        title = f"{title} (all values <= log10(1e{log_th}) are in the first bin)"
         if stacked:
             # entry_list is a list of lists
             entry_list = [[safe_log(rate) for rate in lst]
@@ -280,25 +340,19 @@ def make_plots_per_wg(fname_helper, wg_name, wg_bw_info, process):
     '''
 
     title = f"{wg_name} {process.capitalize()}"
-    for attrib, xtitle, plot_bit, take_log, range in zip(
+    for attrib, xtitle, plot_bit in zip(
         ["rate", "dst_size", "tot_size", "dst_bw", "tot_bw"], [
-            "Log10(Rate [Hz])", "DstData RawBank Size [kB]",
-            "Total Event Size [kB]",
-            "Log10(Bandwidth from DstData Size [MB/s])",
-            "Log10(Bandwidth from Total Event Size [MB/s])"
+            "Rate [Hz]", "DstData RawBank Size [kB]", "Total Event Size [kB]",
+            "Bandwidth from DstData Size [GB/s]",
+            "Bandwidth from Total Event Size [GB/s]"
         ], [
             "rate", "dst_data_size", "total_size", "dst_bandwidth",
             "tot_bandwidth"
-        ], [True, False, False, True, True], [(-2, 7), None, None, (-2, 5),
-                                              (-2, 5)]):
+        ]):
         histo_maker(
-            getattr(wg_bw_info, attrib),
-            xtitle,
-            title,
+            getattr(wg_bw_info, attrib), xtitle, title,
             fname_helper.html_page_outputs_path(
-                f"hist__{plot_bit}__{wg_name}.png"),
-            range=range,
-            take_log=take_log)
+                f"hist__{plot_bit}__{wg_name}.png"))
 
 
 def make_plots(all_lines_bw_info,
@@ -324,7 +378,7 @@ def make_plots(all_lines_bw_info,
     list_other_lines = []
     for line, bw_info in all_lines_bw_info.items():
         found_wg = False
-        # Expect e.g {Hlt2,Spruce}<WG>_<rest-of-line-name>
+        # Expect e.g. {Hlt1,Hlt2,Spruce}<WG>_<rest-of-line-name>
         wg_guess = line.split("_")[0].removeprefix(process.capitalize())
         for wg in rate_info_per_wg.keys():
             if wg_guess.startswith(wg):
@@ -346,14 +400,6 @@ def make_plots(all_lines_bw_info,
         for k, info in rate_info_per_wg.items() if info.nlines != 0
     }
 
-    # Sort the wg in number of lines
-    rate_info_per_wg = {
-        k: info
-        for k, info in sorted(
-            rate_info_per_wg.items(), key=lambda x: x[1].nlines)
-        if info.nlines != 0
-    }
-
     # Make a pie plot of lines per WG
     labels = [f"{k} ({int(v.nlines)})" for k, v in rate_info_per_wg.items()]
     fig = plt.figure()
@@ -367,21 +413,21 @@ def make_plots(all_lines_bw_info,
 
     ### Make hist plots
     title = f"{process.capitalize()}"
-    for attrib, xtitle, title, plot_bit, take_log, range in zip(
+    for attrib, xtitle, title, plot_bit, take_log, log_th, range in zip(
         ["rate", "dst_size", "tot_size", "dst_bw", "tot_bw"], [
             "Log10(Rate [Hz])", "DstData RawBank Size [kB]",
             "Total Event Size [kB]",
-            "Log10(Bandwidth from DstData Size [MB/s])",
-            "Log10(Bandwidth from Total Event Size [MB/s])"
+            "Log10(Bandwidth from DstData Size [GB/s])",
+            "Log10(Bandwidth from Total Event Size [GB/s])"
         ], [
             f"Total Rate: {tot_rate:.2f} kHz", "", "", "",
             f"Total bandwidth: {tot_bandwidth:.2f} GB/s"
         ], [
             "rate", "dst_data_size", "total_size", "dst_bandwidth",
             "tot_bandwidth"
-        ], [True, False, False, True, True],
+        ], [True, False, False, True, True], [-1, 0, 0, -4, -4],
         [(-2, 7), (0, 500 if process == 'hlt2' else 1000),
-         (0, 500 if process == 'hlt2' else 1000), (-2, 5), (-2, 5)]):
+         (0, 500 if process == 'hlt2' else 1000), (-5, 2), (-5, 2)]):
         histo_maker(
             [getattr(info, attrib) for info in rate_info_per_wg.values()],
             xtitle,
@@ -389,6 +435,7 @@ def make_plots(all_lines_bw_info,
             fname_helper.html_page_outputs_path(f"hist__{plot_bit}.png"),
             range=range,
             take_log=take_log,
+            log_th=log_th,
             stacked=True,
             legend=True,
             labels=list(rate_info_per_wg.keys()))
@@ -440,7 +487,7 @@ if __name__ == '__main__':
         '-p',
         '--process',
         type=str,
-        choices=['hlt2', 'spruce'],
+        choices=['hlt1', 'hlt2', 'spruce'],
         required=True,
         help='Which stage was the test run on')
     parser.add_argument(
@@ -488,28 +535,32 @@ if __name__ == '__main__':
         fname_helper.final_rate_table_all_lines_path("csv"), sep=',')
     number_of_lines = len(df)
 
-    GB_to_MB = 1000
     kHz_to_Hz = 1000
     rate_bw_info_by_line = {
         df['Line'][i]: LineRateBWInfo(
             df['Rate (kHz)'][i] * kHz_to_Hz, df["Avg DstData Size (kB)"][i],
             df["Avg Total Event Size (kB)"][i],
-            df["DstData Bandwidth (GB/s)"][i] * GB_to_MB,
-            df["Total Bandwidth (GB/s)"][i] * GB_to_MB)
+            df["DstData Bandwidth (GB/s)"][i], df["Total Bandwidth (GB/s)"][i])
         for i in range(number_of_lines)
     }
 
     # Prepare messages to GitLab
-    # limits on rate: 1 kHz for Hlt2 rate and 0.5% for Sprucing retention
-    tol = 1000 if args.process == 'hlt2' else 500
+    # limits on rate: 1 MHz for Hlt1, 1 kHz for Hlt2 rate and 0.5% for Sprucing retention
+    tol = {'hlt1': 1e6, 'hlt2': 1000, 'spruce': 500}[args.process]
     n_low_rate = len(
         [info for info in rate_bw_info_by_line.values() if info.rate == 0])
     n_high_rate = len(
         [info for info in rate_bw_info_by_line.values() if info.rate > tol])
 
+    main_stream_config = {
+        "hlt1": "streamless",
+        "hlt2": "production",
+        "spruce": "wg"
+    }[args.process]
+
     prod_df = pd.read_csv(
         fname_helper.final_rate_table_all_streams_path(
-            "production" if args.process == "hlt2" else "wg", ext="csv"))
+            main_stream_config, ext="csv"))
     tot_rate = sum(prod_df['Rate (kHz)'])
     tot_bandwidth = sum(prod_df['Total Bandwidth (GB/s)'])
 
@@ -523,47 +574,74 @@ if __name__ == '__main__':
     other_line_table = make_other_line_table(other_line_list)
     plots_per_wg = make_plots_per_wg_list(wg_list)
 
+    with open(
+            fname_helper.final_rate_table_all_streams_path(main_stream_config),
+            "r") as rate_html:
+        table_main_stream_rates = rate_html.read()
     if args.process == 'hlt2':
-        with open(
-                fname_helper.final_rate_table_all_streams_path("production"),
-                "r") as rate_html:
-            table_5stream_rates = rate_html.read()
-        hlt2_or_spruce_template = HLT2_REPORT_TEMPLATE.render(
+        template = HLT2_REPORT_TEMPLATE.render(
             BASE_PATH=fname_helper.base_html_path(args.building_locally),
             stream_config_json_prod=fname_helper.stream_config_json_path(
                 "production", full_path=False),
             stream_config_json_wg=fname_helper.stream_config_json_path(
                 "wg", full_path=False),
-            table_5stream_rates=table_5stream_rates)
+            table_5stream_rates=table_main_stream_rates)
+        all_results = HLT2_ALL_RESULTS.render(
+            BASE_PATH=fname_helper.base_html_path(args.building_locally),
+            line_descr=fname_helper.line_descr_path(full_path=False),
+            rate_table_split_by_prod_stream=fname_helper.
+            final_rate_table_all_lines_split_by_stream_path(
+                "production", full_path=False),
+            rate_table_split_by_wg_stream=fname_helper.
+            final_rate_table_all_lines_split_by_stream_path(
+                "wg", full_path=False))
+        lines_per_wg = HLT2_OR_SPRUCING_LINES_PER_WG.render()
+        dst_data_hist = HLT2_OR_SPRUCING_DST_DATA_HIST.render()
+        dst_bw_hist = HLT2_OR_SPRUCING_DST_BW_HIST.render()
     elif args.process == 'spruce':
-        with open(fname_helper.final_rate_table_all_streams_path("wg"),
-                  "r") as rate_html:
-            table_wgstream_rates = rate_html.read()
-        hlt2_or_spruce_template = SPRUCE_REPORT_TEMPLATE.render(
+        template = SPRUCE_REPORT_TEMPLATE.render(
             BASE_PATH=fname_helper.base_html_path(args.building_locally),
             stream_config_json_wg=fname_helper.stream_config_json_path(
                 "wg", full_path=False),
-            table_wgstream_rates=table_wgstream_rates)
+            table_wgstream_rates=table_main_stream_rates)
+        all_results = SPRUCING_ALL_RESULTS.render(
+            BASE_PATH=fname_helper.base_html_path(args.building_locally),
+            line_descr=fname_helper.line_descr_path(full_path=False),
+            rate_table_split_by_wg_stream=fname_helper.
+            final_rate_table_all_lines_split_by_stream_path(
+                "wg", full_path=False))
+        lines_per_wg = HLT2_OR_SPRUCING_LINES_PER_WG.render()
+        dst_data_hist = HLT2_OR_SPRUCING_DST_DATA_HIST.render()
+        dst_bw_hist = HLT2_OR_SPRUCING_DST_BW_HIST.render()
+
+    elif args.process == 'hlt1':
+        template = HLT1_REPORT_TEMPLATE.render(
+            BASE_PATH=fname_helper.base_html_path(args.building_locally),
+            stream_config_json_wg=fname_helper.stream_config_json_path(
+                "streamless", full_path=False),
+            table_streamless_rates=table_main_stream_rates)
+        all_results = HLT1_ALL_RESULTS.render(
+            BASE_PATH=fname_helper.base_html_path(args.building_locally))
+        lines_per_wg = HLT1_LINES_PER_WG.render()
+        dst_data_hist = HLT1_DST_DATA_HIST.render()
+        dst_bw_hist = HLT1_DST_BW_HIST.render()
 
     with open(fname_helper.html_page_outputs_path("index.html"),
               "w") as html_file:
         html = REPORT_TEMPLATE.render(
             SCRIPTPATH=args.script_path,
             BASE_PATH=fname_helper.base_html_path(args.building_locally),
-            HLT2_OR_SPRUCE_TEMPLATE=hlt2_or_spruce_template,
+            TEMPLATE=template,
+            ALL_RESULTS=all_results,
+            LINES_PER_WG=lines_per_wg,
+            DST_DATA_HIST=dst_data_hist,
+            DST_BW_HIST=dst_bw_hist,
             INPUT_CONFIG_PATH=os.path.expandvars(args.input_config),
             INPUT_RATE=input_info['input_rate'],
             INPUT_NU=input_info['nu'],
             INPUT_VELO_RADIUS=input_info['velo_radial_opening'],
             EXIT_CODE_SENTENCE=exit_code_sentence,
-            EXIT_CODE_COLOUR=exit_code_col,
-            line_descr=fname_helper.line_descr_path(full_path=False),
-            rate_table_split_by_prod_stream=fname_helper.
-            final_rate_table_all_lines_split_by_stream_path(
-                "production", full_path=False),
-            rate_table_split_by_wg_stream=fname_helper.
-            final_rate_table_all_lines_split_by_stream_path(
-                "wg", full_path=False))
+            EXIT_CODE_COLOUR=exit_code_col)
         html_file.write(html)
 
     with open(fname_helper.html_page_outputs_path("other_lines.html"),
@@ -588,34 +666,64 @@ if __name__ == '__main__':
                   "r") as rate_table:
             html_file.write(rate_table.read())
 
-    stream_configs = ["production", "wg"] if args.process == "hlt2" else ["wg"]
-    with open(
-            fname_helper.html_page_outputs_path("similarities_jaccards.html"),
-            "w") as html_file:
-        for stream_config in stream_configs:
-            html_file.write(f"""
+    stream_configs = {
+        "hlt1": ["streamless"],
+        "hlt2": ["production", "wg"],
+        "spruce": ["wg"]
+    }[args.process]
+
+    if args.process != "hlt1":
+        with open(
+                fname_helper.html_page_outputs_path(
+                    "similarity_matrices.html"), "w") as html_file:
+            html_file.write("""
                 <p>
-                    The Jaccard similarity matrix (fractional overlap) of the {stream_config} streams is:
+                    The overlap between two streams, A and B, w.r.t. one of the streams, A, is computed as |A n B| / |A|.
+                    It shows how many of the events in stream A are also covered by another stream B. <br>
+                    The columns in the overlap matrices are target streams (A) and the rows are comparison streams (B),
+                    i.e. the numbers correspond to overlaps w.r.t. the column streams. <br>
                 </p>
                 """)
+            for stream_config in stream_configs:
+                html_file.write(f"""
+                    <p>
+                        The overlap matrix of the {stream_config} streams is:
+                    </p>
+                    """)
+                with open(
+                        fname_helper.overlap_matrix_path(stream_config),
+                        "r") as overlap:
+                    html_file.write(overlap.read())
+            html_file.write("""
+                <p>
+                    The Jaccard index between two streams, A and B, is computed as |A n B| / |A u B|.
+                    It shows how similar the two streams are and is useful in bandwidth division. <br>
+                </p>
+                """)
+            for stream_config in stream_configs:
+                html_file.write(f"""
+                    <p>
+                        The Jaccard similarity matrix of the {stream_config} streams is:
+                    </p>
+                    """)
             with open(
                     fname_helper.jaccard_similarities_path(stream_config),
                     "r") as jaccard:
                 html_file.write(jaccard.read())
 
-    with open(
-            fname_helper.html_page_outputs_path("rates_streaming.html"),
-            "w") as html_file:
-        for stream_config in stream_configs:
-            html_file.write(f"""
-                <p>
-                   The rates, event sizes and bandwidths of the {stream_config} streams are:
-                </p>
-                """)
-            with open(
-                    fname_helper.final_rate_table_all_streams_path(
-                        stream_config), "r") as rate_html:
-                html_file.write(rate_html.read())
+        with open(
+                fname_helper.html_page_outputs_path("rates_streaming.html"),
+                "w") as html_file:
+            for stream_config in stream_configs:
+                html_file.write(f"""
+                    <p>
+                    The rates, event sizes and bandwidths of the {stream_config} streams are:
+                    </p>
+                    """)
+                with open(
+                        fname_helper.final_rate_table_all_streams_path(
+                            stream_config), "r") as rate_html:
+                    html_file.write(rate_html.read())
 
     with open(fname_helper.html_page_outputs_path("message.txt"),
               "w") as message:
diff --git a/python/MooreTests/run_bandwidth_test_jobs.py b/python/MooreTests/run_bandwidth_test_jobs.py
index 5151e35a7dd922e74d732808e8a0cb0491ca8605..9950548316a111ee273f918e80df91d2bcf61e6b 100644
--- a/python/MooreTests/run_bandwidth_test_jobs.py
+++ b/python/MooreTests/run_bandwidth_test_jobs.py
@@ -27,6 +27,7 @@ import tempfile
 import atexit
 import shutil
 import yaml
+from datetime import datetime
 
 # Default cache dir is the current working directory as this is most convenient for the machine
 # that the test runs on periodically. It assumes the working directory is not cleaned up often,
@@ -36,9 +37,6 @@ DEFAULT_CACHE_DIRS = {'default': ['.']}
 # prefer XDG_RUNTIME_DIR which should be on tmpfs
 FALLBACK_CACHE_DIR = os.getenv('XDG_RUNTIME_DIR', tempfile.gettempdir())
 
-# Limit size of output log if many options files
-MAX_NFILES_TO_PRINT_TO_LOG = 10
-
 
 def default_cache_dirs():
     hostname = socket.getfqdn()
@@ -170,7 +168,6 @@ if __name__ == '__main__':
     job_inputs = [
         inputs_fns
     ]  # This is a list to allow for possible NUMA extension: see discussion on !316.
-    logging.info(inputs_fns[:MAX_NFILES_TO_PRINT_TO_LOG])
 
     # Set up local directories where inputs are cached
     if args.download_input_files:
@@ -190,22 +187,25 @@ if __name__ == '__main__':
 
         # Now download files
         for i, inputs in enumerate(job_inputs):
-            logging.info(
-                f'Downloading input files {inputs[:MAX_NFILES_TO_PRINT_TO_LOG]}'
-            )
             if all(is_remote(url) for url in inputs):
                 from Moore.qmtest.context import download_mdf_inputs_locally
                 # download_mdf_inputs_locally only downloads if files
                 # are not already available locally on the machine
+                before_copy = datetime.now()
+                logging.info(
+                    f'Downloading inputs for bandwidth job to {args.cache_dirs[i]}'
+                )
                 logging.info(
-                    'Downloading inputs for bandwidth job to {}'.format(
-                        args.cache_dirs[i]))
+                    f'There are {len(inputs)} input files: [{inputs[0]} ' +
+                    (']' if len(inputs) < 2 else f'{inputs[1]}, ... ]'))
                 kB_to_GB = 1e3
                 job_inputs[i] = download_mdf_inputs_locally(
                     inputs,
                     args.cache_dirs[i],
                     max_size=args.avg_evt_size * kB_to_GB * args.events)
-                logging.info(inputs)
+                logging.info(
+                    f"Finished file downloads. This took: {datetime.now() - before_copy}"
+                )
             elif any(is_remote(url) for url in inputs_fns):
                 parser.error('inputs must either be all xrootd or all local')
             else:
diff --git a/python/PRConfig/TestFileDB.py b/python/PRConfig/TestFileDB.py
index 6eeb492bade43e213ca213bace95a859b0936a0d..0cd4bd639e9e4d7bd6ef029144aae8f0b55b74ca 100644
--- a/python/PRConfig/TestFileDB.py
+++ b/python/PRConfig/TestFileDB.py
@@ -10403,6 +10403,263 @@ testfiles(
      'Around 10k events available.'),
     test_file_db=test_file_db)
 
+testfiles(
+    myname='upgrade-202305-PbPb-EPOS-b-8_22-VELO-10mm-digi',
+    filenames=[
+        'root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/2023_PbPb_MB/'
+        'model{0}/Gausstest_MB.xdigi'.format(i) for i in range(1, 100)
+    ],
+    qualifiers={
+        'Author': 'Chenxi Gu',
+        'DataType': 'Upgrade',
+        'Format': 'ROOT',
+        'Simulation': True,
+        'CondDB': 'sim-20230322-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Private sample using EPOS MB with 10 mm VELO aperture '
+             'with the impact parameter b ranging '
+             'between 8-22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-202305-PbPb-EPOS-b-8_22-VELO-10mm-mdf',
+    filenames=[
+        'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/2023_PbPb_MB/dump/mdf_sim-20230322-vc-md100/dumped_file.mdf'
+    ],
+    qualifiers={
+        'Author': 'Chenxi Gu',
+        'DataType': 'Upgrade',
+        'Format': 'MDF',
+        'Simulation': True,
+        'CondDB': 'sim-20230322-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Private sample using EPOS MB with 10 mm VELO aperture '
+             'with the impact parameter b ranging '
+             'between 8-22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-digi',
+    filenames=[
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000012_1.xdigi",
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000016_1.xdigi",
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000020_1.xdigi",
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000027_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000001_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000002_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000010_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000022_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000009_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000024_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000025_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000003_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000007_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000014_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000028_1.xdigi",
+        "root://x509up_u74587@xrootd.pic.es//pnfs/pic.es/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000019_1.xdigi",
+        "root://x509up_u74587@xrootd.pic.es//pnfs/pic.es/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000030_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000004_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000005_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000006_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000008_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000011_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000015_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000017_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000018_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000021_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000023_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000029_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000032_1.xdigi",
+        "root://x509up_u74587@xrootd.grid.surfsara.nl//pnfs/grid.sara.nl/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000013_1.xdigi",
+        "root://x509up_u74587@xrootd.grid.surfsara.nl//pnfs/grid.sara.nl/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000026_1.xdigi",
+        "root://x509up_u74587@xrootd.grid.surfsara.nl//pnfs/grid.sara.nl/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197804/0000/00197804_00000031_1.xdigi",
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'ROOT',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf',
+    filenames=[
+        'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf/dump/mdf_sim-20230626-vc-md100/dumped_file.mdf'
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'MDF',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-d0-digi',
+    filenames=[
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000003_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000001_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000002_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000004_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000005_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000006_1.xdigi",
+        "root://x509up_u74587@lhcbxrootd-kit.gridka.de//pnfs/gridka.de/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000009_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000007_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197808/0000/00197808_00000008_1.xdigi"
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'ROOT',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-d0-mdf',
+    filenames=[
+        'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-d0-mdf/dump/mdf_sim-20230626-vc-md100/dumped_file.mdf'
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'MDF',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-jpsi-digi',
+    filenames=[
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000004_1.xdigi",
+        "root://x509up_u74587@eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000007_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000001_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000002_1.xdigi",
+        "root://xrootd-lhcb.cr.cnaf.infn.it:1094//storage/gpfs_lhcb/disk/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000003_1.xdigi",
+        "root://x509up_u74587@ccxrootdlhcb.in2p3.fr//pnfs/in2p3.fr/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000008_1.xdigi",
+        "root://x509up_u74587@xrootd.pic.es//pnfs/pic.es/data/lhcb/LHCb-Disk/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000006_1.xdigi",
+        "root://xrootd.echo.stfc.ac.uk//lhcb:prod/lhcb/MC/Dev/XDIGI/00197810/0000/00197810_00000005_1.xdigi",
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'ROOT',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-jpsi-mdf',
+    filenames=[
+        'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-jpsi-mdf/dump/mdf_sim-20230626-vc-md100/dumped_file.mdf'
+    ],
+    qualifiers={
+        'Author': 'Benjamin Audurier',
+        'DataType': 'Upgrade',
+        'Format': 'MDF',
+        'Simulation': True,
+        'CondDB': 'sim-20230626-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Official sample using EPOS MB with a 23.5 mm VELO aperture '
+             'and the impact parameter b ranging '
+             'between 8 and 22 fm (30%-100% centrality).'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-digi',
+    filenames=[
+        'root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/2023_PbAr_MB/'
+        'job0{0}/Boole-30000000-1000ev-20230601-Extended.digi'.format(i)
+        for i in range(1, 32)
+    ],
+    qualifiers={
+        'Author': 'Chenxi Gu',
+        'DataType': 'Upgrade',
+        'Format': 'ROOT',
+        'Simulation': True,
+        'CondDB': 'sim-20230322-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Private sample using EPOS MB with a 10 mm VELO aperture '
+             'and the SMOG2 configuration.'),
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-mdf',
+    filenames=[
+        'mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/IonPhysics/Simulations/2023_PbAr_MB/dump/mdf_sim-20230322-vc-md100/dumped_file.mdf'
+    ],
+    qualifiers={
+        'Author': 'Chenxi Gu',
+        'DataType': 'Upgrade',
+        'Format': 'MDF',
+        'Simulation': True,
+        'CondDB': 'sim-20230322-vc-md100',
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+        'DDDB': 'dddb-20230313',
+        'Date': '2023-06-01 00:00:00'
+    },
+    comment=('Private sample using EPOS MB with a 10 mm VELO aperture '
+             'and the SMOG2 configuration.'),
+    test_file_db=test_file_db)
+
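(Each sample above is registered twice: as the XDIGI files ('Format': 'ROOT') and as a single dumped file whose mdf: prefix marks raw-format input; the throughput scripts further below consume the -mdf keys. A hedged sketch of resolving one flavour from the other, assuming test_file_db is a dict keyed by myname whose entries expose a filenames attribute, as the testfiles(...) calls suggest:

from PRConfig.TestFileDB import test_file_db  # assumes PRConfig is importable

digi_key = 'upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-digi'
mdf_key = digi_key.replace('-digi', '-mdf')  # naming convention used above

files = test_file_db[mdf_key].filenames      # assumed attribute
assert files[0].startswith('mdf:')           # raw-format (MDF) input

)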
 ### auto created ###
 testfiles(
     myname='genFSR_upgrade_ldst',
@@ -13186,3 +13443,67 @@ testfiles(
     comment=
     'Hlt2 Full stream output as of August 2023, used as inputs for Sprucing tests, input rate around 124 kHz',
     test_file_db=test_file_db)
+
+testfiles(
+    myname="Gauss-13104012-10ev-20230802-trackers+magnet",
+    filenames=[
+        "root://eoslhcb.cern.ch//eos/lhcb/cern-swtest/lhcb/swtest/Gauss-13104012-10ev-20230802-trackers+magnet/Gauss-13104012-10ev-20230802-trackers+magnet.sim",
+    ],
+    qualifiers={
+        "Author": "Hangyi Wu",
+        "Format": "ROOT",
+        "DataType": "Upgrade",
+        "Date": "2023-08-02",
+        "Simulation": True,
+        "DDDB": "upgrade/UTv4r2-newUTID",
+        "CondDB": "upgrade/UTv4r2-newUTID",
+        "GeometryVersion": "trunk",
+        "ConditionsVersion": "master",
+    },
+    comment=
+    "Example MC file, generated with Detector!395 and event type 13104012, used for UT after fixing stave staggerings in the geometry. ",
+    test_file_db=test_file_db,
+)
+
+testfiles(
+    myname='2023_raw_hlt1_269939',
+    filenames=[
+        "mdf:root://eoslhcb.cern.ch//eos/lhcb/cern-swtest/lhcb/swtest/2023_raw_hlt1_269939/Run_0000269939_20230711-194825-096_PLEB01_0001.mdf"
+    ],
+    qualifiers={
+        'Author': 'Rosen Matev',
+        'Format': 'MDF',
+        'DataType': 'Upgrade',
+        'Date': '2023-10-13',
+        'Simulation': False,
+        "GeometryVersion": "run3/trunk",
+        "ConditionsVersion": "master",
+        "DDDB": "master",
+        "CondDB": "master",
+    },
+    comment='Real data selected by HLT1 from run 269939.',
+    test_file_db=test_file_db)
+
+testfiles(
+    myname='upgrade-minbias-hlt2-full-output-Dec2023',
+    filenames=[
+        "mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp1/hlt2_full_stream_Dec2023/MagDown/hlt2_full_stream_{0}.mdf"
+        .format(i) for i in list(range(35)) + list(range(36, 240))
+    ] + [
+        "mdf:root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp1/hlt2_full_stream_Dec2023/MagUp/hlt2_full_stream_{0}.mdf"
+        .format(i) for i in list(range(117)) + list(range(118, 135))
+    ],
+    qualifiers={
+        'Author': 'Shunan Zhang',
+        'Format': 'MDF',
+        'DataType': 'Upgrade',
+        'Date': '2023-12-01',
+        'Simulation': True,
+        "CondDB": "sim-20171127-vc-md100",
+        "GeometryVersion": "run3/trunk",
+        "ConditionsVersion": "master",
+        "DDDB": "dddb-20171126",
+    },
+    comment=
+    'Hlt2 Full stream output as of December 2023, used as inputs for Sprucing tests, HLT2 output rate around 128 kHz',
+    test_file_db=test_file_db)
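(The two comprehensions above stitch ranges together so that MagDown skips file index 35 and MagUp skips 117; that these correspond to missing or unusable files is an assumption. A quick standalone check of the resulting counts:

magdown = list(range(35)) + list(range(36, 240))  # 0..239 without 35
magup = list(range(117)) + list(range(118, 135))  # 0..134 without 117
assert 35 not in magdown and 117 not in magup
print(len(magdown), len(magup))  # 239 134 -> 373 input files in total

)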
diff --git a/python/PRConfig/bandwidth_helpers.py b/python/PRConfig/bandwidth_helpers.py
index 6e143bd211bb8ecc7607d5a33dcb4d9e97033360..537d4d17114a2fffeef06d13cece6a65fbe85103 100644
--- a/python/PRConfig/bandwidth_helpers.py
+++ b/python/PRConfig/bandwidth_helpers.py
@@ -76,6 +76,12 @@ class FileNameHelper(object):
             self._join(self.process, stream_config,
                        "jaccard_similarity_matrix") + ".html")
 
+    def overlap_matrix_path(self, stream_config):
+        return os.path.join(
+            self.base_dir, self.output_subdir,
+            self._join(self.process, stream_config, "overlap_matrix") +
+            ".html")
+
     def event_no_fname(self, stream_config, stream):
         return os.path.join(
             self.base_dir, self.output_subdir,
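(The new overlap_matrix_path follows the same convention as the jaccard-similarity helper above it: an HTML file under base_dir/output_subdir whose stem joins the process, the stream config and the "overlap_matrix" suffix. A rough standalone sketch of that convention; the '__' separator, directory names and constructor signature are assumptions, not taken from the real _join:

import os

class FileNameHelperSketch:
    # simplified stand-in for PRConfig.bandwidth_helpers.FileNameHelper
    base_dir = 'tmp'          # assumed; the scripts below create tmp/Output
    output_subdir = 'Output'

    def __init__(self, process):
        self.process = process

    def _join(self, *parts):
        return '__'.join(parts)  # assumed separator

    def overlap_matrix_path(self, stream_config):
        return os.path.join(
            self.base_dir, self.output_subdir,
            self._join(self.process, stream_config, 'overlap_matrix') + '.html')

print(FileNameHelperSketch('hlt2').overlap_matrix_path('production'))
# tmp/Output/hlt2__production__overlap_matrix.html

)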
diff --git a/scripts/benchmark-scripts/Moore_bandwidth_test.sh b/scripts/benchmark-scripts/Moore_bandwidth_test.sh
index b316b454e9769c1f52e3affe84eb9710144c2028..9c30db3bf2a1b04c868f191b1d79d540e9e9ce2b 100755
--- a/scripts/benchmark-scripts/Moore_bandwidth_test.sh
+++ b/scripts/benchmark-scripts/Moore_bandwidth_test.sh
@@ -21,9 +21,10 @@ Usage: Moore/run /path/to/Moore_bandwidth_test.sh [options] 2>&1 | tee <path-to-
 
        Expected to be called by e.g. Moore_hlt2_bandwidth.sh for the periodic LHCbPR tests.
 
---process: "hlt2" or "spruce"
---input-data: "nominal" or "2023". "2023" not currently available for process == spruce
--h|--help: print this message and exit
+--process: "hlt1", "hlt2" or "spruce".
+--input-data: "nominal" or "2023".
+    "2023" not currently available for process == spruce or hlt1.
+-h|--help: print this message and exit.
 
 EOF
 )
@@ -96,11 +97,30 @@ mkdir -p tmp/Output
 mkdir -p tmp/Output/Inter
 
 # Set configuration variables and check configuration makes sense
-MOORE_THREADS=$(nproc)
-N_EVTS=1e5
-TEST_PATH_PREFIX='$HLT2CONFROOT/tests/options/bandwidth/'
+# TODO: remove the process dependence of N_EVTS, see: https://gitlab.cern.ch/lhcb-datapkg/PRConfig/-/issues/12
 case $PROCESS in
+    hlt1)
+    MOORE_THREADS=1
+    TEST_PATH_PREFIX='$HLT1CONFROOT/tests/options/bandwidth/'
+    EVENT_SIZE_UPPER_LIMIT=200
+    GAUDIRUN_INPUT_PROCESS="Hlt1"
+    STREAM_CONFIGS=( "streamless" )
+    case $INPUTDATA in
+        nominal)
+        N_EVTS=1e4
+        CONFIG_FILE="${TEST_PATH_PREFIX}hlt1_bandwidth_input.yaml"
+        EXTRA_OPTS="-e 1 $MOOREROOT/options/muon_geometry_v2.py" # requires EvtSlots==1 (-e 1) due to single-threading on DIGIs
+        ;;
+        *)
+        echo "ERROR: --input-data must be \"nominal\" for process \"$PROCESS\""
+        exit 1
+        ;;
+    esac
+    ;;
     hlt2)
+    N_EVTS=1e5
+    MOORE_THREADS=${LBN_BUILD_JOBS:-1} # Default to single-threaded
+    TEST_PATH_PREFIX='$HLT2CONFROOT/tests/options/bandwidth/'
     EVENT_SIZE_UPPER_LIMIT=200
     GAUDIRUN_INPUT_PROCESS="Hlt2"
     STREAM_CONFIGS=( "wg" "production" )
@@ -120,6 +140,9 @@ case $PROCESS in
     esac
     ;;
     spruce)
+    N_EVTS=1e5
+    MOORE_THREADS=${LBN_BUILD_JOBS:-1} # Default to single-threaded
+    TEST_PATH_PREFIX='$HLT2CONFROOT/tests/options/bandwidth/'
     EVENT_SIZE_UPPER_LIMIT=300
     GAUDIRUN_INPUT_PROCESS="Spruce"
     STREAM_CONFIGS=( "wg" )
@@ -135,7 +158,7 @@ case $PROCESS in
     esac
     ;;
     *)
-    echo "Unrecognised process \"$PROCESS\". It must be \"hlt2\" or \"spruce\"."
+    echo "Unrecognised process \"$PROCESS\". It must be \"hlt1\" or \"hlt2\" or \"spruce\"."
     exit 1
     ;;
 esac
@@ -151,9 +174,14 @@ for STREAM_CONFIG in "${STREAM_CONFIGS[@]}"; do
 done
 
 # 2. Compute line descriptives: persist reco, extra output
-echo 'Obtaining line descriptives'
-time gaudirun.py --option "from Moore import options;options.input_process=\"${GAUDIRUN_INPUT_PROCESS}\"" $PRCONFIGROOT/python/MooreTests/line-descriptives.py
-STORE_ERR_CODE
+if [ "$PROCESS" = "hlt1" ]
+then
+    echo 'Skipping line descriptives as $PROCESS = "hlt1"'
+else
+    echo 'Obtaining line descriptives'
+    time gaudirun.py --option "from Moore import options;options.input_process=\"${GAUDIRUN_INPUT_PROCESS}\"" $PRCONFIGROOT/python/MooreTests/line-descriptives.py
+    STORE_ERR_CODE
+fi
 
 for STREAM_CONFIG in "${STREAM_CONFIGS[@]}"; do
     # 3. Work out what the streams are from the config JSON; needed for later steps
@@ -163,14 +191,19 @@ for STREAM_CONFIG in "${STREAM_CONFIGS[@]}"; do
     echo "Found ${STREAM_CONFIG} streams: ${STREAMS[@]}"
 
     # 4. Compute similarity matrices between streams by comparing event numbers
-    echo "Obtaining similarity matrix for ${STREAM_CONFIG}-stream configuration"
-    for stream in "${STREAMS[@]}"; do
-        echo "Stream name: ${stream}"
-        time python $PRCONFIGROOT/python/MooreTests/list_event_numbers.py -p $PROCESS -n $N_EVTS --stream-config $STREAM_CONFIG --stream $stream
+    if [ "$PROCESS" = "hlt1" ]
+    then
+        echo 'Skipping similarity matrix per stream as $PROCESS = "hlt1"'
+    else
+        echo "Obtaining similarity matrix for ${STREAM_CONFIG}-stream configuration"
+        for stream in "${STREAMS[@]}"; do
+            echo "Stream name: ${stream}"
+            time python $PRCONFIGROOT/python/MooreTests/list_event_numbers.py -p $PROCESS -n $N_EVTS --stream-config $STREAM_CONFIG --stream $stream
+            STORE_ERR_CODE
+        done
+        time python $PRCONFIGROOT/python/MooreTests/calculate_stream_overlap.py -p $PROCESS --stream-config $STREAM_CONFIG --streams ${STREAMS[@]}
         STORE_ERR_CODE
-    done
-    time python $PRCONFIGROOT/python/MooreTests/calculate_stream_overlap.py -p $PROCESS --stream-config $STREAM_CONFIG --streams ${STREAMS[@]}
-    STORE_ERR_CODE
+    fi
 
     # 5. Computing rates per stream as well as per line (tables split by stream)
     echo "Obtaining rates and bandwidth for ${STREAM_CONFIG}-stream configuration"
@@ -186,6 +219,14 @@ echo 'Combining all rate and bandwidth tables'
 time python $PRCONFIGROOT/python/MooreTests/combine_rate_output.py --process $PROCESS
 STORE_ERR_CODE
 
-# 7. Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+# 7. Test the feasibility of a chained HLT2->Sprucing test
+if [ "$PROCESS" = "spruce" ] && [ "$INPUTDATA" = "nominal" ]
+then
+    echo 'Testing download of Hlt2 output for future chained HLT2->Sprucing tests'
+    time python -m MooreTests.download_hlt2_output
+fi
+STORE_ERR_CODE
+
+# 8. Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
 echo 'Making plots and HTML pages'
 time python -m MooreTests.make_bandwidth_test_page -p $PROCESS -c $CONFIG_FILE -s $SCRIPT_PATH -e $ERR_CODE
\ No newline at end of file
diff --git a/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh
new file mode 100755
index 0000000000000000000000000000000000000000..034deaae4ea9c9ff38d5cd1b6199ca462616f4e2
--- /dev/null
+++ b/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+###############################################################################
+# (c) Copyright 2022-2023 CERN for the benefit of the LHCb Collaboration      #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+
+# this path ends up printed on the BW test page; export so it can be picked up in the child process
+export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
+
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt1 --input-data nominal
+
+# force 0 return code so the handler runs even for failed jobs
+exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2022.sh b/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_25000_without_UT.sh
old mode 100755
new mode 100644
similarity index 85%
rename from scripts/benchmark-scripts/Moore_hlt2_lead_lead_2022.sh
rename to scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_25000_without_UT.sh
index 7b07eb0c39a9054950079f613802adaa45ec32dc..c8ab364ed8cea64eedc3cba7fc9f995b54b16940
--- a/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2022.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_25000_without_UT.sh
@@ -20,12 +20,12 @@ elif [[ $(hostname --fqdn) == "lbquantaperf02.cern.ch" ]]; then
 fi
 
 \time python -m MooreTests.run_throughput_jobs -n 300 "${cache_dirs[@]}" \
-  --test-file-db-key=Upgrade_PbPb_minbias_mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf_without_UT_gec_60000.py'
+  --test-file-db-key=upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_without_UT_gec_25000.py'
 
 \time python -m MooreTests.run_throughput_jobs -n 300 -j 1 --profile "${cache_dirs[@]}" \
-  --test-file-db-key=Upgrade_PbPb_minbias_mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf_without_UT_gec_60000.py'
+  --test-file-db-key=upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_without_UT_gec_25000.py'
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2022.sh b/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_40000_with_UT.sh
similarity index 86%
rename from scripts/benchmark-scripts/Moore_hlt2_lead_argon_2022.sh
rename to scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_40000_with_UT.sh
index 37bf63abb6948ef73f66641e62ad3b0c535176dc..15b9708f892136afc2d633767b6dd7ab7cb03229 100644
--- a/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2022.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_lead_argon_2023_gec_40000_with_UT.sh
@@ -20,12 +20,12 @@ elif [[ $(hostname --fqdn) == "lbquantaperf02.cern.ch" ]]; then
 fi
 
 \time python -m MooreTests.run_throughput_jobs -n 300 "${cache_dirs[@]}" \
-  --test-file-db-key=Upgrade_PbAr_D0Lc-mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf_without_UT_gec_60000.py'
+  --test-file-db-key=upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_gec_40000.py'
 
 \time python -m MooreTests.run_throughput_jobs -n 300 -j 1 --profile "${cache_dirs[@]}" \
-  --test-file-db-key=Upgrade_PbAr_D0Lc-mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf_without_UT_gec_60000.py'
+  --test-file-db-key=upgrade-202305-PbAr-FT-EPOS-b-8_22-VELO-10mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_gec_40000.py'
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_lead_lead_light_reco_pr_kf.sh b/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_25000_without_UT.sh
similarity index 74%
rename from scripts/benchmark-scripts/Moore_hlt2_lead_lead_light_reco_pr_kf.sh
rename to scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_25000_without_UT.sh
index 8adccb7baefe5732c3d509960227789870e57b29..47513958b508f056dbdff31fde5091ab7e700760 100755
--- a/scripts/benchmark-scripts/Moore_hlt2_lead_lead_light_reco_pr_kf.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_25000_without_UT.sh
@@ -20,12 +20,12 @@ elif [[ $(hostname --fqdn) == "lbquantaperf02.cern.ch" ]]; then
 fi
 
 \time python -m MooreTests.run_throughput_jobs -n 300 "${cache_dirs[@]}" \
-  --test-file-db-key=upgrade-202004-PbPb-EPOS-b-6_14fm-mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf.py'
+  --test-file-db-key=upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_without_UT_gec_25000.py'
 
 \time python -m MooreTests.run_throughput_jobs -n 300 -j 1 --profile "${cache_dirs[@]}" \
-  --test-file-db-key=upgrade-202004-PbPb-EPOS-b-6_14fm-mdf --avg-event-size=500000 \
-  '$MOOREROOT/options/ft_decoding_v6.py' '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_light_reco_pr_kf.py'
+  --test-file-db-key=upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_without_UT_gec_25000.py'
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_40000_with_UT.sh b/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_40000_with_UT.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fd9fe027f25dfbacaf5d5d1e56a04c3f84021272
--- /dev/null
+++ b/scripts/benchmark-scripts/Moore_hlt2_lead_lead_2023_gec_40000_with_UT.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+###############################################################################
+# (c) Copyright 2000-2022 CERN for the benefit of the LHCb Collaboration      #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+
+$(dirname $0)/cleanup.sh
+
+if [[ $(hostname --fqdn) == "lbhltperf01.cern.ch" ]]; then
+  # this test runs on lbhltperf01, where there is more space for HLT2 input
+  cache_dirs=(--cache-dirs "/scratch/z5/data/Hlt2Throughput")
+elif [[ $(hostname --fqdn) == "lbquantaperf02.cern.ch" ]]; then
+  cache_dirs=(--cache-dirs "/localdisk1/Hlt2Throughput")
+fi
+
+\time python -m MooreTests.run_throughput_jobs -n 300 "${cache_dirs[@]}" \
+  --test-file-db-key=upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_gec_40000.py'
+
+\time python -m MooreTests.run_throughput_jobs -n 300 -j 1 --profile "${cache_dirs[@]}" \
+  --test-file-db-key=upgrade-official-PbPb-EPOS-b-8_22-VELO-23.5mm-mdf --avg-event-size=500000 \
+  '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$RECOCONFROOT/options/hlt2_lead_lead_fast_reco_pr_kf_gec_40000.py'
+
+# force 0 return code so the handler runs even for failed jobs
+exit 0
diff --git a/scripts/benchmark-scripts/Moore_spruce_all_lines.sh b/scripts/benchmark-scripts/Moore_spruce_all_lines.sh
index 8987962648aff4958a5b6f6f971f0f6ae2e6e41b..42f619ebd07d3b1be7d6e49b4b6ac9c36f524d4d 100755
--- a/scripts/benchmark-scripts/Moore_spruce_all_lines.sh
+++ b/scripts/benchmark-scripts/Moore_spruce_all_lines.sh
@@ -22,9 +22,9 @@ fi
 export THOR_JIT_N_SPLITS=8
 export THOR_JIT_N_JOBS=8
 
-python -m MooreTests.run_throughput_jobs -n=2e4 --avg-event-size=300000 --test-file-db-key=upgrade-minbias-hlt2-full-output-Aug2023 '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$HLT2CONFROOT/options/sprucing/spruce_all_lines.py' "${cache_dirs[@]}"
+python -m MooreTests.run_throughput_jobs -n=2e4 --avg-event-size=300000 --test-file-db-key=upgrade-minbias-hlt2-full-output-Dec2023 '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$HLT2CONFROOT/options/sprucing/spruce_all_lines.py' "${cache_dirs[@]}"
 
-python -m MooreTests.run_throughput_jobs -n=-1 -j 1 --profile --avg-event-size=300000 --test-file-db-key=upgrade-minbias-hlt2-full-output-Aug2023 '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$HLT2CONFROOT/options/sprucing/spruce_all_lines.py' "${cache_dirs[@]}"
+python -m MooreTests.run_throughput_jobs -n=-1 -j 1 --profile --avg-event-size=300000 --test-file-db-key=upgrade-minbias-hlt2-full-output-Dec2023 '$MOOREROOT/tests/options/disable-git-metadata-write.py' '$HLT2CONFROOT/options/sprucing/spruce_all_lines.py' "${cache_dirs[@]}"
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0