From fefb5c040a393db331007be0556ee93fdb0e7f72 Mon Sep 17 00:00:00 2001
From: Ross John Hunter <ross.john.hunter@cern.ch>
Date: Fri, 19 Apr 2024 16:45:28 +0200
Subject: [PATCH] [RTA/DPA BW tests]: Add Sprucing jobs for Turbo and TurCal
 (and the many generalisations/simplifications this necessitated)
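
Generalise the bandwidth-test machinery so that Sprucing jobs can run on
the Turbo ("wgpass") and TurCal stream configurations as well as the Full
stream ("wg"). The new stream-config choices are threaded through the
rate, overlap and filesize scripts; FileNameHelper paths become
stream-config aware; TISTOS options are generated only for sprucing of
the Full stream; and the bandwidth-test web pages are rendered per
(process, stream_config) pair.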

---
 python/MooreTests/calculate_stream_overlap.py |    2 +-
 python/MooreTests/combine_rate_output.py      |    9 +-
 python/MooreTests/extract_filesizes.py        |    4 +-
 ...ta.py => generate_spruce_input_configs.py} |   63 +-
 .../MooreTests/generate_tistos_option_file.py |   49 +-
 python/MooreTests/line-and-stream-rates.py    |   33 +-
 python/MooreTests/list_event_numbers.py       |  136 --
 python/MooreTests/make_bandwidth_test_page.py | 1124 +++++++++--------
 python/MooreTests/read_event_numbers.py       |  159 +++
 python/MooreTests/run_bandwidth_test_jobs.py  |  133 +-
 python/PRConfig/bandwidth_helpers.py          |  198 +--
 .../benchmark-scripts/Moore_bandwidth_test.sh |  126 +-
 .../benchmark-scripts/Moore_hlt1_bandwidth.sh |   33 +-
 .../Moore_hlt2_and_spruce_bandwidth.sh        |   47 +-
 .../benchmark-scripts/Moore_hlt2_bandwidth.sh |   33 +-
 .../Moore_spruce_bandwidth.sh                 |   34 +-
 .../Moore_spruce_latest_bandwidth.sh          |   39 +-
 17 files changed, 1274 insertions(+), 948 deletions(-)
 rename python/MooreTests/{generate_hlt2_fullstream_metadata.py => generate_spruce_input_configs.py} (61%)
 delete mode 100755 python/MooreTests/list_event_numbers.py
 create mode 100755 python/MooreTests/read_event_numbers.py

diff --git a/python/MooreTests/calculate_stream_overlap.py b/python/MooreTests/calculate_stream_overlap.py
index 777b342e..637cf0cb 100755
--- a/python/MooreTests/calculate_stream_overlap.py
+++ b/python/MooreTests/calculate_stream_overlap.py
@@ -101,7 +101,7 @@ def main():
             '--stream-config',
             type=str,
             help='Choose production or per-WG stream configuration',
-            choices=['production', 'wg'],
+            choices=['production', 'wg', 'wgpass', 'turcal'],
             required=True)
     args = parser.parse_args()
 
diff --git a/python/MooreTests/combine_rate_output.py b/python/MooreTests/combine_rate_output.py
index e7b242af..7137b816 100755
--- a/python/MooreTests/combine_rate_output.py
+++ b/python/MooreTests/combine_rate_output.py
@@ -68,7 +68,8 @@ def rates_all_lines(stream_config: str, fname_helper: FileNameHelper,
     df.columns = _columns_per_line(process).keys()
 
     df = _sorted_df_by_bandwidth(df)
-    df.to_csv(fname_helper.final_rate_table_all_lines_path("csv"))
+    df.to_csv(
+        fname_helper.final_rate_table_all_lines_path(stream_config, "csv"))
 
     def highlight_vals(val, threshold, color='red'):
         return f'background-color: {color}' if val > threshold else ''
@@ -87,7 +88,9 @@ def rates_all_lines(stream_config: str, fname_helper: FileNameHelper,
     html = styler.format(
         '{:.3g}', subset=df.columns[
             df.columns != 'Line']).set_table_attributes("border=1").to_html()
-    with open(fname_helper.final_rate_table_all_lines_path("html"), 'w') as f:
+    with open(
+            fname_helper.final_rate_table_all_lines_path(
+                stream_config, "html"), 'w') as f:
         f.write(html)
 
     return
@@ -254,7 +257,7 @@ if __name__ == "__main__":
     parser.add_argument(
         '--stream-config',
         type=str,
-        choices=['streamless', 'production', 'wg'],
+        choices=['streamless', 'production', 'wg', 'wgpass', 'turcal'],
         required=True)
     args = parser.parse_args()
 
diff --git a/python/MooreTests/extract_filesizes.py b/python/MooreTests/extract_filesizes.py
index b4589232..15089635 100644
--- a/python/MooreTests/extract_filesizes.py
+++ b/python/MooreTests/extract_filesizes.py
@@ -45,7 +45,7 @@ def extract_filesizes(process, stream_config, streams):
                 )
             filesizes[stream]["compressed"] = fsize(fpath + '.zst')
 
-    with open(fname_helper.filesize_path(), 'w') as ofile:
+    with open(fname_helper.filesize_path(stream_config), 'w') as ofile:
         json.dump(filesizes, ofile, indent=2)
 
 
@@ -61,7 +61,7 @@ if __name__ == "__main__":
     parser.add_argument(
         '--stream-config',
         type=str,
-        choices=['streamless', 'production', 'wg'],
+        choices=['streamless', 'production', 'wg', 'wgpass', 'turcal'],
         required=True)
     parser.add_argument('--streams', nargs='+', type=str, required=True)
 
diff --git a/python/MooreTests/generate_hlt2_fullstream_metadata.py b/python/MooreTests/generate_spruce_input_configs.py
similarity index 61%
rename from python/MooreTests/generate_hlt2_fullstream_metadata.py
rename to python/MooreTests/generate_spruce_input_configs.py
index 4cbbccbd..5db383d4 100755
--- a/python/MooreTests/generate_hlt2_fullstream_metadata.py
+++ b/python/MooreTests/generate_spruce_input_configs.py
@@ -1,5 +1,5 @@
 ###############################################################################
-# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration      #
+# (c) Copyright 2023-2024 CERN for the benefit of the LHCb Collaboration      #
 #                                                                             #
 # This software is distributed under the terms of the GNU General Public      #
 # Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
@@ -16,12 +16,9 @@
 import argparse
 import json
 import yaml
-from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml
+from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml, FULL_STREAM_LINES_KEY
 from PRConfig.TestFileDB import test_file_db
 
-STREAM_CONFIG = "production"
-STREAM = "full"
-
 
 def main():
     parser = argparse.ArgumentParser()
@@ -31,6 +28,8 @@ def main():
         type=str,
         required=True,
         help='Path to yaml config file defining the input.')
+    parser.add_argument('--stream-config', type=str, required=True)
+    parser.add_argument('--stream', type=str, required=True)
     args = parser.parse_args()
     fname_helper = FileNameHelper(process="hlt2")
     config = parse_yaml(args.config)
@@ -55,45 +54,57 @@ def main():
         opts["conddb_tag"] = config["conddb_tag"]
 
     ifile = fname_helper.event_no_fname(
-        stream_config=STREAM_CONFIG, stream=STREAM)
+        stream_config=args.stream_config, stream=args.stream)
     with open(ifile, 'r') as f:
         # json = {stream: [evt_numbers]}
-        n_triggered_full = len(json.load(f)['full'])
+        n_triggered = len(json.load(f)[args.stream])
 
-    n_hlt2_input = int(parse_yaml(fname_helper.input_nevts_json())['n_evts'])
+    n_hlt2_input = int(
+        parse_yaml(fname_helper.input_info_json(args.stream_config))['n_evts'])
     input_rate = config['input_rate']
-    opts["input_rate"] = round((input_rate * n_triggered_full / n_hlt2_input),
-                               4)
-    opts["n_evts"] = n_triggered_full
+    opts["input_rate"] = round((input_rate * n_triggered / n_hlt2_input), 4)
+
+    # Load the streaming configuration to get the list of full-stream lines,
+    # which is needed for TISTOS
+    if args.stream == 'full':
+        with open(
+                fname_helper.stream_config_json_path(
+                    args.stream_config, full_path=True), 'r') as f:
+            opts[FULL_STREAM_LINES_KEY] = json.load(f)["full"]
 
     # Now split into outputs for two files - one to allow reading of HLT2 output that lives on eos, and one for reading from local disk
-    # TODO provide only 1 interface to these paths with an arg that tells you whether to look locally or on eos
     # Metadata with 'eos' file paths.
     opts["input_files"] = [
-        fname_helper.mdfdst_prwww_path(
-            stream_config=STREAM_CONFIG, stream=STREAM, ext=".mdf")
+        fname_helper.mdfdst_fname_for_reading(
+            stream_config=args.stream_config,
+            stream=args.stream,
+            ext=".mdf",
+            on_eos=True,
+            full_path=False)
     ]
-    opts["input_manifest_file"] = fname_helper.manifest_prwww_path(
-        STREAM_CONFIG)
-    metadata_config_for_use_in_sprucing_hlt2_output_from_eos_test = fname_helper.metadata_path(
-        stream_config=STREAM_CONFIG, stream=STREAM, for_local_use=False)
-    with open(metadata_config_for_use_in_sprucing_hlt2_output_from_eos_test,
-              'w') as f:
+    opts["input_manifest_file"] = fname_helper.tck(
+        args.stream_config, on_eos=True, full_path=False)
+    spruce_config_to_put_on_eos = fname_helper.config_file_path(
+        stream=args.stream, send_to_eos=True)
+    with open(spruce_config_to_put_on_eos, 'w') as f:
         yaml.dump(opts, f, default_flow_style=False)
 
     # Metadata with 'local' file paths.
     local_opts = opts
     local_opts["input_files"] = [
         fname_helper.mdfdst_fname_for_reading(
-            stream_config=STREAM_CONFIG, stream=STREAM, ext='.mdf')
+            stream_config=args.stream_config,
+            stream=args.stream,
+            ext='.mdf',
+            on_eos=False,
+            full_path=True)
     ]
     local_opts["input_manifest_file"] = fname_helper.tck(
-        stream_config=STREAM_CONFIG)
-    metadata_config_for_use_in_sprucing_output_locally_test = fname_helper.metadata_path(
-        stream_config=STREAM_CONFIG, stream=STREAM, for_local_use=True)
+        stream_config=args.stream_config, on_eos=False, full_path=True)
+    spruce_config_for_local_use = fname_helper.config_file_path(
+        stream=args.stream, send_to_eos=False)
 
-    with open(metadata_config_for_use_in_sprucing_output_locally_test,
-              'w') as f:
+    with open(spruce_config_for_local_use, 'w') as f:
         yaml.dump(local_opts, f, default_flow_style=False)
 
 
diff --git a/python/MooreTests/generate_tistos_option_file.py b/python/MooreTests/generate_tistos_option_file.py
index 122d62f6..1364ed84 100644
--- a/python/MooreTests/generate_tistos_option_file.py
+++ b/python/MooreTests/generate_tistos_option_file.py
@@ -9,34 +9,49 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 import argparse
-import json
-from PRConfig.bandwidth_helpers import FileNameHelper
+from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml, FULL_STREAM_LINES_KEY
+
+
+def full_stream_content(config):
+    # get list of lines from yaml config
+    tistos_content = ''
+    if "fixed_list_of_full_stream_lines" in config.keys():
+        tistos_content += f'from Hlt2Conf.sprucing_settings.fixed_line_configs import {config["fixed_list_of_full_stream_lines"]} as lines_for_TISTOS'
+    elif FULL_STREAM_LINES_KEY in config.keys():
+        tistos_content += f'lines_for_TISTOS = {config[FULL_STREAM_LINES_KEY]}'
+    else:
+        raise ValueError(
+            f"Expected either fixed_list_of_full_stream_lines or {FULL_STREAM_LINES_KEY} in the config file."
+        )
+
+    return [
+        tistos_content,
+        'from Moore.persistence.hlt2_tistos import list_of_full_stream_lines',
+        'list_of_full_stream_lines.global_bind(lines=lines_for_TISTOS)',
+    ]
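+
+# For illustration, the rendered option file (lines joined with newlines) reads:
+#     <an import or a literal list defining lines_for_TISTOS>
+#     from Moore.persistence.hlt2_tistos import list_of_full_stream_lines
+#     list_of_full_stream_lines.global_bind(lines=lines_for_TISTOS)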
 
 
 def main():
+    """
+    Generate the option file for TISTOS for the Spruce job.
+    Read input config: either get a fixed key for a list of lines
+    stored in Moore, or we get a list of lines in the file.
+    """
     parser = argparse.ArgumentParser(description=main.__doc__)
     parser.add_argument(
-        '--stream-config-location',
+        '-c',
+        '--config',
         type=str,
         required=True,
-        help=
-        "Location for stream_config, if empty prompts the usage of fixed_line_configs from Moore"
-    )
-
+        help='Path to yaml config file defining the input.')
+    parser.add_argument('--stream-config', type=str, required=True)
     args = parser.parse_args()
+    config = parse_yaml(args.config)
 
-    hlt2_fnames = FileNameHelper('hlt2')
-    tistos_content = f'with open(\'{args.stream_config_location}\', "r") as f: lines_for_TISTOS=json.load(f)["full"]' if args.stream_config_location else 'from Hlt2Conf.sprucing_settings.fixed_line_configs import lines_for_TISTOS_BW_March2024 as lines_for_TISTOS'
-
-    content = [
-        'import json',
-        tistos_content,
-        'from Moore.persistence.hlt2_tistos import list_of_full_stream_lines',
-        'list_of_full_stream_lines.global_bind(lines=lines_for_TISTOS)',
-    ]
+    content = full_stream_content(config) if args.stream_config == "wg" else []
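+    # The full-stream line list is only needed when sprucing Full-stream ("wg")
+    # output; for "wgpass" and "turcal" an empty option file is written.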
 
     output_fname_helper = FileNameHelper("spruce")
-    ofile = output_fname_helper.tistos_option_file()
+    ofile = output_fname_helper.tistos_option_file(args.stream_config)
     with open(ofile, "w") as f:
         f.write('\n'.join(content))
 
diff --git a/python/MooreTests/line-and-stream-rates.py b/python/MooreTests/line-and-stream-rates.py
index dc515ec9..fca5ec34 100644
--- a/python/MooreTests/line-and-stream-rates.py
+++ b/python/MooreTests/line-and-stream-rates.py
@@ -260,7 +260,8 @@ def rates_per_stream(file_totals: FileBW, stream: str, input_rate: float,
 
 
 def rates_per_wg_intra_stream(wg_bws: dict[str, WGBW], input_rate: float,
-                              output_file_path: str) -> None:
+                              output_file_path: str,
+                              compression_factor: float) -> None:
     # Get inclusive rates/bandwidths of each WG within this stream
 
     n_events = LHCbApp().EvtMax
@@ -269,10 +270,10 @@ def rates_per_wg_intra_stream(wg_bws: dict[str, WGBW], input_rate: float,
         rate = bw_info.n_triggered * input_rate / n_events  # kHz
         wg_bw_infos[wg] = [
             rate,
-            input_rate * (bw_info.raw * B_to_kB) * MBps_to_GBps /
-            n_events,  # bandwidth, GBs
-            input_rate * (bw_info.dst * B_to_kB) * MBps_to_GBps /
-            n_events,  # dst bandwidth GB/s
+            compression_factor * input_rate * (bw_info.raw * B_to_kB) *
+            MBps_to_GBps / n_events,  # bandwidth, GB/s
+            compression_factor * input_rate * (bw_info.dst * B_to_kB) *
+            MBps_to_GBps / n_events,  # dst bandwidth GB/s
         ]
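+    # Worked example (assuming B_to_kB = MBps_to_GBps = 1e-3): 1e9 B of raw
+    # banks across 1e5 events at a 1000 kHz input rate and a compression
+    # factor of 0.6 gives 0.6 * 1000 * 1e6 * 1e-3 / 1e5 = 6 GB/s of bandwidth.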
 
     n_metrics = len(wg_bw_infos['TotalInclusive'])
@@ -307,7 +308,7 @@ if __name__ == '__main__':
         '--stream-config',
         type=str,
         help='Choose production, per-WG or streamless stream configuration',
-        choices=['streamless', 'production', 'wg'],
+        choices=['streamless', 'production', 'wg', 'wgpass', 'turcal'],
         required=True)
     parser.add_argument(
         '--file-type',
@@ -319,18 +320,11 @@ if __name__ == '__main__':
 
     fname_helper = FileNameHelper(args.process)
 
-    n_events = int(parse_yaml(fname_helper.input_nevts_json())['n_evts'])
+    n_events = int(
+        parse_yaml(fname_helper.input_info_json(args.stream_config))['n_evts'])
 
     input_config = parse_yaml(args.config)
 
-    if args.process == "spruce" and args.stream_config != "wg":
-        raise RuntimeError(
-            '"production" and "streamless" stream configs are not defined for sprucing. Please use "wg".'
-        )
-    if args.process == "hlt1" and args.stream_config != "streamless":
-        raise RuntimeError(
-            '"production" and "wg" stream configs are not defined for hlt1. Please use "streamless".'
-        )
     LHCbApp(DataType="Upgrade", Simulation=True, EvtMax=n_events)
     EventSelector().PrintFreq = 10000
     IODataManager(DisablePFNWarning=True)
@@ -349,7 +343,10 @@ if __name__ == '__main__':
     # to read MDF/DST output from Sprucing
     # Hlt1 requires different unpacking than hlt2/sprucing.
     # TODO might be able to absorb into do_unpacking now
-    input_process = args.process.capitalize()
+    if args.stream_config == "wgpass":
+        input_process = "Turbo"
+    else:
+        input_process = args.process.capitalize()
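+    # NB: the "wgpass" config is the Turbo passthrough, so its output is
+    # unpacked with input_process "Turbo" rather than "Spruce".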
     if args.process == "hlt1":
         unpacker = LHCb__UnpackRawEvent(
             "UnpackRawEvent",
@@ -393,7 +390,7 @@ if __name__ == '__main__':
 
     compression_factor = _compression_factor(
         process=args.process,
-        filesize_path=fname_helper.filesize_path(),
+        filesize_path=fname_helper.filesize_path(args.stream_config),
         file_totals=file_totals,
         stream=args.stream)
 
@@ -419,4 +416,4 @@ if __name__ == '__main__':
         rates_per_wg_intra_stream(
             wg_bws_for_hlt2, input_rate,
             fname_helper.tmp_rate_table_intra_stream_path(
-                args.stream_config, args.stream))
+                args.stream_config, args.stream), compression_factor)
diff --git a/python/MooreTests/list_event_numbers.py b/python/MooreTests/list_event_numbers.py
deleted file mode 100755
index c6d9e750..00000000
--- a/python/MooreTests/list_event_numbers.py
+++ /dev/null
@@ -1,136 +0,0 @@
-###############################################################################
-# (c) Copyright 2000-2023 CERN for the benefit of the LHCb Collaboration      #
-#                                                                             #
-# This software is distributed under the terms of the GNU General Public      #
-# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
-#                                                                             #
-# In applying this licence, CERN does not waive the privileges and immunities #
-# granted to it by virtue of its status as an Intergovernmental Organization  #
-# or submit itself to any jurisdiction.                                       #
-###############################################################################
-import argparse
-import json
-import GaudiPython as GP
-from GaudiConf.reading import __unpack_rawevent
-from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
-                           EventSelector, createODIN)
-from GaudiConf import IOHelper
-from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml
-
-
-def _extract_evt_numbers(ifiles,
-                         evtmax,
-                         file_type,
-                         is_sprucing_output,
-                         stream="dummy_stream_name"):
-    """
-        Extract all event numbers saved to a single MDF/DST.
-        stream only required to be the correct stream name if is_sprucing_output
-        this is because the raw event location has the stream name in it if so
-    """
-    # TODO this is a standard setup for the BW test analysis scripts. Share in a header.
-    LHCbApp(
-        DataType="Upgrade",
-        Simulation=True,
-        DDDBtag="dddb-20171126",
-        CondDBtag="sim-20171127-vc-md100",
-        EvtMax=evtmax)
-    EventSelector(PrintFreq=10000)
-    IODataManager(DisablePFNWarning=True)
-
-    raw_event_unpacker_kwargs = dict(bank_types=['ODIN'])
-    if is_sprucing_output:
-        raw_event_unpacker_kwargs["input_process"] = "Spruce"
-        raw_event_unpacker_kwargs["stream"] = stream
-    appMgr = ApplicationMgr(TopAlg=[
-        __unpack_rawevent(**raw_event_unpacker_kwargs),
-        createODIN(ODIN='myODIN')
-    ])
-    IOHelper(file_type).inputFiles(ifiles)
-
-    appMgr = GP.AppMgr()
-    evt = appMgr.evtsvc()
-
-    event_numbers = []
-    # Loop over all events
-    i_evt = 0
-    while i_evt < evtmax:
-
-        # Iterate 1 event in file
-        appMgr.run(1)
-
-        header = evt["/Event/myODIN"]
-        if not header:
-            break  # ran out of events in file
-        event_numbers.append(header.eventNumber())
-        i_evt += 1
-    return event_numbers
-
-
-def input_nevts(input_files, evtmax, file_type):
-    """
-    For an arbitrary set of input_files, returns: min(total number of events, evtmax).
-    Used to work out max number of events to run Moore over, therefore never runs over
-    sprucing output
-    """
-    event_numbers = _extract_evt_numbers(
-        input_files, evtmax, file_type, is_sprucing_output=False)
-    n_evts = len(event_numbers)
-
-    print(f"Found {n_evts} event numbers in input_files")
-    return n_evts
-
-
-def main():
-    """
-    For a given stream's MDF/DST output, finds all event_numbers and saves them in a json file for later use.
-    Useful for similarity between streams later.
-    """
-    parser = argparse.ArgumentParser(description=main.__doc__)
-    parser.add_argument(
-        '-p',
-        '--process',
-        type=str,
-        required=True,
-        choices=['hlt2', 'spruce'],
-        help="Stage of the trigger.")
-    parser.add_argument(
-        '-sc',
-        '--stream-config',
-        type=str,
-        required=True,
-        choices=["wg", "production"],
-        help='Name of the stream config')
-    parser.add_argument(
-        '-s', '--stream', type=str, required=True, help='Name of the stream')
-    parser.add_argument(
-        '--file-type',
-        choices=("ROOT", "MDF"),
-        required=True,
-        help=
-        "File type of incoming Moore output - ROOT for .dst or MDF for .mdf")
-    args = parser.parse_args()
-
-    fname_helper = FileNameHelper(args.process)
-    file_ext = fname_helper.input_type_to_file_ext(args.file_type)
-    ifile = fname_helper.mdfdst_fname_for_reading(
-        args.stream_config, args.stream, ext=file_ext)
-    evtmax = int(parse_yaml(fname_helper.input_nevts_json())['n_evts'])
-
-    event_numbers = _extract_evt_numbers(
-        ifiles=[ifile],
-        evtmax=evtmax,
-        file_type=args.file_type,
-        is_sprucing_output=args.process == "spruce",
-        stream=args.stream)
-
-    ofile = fname_helper.event_no_fname(args.stream_config, args.stream)
-    with open(ofile, 'w') as f:
-        json.dump({args.stream: event_numbers}, f)
-    print(
-        f"Found {len(event_numbers)} event numbers for {args.stream} stream. Saved list to {ofile}."
-    )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/python/MooreTests/make_bandwidth_test_page.py b/python/MooreTests/make_bandwidth_test_page.py
index 32b51009..6ef70bfe 100644
--- a/python/MooreTests/make_bandwidth_test_page.py
+++ b/python/MooreTests/make_bandwidth_test_page.py
@@ -13,6 +13,7 @@ import jinja2
 import matplotlib.pyplot as plt
 import pandas as pd
 import os
+import json
 from math import log10
 from dataclasses import dataclass, field
 from typing import List
@@ -21,93 +22,243 @@ from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml, guess_wg, KNO
 
 plt.ioff()
 
-SINGLE_PROCESS_REPORT_TEMPLATE = jinja2.Template("""
-<html>
-<head></head>
-<body>
-{{TOP_LEVEL_HEADER}}
-<p style="color:{{EXIT_CODE_COLOUR}}">
-    <b>{{EXIT_CODE_SENTENCE}}</b>
-</p>
-<p>
-    This page contains the results of the {{PROCESS}} bandwidth test. Main results: <br>
-    <object type="image/png" data="{{HEADLINE_BAR_CHART_PATH}}"></object>
-</p>
-{{main_rate_table}}
-<p>
-    The streaming configuration (i.e. which lines went to each stream) can be found in JSON format
-    <a href="{{BASE_PATH}}/{{stream_config_json}}">here</a>. <br>
-    This streaming configuration is our current set of lines to be used in the next data-taking period. <br>
-    "DstData" is the raw bank to which reconstructed information (candidates, other reconstructed tracks etc.) are saved. <br>
-    The "DstData bandwidth" is therefore the bandwidth counting only that raw bank. <br>
-    The total event size (and total bandwidth) count all raw banks (incl. DstData, and detector raw banks if present) in the file. 
-    <b>NB:
-    In real data-taking, raw banks are now left uncompressed when writing, and then the whole file is compressed afterwards. 
-    We account for this compression by multiplying event sizes and bandwidths by a scaling factor to give accurate per-stream bandwidths.
-    The scaling factor is calculated for each file in the test as: '(size of the compressed file) / (size of the uncompressed file)'. 
-    </b>
-    <br>
-</p>
-<p> Scroll down to see: </p>
-<ul>
-    <li> Bar charts of rate and bandwidth for each WG within each stream (HLT2 only), </li>
-    <li> A pie chart of all lines split by WGs (HLT2 and sprucing only), </li>
-    <li> Information about the input sample, </li>
-    <li> Stacked histograms of all lines, split by WG, of rate/bandwidth metrics, </li>
-    <li> Memory consumption of the test as a function of time. </li>
-</ul>
-<p>
-    Further results can be found in the links below:
-</p>
-<ul>
-    {{LIST_OF_LINKS}}
-    $${{PROCESS}}__comparison$$
-    </b></b>
-</ul>
-<p> See: <a href="https://lbfence.cern.ch/alcm/public/figure/details/32">RTA & DPA Workflow</a> for reference figures regarding bandwidth.</p>
-{{BAR_CHARTS}}
-{{LINES_PER_WG}}
-<p>
-    <b>Input sample information:</b>
+MAIN_HISTOGRAMS = ["rate", "total_size", "tot_bandwidth"]
+EXTRA_HISTOGRAMS = ["dst_data_size", "dst_bandwidth"]
+MAIN_BAR_CHARTS = {
+    "rate": 'Rate (kHz)',
+    "bandwidth": 'Bandwidth (GB/s)',
+}
+EXTRA_BAR_CHARTS = {"dstbandwidth": 'DstData Bandwidth (GB/s)'}
+TDR_BANDWIDTHS = {
+    "hlt2": {
+        "production": {
+            "full": 5.90,
+            "turbo": 2.50,
+            "turcal": 1.60,
+            "total": 10.00,
+        },
+    },
+    "spruce": {
+        "wg": {
+            "total": 0.80
+        },
+        "wgpass": {
+            "total": 2.50
+        },
+        "turcal": {
+            "total": 0.2
+        }
+    }
+}
+PRETTY_STREAM_NAMES = {
+    "slepton": "SL",
+    "sl": "SL",
+    "qee": "QEE",
+    "rd": "RD",
+    "bandq": "B&Q",
+    "b_to_open_charm": "B2OC",
+    "b2oc": "B2OC",
+    "bnoc": "BnoC",
+    "b_to_charmonia": "B2CC",
+    "b2cc": "B2CC",
+    "charm": "Charm",
+    "ift": "IFT",
+    "full": "Full",
+    "turbo": "Turbo",
+    "turcal": "TurCal",
+    "Turcal_mDST": "MDST",
+    "Turcal_persistreco": "PersistReco",
+    "Turcal_rawbanks": "RawBanks",
+    "Turcal_persistrecorawbanks": "PRRawBanks"
+}
+MAP_STREAMS = {
+    "streamless": "streamless",
+    "production": "production",
+    "wg": "Full",
+    "wgpass": "Turbo",
+    "turcal": "TurCal"
+}  # TODO: would be obviated if the spruce stream configs used these names
+
+
+def render_top_level_page(script_path: str,
+                          base_path: str,
+                          test_configs: List[tuple[str, str]],
+                          to_disk_bar_chart=False):
+    html_str = f"""
+    <html>
+    <head></head>
+    <body>
+    <p>
+        slot.build_id: $$version$$<br>
+        start time: $$start_time$$<br>
+        end time: $$end_time$$<br>
+        platform: $$platform$$<br>
+        hostname: $$hostname$$<br>
+        cpu_info: $$cpu_info$$<br>
+        testing script path: {script_path}
+    </p>
+    <ul>
+        <li><a href="{base_path}/run.log">Logs</a></li>
+    </ul>
+    <p>
+        The bandwidth test ran the following sub-tests (process, streaming configuration): {test_configs}<br>
+        The webpage for each test can be found below. Scroll down for a report of the test's memory consumption.
+    <ul>
+    """
+    for process, stream_config in test_configs:
+        fname_helper = FileNameHelper(process)
+        extra_sfx = f" (of {MAP_STREAMS[stream_config]}-stream output)" if process == "spruce" else ""
+        html_str += f"""
+        <li><a href="{base_path}/{fname_helper.index_html_page_path(stream_config)}">{process.capitalize()} {stream_config}{extra_sfx} results</a></li>
+        """
+    html_str += """</ul></p>"""
+
+    if to_disk_bar_chart:
+        html_str += f"""
+        <p>
+            Summary of bandwidth of all streams to disk (only available for those tests that run all sprucing stages):<br>
+        </p>
+        <object type="image/png" data="{fname_helper.to_disk_bar_chart_path(full_path=False)}"></object>
+        """
+
+    html_str += """
+    <p>
+        <b> Memory consumption of this test: </b>
+    </p>
+    <object type="image/png" data="memory_consumption.png"></object>
+    <p>
+        Memory consumption as a function of wall-time. <br>
+        The virtual memory size is the total amount of memory the process may hypothetically access. <br>
+        The resident set size (RSS) is the portion of memory occupied by the run that is held in main memory (RAM). <br>
+        The proportional set size (PSS) is the private memory occupied by the run itself plus the proportion of shared memory with one or more other processes. <br>
+        As we only launch one test at a time, PSS should be close to RSS in this case, and PSS gives the real memory used by this test. <br>
+        Swap memory is used when RAM is full. <br>
+        The maximum resident set size usage is $$max_rss$$ GB. <br>
+        The maximum proportional set size usage is $$max_pss$$ GB. <br>
+    </p>
+    </body>
+    </html>"""
+    return _render(html_str)
+
+
+def render_single_test_page(process: str, stream_config: str,
+                            input_config_path: str, streams: List[str],
+                            args: argparse.Namespace):
+
+    fname_helper = FileNameHelper(process)
+    base_path = fname_helper.base_html_path(args.building_locally)
+    input_info = parse_yaml(input_config_path)
+
+    exit_code = 1  # Assume failure
+    with open(fname_helper.message_path(), "r") as f:
+        exit_code = int(json.load(f)[process][stream_config]["code"])
+
+    if exit_code == 0:
+        exit_code_sentence = "All sub-jobs in this test exited successfully."
+    else:
+        exit_code_sentence = "There were errors in some of the sub-jobs of this test; please see the logs."
+
+    headline_bar_chart_path = ""
+    if process != "hlt1":
+        headline_bar_chart_path = fname_helper.headline_bar_chart_path(
+            stream_config, full_path=False)
+
+    html_str = f"""
+    <html>
+    <head></head>
+    <body>
+    <p style="color:{'green' if exit_code == 0 else 'red'}">
+        <b>{exit_code_sentence}</b>
+    </p>
+    <p>
+        This page contains the results of the {process} bandwidth test with the {stream_config} streaming configuration. Scroll down to see:
     <ul>
-    <li>Config file: {{INPUT_CONFIG_PATH}}</li>
-    <li>Input rate: {{INPUT_RATE}} kHz</li>
-    <li>Number of interactions per bunch crossing (&#957): {{INPUT_NU}}</li>
-    <li>Radius of VELO opening: {{INPUT_VELO_RADIUS}} mm</li>
+        <li> Summary of main results, </li>
+        <li> Details of the streaming configuration, </li>
+        <li> Links to other html pages produced by this test, </li>
+        <li> Bar charts of rate and bandwidth for each WG within each stream (HLT2 only), </li>
+        <li> A pie chart of all lines split by WGs (HLT2 and sprucing only), </li>
+        <li> Information about the input sample, </li>
+        <li> Stacked histograms of all lines, split by WG, of rate/bandwidth metrics. </li>
     </ul>
-</p>
-<p>
-    <b>Stacked histograms of all lines, split by WG, of rate/bandwidth metrics:</b> <br>
-    The total distributions are shown as a stacked histogram, split into several histograms of WGs. <br>
-    The distributions per WG is attached in the html page linked above. <br>
-    Total event size is calculated from summing all raw banks in the file (including DstData) and then multiplying by a per-stream compression factor. <br>
-    Where appropriate, the DstData raw bank size and DstData bandwidth are calculated from summing only the DstData raw bank and then multiplying by a per-stream compression factor. <br>
-</p>
-<object type="image/png" data="{{PROCESS}}__hist__tot_bandwidth.png"></object>
-<object type="image/png" data="{{PROCESS}}__hist__rate.png"></object>
-<object type="image/png" data="{{PROCESS}}__hist__total_size.png"></object>
-{{DST_DATA_HIST}}
-{{MEMORY_CONSUMPTION}}
-</body>
-</html>
-""")
-
-HLT2_AND_SPRUCE_REPORT_TEMPLATE = jinja2.Template("""
-<html>
-<head></head>
-<body>
-{{TOP_LEVEL_HEADER}}
-<p>
-    The bandwidth test ran an Hlt2 test, and then a Sprucing test on the Full-stream output. <br>
-    The appropriate process-specific webpages can be found below.
-</p>
-<ul>
-    <li><a href="{{BASE_PATH}}/hlt2__index.html">Hlt2 results</a></li>
-    <li><a href="{{BASE_PATH}}/spruce__index.html">Sprucing results</a></li>
-</ul>
-{{MEMORY_CONSUMPTION}}
-</body>
-</html>""")
+    <b>Main results:</b> <br>
+    <object type="image/png" data="{headline_bar_chart_path}"></object>
+    </p>
+    """
+    with open(
+            fname_helper.final_rate_table_all_streams_path(stream_config),
+            "r") as rate_html:
+        html_str += rate_html.read()
+    total_rate, total_bw = total_rate_and_bw(fname_helper, stream_config)
+    html_str += f"""
+    <p>
+        <b>The total bandwidth (rate) was measured to be {total_bw:.2f} GB/s ({total_rate:.2f} kHz).</b><br>
+    </p>
+    """
+
+    stream_config_json_path = fname_helper.stream_config_json_path(
+        stream_config, full_path=False)
+    html_str += f"""
+    <p>
+        The streaming configuration (i.e. which lines went to each stream) can be found in JSON format
+        <a href="{base_path}/{stream_config_json_path}">here</a>. <br>
+        This streaming configuration is our current set of lines to be used in the next data-taking period. <br>
+        "DstData" is the raw bank to which reconstructed information (candidates, other reconstructed tracks etc.) are saved. <br>
+        The "DstData bandwidth" is therefore the bandwidth counting only that raw bank. <br>
+        The total event size (and total bandwidth) counts all raw banks (incl. DstData, and detector raw banks if present) in the file. <br>
+    """
+    if process != "hlt1":
+        html_str += """
+            <b>NB:
+            In real data-taking, raw banks are now left uncompressed when writing, and then the whole file is compressed afterwards.
+            We account for this compression by multiplying event sizes and bandwidths by a scaling factor to give accurate per-stream bandwidths.
+            The scaling factor is calculated for each file in the test as: '(size of the compressed file) / (size of the uncompressed file)'.
+            </b>
+            <br>
+            </p>
+        """
+    else:
+        html_str += """</p>"""
+
+    html_str += f"""
+    <p>
+        Further results can be found in the links below:
+    </p>
+    <ul>
+        {list_of_links_html(fname_helper, stream_config, args.building_locally)}
+        $${fname_helper.comparison_str(stream_config)}$$
+        </b></b>
+    </ul>
+    <p> See: <a href="https://lbfence.cern.ch/alcm/public/figure/details/32">RTA & DPA Workflow</a> for reference figures regarding bandwidth.</p>
+    {render_bar_charts(fname_helper, stream_config, streams)}
+    {render_lines_pie_chart(fname_helper, stream_config)}
+    <p>
+        <b>Input sample information:</b>
+        <ul>
+        <li>Config file: {os.path.expandvars(input_config_path)}</li>
+        <li>Input rate: {input_info['input_rate']} kHz</li>
+        <li>Number of interactions per bunch crossing (&#957): {input_info['nu']}</li>
+        <li>Radius of VELO opening: {input_info['velo_radial_opening']} mm</li>
+        </ul>
+    </p>
+    <p>
+        <b>Stacked histograms of all lines, split by WG, of rate/bandwidth metrics:</b> <br>
+        The total distributions are shown as a stacked histogram, split into several histograms of WGs. <br>
+        The distributions per WG are attached in the html page linked above. <br>
+        Total event size is calculated from summing all raw banks in the file (including DstData). <br>
+        Where appropriate, the DstData raw bank size and DstData bandwidth are calculated from summing only the DstData raw bank. <br>
+    </p>
+    """
+    for hist_suffix in MAIN_HISTOGRAMS:
+        html_str += f"""
+        <object type="image/png" data="{fname_helper.hist_path(stream_config, hist_suffix, full_path=False)}"></object>
+        """
+    html_str += f"""
+    {render_dst_data_hists(fname_helper, stream_config)}
+    </body>
+    </html>
+    """
+    return _render(html_str)
 
 
 @dataclass
@@ -126,7 +277,6 @@ LineRateBWInfo = namedtuple(
 
 def histo_maker(entry_list,
                 xlabel,
-                title,
                 plot_path,
                 nbins=100,
                 range=None,
@@ -135,9 +285,10 @@ def histo_maker(entry_list,
                 stacked=False,
                 labels=[],
                 legend=False):
+    title = ""
     if take_log:
         safe_log = lambda rate: log10(rate) if rate > float(f'1e{log_th}') else log_th - 1
-        title = f"{title} (all values <= log10(1e{log_th}) are in the first bin)"
+        title = f"(all values <= log10(1e{log_th}) are in the first bin)"
         if stacked:
             # entry_list is a list of lists
             entry_list = [[safe_log(rate) for rate in lst]
@@ -160,8 +311,15 @@ def histo_maker(entry_list,
     plt.close(fig)
 
 
-def make_plots(all_lines_bw_info, tot_rate, tot_bandwidth, fname_helper,
-               process):
+def list_of_other_lines(process, all_lines_bw_info):
+    return [
+        line for line in all_lines_bw_info.keys()
+        if guess_wg(line, process) == "Other"
+    ]
+
+
+def make_plots(all_lines_bw_info: dict[str, LineRateBWInfo],
+               fname_helper: FileNameHelper, stream_config: str):
     '''
     Make plots of rate, bandwidth and event sizes of all lines.
     It will create 5 stacked histograms containing distributions of all lines
@@ -169,14 +327,8 @@ def make_plots(all_lines_bw_info, tot_rate, tot_bandwidth, fname_helper,
 
     Arguments:
         all_lines_bw_info: dict(line_name: LineRateBWInfo object)
-        tot_rate: total rate of all lines (arithmetic sum of stream rates)
-        tot_bandwidth: total bandwidth of all lines (arithmetic sum of stream BWs)
         fname_helper: instance of FileNameHelper
-        process: `hlt1`, `hlt2` or `spruce`
-
-    Returns:
-        - list of found WGs with >= 1 line
-        - list of lines that didnt fit into 1 WG
+        stream_config: e.g. "production" or "wg"
     '''
 
     # Count number of lines and rates/evt sizes per WG
@@ -184,13 +336,9 @@ def make_plots(all_lines_bw_info, tot_rate, tot_bandwidth, fname_helper,
         wg: WGRateBWInfo()
         for wg in KNOWN_WORKING_GROUPS + ["Other"]
     }
-    list_other_lines = []
     for line, bw_info in all_lines_bw_info.items():
-        wg_guess = guess_wg(line, process)
+        wg_guess = guess_wg(line, fname_helper.process)
         rate_info_per_wg[wg_guess].nlines += 1
-        if wg_guess == "Other":
-            list_other_lines.append(line)
-
         for attrib in ["rate", "dst_size", "tot_size", "dst_bw", "tot_bw"]:
             getattr(rate_info_per_wg[wg_guess], attrib).append(
                 getattr(bw_info, attrib))
@@ -212,96 +360,43 @@ def make_plots(all_lines_bw_info, tot_rate, tot_bandwidth, fname_helper,
         loc='center',
         bbox_to_anchor=(1, 0.5),
         bbox_transform=plt.gcf().transFigure)
-    plt.title(f"Number of {process.capitalize()} lines per WG")
+    plt.title(f"Number of {fname_helper.process.capitalize()} lines per WG")
     plt.savefig(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "lines_per_wg.png"),
+        fname_helper.pie_chart_path(stream_config, full_path=True),
         format="png",
         bbox_inches='tight')
     plt.close(fig)
 
     # Stacked histograms
-    title = f"{process.capitalize()}"
-    for attrib, xtitle, title, plot_bit, take_log, log_th, range in zip(
-        ["rate", "dst_size", "tot_size", "dst_bw", "tot_bw"], [
-            "Log10(Rate [Hz])", "DstData RawBank Size [kB]",
-            "Total Event Size [kB]",
-            "Log10(Bandwidth from DstData Size [GB/s])",
-            "Log10(Bandwidth from Total Event Size [GB/s])"
-        ], [
-            f"Total Rate: {tot_rate:.2f} kHz", "", "", "",
-            f"Total bandwidth: {tot_bandwidth:.2f} GB/s"
-        ], [
-            "rate", "dst_data_size", "total_size", "dst_bandwidth",
-            "tot_bandwidth"
-        ], [True, False, False, True, True], [-1, 0, 0, -4, -4],
-        [(-2, 7), (0, 500 if process == 'hlt2' else 1000),
-         (0, 500 if process == 'hlt2' else 1000), (-5, 2), (-5, 2)]):
+    for attrib, xtitle, plot_bit, log_th, range in zip(
+        ["rate", "tot_size", "tot_bw", "dst_size", "dst_bw"], [
+            "Log10(Rate [Hz])", "Total Event Size [kB]",
+            "Log10(Bandwidth from Total Event Size [GB/s])",
+            "DstData RawBank Size [kB]",
+            "Log10(Bandwidth from DstData Size [GB/s])"
+        ], MAIN_HISTOGRAMS + EXTRA_HISTOGRAMS, [-1, 0, -4, 0, -4],
+        [(-2, 7),
+         (0, 500 if fname_helper.process == 'hlt2' else 1000), (-5, 2),
+         (0, 500 if fname_helper.process == 'hlt2' else 1000), (-5, 2)]):
         histo_maker(
             [getattr(info, attrib) for info in rate_info_per_wg.values()],
             xtitle,
-            title,
-            fname_helper.process_dependent_html_page_outputs_path(
-                f"hist__{plot_bit}.png"),
+            fname_helper.hist_path(stream_config, plot_bit, full_path=True),
             range=range,
-            take_log=take_log,
+            take_log="Log10" in xtitle,
             log_th=log_th,
             stacked=True,
             legend=True,
             labels=list(rate_info_per_wg.keys()))
 
-    return list_other_lines
-
+    return
 
-def headline_bar_charts(rates_df: pd.DataFrame, process: str, plot_path: str):
-    """Headline bar chart of rate/bandwidth per stream c.f. TDR"""
-
-    TDR_BANDWIDTHS = {
-        "hlt2": {
-            "full": 5.90,
-            "turbo": 2.50,
-            "turcal": 1.60,
-            "total": 10.00,
-        },
-        "spruce": {
-            "total": 0.80
-        }
-    }
-    TITLES = {
-        "hlt2": "Hlt2 (output to tape)",
-        "spruce": "Excl. Sprucing of WG streams to disk"
-    }
-    PRETTY_STREAM_NAMES = {
-        "slepton": "SL",
-        "qee": "QEE",
-        "rd": "RD",
-        "bandq": "B&Q",
-        "b_to_open_charm": "B2OC",
-        "bnoc": "BnoC",
-        "b_to_charmonia": "B2CC",
-        "full": "Full",
-        "turbo": "Turbo",
-        "turcal": "TurCal",
-        "ift": "IFT"
-    }
-
-    bandwidths = {
-        "Current":
-        dict(zip(rates_df['Stream'], rates_df['Total Bandwidth (GB/s)'])),
-        "TDR": {
-            stream: TDR_BANDWIDTHS[process].get(stream, 0)
-            for stream in rates_df['Stream'].to_list()
-        }
-    }
-    for series in ["Current", "TDR"]:
-        bandwidths[series] = {
-            PRETTY_STREAM_NAMES.get(stream, stream): val
-            for stream, val in bandwidths[series].items()
-        }
-
-    bandwidths['Current']['Total'] = sum(bandwidths['Current'].values())
-    bandwidths['TDR']['Total'] = TDR_BANDWIDTHS[process]['total']
 
+def _important_bar_chart_maker(bandwidths: dict[str, dict[str, float]],
+                               process: str,
+                               stream_config="",
+                               is_to_total_to_disk_bar_chart=True):
+    fname_helper = FileNameHelper(process)
     colors = {'Current': 'tab:orange', 'TDR': 'tab:grey'}
     width = 0.4
 
@@ -315,7 +410,7 @@ def headline_bar_charts(rates_df: pd.DataFrame, process: str, plot_path: str):
                       label=label,
                       zorder=3,
                       color=colors[label])
-        if process == "spruce":
+        if process == "spruce" and not is_to_total_to_disk_bar_chart:
             # Only label the last bar - dont have per-WG expectations
             ax.bar_label(
                 bars,
@@ -327,21 +422,70 @@ def headline_bar_charts(rates_df: pd.DataFrame, process: str, plot_path: str):
                 labels=[
                     round(val, 2) for val in bandwidths_by_stream.values()
                 ])
-
     ax.set_ylabel('Bandwidth (GB/s)')
-    ax.set_title(TITLES[process])
+
+    if is_to_total_to_disk_bar_chart:
+        title = "Sprucing (output to disk)"
+    else:
+        title = {
+            "hlt2": "Hlt2 (output to tape)",
+            "spruce":
+            f"Sprucing of {MAP_STREAMS[stream_config]} stream to disk"
+        }[process]
+    ax.set_title(title)
+
+    # Tick positions need special handling for spruce per-stream charts, as they only have 1 TDR bar
+    tick_pos_opt = 'weird' if process == 'spruce' and not is_to_total_to_disk_bar_chart else 'default'
     tick_positions = {
-        'hlt2': [x + width / 2 for x in range(len(bandwidths_by_stream))],
-        'spruce': [x for x in range(len(bandwidths_by_stream) - 1)] +
+        'default': [x + width / 2 for x in range(len(bandwidths_by_stream))],
+        'weird': [x for x in range(len(bandwidths_by_stream) - 1)] +
         [len(bandwidths_by_stream) - 1 + width / 2]
-    }[process]
+    }[tick_pos_opt]
     ax.set_xticks(tick_positions, bandwidths_by_stream.keys())
+
     ax.legend(loc='upper center', ncols=2)
+    plot_path = fname_helper.to_disk_bar_chart_path(
+        full_path=True
+    ) if is_to_total_to_disk_bar_chart else fname_helper.headline_bar_chart_path(
+        stream_config, full_path=True)
     plt.savefig(plot_path, format="png")
     plt.close(fig)
 
 
-def make_bar_charts(rates_df, column, stream, plot_path):
+def headline_bar_charts(fname_helper: FileNameHelper, stream_config: str):
+    """Headline bar chart of rate/bandwidth per stream c.f. TDR"""
+
+    process = fname_helper.process
+    rates_df = pd.read_csv(
+        fname_helper.final_rate_table_all_streams_path(
+            stream_config, ext='csv'))
+
+    bandwidths = {
+        "Current":
+        dict(zip(rates_df['Stream'], rates_df['Total Bandwidth (GB/s)'])),
+        "TDR": {
+            stream: TDR_BANDWIDTHS[process][stream_config].get(stream, 0)
+            for stream in rates_df['Stream'].to_list()
+        }
+    }
+    for series in ["Current", "TDR"]:
+        bandwidths[series] = {
+            PRETTY_STREAM_NAMES.get(stream, stream): val
+            for stream, val in bandwidths[series].items()
+        }
+
+    bandwidths['Current']['Total'] = sum(bandwidths['Current'].values())
+    bandwidths['TDR']['Total'] = TDR_BANDWIDTHS[process][stream_config][
+        'total']
+
+    _important_bar_chart_maker(
+        bandwidths,
+        process,
+        stream_config,
+        is_to_total_to_disk_bar_chart=False)
+
+
+def _make_bar_chart(rates_df, column, stream, plot_path):
     """Bar charts of the WG-by-WG rates within 1 stream"""
 
     fig = plt.figure()
@@ -356,6 +500,25 @@ def make_bar_charts(rates_df, column, stream, plot_path):
     plt.close(fig)
 
 
+def make_per_wg_bar_charts(fname_helper: FileNameHelper, stream_config: str,
+                           streams: list[str]):
+    all_bar_charts = {**MAIN_BAR_CHARTS, **EXTRA_BAR_CHARTS}
+    for stream in streams:
+        intra_stream_rates_df = pd.read_csv(
+            fname_helper.tmp_rate_table_intra_stream_path(
+                stream_config, stream),
+            header=None)
+
+        # NOTE: this relies on the column ordering written by line-and-stream-rates.py; beware if it ever changes
+        intra_stream_rates_df.columns = ['WG'] + list(all_bar_charts.values())
+        for metric, column in all_bar_charts.items():
+            _make_bar_chart(
+                intra_stream_rates_df, column, stream,
+                fname_helper.bar_chart_path(
+                    stream_config, stream, metric, full_path=True))
+    return
+
+
 def write_html_page(page_path, rendered_html):
     if rendered_html:
         with open(page_path, "w") as html_file:
@@ -366,9 +529,9 @@ def _render(html_str):
     return jinja2.Template(html_str).render()
 
 
-def render_all_lines_page(fname_helper, building_locally):
+def render_all_lines_page(fname_helper, stream_config, building_locally):
     csv_path = fname_helper.final_rate_table_all_lines_path(
-        "csv", full_path=False)
+        stream_config, "csv", full_path=False)
     html_str = f"""
     <p>
         Rates, event sizes and bandwidths of all lines, listed descending in bandwidth. <br>
@@ -377,48 +540,13 @@ def render_all_lines_page(fname_helper, building_locally):
         These numbers are also saved in a csv file: <a href="{fname_helper.base_html_path(building_locally)}/{csv_path}">{csv_path}</a>
     </p>
     """
-    with open(fname_helper.final_rate_table_all_lines_path("html"),
-              "r") as rate_table:
+    with open(
+            fname_helper.final_rate_table_all_lines_path(
+                stream_config, "html"), "r") as rate_table:
         html_str += rate_table.read()
     return _render(html_str)
 
 
-def render_top_level_header(script_path, base_path):
-    return _render(f"""
-        <p>
-            slot.build_id: $$version$$<br>
-            start time: $$start_time$$<br>
-            end time: $$end_time$$<br>
-            platform: $$platform$$<br>
-            hostname: $$hostname$$<br>
-            cpu_info: $$cpu_info$$<br>
-            testing script path: {script_path}
-        </p>
-        <ul>
-            <li><a href="{base_path}/run.log">Logs</a></li>
-        </ul>
-    """)
-
-
-def render_memory_consumption():
-    return _render("""
-        <p>
-            <b> Memory consumption of this test: </b>
-        </p>
-        <object type="image/png" data="memory_consumption.png"></object>
-        <p>
-            Memory consumption as functions of Wall-time. <br>
-            The virtual memory size is the total amount of memory the process may hypothetically access. <br>
-            The resident set size (RSS) is the portion of memory occupied by the run that is held in main memory (RAM). <br>
-            The proportional set size (PSS) is the private memory occupied by the run itself plus the proportion of shared memory with one or more other processes. <br>
-            As we only launch one test at the same time, PSS should be close to RSS in this case, and PSS gives the real memory that is used by this test. <br>
-            Swap memory is used when RAM is full. <br>
-            The maximum resident set size usage is $$max_rss$$ GB. <br>
-            The maximum proportional set size usage is $$max_pss$$ GB. <br>
-        </p>
-    """)
-
-
 def render_other_line_table(process, lines):
     if process == "hlt1":
         return _render("")
@@ -441,26 +569,26 @@ def render_other_line_table(process, lines):
     return _render(html_str)
 
 
-def render_dst_data_hists(process):
-    if process == "hlt1":
+def render_dst_data_hists(fname_helper: FileNameHelper, stream_config: str):
+    if fname_helper.process == "hlt1":
         return _render("")
 
     html_str = ''
-    for hist_suffix in ("data_size", "bandwidth"):
+    for hist_suffix in EXTRA_HISTOGRAMS:
         html_str += f"""
-            <object type="image/png" data="{process}__hist__dst_{hist_suffix}.png"></object>
+            <object type="image/png" data="{fname_helper.hist_path(stream_config, hist_suffix, full_path=False)}"></object>
         """
     return _render(html_str)
 
 
-def render_lines_pie_chart(process):
-    if process == "hlt1":
+def render_lines_pie_chart(fname_helper: FileNameHelper, stream_config: str):
+    if fname_helper.process == "hlt1":
         return _render("")
     return _render(f"""
         <p>
             <b>The number of selection lines per working group:</b> <br>
         </p>
-        <object type="image/png" data="{process}__lines_per_wg.png"></object>
+        <object type="image/png" data="{fname_helper.pie_chart_path(stream_config, full_path=False)}"></object>
         <p>
             "Other" category contains those lines with a parsed name that doesn't belong to any known WG. <br>
             To make lines properly categorized, one should follow the naming convention -
@@ -469,11 +597,11 @@ def render_lines_pie_chart(process):
     """)
 
 
-def render_bar_charts(process,
-                      stream_config,
-                      streams,
-                      metrics=('bandwidth', 'rate')):
-    if process != "hlt2":
+def render_bar_charts(fname_helper: FileNameHelper,
+                      stream_config: str,
+                      streams: list[str],
+                      metrics=MAIN_BAR_CHARTS.keys()):
+    if fname_helper.process != "hlt2":
         return _render("")
 
     html_str = ''
@@ -491,12 +619,12 @@ def render_bar_charts(process,
             html_str += f'''
             <p>
             <b>Note:</b> The WG bars in the HLT2 Turbo stream correspond almost exactly to the output streams of the Sprucing passthrough of Turbo.<br>
-            Furthermore, this means <b>the "SumWGs" {metric} bar of HLT2 turbo is equal to the total physical {metric} of Turbo post-sprucing.</b><br>
+            Furthermore, this means <b>the "SumWGs" {metric} bar of HLT2 turbo is approximately equal to the total physical {metric} of Turbo post-sprucing.</b><br>
             </p>
             '''
         for stream in streams:
             html_str += f'''
-                <object type="image/png" data="{process}__{metric}_bar_chart__{stream_config}__{stream}.png"></object>
+                <object type="image/png" data="{fname_helper.bar_chart_path(stream_config, stream, metric, full_path=False)}"></object>
             '''
     html_str += '''
     <p>
@@ -507,22 +635,49 @@ def render_bar_charts(process,
     return _render(html_str)
 
 
-def render_extra_sim_matrices(process, stream_config, streams):
-    if process != "hlt2":
+SIM_MATRICES_DESCR = """
+    <p>
+        The overlap between two streams, A and B, w.r.t. one of the streams, A, is computed as |A n B| / |A|.
+        It shows how many of the events in stream A are also covered by the other stream B. <br>
+        The columns in the overlap matrices are target streams (A) and the rows are comparison streams (B),
+        i.e. the numbers correspond to overlaps w.r.t. the column streams. <br>
+    </p>
+    <p>
+        The Jaccard index between two streams, A and B, is computed as |A n B| / |A u B|.
+        It shows how similar the two streams are and is useful in bandwidth division. <br>
+    </p>
+"""
+
+
+def render_sim_matrices_page(fname_helper: FileNameHelper, stream_config: str):
+    if fname_helper.process == "hlt1":
         return _render("")
 
-    html_str = """
+    html_str = SIM_MATRICES_DESCR
+    html_str += f"""
         <p>
-            The overlap between two streams, A and B, w.r.t to one of the stream, A, is computed as |A n B| / |A|.
-            It shows how much events in the stream A are covered by another stream B. <br>
-            The columns in the overlap matrices are target streams (A) and the rows are comparison streams (B),
-            i.e. the numbers correspond to overlaps w.r.t to the column streams. <br>
+            The overlap matrix of the {stream_config} streams is:
         </p>
+    """
+    with open(fname_helper.overlap_matrix_path(stream_config), "r") as overlap:
+        html_str += overlap.read()
+    html_str += f"""
         <p>
-            The Jaccard index between two streams, A and B, is computed as |A n B| / |A u B|.
-            It shows how similar the two streams are and is useful in bandwidth division. <br>
+            The Jaccard similarity matrix of the {stream_config} streams is:
         </p>
     """
+    with open(fname_helper.jaccard_similarities_path(stream_config),
+              "r") as jaccard:
+        html_str += jaccard.read()
+    return _render(html_str)
+
+
+def render_extra_sim_matrices(fname_helper: FileNameHelper, stream_config: str,
+                              streams: list[str]):
+    if fname_helper.process != "hlt2":
+        return _render("")
+
+    html_str = SIM_MATRICES_DESCR
     for stream in streams:
         html_str += f"""
             <p>
@@ -545,344 +700,253 @@ def render_extra_sim_matrices(process, stream_config, streams):
     return _render(html_str)
 
 
-def list_of_links_html(process: str, fname_helper: FileNameHelper,
-                       stream_config: str, building_locally: bool):
+def list_of_links_html(fname_helper: FileNameHelper, stream_config: str,
+                       building_locally: bool):
     base_path = fname_helper.base_html_path(building_locally)
+    _all_rates_path = fname_helper.all_rates_html_page_path(
+        stream_config, full_path=False)
     links = [
-        f"""<li><a href="{base_path}/{process}__all_rates.html"> A single rate/bandwidth table featuring every trigger line in all streams</a></li>"""
+        f"""<li><a href="{base_path}/{_all_rates_path}"> A single rate/bandwidth table featuring every trigger line in all streams</a></li>"""
     ]
-    if process != "hlt1":
-        rate_table_split_by_stream = fname_helper.final_rate_table_all_lines_split_by_stream_path(
+    if fname_helper.process != "hlt1":
+        _rate_table_split_by_stream = fname_helper.final_rate_table_all_lines_split_by_stream_path(
             stream_config, full_path=False)
         links.append(
-            f"""<li><a href="{base_path}/{rate_table_split_by_stream}"> Rate/bandwidth tables for each stream, with 1 row per trigger line</a></li>"""
+            f"""<li><a href="{base_path}/{_rate_table_split_by_stream}"> Rate/bandwidth tables for each stream, with 1 row per trigger line</a></li>"""
         )
-        rate_table_by_stream_by_wg = fname_helper.final_rate_table_all_lines_split_by_stream_by_wg_path(
+        _rate_table_by_stream_by_wg = fname_helper.final_rate_table_all_lines_split_by_stream_by_wg_path(
             stream_config, full_path=False)
         links.append(
-            f"""<li><a href="{base_path}/{rate_table_by_stream_by_wg}"> Rate/bandwidth tables for each stream, split also by WG, with 1 row per trigger line</a></li>"""
+            f"""<li><a href="{base_path}/{_rate_table_by_stream_by_wg}"> Rate/bandwidth tables for each stream, split also by WG, with 1 row per trigger line</a></li>"""
         )
+        _sim_matrices_path = fname_helper.sim_matrices_html_page_path(
+            stream_config, full_path=False)
         links.append(
-            f"""<li><a href="{base_path}/{process}__similarity_matrices.html"> Jaccard similarity and overlap matrices between streams</a></li>"""
+            f"""<li><a href="{base_path}/{_sim_matrices_path}"> Jaccard similarity and overlap matrices between streams</a></li>"""
         )
 
-    if process == "hlt2":
+    if fname_helper.process == "hlt2":
+        _extra_bar_charts_path = fname_helper.extra_bar_charts_html_page_path(
+            stream_config, full_path=False)
+        _extra_sim_matrices_path = fname_helper.extra_sim_matrices_html_page_path(
+            stream_config, full_path=False)
         links += [
-            f"""<li><a href="{base_path}/{process}__extra_bar_charts.html">Bar charts as below for DstData bandwidth</a></li>""",
-            f"""<li><a href="{base_path}/{process}__extra_similarity_matrices.html">Similarity and overlap matrices between WGs within each stream</a></li>""",
+            f"""<li><a href="{base_path}/{_extra_bar_charts_path}">Bar charts as below for DstData bandwidth</a></li>""",
+            f"""<li><a href="{base_path}/{_extra_sim_matrices_path}">Similarity and overlap matrices between WGs within each stream</a></li>""",
         ]
 
-    if process != "hlt1":
+    if fname_helper.process != "hlt1":
+        _other_lines_path = fname_helper.other_lines_html_page_path(
+            stream_config, full_path=False)
+        _line_descr_path = fname_helper.line_descr_path(full_path=False)
         links += [
-            f"""<li><a href="{base_path}/{process}__other_lines.html">List of lines in "Other" category</a></li>""",
-            f"""<li><a href="{base_path}/{fname_helper.line_descr_path(full_path=False)}"> PersistReco and ExtraOutput info for all lines in all streams</a></li>"""
+            f"""<li><a href="{base_path}/{_other_lines_path}">List of lines in "Other" category</a></li>""",
+            f"""<li><a href="{base_path}/{_line_descr_path}"> PersistReco and ExtraOutput info for all lines in all streams</a></li>"""
         ]
 
     return "\n".join(links)
 
 
-def render_sim_matrices_page(process, fname_helper, stream_config):
-    if process == "hlt1":
-        return _render("")
+def total_rate_and_bw(fname_helper: FileNameHelper, stream_config: str):
+    streams_df = pd.read_csv(
+        fname_helper.final_rate_table_all_streams_path(
+            stream_config, ext="csv"))
+    return sum(streams_df['Rate (kHz)']), sum(
+        streams_df['Total Bandwidth (GB/s)'])
 
-    html_str = f"""
-        <p>
-            The overlap between two streams, A and B, w.r.t to one of the stream, A, is computed as |A n B| / |A|.
-            It shows how much events in the stream A are covered by another stream B. <br>
-            The columns in the overlap matrices are target streams (A) and the rows are comparison streams (B),
-            i.e. the numbers correspond to overlaps w.r.t to the column streams. <br>
-        </p>
-        <p>
-            The overlap matrix of the {stream_config} streams is:
-        </p>
-    """
-    with open(fname_helper.overlap_matrix_path(stream_config), "r") as overlap:
-        html_str += overlap.read()
-    html_str += f"""
-        <p>
-            The Jaccard index between two streams, A and B, is computed as |A n B| / |A u B|.
-            It shows how similar the two streams are and is useful in bandwidth division. <br>
-        </p>
-        <p>
-            The Jaccard similarity matrix of the {stream_config} streams is:
-        </p>
-    """
-    with open(fname_helper.jaccard_similarities_path(stream_config),
-              "r") as jaccard:
-        html_str += jaccard.read()
-    return _render(html_str)
 
+def write_message(fname_helper: FileNameHelper, stream_config: str,
+                  bw_info_by_line: dict[str, LineRateBWInfo]):
+    """Append to message.json for the BandwidthTestHandler to send info to Gitlab and Mattermost"""
 
-def _write_message(message,
-                   args,
-                   tot_rate,
-                   tot_bandwidth,
-                   n_low_rate,
-                   n_high_rate,
-                   process_dependent_message=False):
-    lines = [
-        f"all_jobs_successful_bool = {1 if args.exit_code == 0 else 0}\n",
-        f"total_rate = {tot_rate:.2f} kHz\n",
-        f"total_bandwidth = {tot_bandwidth:.2f} GB/s\n",
-        f"n_low_rate = {n_low_rate:d}\n", f"n_high_rate = {n_high_rate:d}\n"
-    ]
-    if process_dependent_message:
-        lines = [f'{args.process}__{line}' for line in lines]
-    message.writelines(lines)
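+    # Rate thresholds per process: 1 MHz for Hlt1, 1 kHz for Hlt2 and
+    # 500 Hz for Sprucing (carried over from the previous implementation).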
+    high = {
+        'hlt1': 1e6,
+        'hlt2': 1000,
+        'spruce': 500
+    }[fname_helper.process]  # Hz
+    n_low_rate = len(
+        [info for info in bw_info_by_line.values() if info.rate == 0])
+    n_high_rate = len(
+        [info for info in bw_info_by_line.values() if info.rate > high])
+    tot_rate, tot_bandwidth = total_rate_and_bw(fname_helper, stream_config)
+
+    # load up message.json
+    with open(fname_helper.message_path(), "r") as message:
+        info = json.load(message)
+
+    info[fname_helper.process][stream_config]["total_rate"] = tot_rate
+    info[fname_helper.process][stream_config][
+        "total_bandwidth"] = tot_bandwidth
+    info[fname_helper.process][stream_config]["n_low_rate"] = n_low_rate
+    info[fname_helper.process][stream_config]["n_high_rate"] = n_high_rate
+
+    with open(fname_helper.message_path(), "w") as f:
+        json.dump(info, f, indent=4)
     return 0
 
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='make_bandwidth_test_page')
-    parser.add_argument(
-        '-p',
-        '--process',
-        type=str,
-        choices=['hlt1', 'hlt2', 'spruce'],
-        required=True,
-        help='Which stage was the test run on.')
-    parser.add_argument(
-        '-c',
-        '--input-config',
-        type=str,
-        required=True,
-        help='Path to yaml config file defining the input.')
-    parser.add_argument(
-        '--stream-config',
-        type=str,
-        required=True,
-        choices=['wg', 'production', 'streamless'],
-    )
-    parser.add_argument(
-        '--streams',
-        type=str,
-        nargs='+',
-        required=True,
-        help='List of trigger streams.')
-    parser.add_argument(
-        '-s',
-        '--script-path',
-        type=str,
-        required=True,
-        help=
-        'Path to the top-level testing script that is running/calling this script.'
-    )
-    parser.add_argument(
-        '-e',
-        '--exit-code',
-        type=int,
-        required=True,
-        help="Cumulative exit code of all previous jobs.")
-    parser.add_argument(
-        '--make-hlt2-and-spruce-page',
-        action='store_true',
-        help="Flag to use index page appropriate for multiple processes.")
-    parser.add_argument(
-        '--skip-top-level-information-for-process-dependent-testpage',
-        action='store_true',
-        help=
-        'Flag to avoid memory-consumption and build information sections of {process}__index page.'
-    )
-    parser.add_argument(
-        '--building-locally',
-        action='store_true',
-        help=
-        'Makes links between pages work for building the pages locally rather than on the LHCbPR website.'
-    )
-    args = parser.parse_args()
+def make_html_for_single_test(process: str, stream_config: str,
+                              input_config_path: str,
+                              args: argparse.Namespace):
+    fname_helper = FileNameHelper(process)
 
-    input_info = parse_yaml(args.input_config)
-    fname_helper = FileNameHelper(args.process)
+    with open(fname_helper.stream_config_json_path(stream_config),
+              "r") as stream_config_json:
+        streams = list(json.load(stream_config_json).keys())
 
-    if args.exit_code == 0:
-        exit_code_sentence = "All sub-jobs in this test exited successfully."
-        exit_code_bool = 1
-        exit_code_col = "green"
-    else:
-        exit_code_sentence = "There were errors in some of the sub-jobs of this test; please see the logs."
-        exit_code_bool = 0
-        exit_code_col = "red"
+    ### Make plots & tables
+    # Headline bar charts
+    if stream_config != "streamless":
+        headline_bar_charts(fname_helper, stream_config)
 
-    # Read info of all lines
-    df = pd.read_csv(
-        fname_helper.final_rate_table_all_lines_path("csv"), sep=',')
-    number_of_lines = len(df)
+    if process == 'hlt2':
+        make_per_wg_bar_charts(fname_helper, stream_config, streams)
 
+    df = pd.read_csv(
+        fname_helper.final_rate_table_all_lines_path(stream_config, "csv"),
+        sep=',')
     kHz_to_Hz = 1000
     rate_bw_info_by_line = {
         df['Line'][i]: LineRateBWInfo(
             df['Rate (kHz)'][i] * kHz_to_Hz, df["Avg DstData Size (kB)"][i],
             df["Avg Total Event Size (kB)"][i],
             df["DstData Bandwidth (GB/s)"][i], df["Total Bandwidth (GB/s)"][i])
-        for i in range(number_of_lines)
+        for i in range(len(df))
     }
 
-    # Prepare messages to GitLab
-    # limits on rate: 1 MHz for Hlt1, 1 kHz for Hlt2 rate and 0.5% for Sprucing retention
-    tol = {'hlt1': 1e6, 'hlt2': 1000, 'spruce': 500}[args.process]
-    n_low_rate = len(
-        [info for info in rate_bw_info_by_line.values() if info.rate == 0])
-    n_high_rate = len(
-        [info for info in rate_bw_info_by_line.values() if info.rate > tol])
-
-    prod_df = pd.read_csv(
-        fname_helper.final_rate_table_all_streams_path(
-            args.stream_config, ext="csv"))
-    tot_rate = sum(prod_df['Rate (kHz)'])
-    tot_bandwidth = sum(prod_df['Total Bandwidth (GB/s)'])
-
-    # Make plots & tables
-    other_line_list = make_plots(
+    make_plots(
         rate_bw_info_by_line,
-        tot_rate=tot_rate,
-        tot_bandwidth=tot_bandwidth,
         fname_helper=fname_helper,
-        process=args.process)
-
-    # Headline bar charts
-    headline_bar_chart_path_for_html = ""
-    if args.stream_config != "streamless":
-        main_rate_df = pd.read_csv(
-            fname_helper.final_rate_table_all_streams_path(
-                args.stream_config, ext='csv'))
-        headline_bar_chart_path_for_html = fname_helper.bar_chart_path(
-            args.stream_config, 'headline', 'bandwidth', full_path=False)
-        headline_bar_charts(
-            main_rate_df, args.process,
-            fname_helper.bar_chart_path(args.stream_config, 'headline',
-                                        'bandwidth'))
-
-    # Bar charts within a stream - only relevant for HLT2
-    if args.process == 'hlt2':
-        for stream in args.streams:
-            intra_stream_rates_df = pd.read_csv(
-                fname_helper.tmp_rate_table_intra_stream_path(
-                    args.stream_config, stream),
-                header=None)
-            intra_stream_rates_df.columns = [
-                'WG', 'Rate (kHz)', 'Bandwidth (GB/s)',
-                'DstData Bandwidth (GB/s)'
-            ]
-            for column_header in intra_stream_rates_df.columns[1:]:
-                fname = {
-                    'Rate (kHz)': "rate",
-                    'Bandwidth (GB/s)': "bandwidth",
-                    'DstData Bandwidth (GB/s)': "dstbandwidth"
-                }[column_header]
-                make_bar_charts(
-                    intra_stream_rates_df, column_header, stream,
-                    fname_helper.bar_chart_path(args.stream_config, stream,
-                                                fname))
-
-    with open(
-            fname_helper.final_rate_table_all_streams_path(args.stream_config),
-            "r") as rate_html:
-        table_main_stream_rates = rate_html.read()
-
-    base_path = fname_helper.base_html_path(args.building_locally)
-    if args.skip_top_level_information_for_process_dependent_testpage:
-        top_level_header = ""
-        memory_consumption = ""
-    else:
-        top_level_header = render_top_level_header(args.script_path, base_path)
-        memory_consumption = render_memory_consumption()
-
-    with open(
-            fname_helper.html_page_outputs_path(f"{args.process}__index.html"),
-            "w") as html_file:
-        html = SINGLE_PROCESS_REPORT_TEMPLATE.render(
-            TOP_LEVEL_HEADER=top_level_header,
-            MEMORY_CONSUMPTION=memory_consumption,
-            BASE_PATH=base_path,
-            stream_config_json=fname_helper.stream_config_json_path(
-                args.stream_config, full_path=False),
-            main_rate_table=table_main_stream_rates,
-            BAR_CHARTS=render_bar_charts(args.process, args.stream_config,
-                                         args.streams),
-            HEADLINE_BAR_CHART_PATH=headline_bar_chart_path_for_html,
-            LIST_OF_LINKS=list_of_links_html(args.process, fname_helper,
-                                             args.stream_config,
-                                             args.building_locally),
-            LINES_PER_WG=render_lines_pie_chart(args.process),
-            DST_DATA_HIST=render_dst_data_hists(args.process),
-            INPUT_CONFIG_PATH=os.path.expandvars(args.input_config),
-            INPUT_RATE=input_info['input_rate'],
-            INPUT_NU=input_info['nu'],
-            INPUT_VELO_RADIUS=input_info['velo_radial_opening'],
-            EXIT_CODE_SENTENCE=exit_code_sentence,
-            EXIT_CODE_COLOUR=exit_code_col,
-            PROCESS=args.process)
-        html_file.write(html)
+        stream_config=stream_config)
 
     # Extra pages
     write_html_page(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "other_lines.html"),
-        render_other_line_table(args.process, other_line_list))
+        fname_helper.other_lines_html_page_path(stream_config, full_path=True),
+        render_other_line_table(
+            process, list_of_other_lines(process, rate_bw_info_by_line)))
 
     write_html_page(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "all_rates.html"),
-        render_all_lines_page(fname_helper, args.building_locally))
+        fname_helper.all_rates_html_page_path(stream_config, full_path=True),
+        render_all_lines_page(fname_helper, stream_config,
+                              args.building_locally))
 
     write_html_page(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "similarity_matrices.html"),
-        render_sim_matrices_page(args.process, fname_helper,
-                                 args.stream_config))
+        fname_helper.sim_matrices_html_page_path(
+            stream_config, full_path=True),
+        render_sim_matrices_page(fname_helper, stream_config))
 
     write_html_page(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "extra_bar_charts.html"),
+        fname_helper.extra_bar_charts_html_page_path(
+            stream_config, full_path=True),
         render_bar_charts(
-            args.process,
-            args.stream_config,
-            args.streams,
-            metrics=['dstbandwidth']))
+            fname_helper,
+            stream_config,
+            streams,
+            metrics=EXTRA_BAR_CHARTS.keys()))
 
     write_html_page(
-        fname_helper.process_dependent_html_page_outputs_path(
-            "extra_similarity_matrices.html"),
-        render_extra_sim_matrices(args.process, args.stream_config,
-                                  args.streams))
-
-    with open(fname_helper.html_page_outputs_path("index.html"),
-              "w") as html_file:
-        if args.make_hlt2_and_spruce_page:
-            html = HLT2_AND_SPRUCE_REPORT_TEMPLATE.render(
-                TOP_LEVEL_HEADER=render_top_level_header(
-                    args.script_path, base_path),
-                BASE_PATH=base_path,
-                MEMORY_CONSUMPTION=render_memory_consumption())
-            html_file.write(html)
-        else:
-            # In single-process tests, need 'index.html' to be picked up.
-            with open(
-                    fname_helper.html_page_outputs_path(
-                        f"{args.process}__index.html"),
-                    "r") as process_dependent_html_file:
-                html_file.write(process_dependent_html_file.read())
+        fname_helper.extra_sim_matrices_html_page_path(
+            stream_config, full_path=True),
+        render_extra_sim_matrices(fname_helper, stream_config, streams))
 
+    # Main page
     with open(
-            fname_helper.html_page_outputs_path(
-                f"{args.process}__message.txt"), "w") as message:
-        _write_message(
-            message=message,
-            args=args,
-            tot_rate=tot_rate,
-            tot_bandwidth=tot_bandwidth,
-            n_low_rate=n_low_rate,
-            n_high_rate=n_high_rate,
-            process_dependent_message=False)
-    with open(
-            fname_helper.html_page_outputs_path("message.txt"),
-            "a" if args.make_hlt2_and_spruce_page else "w") as message:
-        _write_message(
-            message=message,
-            args=args,
-            tot_rate=tot_rate,
-            tot_bandwidth=tot_bandwidth,
-            n_low_rate=n_low_rate,
-            n_high_rate=n_high_rate,
-            process_dependent_message=True)
-        pass
+            fname_helper.index_html_page_path(stream_config, full_path=True),
+            "w") as html_file:
+        html = render_single_test_page(process, stream_config,
+                                       input_config_path, streams, args)
+        html_file.write(html)
+
+    # Prepare messages to GitLab
+    write_message(fname_helper, stream_config, rate_bw_info_by_line)
+    return
+
+
+def total_bw_to_disk_bar_chart(stream_configs: list[str]):
+
+    disk_process = "spruce"
+    fname_helper = FileNameHelper(disk_process)
+    DISK_TDR_BANDWIDTHS = TDR_BANDWIDTHS[disk_process]
+
+    bandwidths = {
+        "Current": {
+            MAP_STREAMS[stream_config]: total_rate_and_bw(
+                fname_helper, stream_config)[1]
+            for stream_config in stream_configs
+        },
+        "TDR": {
+            MAP_STREAMS[stream_config]:
+            DISK_TDR_BANDWIDTHS[stream_config]['total']
+            for stream_config in stream_configs
+        }
+    }
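+    # Shape sketch (stream labels come from MAP_STREAMS; the numbers here
+    # are purely illustrative):
+    #   {"Current": {"Full": 1.2, "Turbo": 2.6, "TurCal": 0.2},
+    #    "TDR":     {"Full": 5.9, "Turbo": 2.5, "TurCal": 1.6}}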
+
+    bandwidths['Current']['Total'] = sum(bandwidths['Current'].values())
+    bandwidths['TDR']['Total'] = sum(bandwidths['TDR'].values())
+    print("Summary of bandwidths to disk:")
+    print(bandwidths)
+
+    _important_bar_chart_maker(
+        bandwidths, disk_process, is_to_total_to_disk_bar_chart=True)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='make_bandwidth_test_page')
+    parser.add_argument(
+        '--per-test-info',
+        type=str,
+        nargs='+',
+        required=True,
+        help=
+        "List of strings, each being a colon-separated list corresponding to <process>:<stream_config>:<input_config_yaml_path>"
+    )
+    parser.add_argument(
+        '-s',
+        '--script-path',
+        type=str,
+        required=True,
+        help=
+        'Path to the top-level testing script that is running/calling this script.'
+    )
+    parser.add_argument(
+        '--building-locally',
+        action='store_true',
+        help=
+        'Makes links between pages work for building the pages locally rather than on the LHCbPR website.'
+    )
+    args = parser.parse_args()
+
+    processes_and_stream_configs = []
+    # Unpack args.per_test_info into process, stream_config, input_config
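+    # e.g. "spruce:wg:path/to/input.yaml" unpacks to ("spruce", "wg",
+    # "path/to/input.yaml"); the YAML path here is illustrative.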
+    for per_test_info in args.per_test_info:
+        assert len(
+            per_test_info.split(':')
+        ) == 3, "per_test_info must be a colon-separated triplet: <process>:<stream_config>:<input_config_yaml_path>"
+        process, stream_config, input_config = per_test_info.split(':')
+        assert process in ['hlt1', 'hlt2', 'spruce'
+                           ], "process must be one of 'hlt1', 'hlt2', 'spruce'"
+
+        make_html_for_single_test(process, stream_config, input_config, args)
+        processes_and_stream_configs.append((process, stream_config))
+
+    # Bar chart of total bandwidth to disk
+    expected_stream_configs_to_disk = ['wg', 'wgpass', 'turcal']
+    to_disk_stream_configs = [
+        stream_config
+        for process, stream_config in processes_and_stream_configs
+        if process == 'spruce'
+    ]
+    make_total_bw_to_disk_bar_chart = sorted(
+        expected_stream_configs_to_disk) == sorted(to_disk_stream_configs)
+    if make_total_bw_to_disk_bar_chart:
+        total_bw_to_disk_bar_chart(to_disk_stream_configs)
+
+    # Top-level page
+    base_path = FileNameHelper.base_html_path(args.building_locally)
+    with open(FileNameHelper.top_level_index_html_path(), "w") as html_file:
+        html = render_top_level_page(
+            args.script_path,
+            base_path,
+            processes_and_stream_configs,
+            to_disk_bar_chart=make_total_bw_to_disk_bar_chart)
+        html_file.write(html)
diff --git a/python/MooreTests/read_event_numbers.py b/python/MooreTests/read_event_numbers.py
new file mode 100755
index 00000000..e45a1169
--- /dev/null
+++ b/python/MooreTests/read_event_numbers.py
@@ -0,0 +1,159 @@
+###############################################################################
+# (c) Copyright 2022-2024 CERN for the benefit of the LHCb Collaboration      #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+import argparse
+import json
+import GaudiPython as GP
+from GaudiConf.reading import __unpack_rawevent
+from Configurables import (ApplicationMgr, LHCbApp, IODataManager,
+                           EventSelector, createODIN)
+from GaudiConf import IOHelper
+from PRConfig.bandwidth_helpers import FileNameHelper
+
+
+def _extract_evt_numbers(ifiles,
+                         file_type,
+                         is_sprucing_output,
+                         evt_max=-1,
+                         stream="dummy_stream_name"):
+    """
+        Extract all event numbers saved to a single MDF/DST.
+        `stream` only needs to be the correct stream name if `is_sprucing_output`,
+        because in that case the raw event location contains the stream name.
+    """
+    LHCbApp(
+        DataType="Upgrade",
+        Simulation=True,
+        DDDBtag="dddb-20171126",
+        CondDBtag="sim-20171127-vc-md100")
+    EventSelector(PrintFreq=10000)
+    IODataManager(DisablePFNWarning=True)
+
+    raw_event_unpacker_kwargs = dict(bank_types=['ODIN'])
+    if is_sprucing_output:
+        raw_event_unpacker_kwargs["input_process"] = "Spruce"
+        raw_event_unpacker_kwargs["stream"] = stream
+    appMgr = ApplicationMgr(TopAlg=[
+        __unpack_rawevent(**raw_event_unpacker_kwargs),
+        createODIN(ODIN='myODIN')
+    ])
+    IOHelper(file_type).inputFiles(ifiles)
+
+    appMgr = GP.AppMgr()
+    evt = appMgr.evtsvc()
+
+    event_numbers = []
+    if evt_max > 0:
+        for i in range(evt_max):
+            # Iterate 1 event in file
+            appMgr.run(1)
+
+            header = evt["/Event/myODIN"]
+            if not header:
+                break  # ran out of events in file
+            event_numbers.append(header.eventNumber())
+    else:  # all events in the file
+        got_events = True
+        while got_events:
+            appMgr.run(1)
+            header = evt["/Event/myODIN"]
+            if not header:
+                got_events = False
+            else:
+                event_numbers.append(header.eventNumber())
+    return event_numbers
+
+
+def main():
+    """
+    For a given stream's MDF/DST output, finds all event_numbers and saves them in a json file,
+    which is used later to compute the similarity between streams.
+    """
+    parser = argparse.ArgumentParser(description=main.__doc__)
+    subparsers = parser.add_subparsers(help='Mode of execution', dest='mode')
+    event_numbers_parser = subparsers.add_parser("store_output_event_numbers")
+    event_numbers_parser.add_argument(
+        '-s', '--stream', type=str, required=True, help='Name of the stream')
+
+    rate_denom_parser = subparsers.add_parser("count_input_events")
+    rate_denom_parser.add_argument(
+        '-n',
+        '--evt-max',
+        required=True,
+        type=lambda x: int(round(float(x))),
+        help='maximum nb of events to process per job')
+    for sp in [event_numbers_parser, rate_denom_parser]:
+        sp.add_argument(
+            '-p',
+            '--process',
+            type=str,
+            required=True,
+            choices=['hlt1', 'hlt2', 'spruce'])
+        sp.add_argument(
+            '-sc',
+            '--stream-config',
+            type=str,
+            required=True,
+            choices=["streamless", "wg", "production", "wgpass", "turcal"],
+            help='Name of the stream config')
+        sp.add_argument(
+            '--file-type',
+            choices=("ROOT", "MDF"),
+            required=True,
+            help=
+            "File type of incoming Moore output - ROOT for .dst or MDF for .mdf"
+        )
+
+    args = parser.parse_args()
+
+    fname_helper = FileNameHelper(args.process)
+    if args.mode == "store_output_event_numbers":
+        file_ext = fname_helper.input_type_to_file_ext(args.file_type)
+        ifile = fname_helper.mdfdst_fname_for_reading(
+            args.stream_config, args.stream, ext=file_ext)
+
+        event_numbers = _extract_evt_numbers(
+            ifiles=[ifile],
+            file_type=args.file_type,
+            is_sprucing_output=args.process == "spruce",
+            stream=args.stream)
+
+        ofile = fname_helper.event_no_fname(args.stream_config, args.stream)
+        with open(ofile, 'w') as f:
+            json.dump({args.stream: event_numbers}, f)
+        print(
+            f"Found {len(event_numbers)} event numbers for {args.stream} stream. Saved list to {ofile}."
+        )
+
+    else:
+        # Load up the filenames
+        with open(fname_helper.input_info_json(args.stream_config)) as f:
+            input_info = json.load(f)
+
+        # Won't ever be Sprucing output, as this is for working out how many events are in the *input* files
+        n_evts_in_file = len(
+            _extract_evt_numbers(
+                input_info["fnames"],
+                file_type=args.file_type,
+                is_sprucing_output=False,
+                evt_max=args.evt_max))
+        rate_denom = n_evts_in_file
+
+        output_info = input_info
+        output_info["n_evts"] = rate_denom
+        with open(fname_helper.input_info_json(args.stream_config), 'w') as f:
+            json.dump(output_info, f)
+        print(
+            f"Found {n_evts_in_file} events in input files (max of {args.evt_max}), therefore n_evts in denominator of rates: {rate_denom}"
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/python/MooreTests/run_bandwidth_test_jobs.py b/python/MooreTests/run_bandwidth_test_jobs.py
index 1e3f581e..d78aa8d3 100644
--- a/python/MooreTests/run_bandwidth_test_jobs.py
+++ b/python/MooreTests/run_bandwidth_test_jobs.py
@@ -22,52 +22,39 @@ import argparse
 import logging
 import subprocess
 import os
-import socket
 import tempfile
 import atexit
 import shutil
 import json
 from PRConfig.bandwidth_helpers import FileNameHelper, parse_yaml
-from MooreTests.list_event_numbers import input_nevts
 from datetime import datetime
 
-# Default cache dir is the current working directory as this is most convenient for the machine
-# that the test runs on periodically. It assumes the working directory is not cleaned up often,
-# and so the files remain available for subsequent jobs.
-DEFAULT_CACHE_DIRS = {'default': ['.']}
-
 # prefer XDG_RUNTIME_DIR which should be on tmpfs
 FALLBACK_CACHE_DIR = os.getenv('XDG_RUNTIME_DIR', tempfile.gettempdir())
 
-
-def default_cache_dirs():
-    hostname = socket.getfqdn()
-    dirs = DEFAULT_CACHE_DIRS.get(hostname, DEFAULT_CACHE_DIRS['default'])
-    return dirs
+# Default cache dir is the current working directory as this is most convenient for the machine
+# that the test runs on periodically. It assumes the working directory is not cleaned up often,
+# and so the files remain available for subsequent jobs.
+DEFAULT_CACHE_DIR = '.'
 
 
 def is_remote(url):
     return url.startswith('mdf:root:') or url.startswith('root:')
 
 
-def dump_nevts(n_evts, process):
+def dump_fnames(process, stream_config, filenames):
     fname_helper = FileNameHelper(process)
-    ofile = fname_helper.input_nevts_json()
+    ofile = fname_helper.input_info_json(stream_config)
     with open(ofile, 'w') as f:
-        json.dump({"n_evts": f"{n_evts}"}, f)
-    return 0
+        json.dump({"fnames": filenames}, f)
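+    # The resulting JSON is minimal, e.g. {"fnames": ["file1.mdf", ...]};
+    # read_event_numbers.py later adds an "n_evts" key to the same file.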
+    return
 
 
 def run_gaudi_job(args, config, job_input):
     # Build command line
-    input_file_type = "ROOT" if args.digi else "MDF"
-    n_evts = input_nevts(
-        input_files=job_input, evtmax=args.evt_max, file_type=input_file_type)
-    dump_nevts(n_evts, args.process)
-
     extra_options = [
         f"n_threads = {args.threads}", f"n_event_slots = {args.evtSlots}",
-        f"evt_max = {n_evts}",
+        f"evt_max = {args.evt_max}",
         f"input_raw_format = {config['input_raw_format']}",
         f"input_files = {job_input}"
     ]
@@ -150,8 +137,8 @@ if __name__ == '__main__':
         "Download files to local disk before running Moore. Achieves big speedup (5x) in Moore, but only worth it if the downloading is fast (probably only true if you're at CERN.)"
     )
     parser.add_argument(
-        '--cache-dirs',
-        default=None,
+        '--cache-dir',
+        default=DEFAULT_CACHE_DIR,
-        help='Comma separated paths to directories, one per job, where the '
-        'input files will be cached (default is hostname dependent or '
-        '$XDG_RUNTIME_DIR).')
+        help='Path to a directory where the input files will be cached '
+        '(default is the current working directory, falling back to a '
+        'temporary directory under $XDG_RUNTIME_DIR).')
@@ -167,10 +154,6 @@ if __name__ == '__main__':
         help=
         "Flag to access and include config[input_manifest_file] as an extra option in the job."
     )
-    parser.add_argument(
-        '--read-evt-max-from-config',
-        action='store_true',
-        help="Flag to replace args.evtmax with config[n_evts]")
     parser.add_argument(
         '-p',
         '--process',
@@ -178,20 +161,13 @@ if __name__ == '__main__':
         help='Compute for Hlt1, Hlt2 or Sprucing lines',
         choices=['hlt1', 'hlt2', 'spruce'],
         required=True)
+    parser.add_argument(
+        '-sc', '--stream-config', type=str, required=True,
+        help='Name of the streaming configuration, e.g. "production" or "wg".')
     args = parser.parse_args()
 
     logging.basicConfig(
         format='%(levelname)-7s %(message)s',
         level=(logging.DEBUG if args.debug else logging.INFO))
 
-    if args.read_evt_max_from_config:
-        if args.process != "spruce":
-            raise RuntimeError(
-                'read_evt_max_from_config only makes sense for Sprucing jobs with config = metadata generated about Hlt2 BW job outputs.'
-            )
-        config = parse_yaml(args.config)
-        args.evt_max = min(args.evt_max, int(config['n_evts']))
-
     if args.evt_max == -1 or args.evt_max > 1e5:
         raise RuntimeError(
             "The BW tests are limited to 1e5 events to keep them to a reasonable runtime. Please re-configure"
@@ -218,53 +194,48 @@ if __name__ == '__main__':
         raise KeyError(
             f'{args.config} does not provide either the "testfiledb_key" or "input_files".'
         )
-    job_inputs = [
-        inputs_fns
-    ]  # This is a list to allow for possible NUMA extension: see discussion on !316.
+    job_inputs = inputs_fns
 
-    # Set up local directories where inputs are cached
     if args.download_input_files:
-        if args.cache_dirs:
-            args.cache_dirs = args.cache_dirs.split(',')
-        else:
-            args.cache_dirs = default_cache_dirs()
-            if any(not os.path.isdir(d) for d in args.cache_dirs):
-                fallback_dir = tempfile.mkdtemp(
-                    prefix='bandwidth-', dir=FALLBACK_CACHE_DIR)
-                logging.warning(
-                    'not all default cache dirs {!r} exist, using {}'.format(
-                        args.cache_dirs, fallback_dir))
-                args.cache_dirs = [fallback_dir]
-                # if we use the fallback directory, clean up after ourselves
-                atexit.register(shutil.rmtree, fallback_dir)
+
+        # Set up local directories where inputs are cached
+        if not os.path.isdir(args.cache_dir):
+            fallback_dir = tempfile.mkdtemp(
+                prefix='bandwidth-', dir=FALLBACK_CACHE_DIR)
+            logging.warning(
+                'Default cache dir {!r} does not exist, using {}'.format(
+                    args.cache_dir, fallback_dir))
+            args.cache_dir = fallback_dir
+            # if we use the fallback directory, clean up after ourselves
+            atexit.register(shutil.rmtree, fallback_dir)
 
         # Now download files
-        for i, inputs in enumerate(job_inputs):
-            if all(is_remote(url) for url in inputs):
-                from Moore.qmtest.context import download_mdf_inputs_locally, download_digi_inputs_locally
-                # download_inputs_locally only downloads if files
-                # are not already available locally on the machine
-                before_copy = datetime.now()
-                logging.info(
-                    f'Downloading inputs for bandwidth job to {args.cache_dirs[i]}'
-                )
-                logging.info(
-                    f'There are {len(inputs)} input files: [{inputs[0]} ' +
-                    ']' if len(inputs) < 2 else '{inputs[1]}, ... ]')
-                kB_to_GB = 1e3
-                download_inputs_locally = download_digi_inputs_locally if args.digi else download_mdf_inputs_locally
-                job_inputs[i] = download_inputs_locally(
-                    inputs,
-                    args.cache_dirs[i],
-                    max_size=args.avg_evt_size * kB_to_GB * args.evt_max)
-                logging.info(
-                    f"Finished file downloads. This took: {datetime.now() - before_copy}"
-                )
-            elif any(is_remote(url) for url in inputs_fns):
-                parser.error('inputs must either be all xrootd or all local')
-            else:
-                pass  # They're all local so don't worry about it...
+        if all(is_remote(url) for url in job_inputs):
+            from Moore.qmtest.context import download_mdf_inputs_locally, download_digi_inputs_locally
+            # download_inputs_locally only downloads if files
+            # are not already available locally on the machine
+            before_copy = datetime.now()
+            logging.info(
+                f'Downloading inputs for bandwidth job to {args.cache_dir}')
+            logging.info(
+                f'There are {len(job_inputs)} input files: [{job_inputs[0]} ' +
+                (']' if len(job_inputs) < 2 else f'{job_inputs[1]}, ... ]'))
+            kB_to_GB = 1e3
+            download_inputs_locally = download_digi_inputs_locally if args.digi else download_mdf_inputs_locally
+            job_inputs = download_inputs_locally(
+                job_inputs,
+                args.cache_dir,
+                max_size=args.avg_evt_size * kB_to_GB * args.evt_max)
+            logging.info(
+                f"Finished file downloads. This took: {datetime.now() - before_copy}"
+            )
+        elif any(is_remote(url) for url in job_inputs):
+            parser.error('inputs must either be all xrootd or all local')
+        else:
+            pass  # They're all local so don't worry about it...
 
-            run_gaudi_job(args, config, job_inputs[i])
-    else:
-        run_gaudi_job(args, config, job_inputs[0])
+    # Dump the input file names (the local cached paths, if they were
+    # downloaded) so the rate denominator can be worked out afterwards
+    dump_fnames(args.process, args.stream_config, job_inputs)
+
+    run_gaudi_job(args, config, job_inputs)
diff --git a/python/PRConfig/bandwidth_helpers.py b/python/PRConfig/bandwidth_helpers.py
index 5385b84d..ab01c458 100644
--- a/python/PRConfig/bandwidth_helpers.py
+++ b/python/PRConfig/bandwidth_helpers.py
@@ -16,7 +16,7 @@ KNOWN_WORKING_GROUPS = [
     "B2CC", "B2OC", "BandQ", "BnoC", "Calib", "Calo", "Charm", "DPA", "HLT",
     "IFT", "Luminosity", "PID", "QCD", "QEE", "RD", "RTA", "SLB", "Topo",
     "Tagging", "Tracking", "TrackEff", "Monitoring", "CutBasedDiLep",
-    "InclDetDiLep"
+    "InclDetDiLep", "TurCalPID", "TurCalTrackEff"
 ]
 
 CUSTOM_WGS = {
@@ -28,9 +28,12 @@ CUSTOM_WGS = {
         "Hlt2_Psi2SToMuMu"
     ],
     "Charm": ["Hlt2HadInt"],
-    "TrackEff": ["Hlt2TurboVelo2Long_Kshort", "Hlt2TurCalVelo2Long_Kshort"]
+    "TrackEff": ["Hlt2TurboVelo2Long_Kshort", "Hlt2TurCalVelo2Long_Kshort"],
+    "IFT": ["Hlt2IFTFull", "Hlt2IFTTurbo"]
 }
 
+FULL_STREAM_LINES_KEY = "full_stream_lines"
+
 
 def guess_wg(line_name, process):
     # First, expect it follows naming convention e.g. <Hlt2,Spruce><WG>_MyLine
@@ -56,7 +59,9 @@ class FileNameHelper(object):
     mdf_subdir = 'MDF'
     output_subdir = 'Output'
     inter_subsubdir = 'Inter'
+    to_eos_subdir = 'to_eos'
     gitlab_config_webdir = "https://gitlab.cern.ch/lhcb/Moore/-/blob/master/Hlt/Hlt2Conf/tests/options/bandwidth"
+    current_hlt2_output_dir = "root://eoslhcb.cern.ch//eos/lhcb/storage/lhcbpr/www/UpgradeRateTest/current_hlt2_output"
 
     def __init__(self, process):
         self.process = process
@@ -78,8 +83,7 @@ class FileNameHelper(object):
                             fname) if full_path else fname
 
     def _prwww_path(self, fname, starts_mdf):
-        lhcbpr_www_dir = "root://eoslhcb.cern.ch//eos/lhcb/storage/lhcbpr/www/UpgradeRateTest/current_hlt2_output"
-        baseurl = f"mdf:{lhcbpr_www_dir}" if starts_mdf else lhcbpr_www_dir
+        baseurl = f"mdf:{self.current_hlt2_output_dir}" if starts_mdf else self.current_hlt2_output_dir
         return os.path.join(baseurl, fname)
 
     @staticmethod
@@ -100,9 +104,14 @@ class FileNameHelper(object):
                                  stream_config,
                                  stream,
                                  ext=".mdf",
+                                 on_eos=False,
                                  full_path=True):
-        return self._incomplete_mdfdst_fname(
+        basic_path = self._incomplete_mdfdst_fname(
             stream_config, ext, full_path=full_path).format(stream_bit=stream)
+        if on_eos:
+            return self._prwww_path(basic_path, starts_mdf=("mdf" in ext))
+        else:
+            return basic_path
 
     def get_stream_from_bw_path(self, bw_file_path):
         # useful for globbing
@@ -110,10 +119,15 @@ class FileNameHelper(object):
         stream_and_ext_bit = bw_file_path.split("__")[-1]
         return stream_and_ext_bit.split('.')[0]
 
-    def tck(self, stream_config, full_path=True):
+    def tck(self, stream_config, on_eos=False, full_path=True):
         fname = self._join(self._file_pfx(), stream_config) + ".tck.json"
-        return os.path.join(self.base_dir, self.mdf_subdir,
-                            fname) if full_path else fname
+        basic_path = os.path.join(self.base_dir, self.mdf_subdir,
+                                  fname) if full_path else fname
+        if on_eos:
+            assert not full_path, "File paths on EOS don't have a directory structure"
+            return self._prwww_path(basic_path, starts_mdf=False)
+        else:
+            return basic_path
 
     def stream_config_json_path(self, stream_config, full_path=True):
         fname = self._join(self._file_pfx(), "streaming",
@@ -121,52 +135,30 @@ class FileNameHelper(object):
         return os.path.join(self.base_dir, self.mdf_subdir,
                             fname) if full_path else fname
 
-    def metadata_path(self,
-                      stream_config,
-                      stream,
-                      full_path=True,
-                      for_local_use=False):
-        fname = self._join(
-            self._file_pfx(), stream_config, stream, "metadata__for_local_use"
-            if for_local_use else "metadata") + ".yaml"
+    def config_file_path(self, stream: str, send_to_eos: bool):
+        assert self.process == "hlt2"
+        fname = f"spruce_bandwidth_latest_input__{stream}.yaml"
+        if send_to_eos:
+            return os.path.join(self.base_dir, self.to_eos_subdir, fname)
+        else:
+            return os.path.join(self.base_dir, fname)
+
+    def tistos_option_file(self, stream_config, full_path=True):
+        fname = self._join(self.process, "tistos_option_file",
+                           stream_config) + ".py"
         return os.path.join(self.base_dir, self.mdf_subdir,
                             fname) if full_path else fname
 
-    def tistos_option_file(self, full_path=True):
-        fname = self._join(self.process, "tistos_option_file") + ".py"
-        return os.path.join(self.base_dir, self.mdf_subdir,
-                            fname) if full_path else fname
+    def input_info_json(self, stream_config):
+        return os.path.join(
+            self.base_dir, self.mdf_subdir,
+            self._join(self._file_pfx(), stream_config,
+                       "input_info")) + ".json"
 
-    def filesize_path(self):
-        fname = self._join(self.process, "filesizes") + ".json"
+    def filesize_path(self, stream_config):
+        fname = self._join(self.process, stream_config, "filesizes") + ".json"
         return os.path.join(self.base_dir, self.output_subdir, fname)
 
-    def stream_config_json_prwww_path(self, stream_config):
-        return self._prwww_path(
-            fname=self.stream_config_json_path(
-                stream_config=stream_config, full_path=False),
-            starts_mdf=False)
-
-    def mdfdst_prwww_path(self, stream_config, stream, ext=".mdf"):
-        return self._prwww_path(
-            fname=self.mdfdst_fname_for_reading(
-                stream_config, stream, ext, full_path=False),
-            starts_mdf=("mdf" in ext))
-
-    def manifest_prwww_path(self, stream_config):
-        return self._prwww_path(
-            fname=self.tck(stream_config, full_path=False), starts_mdf=False)
-
-    def metadata_prwww_path(self, stream_config, stream):
-        return self._prwww_path(
-            fname=self.metadata_path(
-                stream_config, stream, full_path=False, for_local_use=False),
-            starts_mdf=False)
-
-    def input_nevts_json(self):
-        return os.path.join(self.base_dir, self.mdf_subdir,
-                            self._join(self._file_pfx(), "n_evts")) + ".json"
-
     def line_descr_path(self, full_path=True):
         fname = self._join(self.process, "line_descriptives") + ".html"
         return os.path.join(self.base_dir, self.output_subdir,
@@ -226,15 +218,22 @@ class FileNameHelper(object):
             self._join(self.process, f"rates_wgs_within_{stream}",
                        stream_config, stream) + ".csv")
 
-    def final_rate_table_all_lines_path(self, ext="html", full_path=True):
-        fname = self._join(self.process, "rates_for_all_lines") + f".{ext}"
+    def final_rate_table_all_lines_path(self,
+                                        stream_config,
+                                        ext="html",
+                                        full_path=True):
+        fname = self._join(self.process, stream_config,
+                           "rates_for_all_lines") + f".{ext}"
         return os.path.join(self.base_dir, self.output_subdir,
                             fname) if full_path else fname
 
     def final_rate_table_all_lines_split_by_stream_path(
             self, stream_config, full_path=True):
-        fname = self._join(self.process, "rates_for_all_lines_split_by_stream",
-                           stream_config) + ".html"
+        fname = self._join(
+            self.process,
+            stream_config,
+            "rates_for_all_lines_split_by_stream",
+        ) + ".html"
         return os.path.join(self.base_dir, self.output_subdir,
                             fname) if full_path else fname
 
@@ -242,35 +241,88 @@ class FileNameHelper(object):
                                           stream_config,
                                           ext="html",
                                           full_path=True):
-        fname = self._join(self.process, "rates_for_all_streams",
-                           stream_config) + f".{ext}"
+        fname = self._join(self.process, stream_config,
+                           "rates_for_all_streams") + f".{ext}"
         return os.path.join(self.base_dir, self.output_subdir,
                             fname) if full_path else fname
 
     def final_rate_table_all_lines_split_by_stream_by_wg_path(
             self, stream_config, full_path=True):
-        fname = self._join(self.process,
-                           "rates_for_all_lines_split_by_stream_by_wg",
-                           stream_config) + ".html"
+        fname = self._join(
+            self.process, stream_config,
+            "rates_for_all_lines_split_by_stream_by_wg") + ".html"
         return os.path.join(self.base_dir, self.output_subdir,
                             fname) if full_path else fname
 
-    def base_html_path(self, running_locally: bool):
+    ### The helpers below are only used in make_bandwidth_test_page.py
+    @staticmethod
+    def base_html_path(running_locally: bool):
         return "." if running_locally else "https://cern.ch/lhcbpr-hlt/UpgradeRateTest/$$dirname$$"
 
-    def html_page_outputs_path(self, fname):
-        return os.path.join(self.base_dir, self.output_subdir, fname)
+    @classmethod
+    def _full_path(cls, fname):
+        return os.path.join(cls.base_dir, cls.output_subdir, fname)
 
-    def process_dependent_html_page_outputs_path(self, fname):
-        return os.path.join(self.base_dir, self.output_subdir,
-                            self._join(self.process, fname))
-
-    def bar_chart_path(self,
-                       main_stream_config,
-                       stream,
-                       metric,
-                       full_path=True):
-        fname = self._join(self.process, f'{metric}_bar_chart',
-                           main_stream_config, stream) + ".png"
-        return os.path.join(self.base_dir, self.output_subdir,
-                            fname) if full_path else fname
+    def _plot_path(self, stream_config, fname, ext="png", full_path=False):
+        fname = self._join(self.process, stream_config, fname) + f".{ext}"
+        return self._full_path(fname) if full_path else fname
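+    # Illustrative result, assuming the usual "__" joiner in _join:
+    #   FileNameHelper("spruce")._plot_path("wg", "lines_per_wg")
+    #   gives "spruce__wg__lines_per_wg.png".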
+
+    def pie_chart_path(self, stream_config, full_path=False):
+        return self._plot_path(
+            stream_config, "lines_per_wg", full_path=full_path)
+
+    def hist_path(self, stream_config, hist_suffix, full_path=False):
+        return self._plot_path(
+            stream_config, f"hist__{hist_suffix}", full_path=full_path)
+
+    def bar_chart_path(self, stream_config, stream, metric, full_path=False):
+        return self._plot_path(
+            stream_config,
+            f"{stream}__bar_chart__{metric}",
+            full_path=full_path)
+
+    def headline_bar_chart_path(self, stream_config, full_path=False):
+        return self.bar_chart_path(
+            stream_config, "all", "bandwidth", full_path=full_path)
+
+    def to_disk_bar_chart_path(self, full_path=False):
+        return self.bar_chart_path(
+            "all_to_disk", "all", "bandwidth", full_path=full_path)
+
+    def _extra_html_page_path(self, stream_config, page_name, full_path):
+        fname = self._join(self.process, stream_config, page_name) + ".html"
+        return self._full_path(fname) if full_path else fname
+
+    def other_lines_html_page_path(self, stream_config, full_path=False):
+        return self._extra_html_page_path(stream_config, "other_lines",
+                                          full_path)
+
+    def all_rates_html_page_path(self, stream_config, full_path=False):
+        return self._extra_html_page_path(stream_config, "all_rates",
+                                          full_path)
+
+    def sim_matrices_html_page_path(self, stream_config, full_path=False):
+        return self._extra_html_page_path(stream_config, "similarity_matrices",
+                                          full_path)
+
+    def extra_bar_charts_html_page_path(self, stream_config, full_path=False):
+        return self._extra_html_page_path(stream_config, "extra_bar_charts",
+                                          full_path)
+
+    def extra_sim_matrices_html_page_path(self, stream_config,
+                                          full_path=False):
+        return self._extra_html_page_path(
+            stream_config, "extra_similarity_matrices", full_path)
+
+    def comparison_str(self, stream_config):
+        return self._join(self.process, stream_config, "comparison")
+
+    def index_html_page_path(self, stream_config, full_path=False):
+        return self._extra_html_page_path(stream_config, "index", full_path)
+
+    @classmethod
+    def top_level_index_html_path(cls):
+        return cls._full_path("index.html")
+
+    def message_path(self):
+        return self._full_path("message.json")
diff --git a/scripts/benchmark-scripts/Moore_bandwidth_test.sh b/scripts/benchmark-scripts/Moore_bandwidth_test.sh
index 09157e2e..e112cb63 100755
--- a/scripts/benchmark-scripts/Moore_bandwidth_test.sh
+++ b/scripts/benchmark-scripts/Moore_bandwidth_test.sh
@@ -27,6 +27,7 @@ Usage: Moore/run /path/to/Moore_bandwidth_test.sh [options] 2>&1 | tee <path-to-
 --process: "hlt1", "hlt2" or "spruce".
 --input-data: "nominal", "hlt2-output-locally" or "hlt2-output-from-eos".
     "hlt2-output-from-eos" and "hlt2-output-locally" are not currently available for process == hlt1 or hlt2.
+--stream-config: name of the streaming configuration to use in the job, e.g. 'production' for hlt2, or 'wg', 'wgpass' or 'turcal' for spruce.
 -h|--help: print this message and exit.
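+
+Example (the log-file path is illustrative):
+  Moore/run /path/to/Moore_bandwidth_test.sh --process spruce --input-data nominal --stream-config wg 2>&1 | tee spruce_bw.log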
 
 EOF
@@ -77,10 +78,13 @@ while [[ $# -gt 0 ]]; do
 	    shift # parse argument
 	    shift # parse value
 	    ;;
-    --skip-top-level-information-for-process-dependent-testpage)
-        export TOP_LEVEL_FLAG="--skip-top-level-information-for-process-dependent-testpage"
-	    shift # parse argument
-	    ;;
+    --stream-config)
+        parse_value_and_export STREAM_CONFIG $1 $2
+        shift # parse argument
+        shift # parse value
+        ;;
 	*)
 	    echo "ERROR: Unknown argument \"$1\""
 	    exit 1
@@ -99,17 +103,12 @@ if [ -z "$INPUTDATA" ]; then
     exit 1
 fi
 
-mkdir -p tmp/MDF
-mkdir -p tmp/Output
-mkdir -p tmp/Output/Inter
-
 # Set configuration variables and check configuration makes sense
 if [ -z "${OVERRIDE_EVTMAX}" ]; then
     EVTMAX=1e5
 else
     EVTMAX=${OVERRIDE_EVTMAX}
 fi
-TEST_PAGE_EXTRA_OPTS=""
 
 case $PROCESS in
     hlt1)
@@ -118,10 +117,8 @@ case $PROCESS in
     EVENT_SIZE_UPPER_LIMIT=200
     GAUDIRUN_INPUT_PROCESS="Hlt1"
     OUTPUT_TYPE="MDF"
-    STREAM_CONFIG="streamless"
     case $INPUTDATA in
         nominal)
-        CONFIG_FILE="${TEST_PATH_PREFIX}hlt1_bandwidth_input.yaml"
         EXTRA_OPTS="-e 1" # See next comment up
         ;;
         *)
@@ -136,10 +133,8 @@ case $PROCESS in
     EVENT_SIZE_UPPER_LIMIT=200
     GAUDIRUN_INPUT_PROCESS="Hlt2"
     OUTPUT_TYPE="MDF"
-    STREAM_CONFIG="production"
     case $INPUTDATA in
         nominal)
-        CONFIG_FILE="${TEST_PATH_PREFIX}hlt2_bandwidth_input_2024.yaml"
         EXTRA_OPTS=''
         ;;
         *)
@@ -154,24 +149,19 @@ case $PROCESS in
     EVENT_SIZE_UPPER_LIMIT=300
     GAUDIRUN_INPUT_PROCESS="Spruce"
     OUTPUT_TYPE="ROOT"
-    STREAM_CONFIG="wg"
     case $INPUTDATA in
         nominal)
-        CONFIG_FILE="${TEST_PATH_PREFIX}spruce_bandwidth_input.yaml"
         EXTRA_OPTS='-e 1' # Requires #EvtSlots==1 due to writing dsts, must be single threaded.
         ;;
         hlt2-output-locally)
         # "hlt2-output-locally" corresponds to using the locally-run full-stream output from "process=hlt2, input-data=nominal" test.
-        CONFIG_FILE="tmp/spruce_hlt2_output_locally_input.yaml"
-        EXTRA_OPTS='-e 1 -um --read-evt-max-from-config'
+        EXTRA_OPTS='-e 1 -um'
         # Flag to make a top-level human-readable output page directing to the Hlt2 and Spruce output pages.
-        TEST_PAGE_EXTRA_OPTS='--make-hlt2-and-spruce-page'
         ;;
         hlt2-output-from-eos)
         # "hlt2-output-from-eos" corresponds to using the uploaded full-stream output from a "process=hlt2, input-data=nominal" test.
         # These files are overwritten during "lhcb-master" builds of "process=hlt2, input-data=nominal", i.e. ~daily.
-        CONFIG_FILE="tmp/spruce_hlt2_output_from_eos_input.yaml"
-        EXTRA_OPTS='-e 1 -um --read-evt-max-from-config'
+        EXTRA_OPTS='-e 1 -um'
         ;;
         *)
         echo "ERROR: --input-data must be \"nominal\", \"hlt2-output-locally\", \"hlt2-output-from-eos\" for process \"$PROCESS\""
@@ -187,45 +177,64 @@ esac
 
 ### Now run the tests
 # 0. Pre-Job initialising
-if [ $PROCESS = "spruce" ]; then
-    if  [ $INPUTDATA = "hlt2-output-from-eos" ]; then
-        echo "Downloading the Hlt2 output metadata to use as input config."
-        DOWNLOAD_INPUT_CONFIG_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('hlt2'); print( hlpr.metadata_prwww_path(stream_config='production', stream='full') )"`)
-        xrdcp -f $DOWNLOAD_INPUT_CONFIG_LOCATION $CONFIG_FILE
-        STORE_ERR_CODE
+if [ $PROCESS = "hlt1" ]; then
+    CONFIG_FILE="hlt1_bandwidth_input.yaml"
+    CONFIG_PATH="${TEST_PATH_PREFIX}${CONFIG_FILE}"
+
+elif [ $PROCESS = "hlt2" ]; then
+    CONFIG_FILE="hlt2_bandwidth_input_2024.yaml"
+    CONFIG_PATH="${TEST_PATH_PREFIX}${CONFIG_FILE}"
+
+else ## Spruce
+    # First sort out the mapping between the Sprucing STREAM_CONFIG and the Hlt2 STREAM_CONFIG
+    # This could be simplified by making the Sprucing STREAM_CONFIGs full, turbo and turcal
+    if [ $STREAM_CONFIG = "wg" ]; then
+        HLT2_STREAM_CONFIG="full"
+    elif [ $STREAM_CONFIG = "wgpass" ]; then
+        HLT2_STREAM_CONFIG="turbo"
+    elif [ $STREAM_CONFIG = "turcal" ]; then
+        HLT2_STREAM_CONFIG="turcal"
+    else
+        echo "ERROR: Unrecognised stream configuration \"$STREAM_CONFIG\". It must be \"wg\", \"wgpass\" or \"turcal\"."
+        exit 1
+    fi
+    LATEST_CONFIG_FILE="spruce_bandwidth_latest_input__${HLT2_STREAM_CONFIG}.yaml"
+    STATIC_CONFIG_FILE="spruce_bandwidth_input.yaml"
 
-        echo "Downloading the Hlt2 stream config json to generate lines for TisTos"
-        TISTOS_STREAM_CONFIG_LOCATION='tmp/spruce__from_eos__stream_config_location.json'
-        DOWNLOAD_TISTOS_STREAM_CONFIG_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('hlt2'); print( hlpr.stream_config_json_prwww_path(stream_config='production') )"`)
-        xrdcp -f $DOWNLOAD_TISTOS_STREAM_CONFIG_LOCATION $TISTOS_STREAM_CONFIG_LOCATION
+    # Now the config file path can be set
+    if  [ $INPUTDATA = "hlt2-output-from-eos" ]; then
+        echo "Downloading ${LATEST_CONFIG_FILE} to use as input config."
+        CONFIG_PATH="tmp/${LATEST_CONFIG_FILE}"
+        PRWWW_PREFIX=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print( hlpr.current_hlt2_output_dir )"`)
+        xrdcp -f ${PRWWW_PREFIX}/${LATEST_CONFIG_FILE} $CONFIG_PATH
         STORE_ERR_CODE
     elif [ $INPUTDATA = "hlt2-output-locally" ]; then
-        echo "Use local Hlt2 output metadata from previous job as input config"
-        INPUT_CONFIG_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('hlt2'); print( hlpr.metadata_path(stream_config='production', stream='full', for_local_use=True)) "`)
-        cp -f $INPUT_CONFIG_LOCATION $CONFIG_FILE
-
-        echo "Using the local Hlt2 stream config json to generate lines for TisTos"
-        TISTOS_STREAM_CONFIG_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('hlt2'); print( hlpr.stream_config_json_path(stream_config='production') )"`)
-    elif [ $INPUTDATA = "nominal" ]; then
-        echo "Using the fixed_line_configs to generate lines for TisTos"
-        TISTOS_STREAM_CONFIG_LOCATION=''
+        echo "Using ${LATEST_CONFIG_FILE} generated in previous job as input config."
+        CONFIG_PATH="tmp/${LATEST_CONFIG_FILE}"
+    else
+        CONFIG_PATH="${TEST_PATH_PREFIX}${STATIC_CONFIG_FILE}"
     fi
 
     echo "Generating TISTOS option file"
-    time python -m MooreTests.generate_tistos_option_file --stream-config-location=$TISTOS_STREAM_CONFIG_LOCATION
+    time python -m MooreTests.generate_tistos_option_file -c $CONFIG_PATH --stream-config $STREAM_CONFIG
     STORE_ERR_CODE
-    TISTOS_OPTION_FILE_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print( hlpr.tistos_option_file() ) "`)
+    TISTOS_OPTION_FILE_LOCATION=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print( hlpr.tistos_option_file('${STREAM_CONFIG}') ) "`)
     EXTRA_OPTS+=" ${TISTOS_OPTION_FILE_LOCATION}"
-
 fi
 
 # 1. Run Moore.
 # -d downloads the input files locally to speed up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
-echo "Running trigger to obtain MDF/DST files with ${STREAM_CONFIG} streams for comparison over ${CONFIG_FILE}"
-time python -m MooreTests.run_bandwidth_test_jobs -d -c=$CONFIG_FILE -n=$EVTMAX -p=$PROCESS -t=$MOORE_THREADS -a=$EVENT_SIZE_UPPER_LIMIT $EXTRA_OPTS "${TEST_PATH_PREFIX}${PROCESS}_bandwidth_${STREAM_CONFIG}_streams.py"
+echo "Running trigger to obtain MDF/DST files with ${STREAM_CONFIG} streams for comparison over ${CONFIG_PATH}"
+time python -m MooreTests.run_bandwidth_test_jobs -d -c=$CONFIG_PATH -sc=$STREAM_CONFIG -n=$EVTMAX -p=$PROCESS -t=$MOORE_THREADS -a=$EVENT_SIZE_UPPER_LIMIT $EXTRA_OPTS "${TEST_PATH_PREFIX}${PROCESS}_bandwidth_${STREAM_CONFIG}_streams.py"
+STORE_ERR_CODE
+
+# 2. Work out how many events were run over - needed for the denominator of the rates
+# Inputs are always MDF files - generalise if ever needed
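+# (each line's rate is then roughly n_passed / n_input times the input rate,
+#  and its bandwidth is that rate times the mean event size)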
+time python -m MooreTests.read_event_numbers count_input_events -p $PROCESS -sc $STREAM_CONFIG -n $EVTMAX --file-type "MDF"
 STORE_ERR_CODE
 
-# 2. Compute line descriptives: persist reco, extra output
+# 3. Compute line descriptives: persist reco, extra output
+# TODO: line_descriptives should use the lines from the streaming configuration
 if [ $PROCESS = "hlt1" ]; then
     echo 'Skipping line descriptives as $PROCESS = "hlt1"'
 else
@@ -234,6 +243,8 @@ else
     STORE_ERR_CODE
 fi
 
+echo "Doing analysis for the ${STREAM_CONFIG} streaming configuration..."
+
 # 3. Work out what the streams are from the config JSON; needed for later steps
 STREAM_CONFIG_JSON_PATH=(`python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print(hlpr.stream_config_json_path('${STREAM_CONFIG}'))"`)
 STREAM_STR=`(jq -r 'keys | @sh' ${STREAM_CONFIG_JSON_PATH})`
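+# e.g. for a stream-config JSON like {"streamA": {...}, "streamB": {...}},
+# 'keys | @sh' returns "'streamA' 'streamB'", ready to expand into a bash array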
@@ -252,7 +263,7 @@ else
     echo "Obtaining similarity matrix for ${STREAM_CONFIG}-stream configuration"
     for stream in "${STREAMS[@]}"; do
         echo "Stream name: ${stream}"
-        time python $PRCONFIGROOT/python/MooreTests/list_event_numbers.py -p $PROCESS --stream-config $STREAM_CONFIG --stream $stream --file-type $OUTPUT_TYPE
+        time python -m MooreTests.read_event_numbers store_output_event_numbers -p $PROCESS --stream-config $STREAM_CONFIG --stream $stream --file-type $OUTPUT_TYPE
         STORE_ERR_CODE
     done
     time python $PRCONFIGROOT/python/MooreTests/calculate_stream_overlap.py inter_stream --streams ${STREAMS[@]} -p $PROCESS --stream-config $STREAM_CONFIG
@@ -263,7 +274,7 @@ fi
 echo "Obtaining rates and bandwidth for ${STREAM_CONFIG}-stream configuration"
 for stream in "${STREAMS[@]}"; do
     echo "Stream name: ${stream}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c $CONFIG_FILE -p $PROCESS -s $stream --stream-config $STREAM_CONFIG --file-type $OUTPUT_TYPE
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c $CONFIG_PATH -p $PROCESS -s $stream --stream-config $STREAM_CONFIG --file-type $OUTPUT_TYPE
     STORE_ERR_CODE
 done
 
@@ -284,15 +295,26 @@ STORE_ERR_CODE
 
 # 9. Required information for 'hlt2-output-locally' or 'hlt2-output-from-eos' sprucing jobs.
 if [ $PROCESS = "hlt2" ] && [ $INPUTDATA = "nominal" ]; then
-    echo 'Generating yaml metadata to potentially use in a sprucing ["hlt2-output-locally", "hlt2-output-from-eos"] test'
-    time python -m MooreTests.generate_hlt2_fullstream_metadata -c $CONFIG_FILE
+    echo 'Generating yaml spruce input configs (one per stream) to potentially use in a sprucing ["hlt2-output-locally", "hlt2-output-from-eos"] test'
+    for STREAM in "${STREAMS[@]}"; do
+        time python -m MooreTests.generate_spruce_input_configs -c $CONFIG_PATH --stream-config $STREAM_CONFIG --stream $STREAM
+        STORE_ERR_CODE
+    done
+
+    # Also copy the MDF and manifest files to eos
+    echo 'Copying MDF and manifest files to the to_eos/ directory'
+    cp `python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print(hlpr.tck('${STREAM_CONFIG}') )"` tmp/to_eos/
     STORE_ERR_CODE
+    for STREAM in "${STREAMS_TO_SPRUCE[@]}"; do
+        cp `python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('${PROCESS}'); print(hlpr.mdfdst_fname_for_reading('${STREAM_CONFIG}', '${STREAM}') )"` tmp/to_eos/
+        STORE_ERR_CODE
+    done
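+    # A later "hlt2-output-from-eos" spruce test picks these files up from eos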
 fi
 
 # 10. Copy the stream config JSON from tmp/MDF -> tmp/Output so the handler can pick it up. Bit of a hack
+# Needed so it can be included in the html page
 cp $STREAM_CONFIG_JSON_PATH tmp/Output/
 STORE_ERR_CODE
 
-# 11. Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
-echo 'Making plots and HTML pages'
-time python -m MooreTests.make_bandwidth_test_page -p $PROCESS -c $CONFIG_FILE -s $SCRIPT_PATH -e $ERR_CODE --streams ${STREAMS[@]} --stream-config $STREAM_CONFIG $TOP_LEVEL_FLAG $TEST_PAGE_EXTRA_OPTS
+# Return the ERR_CODE; it can be picked up one level higher to surface the error message before the html is made
+exit $ERR_CODE
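
Aside: the repeated 'python -c "from PRConfig.bandwidth_helpers import FileNameHelper; ..."' one-liners
above could be consolidated into a small shell helper. A minimal sketch, assuming the FileNameHelper
methods keep the signatures used in this script (the "fnh" name is hypothetical, not part of this patch):

    # evaluate a FileNameHelper expression for a given process
    fnh() {
        # $1 = process name, $2 = method call to evaluate on the helper
        python -c "from PRConfig.bandwidth_helpers import FileNameHelper; hlpr = FileNameHelper('$1'); print(hlpr.$2)"
    }
    # usage, equivalent to an inline call above:
    # STREAM_CONFIG_JSON_PATH=$(fnh "$PROCESS" "stream_config_json_path('$STREAM_CONFIG')")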
diff --git a/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh
index 034deaae..65fce849 100755
--- a/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_hlt1_bandwidth.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 ###############################################################################
-# (c) Copyright 2022-2023 CERN for the benefit of the LHCb Collaboration      #
+# (c) Copyright 2022-2024 CERN for the benefit of the LHCb Collaboration      #
 #                                                                             #
 # This software is distributed under the terms of the GNU General Public      #
 # Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
@@ -11,10 +11,39 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
+mkdir -p tmp/MDF
+mkdir -p tmp/Output
+mkdir -p tmp/Output/Inter
+mkdir -p tmp/to_eos/
+
 # this path ends up printed on the BW test page; export so it can be picked up in the child process
 export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
 
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt1 --input-data nominal
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt1 --input-data nominal --stream-config streamless
+err_code=$?
+
+# Write error codes now to a file for robust error handling
+error_codes=$(cat <<EOF
+{
+    "hlt1": {
+        "streamless": { "code": $err_code }
+    }
+}
+EOF
+)
+echo "$error_codes" > tmp/Output/message.json
+echo "Error codes written to tmp/Output/message.json"
+
+# Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+echo 'Making plots and HTML pages'
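+# --per-test-info takes one <process>:<stream-config>:<input-config-path> triplet per test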
+time python -m MooreTests.make_bandwidth_test_page --per-test-info \
+    hlt1:streamless:'$HLT1CONFROOT/tests/options/bandwidth/hlt1_bandwidth_input.yaml' \
+    -s $SCRIPT_PATH
+
+# TODO: would also like to add the html-page error code, but it currently gives a non-zero code despite everything working fine
+# last_msg="{\"make_html_page\": {\"code\": $?} }"
+# echo $last_msg
+# jq --argjson last_msg "$last_msg" '. += $last_msg' tmp/Output/message.json > tmp/Output/tmp_message.json && mv tmp/Output/tmp_message.json tmp/Output/message.json
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_and_spruce_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt2_and_spruce_bandwidth.sh
index 37b5c6e7..54caa123 100755
--- a/scripts/benchmark-scripts/Moore_hlt2_and_spruce_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_and_spruce_bandwidth.sh
@@ -11,11 +11,54 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
+mkdir -p tmp/MDF
+mkdir -p tmp/Output
+mkdir -p tmp/Output/Inter
+mkdir -p tmp/to_eos/
+
 # this path ends up printed on the BW test page; export so it can be picked up in the child process
 export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
 
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt2 --input-data nominal --skip-top-level-information-for-process-dependent-testpage
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-locally --skip-top-level-information-for-process-dependent-testpage
+# TODO: pass in the config path - it is duplicated here and in Moore_bandwidth_test.sh
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt2 --input-data nominal --stream-config production
+hlt2_err_code=$?
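+# The three spruce jobs below re-use the Hlt2 stream outputs written locally by the job above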
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-locally --stream-config wg
+spruce_wg_err_code=$?
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-locally --stream-config wgpass
+spruce_wgpass_err_code=$?
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-locally --stream-config turcal
+spruce_turcal_err_code=$?
+
+# Write error codes now to a file for robust error handling
+error_codes=$(cat <<EOF
+{
+    "hlt2": {
+        "production": { "code": $hlt2_err_code }
+    },
+    "spruce": {
+        "wg": { "code": $spruce_wg_err_code },
+        "wgpass": { "code": $spruce_wgpass_err_code },
+        "turcal": { "code": $spruce_turcal_err_code }
+    }
+}
+EOF
+)
+echo "$error_codes" > tmp/Output/message.json
+echo "Error codes written to tmp/Output/message.json"
+
+# Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+echo 'Making plots and HTML pages'
+time python -m MooreTests.make_bandwidth_test_page --per-test-info \
+    hlt2:production:'$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_input_2024.yaml' \
+    spruce:wg:'tmp/spruce_bandwidth_latest_input__full.yaml' \
+    spruce:wgpass:'tmp/spruce_bandwidth_latest_input__turbo.yaml' \
+    spruce:turcal:'tmp/spruce_bandwidth_latest_input__turcal.yaml' \
+    -s $SCRIPT_PATH
+
+# TODO: would also like to add the html-page error code, but it currently gives a non-zero code despite everything working fine
+# last_msg="{\"make_html_page\": {\"code\": $?} }"
+# echo $last_msg
+# jq --argjson last_msg "$last_msg" '. += $last_msg' tmp/Output/message.json > tmp/Output/tmp_message.json && mv tmp/Output/tmp_message.json tmp/Output/message.json
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
index 813829ee..1e658021 100755
--- a/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 ###############################################################################
-# (c) Copyright 2022-2023 CERN for the benefit of the LHCb Collaboration      #
+# (c) Copyright 2022-2024 CERN for the benefit of the LHCb Collaboration      #
 #                                                                             #
 # This software is distributed under the terms of the GNU General Public      #
 # Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
@@ -11,10 +11,39 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
+mkdir -p tmp/MDF
+mkdir -p tmp/Output
+mkdir -p tmp/Output/Inter
+mkdir -p tmp/to_eos/
+
 # this path ends up printed on the BW test page; export so it can be picked up in the child process
 export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
 
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt2 --input-data nominal
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process hlt2 --input-data nominal --stream-config production
+hlt2_err_code=$?
+
+# Write error codes now to a file for robust error handling
+error_codes=$(cat <<EOF
+{
+    "hlt2": {
+        "production": { "code": $hlt2_err_code }
+    }
+}
+EOF
+)
+echo "$error_codes" > tmp/Output/message.json
+echo "Error codes written to tmp/Output/message.json"
+
+# Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+echo 'Making plots and HTML pages'
+time python -m MooreTests.make_bandwidth_test_page --per-test-info \
+    hlt2:production:'$HLT2CONFROOT/tests/options/bandwidth/hlt2_bandwidth_input_2024.yaml' \
+    -s $SCRIPT_PATH
+
+# TODO: would also like to add the html-page error code, but it currently gives a non-zero code despite everything working fine
+# last_msg="{\"make_html_page\": {\"code\": $?} }"
+# echo $last_msg
+# jq --argjson last_msg "$last_msg" '. += $last_msg' tmp/Output/message.json > tmp/Output/tmp_message.json && mv tmp/Output/tmp_message.json tmp/Output/message.json
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh b/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
index 333d2161..a231638b 100755
--- a/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 ###############################################################################
-# (c) Copyright 2022-2023 CERN for the benefit of the LHCb Collaboration      #
+# (c) Copyright 2022-2024 CERN for the benefit of the LHCb Collaboration      #
 #                                                                             #
 # This software is distributed under the terms of the GNU General Public      #
 # Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
@@ -11,10 +11,40 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
+mkdir -p tmp/MDF
+mkdir -p tmp/Output
+mkdir -p tmp/Output/Inter
+mkdir -p tmp/to_eos/
+
 # this path ends up printed on the BW test page; export so it can be picked up in the child process
 export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
 
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data nominal
+# wgpass & turcal not valid here: there's only a full-stream output made and put in TestfileDB periodically
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data nominal --stream-config wg
+err_code=$?
+
+# Write error codes now to a file for robust error handling
+error_codes=$(cat <<EOF
+{
+    "spruce": {
+        "wg": { "code": $err_code }
+    }
+}
+EOF
+)
+echo "$error_codes" > tmp/Output/message.json
+echo "Error codes written to tmp/Output/message.json"
+
+# Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+echo 'Making plots and HTML pages'
+time python -m MooreTests.make_bandwidth_test_page --per-test-info \
+    spruce:wg:'$HLT2CONFROOT/tests/options/bandwidth/spruce_bandwidth_input.yaml' \
+    -s $SCRIPT_PATH
+
+# TODO: would also like to add the html-page error code, but it currently gives a non-zero code despite everything working fine
+# last_msg="{\"make_html_page\": {\"code\": $?} }"
+# echo $last_msg
+# jq --argjson last_msg "$last_msg" '. += $last_msg' tmp/Output/message.json > tmp/Output/tmp_message.json && mv tmp/Output/tmp_message.json tmp/Output/message.json
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
\ No newline at end of file
diff --git a/scripts/benchmark-scripts/Moore_spruce_latest_bandwidth.sh b/scripts/benchmark-scripts/Moore_spruce_latest_bandwidth.sh
index 115d06b1..7292f0ea 100755
--- a/scripts/benchmark-scripts/Moore_spruce_latest_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_spruce_latest_bandwidth.sh
@@ -11,10 +11,47 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
+mkdir -p tmp/MDF
+mkdir -p tmp/Output
+mkdir -p tmp/Output/Inter
+mkdir -p tmp/to_eos/
+
 # this path ends up printed on the BW test page; export so it can be picked up in the child process
 export SCRIPT_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
 
-$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-from-eos
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-from-eos --stream-config wg
+spruce_wg_err_code=$?
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-from-eos --stream-config wgpass
+spruce_wgpass_err_code=$?
+$PRCONFIGROOT/scripts/benchmark-scripts/Moore_bandwidth_test.sh --process spruce --input-data hlt2-output-from-eos --stream-config turcal
+spruce_turcal_err_code=$?
+
+# Write error codes now to a file for robust error handling
+error_codes=$(cat <<EOF
+{
+    "spruce": {
+        "wg": { "code": $spruce_wg_err_code },
+        "wgpass": { "code": $spruce_wgpass_err_code },
+        "turcal": { "code": $spruce_turcal_err_code }
+    }
+}
+EOF
+)
+echo "$error_codes" > tmp/Output/message.json
+echo "Error codes written to tmp/Output/message.json"
+
+# Produce plots and HTML pages; add the --building-locally flag to make the links work if you are building the html pages locally
+echo 'Making plots and HTML pages'
+time python -m MooreTests.make_bandwidth_test_page --per-test-info \
+    spruce:wg:'tmp/spruce_bandwidth_latest_input__full.yaml' \
+    spruce:wgpass:'tmp/spruce_bandwidth_latest_input__turbo.yaml' \
+    spruce:turcal:'tmp/spruce_bandwidth_latest_input__turcal.yaml' \
+    -s $SCRIPT_PATH
+
+# TODO: would also like to add the html-page error code, but it currently gives a non-zero code despite everything working fine
+# last_msg="{\"make_html_page\": {\"code\": $?} }"
+# echo $last_msg
+# jq --argjson last_msg "$last_msg" '. += $last_msg' tmp/Output/message.json > tmp/Output/tmp_message.json && mv tmp/Output/tmp_message.json tmp/Output/message.json
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
\ No newline at end of file
-- 
GitLab