diff --git a/python/MooreTests/combine-hlt2-rate-output.py b/python/MooreTests/combine-hlt2-rate-output.py
index dd3c700411c740c4f03c88cb955c2c9fab406b21..d0c685afdee4d67945afd5718823e5ce1c39183f 100644
--- a/python/MooreTests/combine-hlt2-rate-output.py
+++ b/python/MooreTests/combine-hlt2-rate-output.py
@@ -68,9 +68,21 @@ def rates_all_lines():
 def rates_all_lines_split_wg():
 
     with open('tmp/Output/line-rates-split-wg.html', 'w') as f:
-        for file in glob.glob(f'tmp/Output/Inter/rates-all-lines-wg-*.csv'):
-            stream = str(re.search("-(?!.*-)(.*).csv", file).group(1))
+        files = glob.glob('tmp/Output/Inter/rates-all-lines-wg-*.csv')
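+        # Map each stream name (the text after the last '-' in the filename) to its CSV file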
+        files_by_stream = {
+            str(re.search("-(?!.*-)(.*).csv", file).group(1)): file
+            for file in files
+        }
+        f.write('<head></head>\n<p>')
+        f.write('Jump to:\n<ul>')
+        for stream in files_by_stream.keys():
+            f.write(
+                f'<li><a href="#{stream}_label"> {stream.upper()}</a></li>')
+        f.write('</ul>\n</p>')
+
+        for stream, file in files_by_stream.items():
             f.write(f'<head>{stream.upper()}</head>')
+            f.write(f'<a id="{stream}_label">')
             df = pd.read_csv(file, header=None)
             df.columns = [
                 'Line', 'Total Retention (%)', 'Rate (kHz)',
@@ -78,7 +90,11 @@ def rates_all_lines_split_wg():
                 'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
                 'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
             ]
+            df = df.sort_values(
+                by=['Total Retention (%)'],
+                ascending=False).reset_index(drop=True)
             f.write(df.to_html())
+            f.write('</a>')
             f.write('<br/><br/>')
 
     return
@@ -87,10 +103,21 @@ def rates_all_lines_split_wg():
 def rates_all_lines_split_stream():
 
     with open('tmp/Output/line-rates-split-production.html', 'w') as f:
-        for file in glob.glob(
-                f'tmp/Output/Inter/rates-all-lines-production-*.csv'):
-            stream = str(re.search("-(?!.*-)(.*).csv", file).group(1))
+        files = glob.glob('tmp/Output/Inter/rates-all-lines-production-*.csv')
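+        # Map each stream name (the text after the last '-' in the filename) to its CSV file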
+        files_by_stream = {
+            str(re.search("-(?!.*-)(.*).csv", file).group(1)): file
+            for file in files
+        }
+        f.write('<head></head>\n<body>\n<p>')
+        f.write('Jump to:\n<ul>')
+        for stream in files_by_stream.keys():
+            f.write(
+                f'<li><a href="#{stream}_label"> {stream.upper()}</a></li>')
+        f.write('</ul>\n</p>')
+
+        for stream, file in files_by_stream.items():
             f.write(f'<head>{stream.upper()}</head>')
+            f.write(f'<a id="{stream}_label">')
             df = pd.read_csv(file, header=None)
             df.columns = [
                 'Line', 'Total Retention (%)', 'Rate (kHz)',
@@ -98,7 +125,11 @@ def rates_all_lines_split_stream():
                 'Avg Total Event Size (kB)', 'Total Bandwidth (GB/s)',
                 'Avg DstData Size (kB)', 'DstData Bandwidth (GB/s)'
             ]
+            df = df.sort_values(
+                by=['Total Retention (%)'],
+                ascending=False).reset_index(drop=True)
             f.write(df.to_html())
+            f.write('</a>')
             f.write('<br/><br/>')
 
     return
diff --git a/python/MooreTests/line-and-stream-rates.py b/python/MooreTests/line-and-stream-rates.py
index 1150f560c79e0e76ca82c79f475079b321924169..710ce4480090bd63112d03efafa1da8094342a63 100644
--- a/python/MooreTests/line-and-stream-rates.py
+++ b/python/MooreTests/line-and-stream-rates.py
@@ -22,6 +22,7 @@ import re
 import argparse
 import csv
 import os
+import yaml
 '''
 
     Run snippet with 'python line-rates.py and [1] <MDF file name> [2] <TCK config file name> [3] <JSON file name specifying configuration>'
@@ -56,6 +57,11 @@ RAW_BANK_TYPES = [(i, LHCb.RawBank.typeName(i))
                   for i in range(LHCb.RawBank.LastType)]
 
 
+def parse_yaml(file_path):
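+    """Load a YAML file, expanding any environment variables in its path."""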
+    with open(os.path.expandvars(file_path), 'r') as f:
+        return yaml.safe_load(f)
+
+
 def rawbank_sizes(rawevent, lst):
     """Return (name, size) for each raw bank type."""
     if rawevent:
@@ -239,6 +245,12 @@ if __name__ == '__main__':
         type=lambda x: int(round(float(x))),
         help='nb of events to process',
         required=True)
+    parser.add_argument(
+        '-c',
+        '--config',
+        type=str,
+        required=True,
+        help='Path to yaml config file defining the input.')
     parser.add_argument(
         '-r',
         '--rate',
@@ -258,8 +270,8 @@ if __name__ == '__main__':
         help='Stream configuration specified as JSON',
         required=True)
     parser.add_argument(
-        '-c',
-        '--config',
+        '-s',
+        '--stream-config',
         type=str,
         help='Choose production or per-WG stream configuration',
         choices=['production', 'wg'],
@@ -275,6 +287,8 @@ if __name__ == '__main__':
 
     n_events = args.events
 
+    input_config = parse_yaml(args.config)
+
     LHCbApp(
         DataType="Upgrade",
         Simulation=True,
@@ -319,13 +333,13 @@ if __name__ == '__main__':
         # Two conditions for HLT2 run:
         # Use production-stream config to compute rate/size/bandwidth per line and stream
         # Use wg-stream config to compute rate/size/bandwidth per line and stream
-        if args.config == 'production': configname = 'production'
-        elif args.config == 'wg': configname = 'wg'
+        if args.stream_config == 'production': configname = 'production'
+        elif args.stream_config == 'wg': configname = 'wg'
     elif args.process == 'Spruce':
         # Three conditions for Spruce run:
         # Use wg-stream config to compute rate/size/bandwidth per line
         # Use wg-stream config to compute rate/size/bandwidth per stream
-        if not args.config == 'wg': exit()
+        if not args.stream_config == 'wg': exit()
         configname = 'wg-stream'
 
     stream = str(re.search("-(?!.*-)(.*).mdf", file).group(
@@ -346,11 +360,11 @@ if __name__ == '__main__':
         dst,
         configname,
         stream,
-        input_rate=args.rate)
+        input_rate=input_config['input_rate'])
     rates_per_stream(
         evts_all,
         rawbanks_all,
         dst_all,
         configname,
         stream,
-        input_rate=args.rate)
+        input_rate=input_config['input_rate'])
diff --git a/python/MooreTests/make_bandwidth_test_page.py b/python/MooreTests/make_bandwidth_test_page.py
index 942811ea9b3cf0c938beffcd30bc7ef26e348ffd..72f2715422538258b1387c6bff05a3d4833a4973 100644
--- a/python/MooreTests/make_bandwidth_test_page.py
+++ b/python/MooreTests/make_bandwidth_test_page.py
@@ -12,6 +12,8 @@ import argparse
 import jinja2
 import matplotlib.pyplot as plt
 import pandas as pd
+import yaml
+import os
 
 plt.ioff()
 
@@ -25,11 +27,15 @@ REPORT_TEMPLATE = jinja2.Template("""
     slot.build_id: $$version$$<br>
     platform: $$platform$$<br>
     hostname: $$hostname$$<br>
-    cpu_info: $$cpu_info$$
+    cpu_info: $$cpu_info$$<br>
+    testing script path: {{SCRIPTPATH}}
 </p>
 <ul>
     <li><a href="{{WWW_BASE_URL}}/$$dirname$$/run.log">Logs</a></li>
 </ul>
+<p style="color:{{EXIT_CODE_COLOUR}}">
+    <b>{{EXIT_CODE_SENTENCE}}</b>
+</p>
 <p>
     Results per working group and stream:
     <ul>
@@ -46,7 +52,16 @@ REPORT_TEMPLATE = jinja2.Template("""
     <li>Descriptives (whether persistreco and/or extra outputs is enabled)</li>
     </ul>
 </p>
-<p> See: <a href="https://lbfence.cern.ch/alcm/public/figure/details/32">RTA Workflow</a> for reference figures regarding bandwidth.</p>
+<p> See: <a href="https://lbfence.cern.ch/alcm/public/figure/details/32">RTA & DPA Workflow</a> for reference figures regarding bandwidth.</p>
+<p>
+    Input sample information:
+    <ul>
+    <li>Config file: {{INPUT_CONFIG_PATH}}</li>
+    <li>Input rate: {{INPUT_RATE}} kHz</li>
+    <li>Number of interactions per bunch crossing (&#957;): {{INPUT_NU}}</li>
+    <li>Radius of VELO opening: {{INPUT_VELO_RADIUS}} mm</li>
+    </ul>
+</p>
 {{HLT2_OR_SPRUCE_TEMPLATE}}
 <p>
     Other results are shown by plots or tables (in the links) below. <br>
@@ -66,7 +81,6 @@ REPORT_TEMPLATE = jinja2.Template("""
     A line is considered to be "problematic" if it has a rate of 0 Hz
     or larger than 1 kHz, which requires some attention. <br>
     The rates of all lines are listed in a html page attached below. <br>
-    {{INPUT_RATE_SENTENCE}}
 </p>
 <object type="image/png" data="hist_dst_size.png"></object>
 <p>
@@ -518,6 +532,11 @@ def make_plots_per_wg_list(wg_list):
     return list_html_str
 
 
+def parse_yaml(file_path):
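+    """Load a YAML file, expanding any environment variables in its path."""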
+    with open(os.path.expandvars(file_path), 'r') as f:
+        return yaml.safe_load(f)
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='make_bandwidth_test_page')
     parser.add_argument(
@@ -534,13 +553,38 @@ if __name__ == '__main__':
         required=True,
         help='Which stage was the test run on')
     parser.add_argument(
-        '-r',
-        '--rate',
-        default=500,  # kHz
-        type=float,
-        help='Input rate corresponding to the input file in kHz')
+        '-c',
+        '--input-config',
+        type=str,
+        required=True,
+        help='Path to yaml config file defining the input.')
+    parser.add_argument(
+        '-s',
+        '--script-path',
+        type=str,
+        required=True,
+        help=
+        'Path to the top-level testing script that invoked this script.'
+    )
+    parser.add_argument(
+        '-e',
+        '--exit-code',
+        type=int,
+        required=True,
+        help="Cumulative exit code of all previous jobs.")
     args = parser.parse_args()
 
+    input_info = parse_yaml(args.input_config)
+
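+    # Summarise the cumulative exit code of the sub-jobs for the report page and message.txt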
+    if args.exit_code == 0:
+        exit_code_sentence = "All sub-jobs in this test exited successfully."
+        exit_code_bool = 1
+        exit_code_col = "green"
+    else:
+        exit_code_sentence = "There were errors in some of the sub-jobs of this test; please see the logs."
+        exit_code_bool = 0
+        exit_code_col = "red"
+
     # Read info of all lines
     df = pd.read_csv(f'{args.input}/rates-for-all-lines.csv', sep=',')
     number_of_lines = len(df)
@@ -596,7 +640,6 @@ if __name__ == '__main__':
             table_5stream_rates = rate_html.read()
         hlt2_or_spruce_template = HLT2_REPORT_TEMPLATE.render(
             WWW_BASE_URL=WWW_BASE_URL, table_5stream_rates=table_5stream_rates)
-        input_rate_sentence = f"The input rate to this job was {args.rate} kHz (output rate of Hlt1)."
     elif args.process == 'Sprucing':
         with open(f"{args.input}/rates-wg-stream-configuration.html",
                   "r") as rate_html:
@@ -604,13 +647,18 @@ if __name__ == '__main__':
         hlt2_or_spruce_template = SPRUCE_REPORT_TEMPLATE.render(
             WWW_BASE_URL=WWW_BASE_URL,
             table_wgstream_rates=table_wgstream_rates)
-        input_rate_sentence = f"The input rate to this job was {args.rate} kHz (output rate of Hlt2)."
 
     with open(f"{args.input}/index.html", "w") as html_file:
         html = REPORT_TEMPLATE.render(
+            SCRIPTPATH=args.script_path,
             WWW_BASE_URL=WWW_BASE_URL,
             HLT2_OR_SPRUCE_TEMPLATE=hlt2_or_spruce_template,
-            INPUT_RATE_SENTENCE=input_rate_sentence)
+            INPUT_CONFIG_PATH=os.path.expandvars(args.input_config),
+            INPUT_RATE=input_info['input_rate'],
+            INPUT_NU=input_info['nu'],
+            INPUT_VELO_RADIUS=input_info['velo_radial_opening'],
+            EXIT_CODE_SENTENCE=exit_code_sentence,
+            EXIT_CODE_COLOUR=exit_code_col)
         html_file.write(html)
 
     with open(f"{args.input}/other_lines.html", "w") as html_file:
@@ -691,6 +739,9 @@ if __name__ == '__main__':
                 html_file.write(rate_html.read())
 
         with open(f"{args.input}/message.txt", "w") as message:
+            message.write(
+                f'all_jobs_successful_bool = {exit_code_bool}\n'
+            )
             message.write(f'total_rate = {tot_rate:.2f} kHz\n')
             message.write(f'total_bandwidth = {tot_bandwidth:.2f} GB/s\n')
             message.write(f'n_low_rate = {n_low_rate:d}\n')
diff --git a/python/MooreTests/run_bandwidth_test_jobs.py b/python/MooreTests/run_bandwidth_test_jobs.py
index 40d7a669959bda4f34542339ff51305abce3a0d7..5151e35a7dd922e74d732808e8a0cb0491ca8605 100644
--- a/python/MooreTests/run_bandwidth_test_jobs.py
+++ b/python/MooreTests/run_bandwidth_test_jobs.py
@@ -50,8 +50,8 @@ def is_remote(url):
     return url.startswith('mdf:root:') or url.startswith('root:')
 
 
-def parse_yaml(f):
-    with open(os.path.expandvars(args.config), 'r') as f:
+def parse_yaml(file_path):
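+    """Load a YAML file, expanding any environment variables in its path."""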
+    with open(os.path.expandvars(file_path), 'r') as f:
         return yaml.safe_load(f)
 
 
@@ -148,8 +148,10 @@ if __name__ == '__main__':
         format='%(levelname)-7s %(message)s',
         level=(logging.DEBUG if args.debug else logging.INFO))
 
-    # run at most 1e5 events for each test
-    if args.events == -1 or args.events > 1e5: args.events = 1e5
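+    # Fail fast instead of silently capping the event count as before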
+    if args.events == -1 or args.events > 1e5:
+        raise RuntimeError(
+            "The BW tests are limited to 1e5 events to keep them to a reasonable runtime. Please re-configure"
+        )
 
     if args.evtSlots is None:
         args.evtSlots = max(int(round(1.2 * args.threads)), 1 + args.threads)
diff --git a/scripts/benchmark-scripts/Moore_hlt2_2023_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt2_2023_bandwidth.sh
index 462986b7d4e70e417d0d56acfe70fccc0f2ae5b9..200c8ab2ee9ea3b00cb5886203948a6d2ce77d72 100755
--- a/scripts/benchmark-scripts/Moore_hlt2_2023_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_2023_bandwidth.sh
@@ -13,6 +13,10 @@
 
 # Run with Moore/run /path/to/Moore_hlt2_2023_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
 # Output (MDFs, html pages, plots of rate/bandwidth etc. will be written to ./tmp/)
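+# STORE_ERR_CODE adds the exit status of the previous command to ERR_CODE,
+# so an overall pass/fail can be passed to the report page at the end.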
+ERR_CODE=0
+function STORE_ERR_CODE () {
+    ERR_CODE=$(( $? + $ERR_CODE))
+}
 
 export MOORE_THREADS=$(nproc)
 export EVTS_STREAMLESS=1e5 # Used for both streamless and 16-stream (per WG) config
@@ -20,7 +24,6 @@ export EVTS_PRODUCTION=1e5
 test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
 config_file='hlt2_bandwidth_input_2023.yaml'
 event_size_upper_limit=200
-export HLT1_OUTPUT_RATE=1e3 # in kHz
 
 mkdir -p tmp/MDF
 mkdir -p tmp/Output
@@ -31,62 +34,76 @@ mkdir -p tmp/Output/Inter
 # Options for CALO decoding and old muon geometry are not required.
 echo 'Running trigger to obtain MDF files for comparison for 2023 conditions'
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_streamless.py'  # No streaming
+STORE_ERR_CODE
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_PRODUCTION -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_production_streams.py' # Turbo/Turcal/Full/Monitoring/IFT
+STORE_ERR_CODE
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d ${test_path_prefix}'hlt2_bandwidth_input_2023_extra_opts.py' ${test_path_prefix}'hlt2_bandwidth_wg_streams.py' # Streaming per module
+STORE_ERR_CODE
 
 # 2. Compute line descriptives: persist reco, extra output
 echo 'Obtaining line descriptives'
 time gaudirun.py $PRCONFIGROOT/python/MooreTests/line-descriptives.py
+STORE_ERR_CODE
 
 # 3. Compute similarity matrix for all lines based on streamless file
 echo 'Obtaining similarity matrix and rates for all lines computed using streamless MDF file'
 time python $PRCONFIGROOT/python/MooreTests/line-similarity.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json
+STORE_ERR_CODE
 
 # 4. Compute similarity matrix including each production stream
 echo 'Obtaining similarity matrix for production stream configuration'
 time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-production-stream-config.json -c production
+STORE_ERR_CODE
 
 # 5. Compute similarity matrix including each working-group
 echo 'Obtaining similarity matrix for WG-stream configuration'
 time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c wg
+STORE_ERR_CODE
 
 # 6. Computing rates per stream as well as per line (tables split by stream)
 echo 'Obtaining rates and bandwidth for 5-stream configuration'
 for stream in turbo full turcal; do
     echo "Stream name: ${stream}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_PRODUCTION -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -c 'production'
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c ${test_path_prefix}${config_file} -n $EVTS_PRODUCTION -p Hlt2 -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -s 'production'
+    STORE_ERR_CODE
 done
 
 # 7. Computing rates per WG as well as per line (tables split by WG)
 echo 'Obtaining rates and bandwidth for WG-stream configuration'
 for module in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
     echo "Stream name: ${module}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMLESS -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c 'wg'
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c ${test_path_prefix}${config_file} -n $EVTS_STREAMLESS -p Hlt2 -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -s 'wg'
+    STORE_ERR_CODE
 done
 
 # 8. Combine all output into tables
 echo 'Combining all tables from previous two sets of jobs'
 time python $PRCONFIGROOT/python/MooreTests/combine-hlt2-rate-output.py
+STORE_ERR_CODE
 
 # 9. Check filesize of baseline models in terms of events and bytes
 # Can be run on any manifest/MDF by changing the command-line arguments
 
 echo 'Computing size for baseline models'
 time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -o tmp/Output/filesize.txt
+STORE_ERR_CODE
 
 echo 'Computing filesizes for production stream allocation'
 for strm in full turbo turcal; do
    time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-production-"${strm}".mdf -t tmp/MDF/baseline-production-streams.tck.json -o tmp/Output/filesize.txt
+   STORE_ERR_CODE
 done
 
 echo 'Computing filesizes for per-WG stream allocation'
 for strm in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
    time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-wg-"${strm}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -o tmp/Output/filesize.txt
+   STORE_ERR_CODE
 done
 
 # 10. Produce plots and HTML pages
 echo 'Making plots and HTML pages'
-time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -r $HLT1_OUTPUT_RATE
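+# Absolute path of this script; it is shown on the generated page for traceability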
+script_path="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
+time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -c ${test_path_prefix}${config_file} -s ${script_path} -e $ERR_CODE
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
diff --git a/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh b/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
index f123a2bf446ad929988b89fb461ecb80d2c3515a..c5ceb57f29f45753289911252ceddb42eafe5097 100755
--- a/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_hlt2_bandwidth.sh
@@ -13,6 +13,10 @@
 
 # Run with Moore/run /path/to/Moore_hlt2_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
 # Output (MDFs, html pages, plots of rate/bandwidth etc. will be written to ./tmp/)
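+# STORE_ERR_CODE adds the exit status of the previous command to ERR_CODE,
+# so an overall pass/fail can be passed to the report page at the end.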
+ERR_CODE=0
+function STORE_ERR_CODE () {
+    ERR_CODE=$(( $? + $ERR_CODE))
+}
 
 export MOORE_THREADS=$(nproc)
 export EVTS_STREAMLESS=1e5 # Used for both streamless and 16-stream (per WG) config
@@ -20,7 +24,6 @@ export EVTS_PRODUCTION=1e5
 test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
 config_file='hlt2_bandwidth_input_nominal_with_gec.yaml'
 event_size_upper_limit=200
-export HLT1_OUTPUT_RATE=500 # in kHz
 
 mkdir -p tmp/MDF
 mkdir -p tmp/Output
@@ -30,62 +33,76 @@ mkdir -p tmp/Output/Inter
 # -d downloads the input files locally for speed-up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
 echo 'Running trigger to obtain MDF files for comparison'
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_streamless.py' # No streaming
+STORE_ERR_CODE
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_PRODUCTION -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_production_streams.py' # Turbo/Turcal/Full/Monitoring/IFT
+STORE_ERR_CODE
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=$event_size_upper_limit -d '$MOOREROOT/options/calo_decoding_packed.py' '$MOOREROOT/options/muon_geometry_v2.py' ${test_path_prefix}'hlt2_bandwidth_wg_streams.py' # Streaming per module
+STORE_ERR_CODE
 
 # 2. Compute line descriptives: persist reco, extra output
 echo 'Obtaining line descriptives'
 time gaudirun.py $PRCONFIGROOT/python/MooreTests/line-descriptives.py
+STORE_ERR_CODE
 
 # 3. Compute similarity matrix for all lines based on streamless file
 echo 'Obtaining similarity matrix and rates for all lines computed using streamless MDF file'
 time python $PRCONFIGROOT/python/MooreTests/line-similarity.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json
+STORE_ERR_CODE
 
 # 4. Compute similarity matrix including each production stream
 echo 'Obtaining similarity matrix for production stream configuration'
 time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-production-stream-config.json -c production
+STORE_ERR_CODE
 
 # 5. Compute similarity matrix including each working-group
 echo 'Obtaining similarity matrix for WG-stream configuration'
 time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c wg
+STORE_ERR_CODE
 
 # 6. Computing rates per stream as well as per line (tables split by stream)
 echo 'Obtaining rates and bandwidth for 5-stream configuration'
 for stream in turbo full turcal; do
     echo "Stream name: ${stream}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_PRODUCTION -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -c 'production'
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c ${test_path_prefix}${config_file} -n $EVTS_PRODUCTION -p Hlt2 -i tmp/MDF/baseline-production-"${stream}".mdf -t tmp/MDF/baseline-production-streams.tck.json -j tmp/Output/hlt2-production-stream-config.json -s 'production'
+    STORE_ERR_CODE
 done
 
 # 7. Computing rates per WG as well as per line (tables split by WG)
 echo 'Obtaining rates and bandwidth for WG-stream configuration'
 for module in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
     echo "Stream name: ${module}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMLESS -p Hlt2 -r $HLT1_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -c 'wg'
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c ${test_path_prefix}${config_file} -n $EVTS_STREAMLESS -p Hlt2 -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/hlt2-wg-stream-config.json -s 'wg'
+    STORE_ERR_CODE
 done
 
 # 8. Combine all output into tables
 echo 'Combining all tables from previous two sets of jobs'
 time python $PRCONFIGROOT/python/MooreTests/combine-hlt2-rate-output.py
+STORE_ERR_CODE
 
 # 9. Check filesize of baseline models in terms of events and bytes
 # Can be run on any manifest/MDF by changing the command-line arguments
 
 echo 'Computing size for baseline models'
 time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -o tmp/Output/filesize.txt
+STORE_ERR_CODE
 
 echo 'Computing filesizes for production stream allocation'
 for strm in full turbo turcal; do
    time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-production-"${strm}".mdf -t tmp/MDF/baseline-production-streams.tck.json -o tmp/Output/filesize.txt
+   STORE_ERR_CODE
 done
 
 echo 'Computing filesizes for per-WG stream allocation'
 for strm in b_to_open_charm b_to_charmonia bandq bnoc c_to_dimuon charm dilepton pid qee rd slepton topo_b trackeff monitoring ift; do
    time python -m MooreTests.filesize -p Hlt2 -i tmp/MDF/baseline-wg-"${strm}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -o tmp/Output/filesize.txt
+   STORE_ERR_CODE
 done
 
 # 10. Produce plots and HTML pages
 echo 'Making plots and HTML pages'
-time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -r $HLT1_OUTPUT_RATE
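+# Absolute path of this script; it is shown on the generated page for traceability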
+script_path="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
+time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Hlt2 -c ${test_path_prefix}${config_file} -s ${script_path} -e $ERR_CODE
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
\ No newline at end of file
diff --git a/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh b/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
index 7929d18a91e3c109159b66ed04aa0fa6f31fd511..4029e22a286ecdc695ac7c308cba18d6046d9f8d 100755
--- a/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
+++ b/scripts/benchmark-scripts/Moore_spruce_bandwidth.sh
@@ -12,13 +12,17 @@
 ###############################################################################
 
 # Run with Moore/run /path/to/Moore_spruce_bandwidth.sh 2>&1 | tee <path-for-output.txt> to collect all output as a log file
+# Output (MDFs, html pages, plots of rate/bandwidth etc.) will be written to ./tmp/
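+# STORE_ERR_CODE adds the exit status of the previous command to ERR_CODE,
+# so an overall pass/fail can be passed to the report page at the end.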
+ERR_CODE=0
+function STORE_ERR_CODE () {
+    ERR_CODE=$(( $? + $ERR_CODE))
+}
 
 export MOORE_THREADS=$(nproc)
 export EVTS_STREAMLESS=1e4 # Used for both streamless and 16-stream (per WG) config
 export EVTS_STREAMED=1e5
 test_path_prefix='$HLT2CONFROOT/tests/options/bandwidth/'
 config_file='spruce_bandwidth_input.yaml'
-export HLT2_OUTPUT_RATE=124 # in kHz, upgrade_minbias_hlt1_filtered HLT1 output rate (1.65 MHz - see MooreAnalysis#42) * HLT2 retention of file in spruce_bandwidth_input.yaml (~7.5%)
 
 mkdir -p tmp/MDF
 mkdir -p tmp/Output
@@ -28,46 +32,57 @@ mkdir -p tmp/Output/Inter
 # -d downloads the input files locally for speed-up running Moore. Not helpful unless that download is fast for you (e.g. you're at CERN)
 echo 'Running trigger to obtain MDF files for comparison'
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -d -n=$EVTS_STREAMLESS -t=$MOORE_THREADS -a=300 ${test_path_prefix}'spruce_bandwidth_streamless.py' # No streaming
+STORE_ERR_CODE
 time python -m MooreTests.run_bandwidth_test_jobs -c=${test_path_prefix}${config_file} -d -n=$EVTS_STREAMED -t=$MOORE_THREADS -a=300 ${test_path_prefix}'spruce_bandwidth_wg_streams.py' # One stream per WG
+STORE_ERR_CODE
 
 # 2. Compute line descriptives: persist reco, extra output
 echo 'Obtaining line descriptives'
 time gaudirun.py --option 'from Moore import options;options.input_process="Spruce"' $PRCONFIGROOT/python/MooreTests/line-descriptives.py
+STORE_ERR_CODE
 
 # 3. Compute similarity matrix for all lines based on streamless file
 echo 'Obtaining similarity matrix and rates for all lines computed using streamless MDF file'
 time python $PRCONFIGROOT/python/MooreTests/line-similarity.py -p Spruce -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json
+STORE_ERR_CODE
 
 # 4. Compute similarity matrix + rate and bandwidth calculation custom stream configurations (currently running per-WG stream configuration)
 echo 'Writing JSON files for custom stream configurations'
 time python $PRCONFIGROOT/python/MooreTests/spruce-stream-configs.py
+STORE_ERR_CODE
 
 echo 'Obtaining similarity matrix for wg-stream configuration'
 time python $PRCONFIGROOT/python/MooreTests/stream-overlap.py -p Spruce -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -j tmp/Output/wg-stream-config.json -c wg
+STORE_ERR_CODE
 
 echo 'Obtaining rates and bandwidth for wg-stream configuration'
 for module in b_to_open_charm rd bandq charm qee charm b_to_charmonia slepton c_to_dimuon bnoc; do
     echo "Stream name: ${module}"
-    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -n $EVTS_STREAMED -p Spruce -r $HLT2_OUTPUT_RATE -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/wg-stream-config.json -c wg
+    time python $PRCONFIGROOT/python/MooreTests/line-and-stream-rates.py -c ${test_path_prefix}${config_file} -n $EVTS_STREAMED -p Spruce -i tmp/MDF/baseline-wg-"${module}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -j tmp/Output/wg-stream-config.json -s wg
+    STORE_ERR_CODE
 done
 
 echo 'Combining all tables from previous two jobs'
 time python $PRCONFIGROOT/python/MooreTests/combine-spruce-rate-output.py
+STORE_ERR_CODE
 
 # 5. Check filesize of baseline models in terms of events and bytes
 # Can be run on any manifest/MDF by changing the command-line arguments
 
 echo 'Computing size for baseline models'
 time python -m MooreTests.filesize -p Spruce -i tmp/MDF/baseline-streamless-all.mdf -t tmp/MDF/baseline-streamless-all.tck.json -o tmp/Output/filesize.txt
+STORE_ERR_CODE
 
 echo 'Computing filesizes for per-WG stream allocation'
 for strm in b_to_open_charm rd bandq charm qee charm b_to_charmonia slepton c_to_dimuon bnoc; do
    time python -m MooreTests.filesize -p Spruce -i tmp/MDF/baseline-wg-"${strm}".mdf -t tmp/MDF/baseline-wg-streams.tck.json -o tmp/Output/filesize.txt
+   STORE_ERR_CODE
 done
 
 # 6. Produce plots and HTML pages
 echo 'Making plots and HTML pages'
-time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Sprucing -r $HLT2_OUTPUT_RATE
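+# Absolute path of this script; it is shown on the generated page for traceability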
+script_path="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/$(basename "$0")"
+time python -m MooreTests.make_bandwidth_test_page -i tmp/Output -p Sprucing -c ${test_path_prefix}${config_file} -s ${script_path} -e $ERR_CODE
 
 # force 0 return code so the handler runs even for failed jobs
 exit 0
\ No newline at end of file