diff --git a/Hlt/Hlt2Conf/options/hlt2_line_example.py b/Hlt/Hlt2Conf/options/hlt2_line_example.py
deleted file mode 100644
index 7fed664d919d5dfb43ff0112bad09a7741ad6aed..0000000000000000000000000000000000000000
--- a/Hlt/Hlt2Conf/options/hlt2_line_example.py
+++ /dev/null
@@ -1,164 +0,0 @@
-###############################################################################
-# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration           #
-#                                                                             #
-# This software is distributed under the terms of the GNU General Public      #
-# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
-#                                                                             #
-# In applying this licence, CERN does not waive the privileges and immunities #
-# granted to it by virtue of its status as an Intergovernmental Organization  #
-# or submit itself to any jurisdiction.                                       #
-###############################################################################
-"""Define HLT2 line for ``Lambda_b0 -> Lambda_c+ pi+``.
-
-With ``Lambda_c+ -> p+ K- pi+``.
-
-An example input file and Moore configuration is also given at the bottom of
-this file, so that it can be run as-is.
-"""
-import Functors as F
-from Functors.math import in_range
-from GaudiKernel.SystemOfUnits import GeV, MeV, mm
-
-from Moore.config import register_line_builder
-from Moore.lines import Hlt2Line
-from RecoConf.reconstruction_objects import (
-    make_pvs_v2 as make_pvs,
-    upfront_reconstruction,
-)
-# For code inside files under Hlt2Conf/python/lines you should reference these
-# two modules using relative imports:
-#
-#     from ..standard_particles import make_has_rich_long_kaons
-from Hlt2Conf.standard_particles import (
-    make_has_rich_long_kaons,
-    make_has_rich_long_pions,
-    make_has_rich_long_protons,
-)
-from Hlt2Conf.algorithms_thor import ParticleCombiner, ParticleFilter, require_all
-
-all_lines = {}
-
-
-def filter_protons(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9,
-                   dllp_min=5):
-    cut = require_all(
-        F.PT > pt_min,
-        F.MINIPCHI2(pvs) > mipchi2_min,
-        F.PID_P > dllp_min,
-    )
-    return ParticleFilter(particles, F.FILTER(cut))
-
-
-def filter_kaons(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9, dllk_min=5):
-    cut = require_all(
-        F.PT > pt_min,
-        F.MINIPCHI2(pvs) > mipchi2_min,
-        F.PID_K > dllk_min,
-    )
-    return ParticleFilter(particles, F.FILTER(cut))
-
-
-def filter_pions(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9, dllk_max=5):
-    cut = require_all(
-        F.PT > pt_min,
-        F.MINIPCHI2(pvs) > mipchi2_min,
-        F.PID_K < dllk_max,
-    )
-    return ParticleFilter(particles, F.FILTER(cut))
-
-
-def make_lambdacs(protons,
-                  kaons,
-                  pions,
-                  pvs,
-                  two_body_comb_maxdocachi2=9.0,
-                  comb_m_min=2080 * MeV,
-                  comb_m_max=2480 * MeV,
-                  comb_pt_min=2000 * MeV,
-                  comb_maxdoca=0.1 * mm,
-                  vchi2pdof_max=10,
-                  bpvvdchi2_min=25):
-    two_body_combination_code = F.MAXDOCACHI2CUT(two_body_comb_maxdocachi2)
-    combination_code = require_all(
-        in_range(comb_m_min, F.MASS, comb_m_max),
-        F.SUM(F.PT) > comb_pt_min,
-        F.MAXDOCACUT(comb_maxdoca),
-    )
-    vertex_code = require_all(
-        F.CHI2DOF < vchi2pdof_max,
-        F.BPVFDCHI2(pvs) > bpvvdchi2_min,
-    )
-    return ParticleCombiner(
-        [protons, kaons, pions],
-        DecayDescriptor="[Lambda_c+ -> p+ K- pi+]cc",
-        Combination12Cut=two_body_combination_code,
-        CombinationCut=combination_code,
-        CompositeCut=vertex_code,
-    )
-
-
-def make_lambdabs(lcs,
-                  pions,
-                  pvs,
-                  comb_m_min=5000 * MeV,
-                  comb_m_max=7000 * MeV,
-                  comb_pt_min=4000 * MeV,
-                  comb_maxdoca=0.1 * mm,
-                  vchi2pdof_max=10,
-                  bpvvdchi2_min=25):
-    combination_code = require_all(
-        in_range(comb_m_min, F.MASS, comb_m_max),
-        F.SUM(F.PT) > comb_pt_min,
-        F.MAXDOCACUT(comb_maxdoca),
-    )
-    vertex_code = require_all(
-        F.CHI2DOF < vchi2pdof_max,
-        F.BPVFDCHI2(pvs) > bpvvdchi2_min,
-    )
-    return ParticleCombiner(
-        [lcs, pions],
-        DecayDescriptor="[Lambda_b0 -> Lambda_c+ pi-]cc",
-        CombinationCut=combination_code,
-        CompositeCut=vertex_code,
-    )
-
-
-@register_line_builder(all_lines)
-def lbtolcpi_lctopkpi_line(name="Hlt2LbToLcpPim_LcToPpKmPipLine", prescale=1):
-    pvs = make_pvs()
-    protons = filter_protons(make_has_rich_long_protons(), pvs)
-    kaons = filter_kaons(make_has_rich_long_kaons(), pvs)
-    pions = filter_pions(make_has_rich_long_pions(), pvs)
-    lcs = make_lambdacs(protons, kaons, pions, pvs)
-    lbs = make_lambdabs(lcs, pions, pvs)
-
-    return Hlt2Line(
-        name=name,
-        algs=upfront_reconstruction() + [lbs],
-        prescale=prescale,
-    )
-
-
-# Moore configuration
-from Moore import options, run_moore
-
-# In a normal options file, we would import the line from Hlt2Conf where it is
-# defined
-# from Hlt2Conf.lines.LbToLcPi import lbtolcpi_lctopkpi_line
-
-# Temporary workaround for TrackStateProvider
-from RecoConf.global_tools import stateProvider_with_simplified_geom
-public_tools = [stateProvider_with_simplified_geom()]
-
-
-def all_lines():
-    return [lbtolcpi_lctopkpi_line()]
-
-
-options.set_input_and_conds_from_testfiledb('Upgrade_MinBias_LDST')
-options.input_raw_format = 4.3
-options.evt_max = 100
-options.control_flow_file = 'control_flow.gv'
-options.data_flow_file = 'data_flow.gv'
-
-run_moore(options, all_lines, public_tools)
diff --git a/Hlt/Hlt2Conf/options/run_hlt2_line_example.py b/Hlt/Hlt2Conf/options/run_hlt2_line_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d2133cd7279670b7686467fd94dd641af1c7562
--- /dev/null
+++ b/Hlt/Hlt2Conf/options/run_hlt2_line_example.py
@@ -0,0 +1,46 @@
+###############################################################################
+# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+# Moore configuration
+from Moore import options, run_moore
+
+# For production modules, we would use
+# from Hlt2Conf.lines.<wg>.<module> import <line>
+from Hlt2Conf.lines.hlt2_line_example import (all_lines, lb0tolcpmum_line)
+
+from GaudiKernel.SystemOfUnits import MeV
+
+# Temporary workaround for TrackStateProvider
+from RecoConf.global_tools import stateProvider_with_simplified_geom
+public_tools = [stateProvider_with_simplified_geom()]
+
+options.set_input_and_conds_from_testfiledb('Upgrade_MinBias_LDST')
+options.input_raw_format = 4.3
+options.evt_max = 100
+options.control_flow_file = 'control_flow.gv'
+options.data_flow_file = 'data_flow.gv'
+
+# We have long names for algorithms and would like to see them in full glory
+options.msg_svc_format = "% F%56W%S%7W%R%T %0W%M"
+
+
+def make_lines():
+    standard_lines = [line_builder() for line_builder in all_lines.values()]
+
+    # This is to demonstrate how `configurable`/`bind` works. We could also pass the arguments directly to lb0tolcpmum_line().
+    with lb0tolcpmum_line.bind(
+            name="Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Pip_pt450MeV_Line",
+            pi_pt_min=450 * MeV):
+        modified_line = lb0tolcpmum_line()
+
+    return standard_lines + [modified_line]
+
+
+run_moore(options, make_lines, public_tools)
diff --git a/Hlt/Hlt2Conf/options/run_starterkit_bs_to_jpsiphi.py b/Hlt/Hlt2Conf/options/run_starterkit_bs_to_jpsiphi.py
new file mode 100644
index 0000000000000000000000000000000000000000..9eeb97e9468667893707ac274111f23573f4bd6d
--- /dev/null
+++ b/Hlt/Hlt2Conf/options/run_starterkit_bs_to_jpsiphi.py
@@ -0,0 +1,73 @@
+###############################################################################
+# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+With a stack set up, run as
+Moore/run gaudirun.py '$HLT2CONFROOT/options/run_starterkit_bs_to_jpsiphi.py' 2>&1 | tee starterkit_bs_to_jpsiphi_minbias.log
+"""
+from Moore import options, run_moore
+
+# For production modules, we would use
+# from Hlt2Conf.lines.<wg>.<module> import <line>
+from Hlt2Conf.lines.starterkit import all_lines
+
+from RecoConf.global_tools import stateProvider_with_simplified_geom
+from RecoConf.hlt1_tracking import default_ft_decoding_version
+from RecoConf.reconstruction_objects import reconstruction
+from Moore.tcks import dump_hlt2_configuration
+from RecoConf.hlt2_global_reco import reconstruction as hlt2_reconstruction, make_fastest_reconstruction
+
+# Minimum bias means that collisions are simulated as they would happen during data-taking and all events are kept.
+# This is contrary to generator settings in which a certain decay is required to happen, as in our signal MC.
+minbias = True
+
+if minbias:
+    default_ft_decoding_version.global_bind(value=2)
+    # hlt1_filtered means that only events that passed hlt1 have been persisted in this minbias sample.
+    options.set_input_and_conds_from_testfiledb(
+        'upgrade_minbias_hlt1_filtered')
+    options.input_raw_format = 4.3
+else:
+    default_ft_decoding_version.global_bind(value=6)
+    options.input_type = "ROOT"
+    options.input_files = [
+        f"root://eoslhcb.cern.ch//eos/lhcb/grid/prod/lhcb/MC/Upgrade/XDIGI/00143675/0000/00143675_000000{i}_1.xdigi"
+        for i in [
+            "15", "26", "37", "34", "41", "44", "47", "43", "46", "49", "45",
+            "51", "55", "56", "57", "59", "61", "63", "65", "66", "70", "76",
+            "79", "81", "82", "85", "88", "91", "96", "97", "98", "99", "64",
+            "23", "95", "33", "84", "48", "80", "87", "54"
+        ]
+    ]
+    options.conddb_tag = "sim-20210617-vc-md100"
+    options.dddb_tag = "dddb-20210617"
+    # We only need to register an output file when running on signal MC, as we don't expect to find anything in minbias.
+    options.output_file = "hlt2_starterkit_bs_to_jpsiphi.dst"
+    options.output_type = "ROOT"
+
+options.evt_max = 3600
+options.print_freq = 360
+options.input_type = "ROOT"
+options.msg_svc_format = "% F%56W%S%7W%R%T %0W%M"
+
+
+def get_lines():
+    return [line_builder() for line_builder in all_lines.values()]
+
+
+public_tools = [stateProvider_with_simplified_geom()]
+
+if minbias:
+    run_moore(options, get_lines, public_tools)
+else:
+    with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(
+            make_reconstruction=make_fastest_reconstruction):
+        config = run_moore(options, get_lines, public_tools)
+    dump_hlt2_configuration(config, "hlt2_starterkit_bs_to_jpsiphi.tck.json")
diff --git a/Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b870aa500528c7b0713c3c663e651637ad444a9
--- /dev/null
+++ b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py
@@ -0,0 +1,189 @@
+###############################################################################
+# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Define HLT2 lines for
+``Lambda_b0 -> Lambda_c+ pi-``       Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line
+``Lambda_b0 -> Lambda_c+ mu- (nu)``  Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Line
+Both use ``Lambda_c+ -> p+ K- pi+``.
+
+With a stack set up, run as
+Moore/run gaudirun.py '$HLT2CONFROOT/options/run_hlt2_line_example.py'
+"""
+import Functors as F
+from Functors.math import in_range
+from GaudiKernel.SystemOfUnits import (GeV, MeV, mm)
+
+from Moore.config import register_line_builder
+from Moore.lines import Hlt2Line
+from RecoConf.reconstruction_objects import (
+    make_pvs_v2 as make_pvs,
+    upfront_reconstruction,
+)
+from RecoConf.hlt1_tracking import require_pvs
+# For code inside files under Hlt2Conf/python/lines you should reference
+# the following modules using relative imports:
+#     from ...standard_particles import (...)
+#     from ...algorithms_thor
+# We don't use relative imports here since this module is used to build the documentation.
+from Hlt2Conf.standard_particles import (
+    make_has_rich_long_kaons,
+    make_has_rich_long_pions,
+    make_has_rich_long_protons,
+    make_ismuon_long_muon,
+)
+from Hlt2Conf.algorithms_thor import (ParticleCombiner, ParticleFilter,
+                                      require_all)
+from PyConf import configurable
+
+
+# We define the basic building blocks for this module, following
+# https://lhcbdoc.web.cern.ch/lhcbdoc/moore/master/tutorials/hlt2_line.html#code-design-guidelines
+def _protons_for_charm():
+    pvs = make_pvs()
+    cut = require_all(
+        F.PT > 0.5 * GeV,
+        F.MINIPCHI2(pvs) > 9.,
+        F.PID_P > 5.,
+    )
+    return ParticleFilter(
+        make_has_rich_long_protons(),
+        F.FILTER(cut),
+        # We would usually not give a name to ParticleFilter. This is for development purposes.
+        name="Tutorial_protons_for_charm",
+    )
+
+
+def _kaons_for_charm():
+    pvs = make_pvs()
+    cut = require_all(
+        F.PT > 0.5 * GeV,
+        F.MINIPCHI2(pvs) > 9.,
+        F.PID_K > 5.,
+    )
+    return ParticleFilter(
+        make_has_rich_long_kaons(),
+        F.FILTER(cut),
+        # We would usually not give a name to ParticleFilter. This is for development purposes.
+        name="Tutorial_kaons_for_charm",
+    )
+
+
+# For development purposes, we want to study the pT cut.
+# Once tuned, it should be moved inside the function body.
+def _pions_for_charm_and_beauty(pt_min=0.5 * GeV):
+    pvs = make_pvs()
+    cut = require_all(
+        F.PT > pt_min,
+        F.MINIPCHI2(pvs) > 9.,
+        # PID_X is a likelihood ratio of X w.r.t. the pion hypothesis. PID_PI is 0 by definition.
+        F.PID_K < 5.,
+    )
+    return ParticleFilter(
+        make_has_rich_long_pions(),
+        F.FILTER(cut),
+        # We would usually not give a name to ParticleFilter. This is for development purposes.
+        name="Tutorial_pions_for_charm_and_beauty",
+    )
+
+
+def _make_lambdacs_for_beauty(protons, kaons, pions, pvs):
+    two_body_combination_code = require_all(
+        F.MAXDOCACHI2CUT(9.), F.MAXDOCACUT(0.1 * mm))
+    combination_code = require_all(
+        in_range(2080 * MeV, F.MASS, 2480 * MeV),  # mass of the combination
+        F.PT > 1.4 * GeV,  # pT of the 3-track combination
+        F.SUM(F.PT) > 2 * GeV,
+        F.MAXDOCACHI2CUT(9.),
+        F.MAXDOCACUT(0.1 * mm),
+    )
+    vertex_code = require_all(
+        in_range(2100 * MeV, F.MASS, 2460 * MeV),  # mass after the vertex fit
+        F.PT > 1.6 * GeV,  # pT after the vertex fit
+        F.CHI2DOF < 10.,
+        F.BPVFDCHI2(pvs) > 25.,
+    )
+    return ParticleCombiner(
+        [protons, kaons, pions],
+        DecayDescriptor="[Lambda_c+ -> p+ K- pi+]cc",
+        name="Tutorial_Lcp_Combiner",
+        Combination12Cut=two_body_combination_code,
+        CombinationCut=combination_code,
+        CompositeCut=vertex_code,
+    )
+
+
+def _make_lambdabs(lcps, bachelors, pvs, decay_descriptor):
+    combination_code = require_all(
+        in_range(5 * GeV, F.MASS, 7 * GeV),
+        F.SUM(F.PT) > 4 * GeV,
+        F.MAXDOCACUT(0.1 * mm),
+    )
+    vertex_code = require_all(
+        F.CHI2DOF < 10.,
+        F.BPVFDCHI2(pvs) > 16.,
+    )
+    return ParticleCombiner(
+        [lcps, bachelors],
+        name="Tutorial_Lb0_Combiner",
+        DecayDescriptor=decay_descriptor,
+        CombinationCut=combination_code,
+        CompositeCut=vertex_code,
+    )
+
+
+all_lines = {}
+
+
+@register_line_builder(all_lines)
+def lb0_to_lcpim_line(name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line",
+                      prescale=1):
+    pvs = make_pvs()
+    protons = _protons_for_charm()
+    kaons = _kaons_for_charm()
+    pions = _pions_for_charm_and_beauty()
+    lcs = _make_lambdacs_for_beauty(protons, kaons, pions, pvs)
+    lbs = _make_lambdabs(lcs, pions, pvs, "[Lambda_b0 -> Lambda_c+ pi-]cc")
+
+    return Hlt2Line(
+        name=name,
+        algs=upfront_reconstruction() + [require_pvs(pvs), lbs],
+        # Passing `prescale` to `Hlt2Line` is optional; the default value is 1.
+        prescale=prescale,
+    )
+
+
+# The order of decorators matters!
+@register_line_builder(all_lines)
+@configurable
+def lb0tolcpmum_line(name="Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Line",
+                     pi_pt_min=0.5 * GeV):
+    pvs = make_pvs()
+    protons = _protons_for_charm()
+    kaons = _kaons_for_charm()
+    pions = _pions_for_charm_and_beauty(pi_pt_min)
+    lcs = _make_lambdacs_for_beauty(protons, kaons, pions, pvs)
+    muon_cut = require_all(
+        F.PT > 1 * GeV,
+        F.MINIPCHI2(pvs) > 9.,
+        F.PID_MU > 0.,
+    )
+    muons = ParticleFilter(
+        make_ismuon_long_muon(),
+        F.FILTER(muon_cut),
+        # We would usually not give a name to ParticleFilter. This is for development purposes.
+        name="Tutorial_muons_for_lb0",
+    )
+    lbs = _make_lambdabs(lcs, muons, pvs, "[Lambda_b0 -> Lambda_c+ mu-]cc")
+
+    return Hlt2Line(
+        name=name,
+        algs=upfront_reconstruction() + [require_pvs(pvs), muons, lbs],
+    )
diff --git a/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/__init__.py b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6d643d6d151997ea599d0f1f090b1722ebead9e
--- /dev/null
+++ b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/__init__.py
@@ -0,0 +1,19 @@
+###############################################################################
+# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Submodule that defines all the starterkit HLT2 lines
+"""
+
+from . import bs_to_jpsiphi
+
+# provide "all_lines" for correct registration by the overall HLT2 lines module
+all_lines = {}
+all_lines.update(bs_to_jpsiphi.all_lines)
diff --git a/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/bs_to_jpsiphi.py b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/bs_to_jpsiphi.py
new file mode 100644
index 0000000000000000000000000000000000000000..5edb5a9cfdb5e509c8eeda930fc996536293730b
--- /dev/null
+++ b/Hlt/Hlt2Conf/python/Hlt2Conf/lines/starterkit/bs_to_jpsiphi.py
@@ -0,0 +1,184 @@
+###############################################################################
+# (c) Copyright 2022 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Define HLT2 lines for
+  - ``B_s0 -> J/psi phi``       Hlt2Starterkit_Bs0ToJpsiPhi_PR_Line
+  - ``B_s0 -> K+ K- mu+ mu-``   Hlt2Starterkit_Bs0ToKmKpMumMup_SP_Line
+Both lines select essentially the same events; the former does so with a "traditional"
+approach of three 2-body combinations, while the latter uses a 4-body combiner directly.
+Which one is faster?
+Another difference is that the first line uses persistreco,
+and the latter uses selective persistence.
+
+TODO: With this selection we measured rates above 1 kHz on an Hlt1-filtered MinBias sample.
+The signal rate should be around
+(2000*10^30 cm^-2 s^-1)*(87*0.254*10^-30 cm^2)*(0.001*0.06*0.5) = 1.32 Hz
+(instantaneous luminosity)*(B+ production cross section @13 TeV * f_s/f_u)*(Br(Bs->J/psi phi)*Br(J/psi->mu+mu-)*Br(phi->K+K-))
+See https://lhcbdoc.web.cern.ch/lhcbdoc/moore/master/selection/hlt2_guidelines.html#true-signal-rate ,
+the PDG, and https://lhcbproject.web.cern.ch/lhcbproject/Publications/l/LHCb-PAPER-2020-046.html.
+
+Can you help out by improving the selection?
+"""
+import Functors as F
+from Functors.math import in_range
+from GaudiKernel.SystemOfUnits import (GeV, MeV, mm)
+from PyConf.Algorithms import VoidFilter
+
+from Moore.config import register_line_builder
+from Moore.lines import Hlt2Line
+from RecoConf.reconstruction_objects import (
+    make_pvs_v2 as make_pvs,
+    upfront_reconstruction,
+)
+from RecoConf.hlt1_tracking import require_pvs
+from ...standard_particles import (make_long_kaons, make_ismuon_long_muon,
+                                   make_long_pions)
+from ...algorithms_thor import (ParticleCombiner, ParticleFilter, require_all)
+
+all_lines = {}
+
+
+def _muons_for_jpsi():
+    pvs = make_pvs()
+    cut = require_all(
+        F.PT > 300 * MeV,
+        F.MINIPCHI2(pvs) > 3.,
+    )
+    return ParticleFilter(make_ismuon_long_muon(), F.FILTER(cut))
+
+
+def _kaons_for_phi():
+    pvs = make_pvs()
+    cut = require_all(
+        F.PT > 120 * MeV,
+        F.MINIPCHI2(pvs) > 4.,
+    )
+    return ParticleFilter(make_long_kaons(), F.FILTER(cut))
+
+
+@register_line_builder(all_lines)
+def bs0_to_jpsiphi_line(name="Hlt2Starterkit_Bs0ToJpsiPhi_PR_Line"):
+
+    pvs = make_pvs()
+    muons = _muons_for_jpsi()
+    kaons = _kaons_for_phi()
+
+    jpsi_combination_code = require_all(
+        in_range(2.7 * GeV, F.MASS, 3.4 * GeV),
+        F.SUM(F.PT) > 0.8 * GeV,
+        F.MAXDOCACUT(0.2 * mm),
+    )
+    jpsi_vertex_code = require_all(
+        in_range(2.8 * GeV, F.MASS, 3.3 * GeV),
+        F.CHI2DOF < 12.,
+    )
+    jpsis = ParticleCombiner(
+        [muons, muons],
+        name="Starterkit_BsToJpsiPhi_JpsiToMumMup_Combiner",
+        DecayDescriptor="J/psi(1S) -> mu+ mu-",
+        CombinationCut=jpsi_combination_code,
+        CompositeCut=jpsi_vertex_code,
+    )
+
+    phi_combination_code = require_all(
+        F.MASS < 1100 * MeV,
+        F.MAXDOCACUT(0.2 * mm),
+    )
+    phi_vertex_code = require_all(
+        F.MASS < 1080 * MeV,
+        F.CHI2DOF < 12.,
+    )
+    phis = ParticleCombiner(
+        [kaons, kaons],
+        name="Starterkit_BsToJpsiPhi_PhiToKmKp_Combiner",
+        DecayDescriptor="phi(1020) -> K+ K-",
+        CombinationCut=phi_combination_code,
+        CompositeCut=phi_vertex_code,
+    )
+
+    bs_combination_code = require_all(
+        in_range(4.9 * GeV, F.MASS, 6.3 * GeV),
+        F.SUM(F.PT) > 3 * GeV,
+        F.MAXDOCACUT(0.2 * mm),
+    )
+    bs_vertex_code = require_all(
+        F.CHI2DOF < 12.,
+        in_range(5 * GeV, F.MASS, 6.2 * GeV),
+        F.BPVFDCHI2(pvs) > 6.,
+    )
+    bss = ParticleCombiner(
+        [jpsis, phis],
+        name="Starterkit_BsToJpsiPhi_BsToJpsiPhi_Combiner",
+        DecayDescriptor="B_s0 -> J/psi(1S) phi(1020)",
+        CombinationCut=bs_combination_code,
+        CompositeCut=bs_vertex_code,
+    )
+
+    return Hlt2Line(
+        name=name,
+        algs=upfront_reconstruction() + [require_pvs(pvs), jpsis, bss],
+        persistreco=True,
+    )
+
+
+@register_line_builder(all_lines)
+def bs0_to_kkmumu_line(name="Hlt2Starterkit_Bs0ToKmKpMumMup_SP_Line"):
+
+    pvs = make_pvs()
+    muons = _muons_for_jpsi()
+    require_at_least_two_muons = VoidFilter(
+        name='Starterkit_BsToJpsiPhi_require_two_muons',
+        Cut=F.SIZE(muons) > 1,
+    )
+    kaons = _kaons_for_phi()
+
+    bs_two_body_combination_code = require_all(
+        in_range(2.7 * GeV, F.MASS, 3.4 * GeV),
+        F.SUM(F.PT) > 0.8 * GeV,
+        F.MAXDOCACUT(0.2 * mm),
+    )
+    bs_three_body_combination_code = F.MAXDOCACUT(0.2 * mm)
+    bs_combination_code = require_all(
+        in_range(4.9 * GeV, F.MASS, 6.3 * GeV),
+        F.SUM(F.PT) > 3 * GeV,
+        F.MAXDOCACUT(0.2 * mm),
+        F.SUBCOMB(Functor=F.MASS < 1100 * MeV, Indices=[3, 4]),
+    )
+    bs_vertex_code = require_all(
+        F.CHI2DOF < 12.,
+        in_range(5 * GeV, F.MASS, 6.2 * GeV),
+        F.BPVFDCHI2(pvs) > 6.,
+        F.SUBCOMB(
+            Functor=in_range(2.8 * GeV, F.MASS, 3.3 * GeV), Indices=[1, 2]),
+        F.SUBCOMB(Functor=F.MASS < 1100 * MeV, Indices=[3, 4]),
+    )
+    bss = ParticleCombiner(
+        [muons, muons, kaons, kaons],
+        name="Starterkit_BsToJpsiPhi_BsToKKMuMu_Combiner",
+        DecayDescriptor="B_s0 -> mu+ mu- K+ K-",
+        Combination12Cut=bs_two_body_combination_code,
+        Combination123Cut=bs_three_body_combination_code,
+        CombinationCut=bs_combination_code,
+        CompositeCut=bs_vertex_code,
+    )
+
+    pion_cut = require_all(
+        F.PT > 200 * MeV,
+        F.MINIPCHI2(pvs) > 4.,
+    )
+    pions_for_bc = ParticleFilter(make_long_pions(), F.FILTER(pion_cut))
+
+    return Hlt2Line(
+        name=name,
+        algs=upfront_reconstruction() +
+        [require_pvs(pvs), require_at_least_two_muons, bss],
+        extra_outputs=[("PiForBc", pions_for_bc)],
+    )
diff --git a/Hlt/Hlt2Conf/tests/qmtest/test_hlt2_line_example.qmt b/Hlt/Hlt2Conf/tests/qmtest/test_hlt2_line_example.qmt
index 4c14fe7c41a6eb02f26c9b86974e1e0000b780e1..a4d1e1eb28e712b08b87d282c892527e6a761b84 100644
--- a/Hlt/Hlt2Conf/tests/qmtest/test_hlt2_line_example.qmt
+++ b/Hlt/Hlt2Conf/tests/qmtest/test_hlt2_line_example.qmt
@@ -15,7 +15,7 @@ Make sure the HLT2 line example configures and runs without errors
 <extension class="GaudiTest.GaudiExeTest" kind="test">
 <argument name="program"><text>gaudirun.py</text></argument>
 <argument name="args"><set>
-  <text>$HLT2CONFROOT/options/hlt2_line_example.py</text>
+  <text>$HLT2CONFROOT/options/run_hlt2_line_example.py</text>
 </set></argument>
 <argument name="use_temp_dir"><enumeral>true</enumeral></argument>
 <argument name="validator"><text>
diff --git a/MooreCache/CMakeLists.txt b/MooreCache/CMakeLists.txt
index fe1b08d7c83193f60d73e2694a455da0788923ba..a90a9c2dc90dafa665b2fc559e2070cac66495f5 100644
--- a/MooreCache/CMakeLists.txt
+++ b/MooreCache/CMakeLists.txt
@@ -47,7 +47,7 @@ endforeach()
 set(LOKI_FUNCTORS_CACHE_POST_ACTION_OPTS)
 
 list(APPEND hlt1_settings hlt1_pp_default hlt1_pp_comparison hlt1_smog_example)
-list(APPEND hlt2_settings options/hlt2_pp_default options/hlt2_pp_thor tests/options/thor/loki_comparison tests/options/hlt2_reco_plus_thor_selections)
+list(APPEND hlt2_settings options/hlt2_pp_default options/hlt2_pp_thor tests/options/thor/loki_comparison tests/options/hlt2_reco_plus_thor_selections options/run_starterkit_bs_to_jpsiphi)
 
 foreach(name IN LISTS hlt1_settings)
     # note that we don't use DisableLoKiCacheFunctors.py from Rec since it is not installed (with
diff --git a/doc/scripts/hlt2_line_example.py b/doc/scripts/hlt2_line_example.py
index ba6595bcd9624e3acb23838788d17fe689d1c8ec..d2ba6cb83a48b06bcd0f9ceda654b7d5723116e1 120000
--- a/doc/scripts/hlt2_line_example.py
+++ b/doc/scripts/hlt2_line_example.py
@@ -1 +1 @@
-../../Hlt/Hlt2Conf/options/hlt2_line_example.py
\ No newline at end of file
+../../Hlt/Hlt2Conf/options/run_hlt2_line_example.py
\ No newline at end of file
diff --git a/doc/tutorials/hlt2_line.rst b/doc/tutorials/hlt2_line.rst
index dd1caa49caa1bb5790b3c1ed1bcce37226aa9944..22ac99d06716a6aecd7dfb2621c3aeeea4596213 100644
--- a/doc/tutorials/hlt2_line.rst
+++ b/doc/tutorials/hlt2_line.rst
@@ -1,10 +1,14 @@
 Writing an HLT2 line
 ====================
 
-.. |lb-decay| replace:: :math:`\Lambda_{b}^{0} \to \Lambda_{c}^{+} \pi^{+}`
+.. |lb-had-decay| replace:: :math:`\Lambda_{b}^{0} \to \Lambda_{c}^{+} \pi^{-}`
+.. |lb-sl-decay| replace:: :math:`\Lambda_{b}^{0} \to \Lambda_{c}^{+} \mu^{-} \bar{\nu}_{\mu}`
 .. |lc-decay| replace:: :math:`\Lambda_{c}^{+} \to p K^{-} \pi^{+}`
 .. |lb| replace:: :math:`\Lambda_{b}^{0}`
 .. |lc| replace:: :math:`\Lambda_{c}^{+}`
+.. |pT| replace:: :math:`p_\text{T}`
+.. |chi2ip| replace:: :math:`\chi^2_\text{IP}`
+
 
 An HLT *line* is a sequence of steps that collectively define whether an event
 contains an object of interest which should be kept for later analysis. This
@@ -12,11 +16,11 @@ object is typically a reconstructed candidate physics process, such as an
 exclusive particle decay.
 
 This page will walk you through defining an HLT2 line step by step. We'll
-reconstruct candidate |lb-decay| decays with |lc-decay|, explaining the details
-of how to encode this within Moore.
+reconstruct candidate |lb-had-decay| and |lb-sl-decay| decays with |lc-decay|,
+explaining the details of how to encode this within Moore.
 
-To follow along, it's expected that you have a :doc:`development setup <developing>` built and
-ready to go.
+To follow along, it's expected that you have a :doc:`development setup <developing>`
+built and ready to go.
 
 File structure
 --------------
@@ -31,41 +35,54 @@ First look there to get a sense of how things are structured. Some files are
 further organised into sub-folders. For us, we can just create a file directly
 under ``lines``::
 
-    touch Hlt/Hlt2Conf/python/Hlt2Conf/lines/lambdab_to_lambdacpi.py
+    touch Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_tutorial.py
 
 Open the newly-created file in your text editor of choice.
 
+A possible final result also exists already as::
+
+    Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py
+
+It can be run with::
+
+    Moore/run gaudirun.py '$HLT2CONFROOT/options/run_hlt2_line_example.py'
+
 Prototyping
 -----------
 
-Think about how you would reconstruct candidates for our decay of interest,
-|lb-decay| with |lc-decay|. This is always the first step before writing any
+We focus first on the |lb-had-decay| with |lc-decay| and add the |lb-sl-decay| decay later
+to highlight certain aspects of the framework.
+
+Think about how you would reconstruct/make candidates for our decays of interest.
+This is always the first step before writing any
 code. How could we do it?
 
-1. Reconstruct protons, kaons, and pions needed for the |lc| candidates.
-2. Filter those candidates if necessary.
-3. Reconstruct |lc-decay| candidates.
-4. Reconstruct pion candidates needed for the |lb| candidates.
-5. Filter the pion candidates if necessary.
-6. Reconstruct |lb-decay| candidates.
+1. Reconstruct and filter/select protons, kaons, and pions. These are the basic building blocks for this module.
+2. Reconstruct |lc-decay| candidates.
+3. Reconstruct pion candidates needed for the |lb| candidates.
+4. Reconstruct |lb-had-decay| candidates.
 
-Each step, like 'reconstruct proton', 'filter proton', and 'reconstruct |lc|',
+Each (sub-)step, like 'reconstruct and filter protons' or 'reconstruct |lc|',
 represents the running of an *algorithm*, a C++ component defined within the
-LHCb selection framework. We want to *configure* these algorithms so they
+LHCb selection framework. We want to *configure* these algorithms so they
 behave in a way that creates the candidates we want.
+We have consolidated the reconstruction and filtering sub-steps to
+define module-specific input particles. Those could also be common to
+your working group or the entire experiment, in which case they would be
+imported from a shared module.
 
 But there is one step missing here, which historically has been implicit:
 reconstruct primary vertex (PV) candidates! These are necessary if we want to
-cut on quantities releted to PVs such as the impact parameter and flight
+cut on quantities related to PVs such as the impact parameter and flight
 distance.
 
 So, let's outline a function that does these steps::
 
-    def lbtolcpi_lctopkpi_line():
+    def lb0_to_lcpim_line():
         pvs = make_pvs()
-        protons = filter_protons(make_protons(), pvs)
-        kaons = filter_kaons(make_kaons(), pvs)
-        pions = filter_pions(make_pions(), pvs)
+        protons = protons_for_charm()
+        kaons = kaons_for_charm()
+        pions = pions_for_charm_and_beauty()
         lcs = make_lambdacs(protons, kaons, pions, pvs)
         lbs = make_lambdabs(lcs, pions, pvs)
 
@@ -73,13 +90,13 @@ So, let's outline a function that does these steps::
 
 This is a step-by-step encoding of what we want our line to do. Of course, this
 version doesn't run yet because we haven't defined the various ``make_`` and
-``filter_`` functions yet, and it's not clear what will happen to the return
+``h_for_charm`` functions yet, and it's not clear what will happen to the return
 value ``lbs``, but this function is already very close to what our final
 function will look like.
 
 .. note::
 
-    We 'skipped' steps 4 and 5 in our outline because we'll assume that for
+    We 'skipped' step 3 in our outline because we'll assume that for
     *this* line the pions used for the |lc| also meet our criteria for |lb|
     pions.
 
@@ -89,9 +106,9 @@ There are just a few changes we need to make to our file and function to be
 consistent with what Moore expects.
 
 1. Return an object that Moore understands from the function.
-2. Define a 'line registry' object that will hold all of the lines defined
-   within this file. Moore will expect this object to be present, and will use
-   it to discover all lines it should run.
+2. Define a 'line registry' object that will hold all the lines defined
+   within this file/module. Moore will expect this object to be present, and
+   will use it to discover all lines it should run.
 3. Add our line definition function to this registry.
 
 Line declaration
@@ -100,16 +117,17 @@ Line declaration
 The first step means returning a `Moore.lines.Hlt2Line` object. This contains
 some metadata about the information that Moore will use, such as a name, in
 addition to the :doc:`control flow <../pyconf/control_flow>` defining the
-line. Let's return that object first and then discuss it::
+line. Let's return that object first and then discuss it:
 
-    from Moore.lines import Hlt2Line
+.. code-block:: python
 
+    from Moore.lines import Hlt2Line
 
-    def lbtolcpi_lctopkpi_line(name="Hlt2LbToLcpPim_LcToPpKmPipLine", prescale=1):
+    def lb0_to_lcpim_line(name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line", prescale=1):
         pvs = make_pvs()
-        protons = filter_protons(make_protons(), pvs)
-        kaons = filter_kaons(make_kaons(), pvs)
-        pions = filter_pions(make_pions(), pvs)
+        protons = protons_for_charm()
+        kaons = kaons_for_charm()
+        pions = pions_for_charm_and_beauty()
         lcs = make_lambdacs(protons, kaons, pions, pvs)
         lbs = make_lambdabs(lcs, pions, pvs)
 
@@ -119,13 +137,14 @@ line. Let's return that object first and then discuss it::
             prescale=prescale,
          )
 
+
 There are three new things going on:
 
 1. The `Moore.lines.Hlt2Line` object needs to be created with a name, so we've parameterised
    this as a function argument, with a default value, and passed it to
    `Moore.lines.Hlt2Line`.
-2. The `Moore.lines.Hlt2Line` object also needs to be created with a prescale, so we've
-   parameterised this in a similar way as for the name.
+2. The `Moore.lines.Hlt2Line` object *can* be created with a prescale, so we've
+   parameterised this in the same way as the name.
 3. Finally, we defined the *control flow* of the line as the ``algs`` parameter
    of `Moore.lines.Hlt2Line`. The control flow specifies how Moore should evaluate whether
    this line made a positive *decision* or not. The filters of a line are
@@ -136,16 +155,24 @@ There are three new things going on:
    of |lb| candidates, this line should be considered as having 'passed' (also
    called 'fired').
 
-Parameterising the function in the way we have allows us to easily create
-multiple lines with different names and prescales, just by calling the
-function with different arguments. We'll see later :ref:`how to run multiple
-instances of the line with different cuts <modifying_thresholds>`.
+Parameterising the function in the way we have allows for two things:
 
-.. note::
+1. For development purposes we can easily create multiple lines with different
+   names and prescales just by calling the function with different arguments,
+   as illustrated in the sketch after this list.
+   Of course, we are free to add further arguments, like cut values.
+   We'll see later :ref:`how to run multiple instances of the line with
+   different cuts <modifying_thresholds>`.
+2. For bookkeeping purposes, we will `decorate <https://www.python.org/dev/peps/pep-0318/>`_
+   the function. The decorator expects the `name` argument to
+   :ref:`register the line <line_registration>`.
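+
+As a minimal sketch (the alternative name and prescale value below are purely
+illustrative), the same builder could then produce several line instances::
+
+    default_line = lb0_to_lcpim_line()
+    prescaled_line = lb0_to_lcpim_line(
+        name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Prescaled_Line",
+        prescale=0.1,
+    )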
 
-    How should you decide what name to give your line? The `conventions are
-    outlined in Moore#60 <https://gitlab.cern.ch/lhcb/Moore/-/issues/60>`_. If
-    you're still unsure, just open your merge request and someone will make
+.. tip::
+
+    How should you decide what name to give your line? The conventions are
+    outlined among the :ref:`best practices <code_design_guidelines>` below and in
+    `Moore#60 <https://gitlab.cern.ch/lhcb/Moore/-/issues/60>`_, and
+    your WG might provide a dedicated naming scheme.
+    If you're still unsure, just open your merge request and someone will make
     suggestions.
 
 Control and data flow
@@ -166,7 +193,7 @@ control flow in our line, Moore needs to run the |lb| making algorithm. Before
 doing that it will *automatically deduce* what other algorithms it needs to run
 in order to satisfy the inputs to the |lb| algorithm. One input is the output
 of the |lc| algorithm, and Moore will likewise automatically deduce what
-algorithms need to run to produce the required input (that is: the proton,
+algorithms need to run to produce the required inputs (that is: the proton,
 kaon, and pion makers).  This automatic data flow resolution goes all the way
 up through the reconstruction to the raw event.
 
@@ -174,24 +201,31 @@ We could choose to impose additional requirements on the control flow if it
 makes physics sense for our line. For example:
 
 1. At least one PV must be present in the event; or
-2. Some global event cut (GEC) must be satisfied, for example the number of
-   clusters in the tracking stations should be below some value.
+2. Intermediate selection steps must pass.
 
 Because we already have a PV making algorithm in our prototype, we could
-include this in our control flow already::
+include this in our control flow already. Including the |lc| selection
+in our case won't have any effect, as we chose to use the exact same
+pion candidates for the |lc| and |lb| selections. For our semileptonic
+decay, however, we could add the muons to the control flow.
+
+.. code-block:: python
+
+    from RecoConf.hlt1_tracking import require_pvs
+
 
         return Hlt2Line(
             name=name,
-            algs=[pvs, lbs],
+            algs=[require_pvs(pvs), muons, lbs],
             prescale=prescale,
          )
 
-Moore will define the control flow for this line to be:
 
-    "First require the ``pvs`` algorithm to pass, and then require the ``lbs``
-    algorithm to pass; if both pass then the line decision is positive."
+Moore will define the control flow for this line to be:
 
-We will omit this extra requirement for simplicity here, though.
+    "First require the ``pvs`` algorithm to pass, then require the ``muons``
+    algorithm to pass, and finally require the ``lbs``
+    algorithm to pass; if all pass then the line decision is positive."
 
 .. note::
 
@@ -203,6 +237,8 @@ We will omit this extra requirement for simplicity here, though.
     individual line's node, with a logic of 'non-lazy or'. Most line authors
     don't have to worry about the different types.
 
+.. _line_registration:
+
 Line registration
 ^^^^^^^^^^^^^^^^^
 
@@ -216,26 +252,24 @@ is populated by using the Python decorator syntax with the
 
     all_lines = {}
 
-
     @register_line_builder(all_lines)
-    def lbtolcpi_lctopkpi_line(name="Hlt2LbToLcpPim_LcToPpKmPipLine", prescale=1):
+    def lb0_to_lcpim_line(name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line", prescale=1):
         pvs = make_pvs()
-        protons = filter_protons(make_protons(), pvs)
-        kaons = filter_kaons(make_kaons(), pvs)
-        pions = filter_pions(make_pions(), pvs)
+        protons = protons_for_charm()
+        kaons = kaons_for_charm()
+        pions = pions_for_charm_and_beauty()
         lcs = make_lambdacs(protons, kaons, pions, pvs)
         lbs = make_lambdabs(lcs, pions, pvs)
 
         return Hlt2Line(
             name=name,
-            algs=[lbs],
+            algs=[require_pvs(pvs), lbs],
             prescale=prescale,
          )
 
-The decorator just adds the line *function* to the dictionary. We'll see later
-how this dictionary is used to run the line.
 
-TODO: link to 'later'.
+The decorator just adds the line *function* to the dictionary. We'll see
+:ref:`later <running>` how this dictionary is used to run the line.
 
 .. note::
 
@@ -248,7 +282,7 @@ Standard objects
 
 In HLT2, several maker functions are already defined for general usage. These
 'standard makers' take the output of the reconstruction and make objects common
-to many HLT2 lines. These standard makers produces objects such as:
+to many HLT2 lines. These standard makers produce objects such as:
 
 * Charged tracks with predefined mass hypotheses and associated PID objects
 * Neutral objects such as photons and neutral pions
@@ -267,6 +301,7 @@ non-composite inputs:
 * Protons: `make_has_rich_long_protons <Hlt2Conf.standard_particles.make_has_rich_long_protons>`
 * Kaons: `make_has_rich_long_kaons <Hlt2Conf.standard_particles.make_has_rich_long_kaons>`
 * Pions: `make_has_rich_long_pions <Hlt2Conf.standard_particles.make_has_rich_long_pions>`
+* (Muons: `make_ismuon_long_muon <Hlt2Conf.standard_particles.make_ismuon_long_muon>`)
 
 We've chosen the `has_rich` variant because in this example we will apply PID
 cuts to all non-composite particles, so it makes sense to first require that
@@ -276,7 +311,7 @@ values.
 Primary vertices are also part of the set of standard objects, produced by the
 `make_pvs <RecoConf.reconstruction_objects.make_pvs_v2>` function.
 
-Given this information, we can flesh out our function a little bit more::
+Given this information, we can flesh out our function a bit more::
 
     from Moore.config import register_line_builder
     from Moore.lines import Hlt2Line
@@ -290,33 +325,34 @@ Given this information, we can flesh out our function a little bit more::
 
     all_lines = {}
 
-
     @register_line_builder(all_lines)
-    def lbtolcpi_lctopkpi_line(name="Hlt2LbToLcpPim_LcToPpKmPipLine", prescale=1):
+    def lb0_to_lcpim_line(name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line", prescale=1):
         pvs = make_pvs()
-        protons = filter_protons(make_has_rich_long_protons(), pvs)
-        kaons = filter_kaons(make_has_rich_long_kaons(), pvs)
-        pions = filter_pions(make_has_rich_long_pions(), pvs)
+        protons = protons_for_charm()
+        kaons = kaons_for_charm()
+        pions = pions_for_charm_and_beauty()
         lcs = make_lambdacs(protons, kaons, pions, pvs)
         lbs = make_lambdabs(lcs, pions, pvs)
 
         return Hlt2Line(
             name=name,
-            algs=[lbs],
+            algs=[require_pvs(pvs), lbs],
             prescale=prescale,
          )
 
+
 .. note::
 
     The functions to create reconstruction objects like PVs, tracks or protoparticles should
     only be imported from the module ``RecoConf.reconstruction_objects``.
     More explanation can be found in the tutorial :doc:`run_with_reconstruction`.
 
+.. _filters_and_combiners:
 
 Filters and combiners
 ---------------------
 
-We're nearly there! What's left is to define the various ``filter_`` and
+We're nearly there! What's left is to define the various ``h_for_charm`` and
 ``make_`` placeholders.
 
 Think about what these functions should do. They need to take input, as we've
@@ -324,9 +360,7 @@ written it in our prototype, configure the correct type of algorithm, and then
 return something. We'll be using :ref:`selection algorithms which use ThOr
 functors <thor-selection-algorithms>`.
 
-Let's start with the particle filters.
-We could consider allowing for customisation of the
-algorithms with additional arguments::
+Let's start with the basic building blocks of our module, the ``h_for_charm`` functions::
 
     from GaudiKernel.SystemOfUnits import GeV
     import Functors as F
@@ -334,28 +368,23 @@ algorithms with additional arguments::
     from ..algorithms_thor import ParticleFilter, require_all
 
 
-    def filter_protons(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9, dllp_min=5):
+    def protons_for_charm():
+        pvs = make_pvs()
         cut = require_all(
-            F.PT > pt_min,
-            F.MINIPCHI2(pvs) > mipchi2_min,
-            F.PID_P > dllp_min,
+            F.PT > 0.5 * GeV,
+            F.MINIPCHI2(pvs) > 9.,
+            F.PID_P > 5.,
         )
-        return ParticleFilter(particles, F.FILTER(cut))
+        return ParticleFilter(make_has_rich_long_protons(), F.FILTER(cut))
 
 We've used the `require_all <Hlt2Conf.algorithms_thor.require_all>` helper to
 define the cut expression here.
 
 .. note::
 
-    Why parameterise exactly these things as arguments, and not others, such as the
-    full functor expression? Like writing any other function, you need to consider
-    what behaviour should be configurable. For defining physics selections, there
-    is typically some particular *intent* an author has when writing code. In this
-    instance, we *want* these particles to be displaced with respect to any PV, so
-    there's an impact parameter cut. Another physicist reading this function will
-    understand that the *intent* is to produce displaced protons. Thinking about
-    the 'interface' of a function in this way can help make the code easier to
-    understand and reason about.
+    The function has no arguments. This is :ref:`on purpose <code_design_guidelines>`
+    for production-ready selections.
+    In this way we have re-defined a custom basic building block for our lines.
 
 The return value is the configured algorithm. This can be used as an 'input' to
 other algorithms as the framework knows how to extract the (single) output the
@@ -367,56 +396,86 @@ pions.
 Next is a function which combines its input to a composite |lc| candidate::
 
     from Functors.math import in_range
-    from GaudiKernel.SystemOfUnits import MeV, mm
+    from GaudiKernel.SystemOfUnits import (GeV, MeV, mm)
 
     from ..algorithms_thor import ParticleCombiner
 
-
-    def make_lambdacs(protons,
-                      kaons,
-                      pions,
-                      pvs,
-                      two_body_comb_maxdocachi2=9.0,
-                      comb_m_min=2080 * MeV,
-                      comb_m_max=2480 * MeV,
-                      comb_pt_min=2000 * MeV,
-                      comb_maxdoca=0.1 * mm):
-        two_body_combination_code = F.MAXDOCACHI2CUT(two_body_comb_maxdocachi2)
+    def make_lambdacs_for_beauty(protons, kaons, pions, pvs):
+        two_body_combination_code = require_all(
+            F.MAXDOCACHI2CUT(9.), F.MAXDOCACUT(0.1 * mm))
         combination_code = require_all(
-            in_range(comb_m_min,  F.MASS, comb_m_max),
-            F.SUM(F.PT) > comb_pt_min,
-            F.MAXDOCACUT(comb_maxdoca),
+            in_range(2080 * MeV, F.MASS, 2480 * MeV),  # mass of the combination
+            F.PT > 1.4 * GeV,  # pT of the 3-track combination
+            F.SUM(F.PT) > 2 * GeV,
+            F.MAXDOCACHI2CUT(9.),
+            F.MAXDOCACUT(0.1 * mm),
+        )
+        vertex_code = require_all(
+            in_range(2100 * MeV, F.MASS, 2460 * MeV),  # mass after the vertex fit
+            F.PT > 1.6 * GeV,  # pT after the vertex fit
+            F.CHI2DOF < 10.,
+            F.BPVFDCHI2(pvs) > 25.,
         )
         return ParticleCombiner(
             [protons, kaons, pions],
             DecayDescriptor="[Lambda_c+ -> p+ K- pi+]cc",
+            name="Tutorial_Lcp_Combiner",
             Combination12Cut=two_body_combination_code,
-            CombinationCut=combination_code
+            CombinationCut=combination_code,
+            CompositeCut=vertex_code,
         )
 
-The concepts here follow on from the filter example, so we won't dwell on it
-for long, other than to point out that we chose *not* to parameterise the
-``DecayDescriptor`` property of the combiner algorithm. As explained above,
-that's because our *intent* in defining this function is to make a combiner for
-a specific |lc| decay.
 
-Finally, define a maker function for the |lb| candidates.
+The concepts here follow on from the ``protons_for_charm`` example.
+However, this combiner is not written as a basic building block of our selection:
+we pass the reconstructed objects as positional arguments to make the
+data flow explicit in the function that uses this combiner.
+
+Combiners are always instances of `ParticleCombiner`, and the length of the input list
+determines whether a 2-, 3- or 4-body combiner is called on the C++ side.
+There is `detailed documentation <https://gitlab.cern.ch/lhcb/Rec/-/blob/68b392cf3e4f9f717c746724d5f7de088bd42561/Phys/ParticleCombiners/include/CombKernel/ThOrCombiner.h>`_
+for combiners in our codebase. The most notable points for the configuration are (see the sketch after this list):
+
+  1. The order of particles in the decay descriptor and the input list must be the same; unlike in Run 2, there is no mixing and matching!
+  2. Another change w.r.t. Run 2 is that particles of the same type are passed explicitly (``[pi, pi, pi], DecayDescriptor="[D+ -> pi+ pi+ pi-]cc",``)
+  3. Multiple child particles with the *same* ID must be grouped together (``D+ -> pi+ pi+ pi-`` is good, ``D+ -> pi+ pi- pi+`` is forbidden).
+  4. For performance purposes, the algorithm logic assumes that the rarest children are listed first in the decay descriptor.
+     In case you are unsure what is rarest, checking counters in the log file can help.
+
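+A minimal sketch of a combiner respecting these rules (the ``D+`` decay, the name,
+and the cut values below are purely illustrative)::
+
+    dps = ParticleCombiner(
+        [pions, pions, pions],
+        DecayDescriptor="[D+ -> pi+ pi+ pi-]cc",
+        name="Tutorial_Dp_Combiner",
+        Combination12Cut=F.MAXDOCACUT(0.2 * mm),
+        CombinationCut=require_all(
+            in_range(1.7 * GeV, F.MASS, 2.05 * GeV),
+            F.MAXDOCACUT(0.2 * mm),
+        ),
+        CompositeCut=F.CHI2DOF < 10.,
+    )
+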
+.. note::
+
+    When adding the line for |lb-sl-decay|, we may choose to use the same `make_lambdacs_for_beauty`
+    function to build |lc| candidates as in the hadronic decay. If this function is also called
+    with the exact same inputs (``protons_for_charm``, ``kaons_for_charm``, ``pions_for_charm_and_beauty``),
+    the combiner will run only once, because the configuration framework resolves two identically
+    configured algorithms to the same underlying object. This is an important point for optimisation and will
+    be discussed at several stages of this tutorial.
+
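+    For illustration only (reusing the building blocks defined above), this means that::
+
+        lcs_for_hadronic_line = make_lambdacs_for_beauty(protons, kaons, pions, pvs)
+        lcs_for_semileptonic_line = make_lambdacs_for_beauty(protons, kaons, pions, pvs)
+
+    configures only one combiner instance, whose output is shared by both lines.
+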
+    On the other hand, this means that if you change a cut slightly in one of the combiners or its inputs,
+    another instance of the algorithm is created and work is (almost) doubled.
+
+Finally, we can define a maker function for the |lb| candidates based on what we have learned so far.
+
+.. _running:
 
 Running
 -------
 
-With everything in place, we have pretty much everything we need to run the
-line. The remaining piece is an options file that configures Moore with our
-line maker function. Place this in a file called ``test_line.py`` in your
-working directory::
+We now have a first prototype for a line selecting |lb-had-decay| decays.
+The remaining piece is an options file that configures Moore with our
+line maker function. You can place this in a file called ``test_line.py`` in your
+working directory.
+
+.. code-block:: python
+    :caption: test_line.py
 
     from Moore import options, run_moore
-    from Hlt2Conf.lines.lambdab_to_lambdacpi import lbtolcpi_lctopkpi_line
+    from Hlt2Conf.lines.hlt2_line_tutorial import lb0_to_lcpim_line
     from RecoConf.global_tools import stateProvider_with_simplified_geom
 
 
     def all_lines():
-        return [lbtolcpi_lctopkpi_line()]
+        return [lb0_to_lcpim_line()]
 
     public_tools = [stateProvider_with_simplified_geom()]
 
@@ -428,7 +487,7 @@ working directory::
 
 Most of the pieces we've used here are explained in the :doc:`running` page.
 All we've done is tell Moore to run with its default configuration, using our
-line maker function to create the only line that it should run, and defined an
+line definition function to create the only line that it should run, and defined an
 input to use from the test file database.
 
 The cache in the ``TrackStateProvider`` is not compatible with the new scheduler
@@ -440,7 +499,7 @@ See LHCBPS-1835_ and `Rec!1584`_ for more details.
 Moore needs to know the input file when running, so we'll just use an Upgrade
 minimum bias input data options file that comes with Moore::
 
-    ./Moore/run gaudirun.py '$MOOREROOT/tests/options/default_input_and_conds_hlt2.py' test_line.py
+    ./Moore/run gaudirun.py '$MOOREROOT/tests/options/default_input_and_conds_hlt2.py' test_line.py 2>&1 | tee logs/test_line.log
 
 With any luck this will run, but it will soon fail with an error.
 
@@ -462,15 +521,14 @@ building our candidate::
 
     from RecoConf.reconstruction_objects import upfront_reconstruction
 
-    # ...
 
     @register_line_builder(all_lines)
-    def lbtolcpi_lctopkpi_line(name="Hlt2LbToLcpPim_LcToPpKmPipLine", prescale=1):
-        # ...
+    def lb0_to_lcpim_line(name="Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line", prescale=1):
+
 
         return Hlt2Line(
             name=name,
-            algs=upfront_reconstruction() + [lbs],
+            algs=upfront_reconstruction() + [require_pvs(pvs), lbs],
             prescale=prescale,
          )
 
@@ -484,9 +542,15 @@ Run again and you'll see the command complete successfully. Look at the log and
 see how many candidates were created. Seeing as we're running over minimum bias
 data, you should expect to see very few candidates (ideally zero).
 
+.. hint::
+
+    You can now also add the line for the |lb-sl-decay|.
+
+    What differences do you see in the log files?
+
 Use the instructions in :ref:`analysing-output` section to find the commands
 for generating and inspecting the control and data flow graphs that are
-produced when the options were ran. The data flow for our example looks like
+produced when the options were run. The data flow for our example looks like
 this:
 
 .. graphviz:: ../graphviz/hlt2_line_example_data_flow.gv
@@ -501,15 +565,113 @@ Again, the presence of these is just a detail for now; in the near future the
 control flow for a line will look much simpler, for our line being just the
 |lb| combiner algorithm.
 
+Inspecting the log file
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Some aspects of reading and understanding log files, like reading off Moore configuration
+options and the control flow table, are documented in the :doc:`"Running Moore" section <running>`.
+
+Here, we focus on counters of filters and combiners, and the timing table.
+If you did not pass a ``name`` to `ParticleFilter`, the default name will be ``ParticleRangeFilter``.
+Every time a new instance of `ParticleFilter` is created, the name is extended automatically
+to ``ParticleRangeFilter#i`` for the ``(i+1)``-th instance in the sequence.
+In a similar manner, combiners have the default names ``TwoBodyCombiner``, ``ThreeBodyCombiner``,
+and ``FourBodyCombiner``.
+
+.. note::
+
+    As mentioned in :ref:`the discussion on filters and combiners <filters_and_combiners>`,
+    the framework de-duplicates algorithms with the exact same configuration.
+    For filters of basic particles, the likelihood of "accidentally" sharing the configuration
+    with any line in Moore is relatively large. If there are multiple instances
+    with different names, PyConf will raise an exception at compile time.
+    Therefore, we recommend giving names only to combiners.
+
+These names appear in the counters, in the timing table, and in the control flow table (if they are part of the control flow).
+We can search for them, for example with::
+
+    grep -A 7 'Tutorial_Lcp_Combiner' logs/test_line.log
+
+and find something like::
+
+    151:Tutorial_Lcp_Combiner                                      INFO Number of counters : 6
+    152- |    Counter                                      |     #     |    sum     | mean/eff^* | rms/err^*  |     min     |     max     |
+    153- |*"# passed"                                      |       100 |          0 |(  0.000000 +-  0.000000)% |
+    154- |*"# passed Combination12Cut"                     |       128 |          4 |( 3.125000 +- 1.537892)% |
+    155- |*"# passed CombinationCut"                       |        10 |          0 |(  0.000000 +-  0.000000)% |
+    156- | "Input1 size"                                   |       100 |        152 |     1.5200 |
+    157- | "Input2 size"                                   |       100 |        133 |     1.3300 |
+    158- | "Input3 size"                                   |       100 |        244 |     2.4400 |
+    --
+    212: | "Tutorial_Lcp_Combiner"                                              |             100 |           0.045 |          454.773 |
+    213- | "FunctionalParticleMaker#3"                                          |             100 |           0.010 |          109.340 |
+    ...
+
+Let's go through this output line by line:
+
+   * The first line (``151``) prints the name of this `GaudiAlgorithm` instance and the number of counters.
+   * Line ``152`` shows the header; as there are different types of counters, the header contains columns that are not filled for all counters of this combiner.
+   * Line ``153`` counts the number of *events* in which the decision has been positive. In our case this is 0 out of the 100 events for which this combiner has been run.
+   * Line ``154`` counts the number of *candidates* for which the two-body combination cut decision is positive.
+     Here, there were 128 combinations (which implicitly passed the combiner's decay descriptor), 4 of which passed the two-body combination cut.
+     As the `Combination12Cut` is a very cheap operation and helps to reject background early on, you should always consider applying one; see the sketch after this list.
+   * A similar counter in line ``155`` shows the positive combination cut decisions. In this case, we combined the 4 candidates passing the ``Combination12Cut``
+     with the ``Input3`` (pion) container of this event. This leads to 10 candidates, out of which 0 are selected.
+   * Lines ``156`` to ``158`` display the total sizes and average per-event multiplicities of the input containers.
+     They are listed in the same order as we passed them to the combiner, i.e. proton, kaon, and pion candidates.
+   * You might have noticed that there is no counter for the ``CompositeCut`` of our combiner.
+     It has been suppressed because there were no candidates left for it to process.
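+
+As a reminder of where the early two-body cut enters, a named three-body combiner can be
+configured roughly as in the sketch below, where ``protons``, ``kaons`` and ``pions`` are the
+filtered inputs and ``combination_code`` and ``vertex_code`` stand for the remaining cuts;
+the ``Combination12Cut`` threshold is illustrative::
+
+    lambda_cs = ParticleCombiner(
+        [protons, kaons, pions],
+        name="Tutorial_Lcp_Combiner",
+        DecayDescriptor="[Lambda_c+ -> p+ K- pi+]cc",
+        # cheap cut evaluated on the first two children (p+ K-) before the
+        # third input is added to the combination
+        Combination12Cut=F.MAXDOCACHI2CUT(9.0),
+        CombinationCut=combination_code,
+        CompositeCut=vertex_code,
+    )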
+
+.. tip::
+
+    Understanding counters is extremely useful for developing and debugging your selection.
+
+More details on counters can be found in `this talk <https://indico.cern.ch/event/663813/contributions/3191663/attachments/1745169/2825049/NewCounters.pdf>`_.
+
+The second part of our ``grep`` result shows an excerpt of the timing table, whose header::
+
+    | Name of Algorithm                                                    | Execution Count | Total Time / s  | Avg. Time / us   |
+
+can be found further up in the log.
+
+.. tip::
+
+    To speed up your selection, you are mainly interested in the ``Total Time`` of your filters and combiners.
+    This can be reduced by
+
+        1. a tighter selection on the inputs;
+        2. a tighter selection in the ``Combination(12(3))Cut``;
+        3. a well-chosen configuration of the control flow.
+
+
 Full example
 ------------
 
 A full implementation example of the line described here can be found at
-``doc/scripts/hlt2_line_example.py``. Have a look at this and see how it
-differs from yours. In particuar, see how the imports have been organised near
+``Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py``.
+Have a look at this and see how it differs from yours.
+In particular, see how the imports have been organised near
 the top of the file, and everything has a consistent look.
 
-Try to follow these elements from the example in your own line. Remember that
+You may have noticed that the ``make_`` and ``h_for_charm`` functions (such as
+``_protons_for_charm``) have a leading underscore in their names. This is a detail that we
+will follow up on when discussing the :ref:`code design guidelines <code_design_guidelines>`.
+
+.. hint::
+
+    The full example runs 3 lines: one for the |lb-had-decay| decay, and two instances
+    of the |lb-sl-decay| line, one of which has a slightly modified pion |pT| cut.
+    Have a close look at the counters that the example produces.
+    Can you understand all of them?
+
+        * We have two counters for the ``Tutorial_pions_for_charm_and_beauty`` filter. Which one is which?
+        * Why does one of them have fewer inputs?
+        * We have two counters for the ``Tutorial_Lcp_Combiner`` and three for ``Tutorial_Lb0_Combiner`` combiners,
+          two of which look identical. Why is that?
+        * Some combiners don't seem to run on all 100 events. Why is that?
+        * There is no ``Combination12Cut`` counter for ``Tutorial_Lcp_Combiner#1``,
+          even though the input containers for one event are not empty. What happened?
+
+Try to follow elements from the example in your own line. Remember that
 it is *your* line, and you should feel free to really *own* it. Show off and
 make it nice!
 
@@ -529,114 +691,175 @@ values' that are used by running elsewhere, which can be confusing.)
 
 What if you wanted to run a couple of instances of this line, but one with the
 standard cuts and one with some thresholds slightly modified? This can be
-acheived by using the `@configurable <PyConf.tonic.configurable>` decorator::
-
-    from PyConf import configurable
+achieved by using the `@configurable <PyConf.tonic.configurable>` decorator.
 
-    @configurable
-    def filter_protons(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9, dllp_min=5):
-        # ...
+In the full example ``Hlt/Hlt2Conf/python/Hlt2Conf/lines/hlt2_line_example.py``,
+we made use of this functionality:
 
-Functions decorated in this way can be configured using the ``with...bind``
-syntax::
+.. code-block:: python
+    :caption: run_hlt2_line_example
 
-    from lambdab_to_lambdacpi import filter_protons,
+    def make_lines():
+        standard_lines = [line_builder() for line_builder in all_lines.values()]
 
+        # This is to demonstrate how `configurable`/`bind` works. We could also pass the function arguments directly to lb0tolcpmum_line()
+        with lb0tolcpmum_line.bind(
+                name="Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Pip_pt450MeV_Line",
+                pi_pt_min=450 * MeV):
+            modified_line = lb0tolcpmum_line()
 
-    def all_lines():
-        standard_line = lbtolcpi_lctopkpi_line()
+        return standard_lines + [modified_line]
 
-        with filter_protons.bind(dllp_min=-999):
-            modified_line = lbtolcpi_lctopkpi_line(name="Hlt2DifferentNameLine")
 
-        return [standard_line, modified_line]
+    run_moore(options, make_lines, public_tools)
 
-    run_moore(options, all_lines)
+This configuration will run 3 lines: the |lb-had-decay| and |lb-sl-decay| lines with their default configuration,
+and the |lb-sl-decay| line with the lowered pion |pT| threshold of 450 MeV.
 
-This changes any calls to the function ``filter_protons`` within the ``with``
-context to override the value of the ``dllp_min=-999``. The advantages of using
-this approach are:
+You can run the full example with::
 
-1. You don't need to modify the source code (but it is often better to do
-   that!).
-2. You don't need to 'expose' everything you want to change on the top-level
-   line maker, you just modify the behaviour of ``@configurable`` functions
-   directly.
+    ./Moore/run gaudirun.py '$HLT2CONFROOT/options/run_hlt2_line_example.py' 2>&1 | tee hlt2_line_example.log
 
-You can read a lot more about the ``@configurable`` decorator in the :ref:`Tonic documentation <tonic-design>`.
 
-.. note::
+.. hint::
 
-    Many of the components of the two lines we are now running are identical,
-    such as the kaon and pion makers. Will this configuration end up running
-    two versions of each maker that do exactly the same thing? No!
+    The advantages of using the ``bind`` approach are:
+
+    1. You don't need to modify the source code (but it is often better to do
+       that!).
+    2. You don't need to 'expose' everything you want to change on the top-level
+       line maker; you just modify the behaviour of ``@configurable`` functions
+       directly.
 
-    The configuration framework resolves two identically-configured algorithms
-    to the same underlying object. This means that the second time the kaon
-    maker is configured (identically to the first time), we will get exactly
-    the same instance back as the first time. There is then only one instance
-    that's added in the dataflow, and the scheduler will see this and know not
-    to run the same thing twice.
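+
+If you do not need the indirection, the same modified line can also be created by passing the
+arguments directly to the line builder, as the comment in the example hints (this assumes, as in
+the full example, that the builder exposes ``name`` and ``pi_pt_min``)::
+
+    modified_line = lb0tolcpmum_line(
+        name="Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Pip_pt450MeV_Line",
+        pi_pt_min=450 * MeV)
+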
+You can read a lot more about the ``@configurable`` decorator in the :ref:`Tonic documentation <tonic-design>`.
 
-    The algorithms 'downstream' of the proton makers, such as the |lc|
-    combiner, *will* be different, because "configuration" includes the tree of
-    all inputs, and one of them (the proton maker) is different between the two
-    lines.
+.. _code_design_guidelines:
+
+Code design guidelines
+----------------------
+
+These guidelines are not set in stone and are up for debate.
+
+.. important::
+
+    **Summary of best practices**
+
+    1. Basic building blocks of the selections should be identified and declared locally if they cannot be taken from a shared location.
+    2. Builder functions (``_make_*``) should only be used if called more than once. ``DataHandles`` of reconstructed objects need to be passed
+       as positional arguments. Optional arguments like names, decay descriptors or bool/enum-style
+       variables should also be passed as positional arguments.
+       Using cut values as arguments or passing ``*args`` or ``**kwargs`` is discouraged.
+    3. Selection steps with "rare" outputs should be part of the control flow. The ordering of the control flow
+       should be chosen based on the speed and rarity of each selection step.
+    4. Functions should never be imported from a module which registers lines. Functions that are only used in one module should start with an underscore;
+       see the `PEP 8 Style Guide for Python Code <https://www.python.org/dev/peps/pep-0008/#descriptive-naming-styles>`_.
+    5. A consistent naming scheme for lines and combiners can help with code readability and debugging.
+    6. A docstring at the beginning of a file can act like a table of contents and help to navigate through it.
+       Searching for a line name listed in that docstring can help to jump to the right place in the code.
+
+    Corollary: selection cuts are exposed once in the configuration, and cannot be overwritten.
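+
+As a small illustration of point 6 above, the module docstring can double as a table of contents.
+The sketch below lists the hadronic tutorial line by its real name; the second entry is only a
+placeholder guess for the semileptonic line name::
+
+    """Tutorial HLT2 lines for Lambda_b0 decays.
+
+    Lines defined in this module:
+
+        - Hlt2Tutorial_Lb0ToLcpPim_LcpToPpKmPip_Line
+        - Hlt2Tutorial_Lb0ToLcpMumNu_LcpToPpKmPip_Line  (placeholder)
+    """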
+
+
+The reasoning for these choices is as follows.
+The principles follow those of the :doc:`../recoconf/recoconf` package, most notably:
+
+  1. Reconstructed objects “flow” through the ``make_*`` functions.
+  2. Functions define a single “logical” step. The global data flow is configured in functions that are as “flat” as possible, in which the logical steps are pieced together.
+
+We make some distinctions for the selection configuration, as selections sit on top of a relatively long data flow.
+Exposing the full chain to adhere to rule 1 would be too explicit, as the starting point would be the raw event.
+It thus makes sense to re-define starting points or basic building blocks for selections.
+In the example case, these are high-level
+objects like `_protons_for_charm`, `_kaons_for_charm` and `_pions_for_charm_and_beauty`.
+
+Defining these building blocks is a design choice that each line author
+`has to make <further_work>`. In our case, we might even want to choose the |lc| candidate
+as a point of entry.
+The basic building blocks can be declared locally in the module with the lines,
+or shared within a working group or with everybody (`standard_particles`).
+
+Another difference from the reconstruction configuration is that the majority of
+the selection uses exactly two algorithms, `ParticleFilter` and `ParticleCombiner`,
+but creates a vast number of instances of them to express the various selections.
+The reconstruction, on the other hand, mostly uses dedicated algorithms,
+most of them with their own default parameter tuning.
+We would like to express our production-ready selections similarly, i.e.
+have well-tuned cuts for specific selection purposes (`_make_lambdacs_for_beauty`).
+Note that we have the flexibility to tune cuts to the last digit for
+every specific purpose separately, but the price of such an approach should be made clear:
+
+.. attention::
+
+    Every call to `ParticleFilter` or `ParticleCombiner` with different inputs
+    or different selection cuts will create a new instance of the algorithm.
+
+    To be explicit, imagine the following: After inspecting first data from
+    both our example decays, we found that the hadronic selection would profit from
+    a tighter |pT| cut on the |lc| candidate. To avoid boilerplate code, we
+    could add an argument to the `_make_lambdacs_for_beauty` function
+    that defines the |pT| cut value and passes it to the functor. We
+    would then call this function with different values from the line-defining
+    function.
+    But this will create 2 instances of `ParticleCombiner`, meaning that the
+    full algorithm will run twice with slightly different cuts.
+
+    This kind of duplication should be avoided as much as possible,
+    especially for relatively expensive operations like 3-body combinations
+    with highly abundant inputs.
+    A faster way in such a case might be to run a `ParticleFilter` on the
+    common |lc| candidates that only applies the tighter |pT| cut.
+
+    Can you modify the example to confirm that this really speeds up the selection?
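+
+A possible starting point for that exercise is sketched below; ``lambdacs`` stands for the shared
+|lc| maker, and the threshold is made up. Measuring the effect on the timing table is the actual
+exercise::
+
+    tighter_lambdacs = ParticleFilter(
+        lambdacs,
+        # only the additional, tighter cut; everything else was already applied
+        # when the shared Lambda_c+ candidates were built
+        F.FILTER(F.PT > 2.5 * GeV),
+    )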
+
+Selections live at the end of the trigger food chain, and the data flow determines which steps need
+to be taken to make candidates for our line. However, selection steps often have more
+than one input, and the configuration offers the possibility to create artificial barriers in the data flow.
+Taking |lb-sl-decay| as an example, we could decide to run the combiner for |lc-decay|
+only after a muon with large |pT| and high |chi2ip| has been found, as sketched below.
+Such a configuration of course only brings an advantage if the majority of events does not contain such a muon.
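+
+Schematically, such a barrier is created through the order of the ``algs`` list of the line.
+In the sketch below, ``fast_muons``, ``lambdacs`` and ``lbs`` stand for the corresponding
+selection algorithms of the |lb-sl-decay| line; only the ordering is the point here::
+
+    return Hlt2Line(
+        name=name,
+        # each entry acts as a filter: the Lambda_c+ combiner only runs in
+        # events where the muon filter selected at least one candidate
+        algs=upfront_reconstruction() + [require_pvs(pvs), fast_muons, lambdacs, lbs],
+        prescale=prescale,
+    )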
+
+.. seealso::
+
+    To find out if a certain control flow configuration speeds up the selection,
+    in most cases it is sufficient to look at the timing tables and counters
+    of a log file when running on HLT1-filtered minBias data.
+
+Importing a selection or function from a module that defines Hlt2 lines is discouraged.
+That is because the line authors of the module might not be aware that their function is
+used elsewhere and modify their selection, rename the function or change its behaviour.
+If the function to import defines a selection that *should* be common to both modules,
+the question is whether this function should be moved to a shared file within the WG or LHCb,
+or whether both selections should be part of the same module.
+
+Line names have been briefly discussed in `the section on Line declaration <line-declaration>`.
+There is no strict common set of best practices, but it makes sense to think about consistent
+names of lines beforehand, as we would like to avoid re-naming lines during data-taking.
+For debugging purposes, it has proven useful to overwrite the default names of combiners
+(e.g. ``TwoBodyCombiner#123`` to ``Tutorial_Lb0_Combiner``). For (machine-)readability
+it is useful to have names like ``MyWG_MyModule_MyCombiner``.
+We don't recommend naming filters, as it can easily lead to clashes during the automatic
+code-deduplication stage. See also `Moore#378 <https://gitlab.cern.ch/lhcb/Moore/-/issues/378>`_ and
+`Moore#380 <https://gitlab.cern.ch/lhcb/Moore/-/issues/380>`_.
+The naming of builder-, filter- and line-defining functions itself is, apart from the leading underscore
+for local functions, not of great concern. We recommend keeping them in `snake_case`, short and
+descriptive.
+
+Adding comments to the code is recommended. The comments should add information on the selection,
+provide pointers to further documentation, or remind the authors and others of future steps (``# TODO``).
+Commenting out code is discouraged.
 
-Further work
-------------
+Next steps
+----------
 
 An important aspect of authoring an HLT2 line is stepping back and
 spotting instances of code duplication. Multiple instances of the
 same intent can be refactored into a common function. This reduces
-any maintanence burden and decreases the likelihood of two
+any maintenance burden and decreases the likelihood of two
 implementations slowly drifting apart over time (if someone changes
 one but does not know about the existence of the other).
 
-There is some redundancy is our functions, particularly in the ``filter_`` ones
-where some selections and thresholds are duplicated. If our *intent* is that
-the momentum and impact parameters cut should be the same across all final
-state particles (protons, kaons, and pions) then it makes sense to encode this
-as a common function::
-
-    @configurable
-    def filter_particles(particles, pvs, pt_min=0.5 * GeV, mipchi2_min=9, pid=None):
-        cut = require_all(
-            F.PT > pt_min,
-            F.MINIPCHI2(pvs) > mipchi2_min,
-        )
-        # Add the PID cut if a threshold was specified
-        if pid is not None:
-            cut = require_all(cut, pid)
-        return ParticleFilter(particles, F.FILTER(cut))
-
-We still want to expose the PID cut for configuration, so we change the
-original ``filter_protons`` function to delegate to the new filter maker::
-
-    @configurable
-    def filter_protons(particles, pvs, dllp_min=5):
-        cut = F.PID_P > dllp_min
-        return filter_particles(particles, pvs, pid=cut)
-
-There are a few things to note about this implementation:
-
-1. The way we've refactored the common logic means that there is still only a
-   single filter algorithm. We could have ran one filter for the 'common'
-   selection, and a second for the PID selection, but our way is more
-   efficient.
-2. Both functions are decorated as ``@configurable``, so we can still use the
-   ``with...bind`` syntax to configure all aspects of the selection.
-3. We've ended up with two very small functions with very little logic in them.
-   This style is typical of HLT2 lines, and is done on purpose to try to
-   improve readability and allow for simpler debugging and reasoning.
-
-Next steps
-----------
-
 When writing lines it's extremely useful to be able to
-:ref:`analyse the output files and log <analysing-output>`. It's also helpful
+:ref:`analyse the output files <analysing-output>`. It's also helpful
 to refer to the documentation on :doc:`debugging` in case something isn't
-working. Once your ready to start physics performance studies, you can start
+working. Once you're ready to start physics performance studies, you can start
 :doc:`hlt2_analysis`. The :doc:`ganga` page has instructions for writing
 Ganga-compatible options.