diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5ce09795e2bbefceee518a11256278d53fc971e0..e82392ce00edaf29f5f51629554f48543e425134 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -276,6 +276,7 @@ if(TARGET_DEVICE STREQUAL "CPU")
   endif()
 
   function(allen_add_host_library)
+    list(FIND ARGV STATIC is_static)
     foreach(arg IN LISTS ARGN)
       if(${arg} MATCHES "\\.cu$")
         set_source_files_properties(${arg} PROPERTIES LANGUAGE CXX)
@@ -283,17 +284,19 @@ if(TARGET_DEVICE STREQUAL "CPU")
     endforeach()
 
     add_library(${ARGV})
+
     add_library(Allen::${ARGV0} ALIAS ${ARGV0})
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      LIBRARY DESTINATION lib)
+
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+    endif()
   endfunction()
 
   function(allen_add_device_library)
-    set(destination lib)
-    list(FIND ARGV STREAM is_stream)
-    list(FILTER ARGV EXCLUDE REGEX "STREAM")
+    list(FIND ARGV STATIC is_static)
 
     foreach(arg IN LISTS ARGN)
       if(${arg} MATCHES "\\.cu$")
@@ -303,15 +306,14 @@ if(TARGET_DEVICE STREQUAL "CPU")
 
     add_library(${ARGV})
 
-    if (NOT ${is_stream} EQUAL -1)
-      set(destination lib/sequences)
-    endif()
-
     add_library(Allen::${ARGV0} ALIAS ${ARGV0})
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      DESTINATION ${destination})
+
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        DESTINATION ${CMAKE_INSTALL_LIBDIR})
+    endif()
   endfunction()
 
   function(allen_add_executable)
@@ -323,7 +325,7 @@ if(TARGET_DEVICE STREQUAL "CPU")
 
     add_executable(${ARGV})
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
-    install(TARGETS ${ARGV0} RUNTIME DESTINATION bin)
+    install(TARGETS ${ARGV0} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
   endfunction()
 
   string(APPEND CMAKE_CXX_FLAGS " -Wall -Wextra -Wpedantic -Wnon-virtual-dtor -Wdouble-promotion")
@@ -361,23 +363,23 @@ elseif(TARGET_DEVICE STREQUAL "HIP")
   set(CMAKE_EXE_LINKER_FLAGS "-Wl,-rpath,./")
 
   function(allen_add_host_library)
+    list(FIND ARGV STATIC is_static)
+
     add_library(${ARGV})
-    add_library(Allen::${ARGV0} ALIAS ${ARGV0})
     target_include_directories(${ARGV0} PRIVATE ${HIP_PATH}/include ${ROCM_PATH}/hsa/include)
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      LIBRARY DESTINATION lib)
-  endfunction()
 
-  function(allen_add_device_library)
-    set(destination lib)
-    list(FIND ARGV STREAM is_stream)
-    list(FILTER ARGV EXCLUDE REGEX "STREAM")
+    add_library(Allen::${ARGV0} ALIAS ${ARGV0})
 
-    if (NOT ${is_stream} EQUAL -1)
-      set(destination lib/sequences)
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
     endif()
+  endfunction()
+
+  function(allen_add_device_library)
+    list(FIND ARGV STATIC is_static)
 
     hip_add_library(${ARGV} HIPCC_OPTIONS ${HIPCC_OPTIONS})
 
@@ -385,16 +387,18 @@ elseif(TARGET_DEVICE STREQUAL "HIP")
     target_include_directories(${ARGV0} PRIVATE ${HIP_PATH}/include ${ROCM_PATH}/hsa/include)
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
 
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      LIBRARY DESTINATION ${destination})
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+    endif()
   endfunction()
 
   function(allen_add_executable)
     hip_add_executable(${ARGV} HIPCC_OPTIONS ${HIPCC_OPTIONS})
     target_include_directories(${ARGV0} PRIVATE ${HIP_PATH}/include ${ROCM_PATH}/hsa/include)
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION})
-    install(TARGETS ${ARGV0} RUNTIME DESTINATION bin)
+    install(TARGETS ${ARGV0} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
   endfunction()
 elseif(TARGET_DEVICE STREQUAL "CUDA")
 
@@ -421,45 +425,41 @@ elseif(TARGET_DEVICE STREQUAL "CUDA")
   message(STATUS "Detected CUDA include directory: " ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
 
   function(allen_add_host_library)
-    set(destination lib)
-    list(FIND ARGV STREAM is_stream)
-    list(FILTER ARGV EXCLUDE REGEX "STREAM")
-
-    if (NOT ${is_stream} EQUAL -1)
-      set(destination lib/sequences)
-    endif()
+    list(FIND ARGV STATIC is_static)
 
     add_library(${ARGV})
+    target_include_directories(${ARGV0} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} ${PROJECT_BINARY_DIR}/code_generation)
+    target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
     if(SEPARABLE_COMPILATION)
       set_property(TARGET ${ARGV0} PROPERTY CUDA_SEPARABLE_COMPILATION ON)
     endif()
-    target_include_directories(${ARGV0} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} ${PROJECT_BINARY_DIR}/code_generation)
-    target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
+
     add_library(Allen::${ARGV0} ALIAS ${ARGV0})
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      LIBRARY DESTINATION ${destination})
+
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+    endif()
   endfunction()
 
   function(allen_add_device_library)
-    set(destination lib)
-    list(FIND ARGV STREAM is_stream)
-    list(FILTER ARGV EXCLUDE REGEX "STREAM")
-
-    if (NOT ${is_stream} EQUAL -1)
-      set(destination lib/sequences)
-    endif()
+    list(FIND ARGV STATIC is_static)
 
     add_library(${ARGV})
+    target_include_directories(${ARGV0} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
+    target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
     if(SEPARABLE_COMPILATION)
       set_property(TARGET ${ARGV0} PROPERTY CUDA_SEPARABLE_COMPILATION ON)
     endif()
-    target_include_directories(${ARGV0} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
-    target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION} ODIN_WITHOUT_GAUDI)
+
     add_library(Allen::${ARGV0} ALIAS ${ARGV0})
-    install(TARGETS ${ARGV0}
-      EXPORT Allen
-      LIBRARY DESTINATION ${destination})
+
+    if (${is_static} EQUAL -1)
+      install(TARGETS ${ARGV0}
+        EXPORT Allen
+        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+    endif()
   endfunction()
 
   function(allen_add_executable)
@@ -469,7 +469,7 @@ elseif(TARGET_DEVICE STREQUAL "CUDA")
     endif()
     target_include_directories(${ARGV0} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
     target_compile_definitions(${ARGV0} PRIVATE ${TARGET_DEFINITION})
-    install(TARGETS ${ARGV0} RUNTIME DESTINATION bin)
+    install(TARGETS ${ARGV0} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
   endfunction()
 
   string(APPEND CMAKE_CXX_FLAGS " -Wall -Wextra -Wpedantic -Wnon-virtual-dtor -Wdouble-promotion")
@@ -485,8 +485,6 @@ set(GROUPCOMP_DIR ${PROJECT_BINARY_DIR}/group_comp)
 file(MAKE_DIRECTORY ${GROUPCOMP_DIR})
 
 function(allen_add_device_library_unified)
-  set(destination lib)
-
   foreach(arg IN LISTS ARGN)
     if(${arg} MATCHES "\\.cu$")
       list(APPEND LIBRARY_SOURCES ${arg})
@@ -518,7 +516,7 @@ function(allen_add_device_library_unified)
 
   install(TARGETS ${ARGV0}
     EXPORT Allen
-    LIBRARY DESTINATION ${destination})
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
 
   MATH(EXPR COUNTER "${COUNTER}+1")
 endfunction()
@@ -580,6 +578,14 @@ if(BUILD_TESTING)
   add_subdirectory(test/unit_tests)
 endif()
 
+# Interface library that will be used for common functionality
+add_library(AllenCommon INTERFACE)
+target_include_directories(AllenCommon
+  INTERFACE
+  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/main/include>
+  $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/test/contracts/include>)
+
 add_subdirectory(configuration)
 add_subdirectory(backend)
 add_subdirectory(host)
@@ -590,14 +596,6 @@ add_subdirectory(integration)
 add_subdirectory(zmq)
 add_subdirectory(stream)
 
-# Interface library that will be used for common functionality
-add_library(AllenCommon INTERFACE)
-target_include_directories(AllenCommon
-  INTERFACE
-  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/main/include>
-  $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
-  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/test/contracts/include>)
-
 if (STANDALONE)
   target_compile_definitions(AllenCommon INTERFACE ALLEN_STANDALONE)
 endif()
@@ -653,7 +651,7 @@ message(STATUS "CMAKE CXX FLAGS: " ${CMAKE_CXX_FLAGS})
 
 
 # Add host library
-allen_add_host_library(HostCommon STATIC
+allen_add_host_library(HostCommon SHARED
   main/src/BankTypes.cpp
   main/src/InputReader.cpp
   main/src/InputTools.cpp
@@ -668,7 +666,7 @@ allen_add_host_library(HostCommon STATIC
 target_link_libraries(AllenCommon INTERFACE
   LHCbEvent AllenFS nlohmann_json::nlohmann_json cppgsl::cppgsl)
 target_link_libraries(HostCommon PRIVATE
-  mdf EventModel Gear Backend mdf NonEventData AllenCommon LHCbEvent Boost::iostreams)
+  mdf EventModel Gear Backend AllenCommon LHCbEvent Boost::iostreams)
 
 allen_add_host_library(AllenLib SHARED
   main/src/Allen.cpp
@@ -679,30 +677,60 @@ allen_add_host_library(AllenLib SHARED
   main/src/ZMQOutputSender.cpp)
 
 target_compile_definitions(AllenLib PUBLIC ${TARGET_DEFINITION})
+target_include_directories(AllenLib PUBLIC
+  $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
 
 target_link_libraries(AllenLib
   PRIVATE
+    algorithm_db
+    Associate
     Backend
-    Stream
-    HostCommon
+    Calo
+    CheckClustering
+    Combiners
+    DeviceValidators
+    EventModel
+    Examples
+    HostClustering
+    HostCombiners
+    HostDataProvider
+    HostDummyMaker
+    HostErrorBanks
     HostEventModel
+    HostGEC
+    HostInitEventList
+    HostPrefixSum
     HostRoutingBits
+    HostTAEFilter
+    Kalman
+    LHCbEvent
+    Lumi
+    Monitoring
+    Muon
     NonEventData
-    TrackChecking
     PVChecking
-    CheckClustering
+    PV_beamline
+    Plume
+    SciFi
     SelChecking
-    Monitoring
-    HostCombiners
-    EventModel
-    Gear
+    Selections
+    Stream
+    TrackChecking
+    UT
+    Utils
+    Validators
+    Velo
+    VertexFitter
     mdf
-    LHCbEvent
+    track_matching
   PUBLIC
+    Gear
     AllenCommon
+    HostCommon
     AllenRuntime
     Threads::Threads
     AllenZMQ
+    NonEventDataHeaders
     ${CMAKE_DL_LIBS})
 
 # To propagate filesystem includes/libs
@@ -710,13 +738,20 @@ target_link_libraries(AllenLib PRIVATE AllenFS)
 target_link_libraries(AllenCommon INTERFACE ROOT::RIO ROOT::Core ROOT::Cling ROOT::Hist ROOT::Tree TBB::tbb)
 
 if (NOT STANDALONE)
-  target_link_libraries(AllenLib PRIVATE AIDA::aida)
+  target_link_libraries(AllenLib
+    PRIVATE
+      AIDA::aida
+      Configuration)
+  target_link_libraries(AllenLib
+    PUBLIC
+      LHCb::HltServicesLib)
 endif()
 
 allen_add_executable(Allen main/src/main.cpp)
+add_dependencies(Allen Sequences)
 
-target_link_libraries(Allen PRIVATE AllenLib NonEventData Gear)
-
+target_link_libraries(Allen PRIVATE AllenLib)
+set_target_properties(Allen PROPERTIES BUILD_RPATH_USE_ORIGIN TRUE)
 
 if (NOT STANDALONE)
     include(FileContentMetadataRepository)
diff --git a/Dumpers/BinaryDumpers/CMakeLists.txt b/Dumpers/BinaryDumpers/CMakeLists.txt
index 74e2bdb581e447c607e221a9d09aac484cd88378..d04214f9a9770300a31a3023eee04168793e378e 100644
--- a/Dumpers/BinaryDumpers/CMakeLists.txt
+++ b/Dumpers/BinaryDumpers/CMakeLists.txt
@@ -10,11 +10,13 @@ gaudi_add_library(BinaryDumpers
                     src/lib/TestUTBoards.cpp
                     src/lib/Utils.cpp
                   LINK PUBLIC
+                    NonEventDataHeaders
+                    Gear
                     AllenCommon
                     HostCommon
                     EventModel
-                    Backend
-                    NonEventData
+                    AllenRuntime
+                    AllenLib
                     Gaudi::GaudiKernel
                     LHCb::DAQEventLib
                     LHCb::FTDAQLib
diff --git a/Dumpers/BinaryDumpers/options/allen.py b/Dumpers/BinaryDumpers/options/allen.py
index 82dd67720413d8c071b9e106bafa336b48038106..eead9ce44762f588cd0e5408a3ad71b468e166b6 100755
--- a/Dumpers/BinaryDumpers/options/allen.py
+++ b/Dumpers/BinaryDumpers/options/allen.py
@@ -5,6 +5,7 @@
 import os
 import sys
 import zmq
+import re
 from Configurables import ApplicationMgr
 from Configurables import Gaudi__RootCnvSvc as RootCnvSvc
 
@@ -142,6 +143,13 @@ parser.add_argument(
     default=False,
     help="Use binary files as the geometry",
 )
+parser.add_argument(
+    "--tck-no-bindings",
+    help="Avoid using python bindings to TCK utils",
+    dest="bindings",
+    action="store_false",
+    default=True
+)
 
 args = parser.parse_args()
 
@@ -190,12 +198,44 @@ extSvc = ["ToolSvc", "AuditorSvc", "ZeroMQSvc"]
 rootSvc = RootCnvSvc("RootCnvSvc", EnableIncident=1)
 ApplicationMgr().ExtSvc += ["Gaudi::IODataManager/IODataManager", rootSvc]
 
+# Get Allen JSON configuration
+sequence = os.path.expandvars(args.sequence)
+sequence_json = ""
+tck_option = re.compile(r"([^:]+):(0x[a-fA-F0-9]{8})")
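+# A --sequence value of the form "repo:0xXXXXXXXX" selects a TCK from a git
+# repository, e.g. "config.git:0x10000001" (illustrative values); any other
+# value is treated as the path to a JSON configuration file.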
+if (m := tck_option.match(sequence)):
+    from Allen.tck import sequence_from_git, dependencies_from_build_manifest
+    import json
+
+    repo = m.group(1)
+    tck = m.group(2)
+    sequence_json, tck_info = sequence_from_git(repo, tck, use_bindings=args.bindings)
+    tck_deps = tck_info["metadata"]["dependencies"]
+    if not sequence_json or sequence_json == 'null':
+        print(
+            f"Failed to obtain configuration for TCK {tck} from repository {repo}"
+        )
+        sys.exit(1)
+    elif (deps :=
+          dependencies_from_build_manifest()) != tck_deps:
+        print(
+            f"TCK {tck} is compatible with Allen release {deps}, not with {tck_deps}."
+        )
+        sys.exit(1)
+    else:
+        print(
+            f"Loaded TCK {tck} with sequence type {tck_info['type']} and label {tck_info['label']}."
+        )
+else:
+    with open(sequence) as f:
+        sequence_json = f.read()
+
 if args.mep:
     extSvc += ["AllenConfiguration", "MEPProvider"]
     from Configurables import MEPProvider, AllenConfiguration
 
     allen_conf = AllenConfiguration("AllenConfiguration")
-    allen_conf.JSON = args.sequence
+    # Newlines in a string property cause issues
+    allen_conf.JSON = sequence_json.replace('\n', '')
     allen_conf.OutputLevel = 3
 
     mep_provider = MEPProvider()
@@ -255,8 +295,7 @@ if not args.binary_geometry:
                     'SIMCOND': options.conddb_tag,
                 }))
 
-if not args.binary_geometry:
-    bank_types = configured_bank_types(args.sequence)
+    bank_types = configured_bank_types(sequence_json)
     cf_node = setup_allen_non_event_data_service(
         allen_event_loop=True, bank_types=bank_types)
     config.update(configure(options, cf_node, make_odin=make_odin))
@@ -278,14 +317,13 @@ for flag, value in [("g", args.det_folder), ("params", params),
                     ("r", args.repetitions), ("output-file", args.output_file),
                     ("output-batch-size", args.output_batch_size),
                     ("m", args.reserve), ("v", args.verbosity),
-                    ("p", args.print_memory),
-                    ("sequence", os.path.expandvars(args.sequence)),
+                    ("p", args.print_memory), ("sequence", sequence),
                     ("s", args.slices), ("mdf", os.path.expandvars(args.mdf)),
                     ("disable-run-changes", int(not args.enable_run_changes)),
                     ("monitoring-save-period", args.mon_save_period),
                     ("monitoring-filename", args.mon_filename),
                     ("events-per-slice", args.events_per_slice),
-                    ("device", args.device), ("run-from-json", "1"),
+                    ("device", args.device),
                     ("enable-monitoring-printing",
                      args.enable_monitoring_printing),
                     ("register-monitoring-counters",
@@ -306,8 +344,9 @@ if args.mep:
     mep_provider = gaudi.service("MEPProvider", interface=gbl.IService)
     provider = cast_service(gbl.IInputProvider, mep_provider)
 else:
-    provider = gbl.Allen.make_provider(options)
-output_handler = gbl.Allen.output_handler(provider, zmqSvc, options)
+    provider = gbl.Allen.make_provider(options, sequence_json)
+output_handler = gbl.Allen.output_handler(provider, zmqSvc, options,
+                                          sequence_json)
 
 # run Allen
 gbl.allen.__release_gil__ = 1
@@ -336,8 +375,9 @@ def allen_thread():
     if args.profile == "CUDA":
         runtime_lib.cudaProfilerStart()
 
-    gbl.allen(options, updater, shared_wrap(gbl.IInputProvider, provider),
-              output_handler, zmqSvc, con.c_str())
+    gbl.allen(options, sequence_json, updater,
+              shared_wrap(gbl.IInputProvider, provider), output_handler,
+              zmqSvc, con.c_str())
 
     if args.profile == "CUDA":
         runtime_lib.cudaProfilerStop()
diff --git a/Dumpers/BinaryDumpers/src/TransposeRawBanks.cpp b/Dumpers/BinaryDumpers/src/TransposeRawBanks.cpp
index 8b126f85b2b40a592c3876beb29f040d529c8587..a4b88eb10308d86bef98f19d95b5c7056ad339d8 100644
--- a/Dumpers/BinaryDumpers/src/TransposeRawBanks.cpp
+++ b/Dumpers/BinaryDumpers/src/TransposeRawBanks.cpp
@@ -120,7 +120,13 @@ std::array<TransposedBanks, LHCb::RawBank::types().size()> TransposeRawBanks::op
 
   // We have to deal with the fact that calo banks can come in different types
   for (auto bt : m_bankTypes.value()) {
-    if (bt == LHCb::RawBank::EcalPacked || bt == LHCb::RawBank::HcalPacked) {
+    if (bt == LHCb::RawBank::VP || bt == LHCb::RawBank::VPRetinaCluster) {
+      if (rawBanks[LHCb::RawBank::VP].empty() && rawBanks[LHCb::RawBank::VPRetinaCluster].empty()) {
+        // Both VP and Retina banks are empty
+        throw GaudiException {"Cannot find " + toString(bt) + " raw bank.", "", StatusCode::FAILURE};
+      }
+    }
+    else if (bt == LHCb::RawBank::EcalPacked || bt == LHCb::RawBank::HcalPacked) {
       if (rawBanks[bt].empty() && rawBanks[LHCb::RawBank::Calo].empty()) {
         // Old-style calo banks empty and new-style calo banks also empty
         throw GaudiException {"Cannot find " + toString(bt) + " raw bank.", "", StatusCode::FAILURE};
diff --git a/Dumpers/BinaryDumpers/src/lib/Utils.cpp b/Dumpers/BinaryDumpers/src/lib/Utils.cpp
index 7c11567f1c49474282ed333cf1d832237ba09d1c..642653e6cf7e23845e81ee328c4f0c2d6a3cab68 100644
--- a/Dumpers/BinaryDumpers/src/lib/Utils.cpp
+++ b/Dumpers/BinaryDumpers/src/lib/Utils.cpp
@@ -6,7 +6,6 @@
 
 #include <Dumpers/Utils.h>
 #include <Dumpers/IUpdater.h>
-#include <Updater.h>
 
 namespace {
   namespace fs = boost::filesystem;
@@ -38,12 +37,3 @@ size_t MuonUtils::size_index(
     return index + 4 * tile.nY() - 2 * gridY[idx] + (2 * tile.nX() / gridX[idx]);
   }
 }
-
-Allen::NonEventData::IUpdater* binary_updater(std::map<std::string, std::string> const& options)
-{
-  static std::unique_ptr<Allen::NonEventData::IUpdater> updater;
-  if (!updater) {
-    updater = std::make_unique<Allen::NonEventData::Updater>(options);
-  }
-  return updater.get();
-}
diff --git a/Dumpers/BinaryDumpers/tests/qmtest/lhcb_geometry_allen_event_loop.qmt b/Dumpers/BinaryDumpers/tests/qmtest/lhcb_geometry_allen_event_loop.qmt
index 9d37b026f7e736e42503d2c41cc9dda4b5ae437e..1d8e098e69e9e7c4496112c403bf4eb2e3171d01 100644
--- a/Dumpers/BinaryDumpers/tests/qmtest/lhcb_geometry_allen_event_loop.qmt
+++ b/Dumpers/BinaryDumpers/tests/qmtest/lhcb_geometry_allen_event_loop.qmt
@@ -18,6 +18,7 @@
     <text>--tags=dd4hep:trunk,master|detdesc:dddb-20220705,sim-20220705-vc-md100</text>
     <text>--mdf</text><text>root://eoslhcb.cern.ch///eos/lhcb/wg/rta/samples/mc/Jira_LHCBGAUSS-2635/Minbias_MagDown_Boole_lhcbhead3337_dddb-20220705_sim-20220705-vc-md100.mdf</text>
     <text>--sequence</text><text>$ALLEN_INSTALL_DIR/constants/hlt1_pp_no_ut.json</text>
+    <text>--monitoring-filename</text><text>allen_event_loop.root</text>
     <text>-n</text><text>10000</text>
   </set></argument>
   <arguement name="timeout"><integer>600</integer></arguement>
diff --git a/Rec/Allen/CMakeLists.txt b/Rec/Allen/CMakeLists.txt
index f3eb8a8fff9ee0a317f8b477d489532e24f24894..66e26dd065711460af9e3c8687a9e1d72c5f73de 100755
--- a/Rec/Allen/CMakeLists.txt
+++ b/Rec/Allen/CMakeLists.txt
@@ -36,7 +36,6 @@ gaudi_add_module(AllenWrapper
                    HostCommon
                    HostEventModel
                    HostRoutingBits
-                   NonEventData
                    TrackChecking
                    PVChecking
                    CheckClustering
@@ -78,24 +77,26 @@ gaudi_add_module(AllenAlgorithms
                  LINK
                    AllenLib
                    Backend
-                   Stream
+                   CheckClustering
+                   EventModel
+                   Gear
+                   HostCombiners
                    HostCommon
                    HostEventModel
                    HostRoutingBits
+                   LHCbEvent
+                   Monitoring
+                   MuonCommon
                    NonEventData
-                   TrackChecking
                    PVChecking
-                   CheckClustering
                    SelChecking
-                   Monitoring
-                   HostCombiners
-                   EventModel
-                   Gear
-                   mdf
-                   LHCbEvent
-                   EventModel
-                   MuonCommon
+                   SelectionsHeaders
+                   Stream
+                   TrackChecking
+                   UTCommon
+                   Utils
                    WrapperInterface
+                   mdf
                    Gaudi::GaudiAlgLib
                    LHCb::DAQEventLib
                    LHCb::DAQKernelLib
diff --git a/Rec/Allen/python/Allen/__init__.py b/Rec/Allen/python/Allen/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..b4612602b5514182334ac8c0b340a4fad8f196cc 100644
--- a/Rec/Allen/python/Allen/__init__.py
+++ b/Rec/Allen/python/Allen/__init__.py
@@ -0,0 +1,23 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+import os
+
+__path__ += [
+    d for d in [
+        os.path.realpath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "..",
+                "..",
+                "configuration",
+            ))
+    ] if os.path.exists(d)
+]
diff --git a/Rec/Allen/python/Allen/config.py b/Rec/Allen/python/Allen/config.py
index 742a96b1a567e8e3f7437a22e1068f4bbba9b890..4ebedce4dd281971fc8edac60a90a80256f0b6cc 100755
--- a/Rec/Allen/python/Allen/config.py
+++ b/Rec/Allen/python/Allen/config.py
@@ -92,14 +92,12 @@ def allen_detectors(allen_node):
 
 
 def configured_bank_types(sequence_json):
+    sequence_json = json.loads(sequence_json)
     bank_types = set()
-    with open(sequence_json) as json_file:
-        j = json.load(json_file)
-        for t, n, c in j["sequence"]["configured_algorithms"]:
-            props = j.get(n, {})
-            if c == "ProviderAlgorithm" and not bool(
-                    props.get('empty', False)):
-                bank_types.add(props['bank_type'])
+    for t, n, c in sequence_json["sequence"]["configured_algorithms"]:
+        props = sequence_json.get(n, {})
+        if c == "ProviderAlgorithm" and not bool(props.get('empty', False)):
+            bank_types.add(props['bank_type'])
     return bank_types
 
 
diff --git a/Rec/Allen/python/Allen/qmtest/utils.py b/Rec/Allen/python/Allen/qmtest/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c77fa5d9b21575ae3b75bea5ad26b696c6a1ff1
--- /dev/null
+++ b/Rec/Allen/python/Allen/qmtest/utils.py
@@ -0,0 +1,34 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from collections import defaultdict
+
+
+def good_sequence(s):
+    physics = s.startswith('hlt1') and 'validation' not in s
+    extra = s in ('calo_prescaled_plus_lumi', 'passthrough')
+    return physics or extra
+
+
+def print_sequence_differences(a, b):
+    diff_keys = set(a.keys()).symmetric_difference(set(b.keys()))
+    diff = defaultdict(dict)
+    ka = [k for k in a.keys() if k not in diff_keys]
+    for k in ka:
+        props_a = a[k]
+        props_b = b[k]
+        diff_prop_keys = set(props_a.keys()).symmetric_difference(
+            set(props_b.keys()))
+        pka = [k for k in props_a.keys() if k not in diff_prop_keys]
+        for prop_key in pka:
+            if props_a[prop_key] != props_b[prop_key]:
+                diff[k][prop_key] = (props_a[prop_key], props_b[prop_key])
+
+    return dict(diff)
diff --git a/Rec/Allen/python/Allen/tck.py b/Rec/Allen/python/Allen/tck.py
new file mode 100644
index 0000000000000000000000000000000000000000..a854f5360938af2a79517fe4a7a73000bf5743b3
--- /dev/null
+++ b/Rec/Allen/python/Allen/tck.py
@@ -0,0 +1,381 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""Utilities to create and retrieve Allen configurations from the
+persistence format saved in a git repository.
+
+The persistent format is organised as a collection of JSON files, with
+each JSON file representing the configuration of a single
+component. In the case of Allen the only components that exist are
+algorithms.
+
+Each JSON file is stored in the repository at a path that is formatted as:
+
+"scope/namespace::type/instance_name",
+
+where scope is defined as part of the Allen configuration and may
+be DeviceAlgorithm, HostAlgorithm, BarrierAlgorithm, ProviderAlgorithm
+or SelectionAlgorithm. There is an additional "folder" under Scheduler
+where the more-or-less free-form (but still in JSON files)
+configuration of the scheduler/sequence is persisted.
+
+As an example, the layout of the configuration of a single algorithm/component, here for
+"DeviceAlgorithm/velo_search_by_triplet::velo_search_by_triplet_t/velo_search_by_triplet":
+
+{
+    "Kind": "DeviceAlgorithm",
+    "Name": "velo_search_by_triplet",
+    "Properties": {
+        "block_dim_x": "64",
+        "max_scatter": "0.08",
+        "max_skipped_modules": "1",
+        "phi_tolerance": "0.045",
+        "verbosity": "3"
+    },
+    "Type": "velo_search_by_triplet::velo_search_by_triplet_t"
+}
+
+It should be noted that all the values in "Properties" are JSON
+strings and not other JSON types. This is a requirement of
+the persistent format.
+
+For Allen/HLT1 the Scheduler "folder" contains four files:
+argument_dependencies, configured_algorithms, configured_arguments and
+configured_sequence_arguments. These are the same as the entries that
+an Allen-layout configuration expects under "sequence".
+
+Some additional metadata is needed when a configuration is persisted, for example:
+
+{
+   "Release2Type": {
+        "ALLEN_v3r6": "hlt1_pp_only_matching"
+    },
+    "TCK": "0x10000016",
+    "label": "test"
+}
+
+Each of these entries is stored with a "digest" as key, whose value is
+not important, but is also used as a key for the corresponding
+configuration when it is extracted from the git repository.
+
+Some of the code that is needed to persist configurations is not
+available as python bindings, but only through the "hlt_tck_tool"
+executable that resides in LHCb/Hlt/HltServices. The same executable
+is also needed to create a JSON manifest that contains all
+configurations available in the repository.
+"""
+
+import json
+import os
+import sys
+import re
+import importlib
+import importlib.util
+from pathlib import Path
+from lxml import etree
+from hashlib import md5
+from subprocess import PIPE, run
+
+
+def format_tck(tck: int):
+    return f"0x{tck:08X}"
+
+
+def dependencies_from_build_manifest():
+    """Get the built/installed version of Allen from the
+    build/install manifest in the format ALLEN_vXrYpZ where pZ is
+    optional.
+    """
+
+    if "ALLEN_INSTALL_DIR" in os.environ:
+        manifest_tree = etree.parse(
+            os.path.expandvars("${ALLEN_INSTALL_DIR}/manifest.xml"))
+        projects = (
+            [manifest_tree.find("project")] +
+            [p for p in manifest_tree.find("used_projects").iterchildren()])
+        deps = {p.get("name"): p.get("version") for p in projects}
+        deps["LCG"] = manifest_tree.find("heptools").find("version").text
+        return deps
+    else:
+        return {}
+
+
+def sequence_to_tck(config: dict):
+    """Convert an "Allen" configuration to the format required for
+    persistence. This includes in particular the (JSON) serialization
+    of all property values to strings.
+    """
+
+    tck_config = {"Scheduler/" + k: v for k, v in config["sequence"].items()}
+
+    for alg_type, alg_name, alg_kind in config["sequence"][
+            "configured_algorithms"]:
+        properties = {
+            k: v if type(v) == str else json.dumps(v)
+            for k, v in config[alg_name].items()
+        }
+        tck_config[f"{alg_kind}/{alg_type}/{alg_name}"] = {
+            "Name": alg_name,
+            "Kind": alg_kind,
+            "Type": alg_type,
+            "Properties": properties,
+        }
+
+    return tck_config
+
+
+def tck_to_sequence(config: dict):
+    """Convert a persisted configuration to an "Allen" configuration.
+    """
+
+    scheduler_entries = [
+        k.split("/")[1] for k in config.keys() if k.startswith("Scheduler/")
+    ]
+    sequence_config = {
+        "sequence": {e: config["Scheduler/" + e]
+                     for e in scheduler_entries}
+    }
+
+    for alg_type, alg_name, alg_kind in sequence_config["sequence"][
+            "configured_algorithms"]:
+        tck_props = config[f"{alg_kind}/{alg_type}/{alg_name}"]["Properties"]
+        properties = {}
+        for k, v in tck_props.items():
+            try:
+                properties[k] = json.loads(v)
+            except json.JSONDecodeError:
+                properties[k] = v
+        sequence_config[alg_name] = properties
+
+    return sequence_config
+
+
+def json_tck_db(configuration: dict, sequence_type: str, metadata: dict,
+                tck: int):
+    """Create a JSON-formatted string that hlt_tck_tool can
+    write to a git repository.
+
+    The hlt_tck_tool resides in LHCb/Hlt/HltServices. It is passed a
+    JSON-formatted string through stdin. The JSON contains two
+    entries: a single-entry manifest with the key "manifest" and the
+    respective configuration with its digest as key. The same digest
+    is used as the key for the entry containing the metadata in the
+    manifest.
+
+    """
+    if len(hex(tck)) != 10 or hex(tck)[2] != "1":
+        raise ValueError(
+            "Badly formatted TCK, it must be a 32 bit hex number with most significant byte set to 1"
+        )
+
+    # Add the configuration to the TCK
+    tck_config = sequence_to_tck(configuration)
+
+    # The value of the digest is not important as long as it matches
+    # between the manifest and the key of the configuration. Use MD5
+    # as that was used extensively and more meaningfully in Run 2.
+    # The digest is calculated without including metadata.json
+    # (which contains the digest itself!)
+    digest = md5(json.dumps(tck_config).encode("utf-8")).hexdigest()
+    metadata = metadata.copy()
+    metadata["digest"] = digest
+
+    # Add the metadata to the TCK in a file called "metadata.json"
+    # This is a name we can "never" change!
+    tck_config['metadata.json'] = metadata
+
+    manifest = {
+        # FIXME the digest, TCK and branch are redundant, they're all in metadata
+        digest: {
+            "TCK": hex(tck),
+            "branch": sequence_type,
+            "metadata": metadata
+        }
+    }
+    return {"manifest": manifest, digest: tck_config}
+
+
+def sequence_from_python(python_file: Path, node_name="hlt1_node") -> dict:
+    """Retrieve an Allen configuration in JSON format from a python module
+    """
+
+    from AllenCore.allen_standalone_generator import generate, build_sequence
+    from AllenCore.AllenSequenceGenerator import generate_json_configuration
+
+    module_name = python_file.stem
+
+    node = None
+    with generate.bind(noop=True):
+        if python_file.suffix == "":
+            # Load sequence module from installed sequence
+            mod = importlib.import_module(f"AllenSequences.{module_name}")
+        else:
+            # Load sequence module from python file
+            spec = importlib.util.spec_from_file_location(
+                module_name, python_file)
+            mod = importlib.util.module_from_spec(spec)
+            sys.modules[module_name] = mod
+            spec.loader.exec_module(mod)
+
+        node = getattr(mod, node_name, None)
+
+    if node is None:
+        print(
+            f"Failed to get {node_name} from sequence file {str(python_file)}")
+        return None
+
+    algorithms = build_sequence(node, verbose=False)
+    return generate_json_configuration(algorithms)
+
+
+def sequence_to_git(
+        repository: Path,
+        sequence: dict,
+        sequence_type: str,
+        label: str,
+        tck: int,
+        stack: str,
+        extra_metadata={},
+        write_intermediate=False,
+):
+    """Write an Allen configuration to a git repository with metadata.
+    """
+    from Allen import TCK
+
+    if not re.match(r"^0x1[0-9A-F]{7}$", format_tck(tck)):
+        raise ValueError(
+            f"TCK {format_tck(tck)} does not match 0x1XXXXXXX pattern")
+
+    # Collect metadata for TCK
+    metadata = extra_metadata.copy()
+    metadata["version"] = 1  # updating this must be synchronised with TCKUtils
+    metadata["TCK"] = format_tck(tck)
+    metadata["config_version"] = ["Allen", TCK.config_version]
+    metadata[
+        "application"] = "Hlt1"  # match the "SourceID" or the "process/stage"
+    metadata["label"] = label
+    metadata["type"] = sequence_type
+    metadata["stack"] = {
+        "name": stack,
+        "projects": dependencies_from_build_manifest()
+    }
+
+    # Create JSON TCK DB
+    db = json_tck_db(sequence, sequence_type, metadata, tck)
+    if write_intermediate:
+        with open(hex(tck) + ".json", "w") as f:
+            json.dump(db, f, indent=4, sort_keys=True)
+
+    p = run(
+        ["hlt_tck_tool", "--convert-to-git", "-", f"{str(repository)}"],
+        stdout=PIPE,
+        stderr=PIPE,
+        input=json.dumps(db, indent=4, sort_keys=True),
+        encoding="ascii",
+    )
+
+    if p.returncode != 0:
+        print(p.stdout)
+        print(p.stderr)
+        raise RuntimeError("Failed to convert sequence to git repo")
+
+
+def sequence_from_git(repository: Path, tck: str, use_bindings=True) -> str:
+    """Retrieve the Allen configuration identified by the given TCK
+    from a git repository.
+
+    use_bindings determines whether the Python bindings (default) or
+    the hlt_tck_tool executable is used to retrieve the JSON configuration.
+    """
+
+    if use_bindings:
+        from Allen import TCK
+        sequence, info = TCK.sequence_from_git(str(repository), tck)
+        tck_info = {
+            k: getattr(info, k)
+            for k in ("digest", "tck", "release", "type", "label")
+        }
+        tck_info["metadata"] = json.loads(info.metadata)
+        return (sequence, tck_info)
+    else:
+        p = run(
+            [
+                "hlt_tck_tool",
+                f"--tck={tck}",
+                "--convert-to-json",
+                f"{str(repository)}",
+                "-",
+            ],
+            stdout=PIPE,
+        )
+        if p.returncode != 0:
+            print("Failed to convert configuration in git repo to JSON")
+            return None
+        tck_db = json.loads(p.stdout)
+        digest, manifest_entry = next(
+            ((k, m) for k, m in tck_db["manifest"].items() if m["TCK"] == tck),
+            (None, None))
+        if digest is None:
+            print(f"TCK {tck} not found in repository {str(repository)}")
+            return None
+        release, seq_type = next(
+            (k, v) for k, v in manifest_entry["Release2Type"].items())
+        tck = manifest_entry["TCK"]
+        label = manifest_entry["label"]
+        metadata = manifest_entry["metadata"]
+        info = {
+            "digest": digest,
+            "tck": tck,
+            "metadata": metadata,
+            "type": seq_type,
+            "label": label,
+            "metadata": metadata
+        }
+        return (json.dumps(tck_to_sequence(tck_db[digest])), info)
+
+
+def property_from_git(repository: Path,
+                      tck: str,
+                      algorithm=".*",
+                      property=".*"):
+    """Retrieve an Allen configuration identified by TCK from a git
+    repository and extract specific properties from it, using regexes
+    to match the algorithm name and property key.
+    """
+    alg_re = re.compile(algorithm)
+    prop_re = re.compile(property)
+
+    s, _ = sequence_from_git(repository, tck)
+    sequence = json.loads(s)
+
+    result = {}
+    for alg, props in sequence.items():
+        if alg == "scheduler" or not alg_re.match(alg):
+            continue
+        prop_result = {k: v for k, v in props.items() if prop_re.match(k)}
+        if prop_result:
+            result[alg] = prop_result
+
+    return result
+
+
+def manifest_from_git(repository: Path):
+    """Use hlt_tck_tool to retrieve the manifest for a git
+    repository.
+    """
+
+    args = [
+        "hlt_tck_tool", "--list-manifest-as-json", f"{str(repository)}", "-"
+    ]
+    p = run(args, stdout=PIPE, stderr=PIPE)
+    if p.returncode != 0:
+        print("Failed to convert manifest from git repo to JSON")
+        print(p.stdout)
+        print(p.stderr)
+        return None
+    else:
+        return json.loads(p.stdout)
diff --git a/Rec/Allen/python/AllenAlgorithms/__init__.py b/Rec/Allen/python/AllenAlgorithms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8044040360a54550a0d9fbe64a0ef0b52fab8ccb
--- /dev/null
+++ b/Rec/Allen/python/AllenAlgorithms/__init__.py
@@ -0,0 +1,25 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+import os
+
+__path__ += [
+    d for d in [
+        os.path.realpath(
+            os.path.join(
+                os.path.dirname(__file__),
+                "..",
+                "..",
+                "code_generation",
+                "sequences",
+                "AllenAlgorithms",
+            ))
+    ] if os.path.exists(d)
+]
diff --git a/Rec/Allen/scripts/create_hlt1_tck.py b/Rec/Allen/scripts/create_hlt1_tck.py
new file mode 100644
index 0000000000000000000000000000000000000000..85cd7c9d0c862a8153f564c96c7f1ebe486c7ac6
--- /dev/null
+++ b/Rec/Allen/scripts/create_hlt1_tck.py
@@ -0,0 +1,169 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+import os
+import argparse
+import json
+import sys
+import subprocess
+import logging
+from PyConf.filecontent_metadata import flush_key_registry, retrieve_encoding_dictionary, metainfo_repos, ConfigurationError, FILE_CONTENT_METADATA
+from Allen.tck import sequence_to_git, sequence_from_python
+from pathlib import Path
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+
+parser = argparse.ArgumentParser(description="""
+Persist an Allen configuration in a git repository identified by a TCK
+
+The configuration can be obtained from:
+- a JSON file
+- a python module that generates a configuration
+- a python file that generates a configuration
+
+Some metadata is also persisted.
+""")
+parser.add_argument("stack")
+parser.add_argument("sequence")
+parser.add_argument("repository")
+parser.add_argument("tck", help="A 32-bit hexadecimal number")
+parser.add_argument(
+    "-t,--hlt1-type",
+    type=str,
+    help=
+    "Sequence type to use; also used as branch name in the Git repository.",
+    default='',
+    dest='sequence_type')
+parser.add_argument(
+    "--python-hlt1-node",
+    type=str,
+    help=
+    "Name of the variable that stores the configuration in the python module or file",
+    default="hlt1_node",
+    dest="hlt1_node",
+)
+parser.add_argument(
+    "--label",
+    help="Label persisted as metadata together with the TCK",
+    default="test",
+    type=str,
+)
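+# Example invocation (stack, sequence and TCK values are illustrative; cf.
+# the create_hlt1_tcks test):
+#   python create_hlt1_tck.py RTA/2050.01.01 hlt1_pp_default config.git 0x10000001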
+
+args = parser.parse_args()
+
+sequence_arg = Path(args.sequence)
+repository = Path(args.repository)
+tck = int(args.tck, 16)
+type_arg = args.sequence_type if args.sequence_type != '' else sequence_arg.stem
+
+local_metainfo_repo = Path("./lhcb-metainfo/.git")
+tck_metainfo_repos = [(str(local_metainfo_repo.resolve()), "master"),
+                      (FILE_CONTENT_METADATA, "master")]
+
+# Unset this environment variable to force generation of new encoding
+# keys in a local repo if they are not in the cvmfs one
+build_metainfo_repo = os.environ.pop('LHCbFileContentMetaDataRepo', None)
+if build_metainfo_repo is not None and not local_metainfo_repo.exists():
+    result = subprocess.run([
+        'git', 'clone', '-q', build_metainfo_repo,
+        str(local_metainfo_repo.resolve()).removesuffix('/.git')
+    ],
+                            capture_output=True,
+                            text=True,
+                            check=False)
+    if result.returncode != 0:
+        print(
+            f"Failed to clone build metainfo repo {build_metainfo_repo} to local repo"
+        )
+        sys.exit(1)
+
+
+def dec_reporter_name(conf):
+    return next(
+        (n for t, n, _ in conf["sequence"]["configured_algorithms"]
+         if t == "dec_reporter::dec_reporter_t"),
+        None,
+    )
+
+
+sequence = None
+if sequence_arg.suffix in (".py", ""):
+    from AllenCore.configuration_options import is_allen_standalone
+
+    is_allen_standalone.global_bind(standalone=True)
+
+    from AllenConf.persistency import make_dec_reporter
+
+    sequence, dn = {}, None
+    # Load the python module to get the sequence configuration; set
+    # the TCK to the right value and flush encoding keys
+    with (make_dec_reporter.bind(TCK=tck), flush_key_registry()):
+        sequence = sequence_from_python(sequence_arg, node_name=args.hlt1_node)
+        sequence = json.loads(json.dumps(sequence, sort_keys=True))
+
+    # Check that at least the dec_reporter is part of the sequence,
+    # otherwise it's meaningless to create a TCK for this sequence.
+    dn = dec_reporter_name(sequence)
+    if dn is None:
+        print(
+            f"Cannot create TCK {hex(tck)} for sequence {type_arg}, because it does not contain the dec_reporter"
+        )
+        sys.exit(1)
+elif sequence_arg.suffix == ".json":
+    # Load the sequence configuration from a JSON file
+    sequence, dn = {}, None
+    with open(sequence_arg, "r") as sequence_file:
+        sequence = json.load(sequence_file)
+
+    # Get the dec reporter and set its TCK property to the right value
+    # before creating the TCK from the configuration
+    dn = dec_reporter_name(sequence)
+    if dn is None:
+        print(
+            f"Cannot create TCK {hex(tck)} for sequence {type_arg}, because it does not contain the dec_reporter"
+        )
+        sys.exit(1)
+    else:
+        sequence[dn]["tck"] = tck
+
+# Store the configuration in the Git repository and tag it with the TCK
+try:
+    sequence_to_git(repository, sequence, type_arg, args.label, tck,
+                    args.stack, {"settings": sequence_arg.stem}, True)
+    print(f"Created TCK {hex(tck)} for sequence {type_arg}")
+except RuntimeError as e:
+    print(e)
+    sys.exit(1)
+
+
+def get_encoding_key(repo):
+    try:
+        with metainfo_repos.bind(repos=[(repo, "master")]):
+            return retrieve_encoding_dictionary(
+                reports_key, require_key_present=True)
+    except ConfigurationError:
+        return None
+
+
+# Check that the encoding key is either in CVMFS or in the local
+# metadata repository
+reports_key = sequence[dn]["encoding_key"]
+
+local_key, key_present = (False, False)
+if local_metainfo_repo.exists():
+    encoding = get_encoding_key(str(local_metainfo_repo.resolve()))
+    key_present = local_key = encoding is not None
+if not local_key:
+    encoding = get_encoding_key(FILE_CONTENT_METADATA)
+    key_present = encoding is not None
+
+if not key_present:
+    print("Key {} cannot be found!".format(hex(reports_key)))
+    sys.exit(1)
diff --git a/Rec/Allen/tests/options/compare_hlt1_tcks.py b/Rec/Allen/tests/options/compare_hlt1_tcks.py
new file mode 100644
index 0000000000000000000000000000000000000000..54828bece5fd6bcfe8c7f4acde32159464dfc72b
--- /dev/null
+++ b/Rec/Allen/tests/options/compare_hlt1_tcks.py
@@ -0,0 +1,74 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""For all configurations persisted by the create_hlt1_tcks
+test/options file, load them from two repositories and from the JSON
+files generated at build time. The entries in the two git repositories
+were created from the JSON files generated at build time and directly
+from their respective python modules.
+
+If all configurations are identical then neither the persistence nor
+the generation of configurations alters the content of the
+configuration.
+"""
+
+import os
+import sys
+import json
+from Allen.qmtest.utils import print_sequence_differences
+from Allen.tck import manifest_from_git, sequence_from_git
+from pathlib import Path
+
+seq_dir = Path(os.path.expandvars("${ALLEN_INSTALL_DIR}/constants"))
+json_repo = Path(os.getenv("PREREQUISITE_0", "")) / "config_json.git"
+python_repo = Path(os.getenv("PREREQUISITE_0", "")) / "config_python.git"
+
+manifest_json = manifest_from_git(json_repo)
+manifest_python = manifest_from_git(python_repo)
+
+# Digests are not necessarily the same, but manifest values should be
+entries_json = sorted(manifest_json.values(), key=lambda v: v["TCK"])
+entries_python = sorted(manifest_python.values(), key=lambda v: v["TCK"])
+
+error = entries_json != entries_python
+if error:
+    print("ERROR: Manifests are not the same")
+
+for m, suf in ((manifest_json, "json"), (manifest_python, "python")):
+    with open(f"manifest_{suf}.json", "w") as f:
+        json.dump(m, f)
+
+for info in entries_json:
+    sequence_json = json.loads(sequence_from_git(json_repo, info["TCK"])[0])
+    sequence_python = json.loads(sequence_from_git(python_repo, info["TCK"])[0])
+    sequence_type = next(v for v in info["Release2Type"].values())
+    sequence_direct = None
+    tck = info["TCK"]
+
+    with open(str((seq_dir / f"{sequence_type}.json").resolve())) as js:
+        sequence_direct = json.load(js)
+        # Fixup the TCK here for comparison purposes because it's not
+        # set when running from the JSON file
+        sequence_direct['dec_reporter']['tck'] = int(tck, 16)
+
+    if sequence_json != sequence_python:
+        print(
+            f"ERROR: sequences loaded from JSON and python git repos for TCK {tck} are not the same"
+        )
+        error = True
+    if sequence_json != sequence_direct:
+        print(
+            f"ERROR: sequences loaded directly from JSON and from JSON git repo for {tck} are not the same"
+        )
+
+        print(print_sequence_differences(sequence_direct, sequence_json))
+        error = True
+
+sys.exit(error)
diff --git a/Rec/Allen/tests/options/create_hlt1_tcks.py b/Rec/Allen/tests/options/create_hlt1_tcks.py
new file mode 100644
index 0000000000000000000000000000000000000000..47964a6ac64fb01ee585059223fa4cb57123ab42
--- /dev/null
+++ b/Rec/Allen/tests/options/create_hlt1_tcks.py
@@ -0,0 +1,65 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""Create TCKs for a respresentative set of configurations. The
+configurations should at least contain the dec_reporter algorithm for
+this to make sense. Use both JSON files and the python modules they
+were generated from and store the resulting configurations in
+different git repositories with the same TCK.
+"""
+
+import os
+import sys
+import subprocess
+from pathlib import Path
+from Allen.qmtest.utils import good_sequence
+
+seq_dir = os.path.expandvars("${ALLEN_INSTALL_DIR}/constants")
+tck_script = os.path.expandvars("${ALLENROOT}/scripts/create_hlt1_tck.py")
+
+error = False
+sequences = [s for s in os.listdir(seq_dir) if good_sequence(s)]
+# Create TCKs for a few sequences in the DBG build to avoid timeouts
+if '-dbg' in os.environ.get('BINARY_TAG', ''):
+    import random
+    random.seed("HLT1TCKs-dbg")
+    random.shuffle(sequences)
+    sequences = sequences[:5]
+
+
+for i, seq in enumerate(sequences):
+    seq = Path(seq_dir) / seq
+    tck = hex(0x10000001 + i)
+
+    # Create TCKs from python configurations
+    # Note, these are created first such that missing encoding keys
+    # will be added to the test-local metainfo repository
+    r = subprocess.run([
+        "python", tck_script, "RTA/2050.01.01", seq.stem, "config_python.git",
+        tck
+    ])
+    if r.returncode != 0:
+        error = True
+    else:
+        print(f"Created TCK {tck} from Python configuration {seq.stem}")
+    os.rename(f"{tck}.json", f"{tck}_python.json")
+
+    # Create TCKs from JSON files
+    r = subprocess.run([
+        "python", tck_script, "RTA/2050.01.01",
+        str(seq), "config_json.git", tck
+    ])
+    if r.returncode != 0:
+        error = True
+    else:
+        print(f"Created TCK {tck} from JSON configuration {str(seq)}")
+
+if error:
+    sys.exit(error)
diff --git a/Rec/Allen/tests/options/test_tck_allen_write_config.py b/Rec/Allen/tests/options/test_tck_allen_write_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..10ab50cd342736ca614a6dbe167ba1383cad4f2d
--- /dev/null
+++ b/Rec/Allen/tests/options/test_tck_allen_write_config.py
@@ -0,0 +1,80 @@
+###############################################################################
+# (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the Apache License          #
+# version 2 (Apache-2.0), copied verbatim in the file "COPYING".              #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""Load all available configuration from a test repository, use them
+to configure Allen and in turn dump Allen's configuration to a JSON
+file. Then compare the configuration dumped by Allen to the original
+TCK and check that they are identical.
+
+This ensures that no properties are changed after they are set and a
+round trip of load configuration, configure Allen, dump configuration
+does not alter any properties as a side effect of configuration or
+persistence.
+"""
+
+import os
+import sys
+import json
+from pathlib import Path
+from subprocess import PIPE, run
+from Allen.qmtest.utils import print_sequence_differences
+from Allen.tck import manifest_from_git, sequence_from_git
+
+tck_repo = Path(os.getenv("PREREQUISITE_0", "")) / "config_json.git"
+
+manifest = manifest_from_git(tck_repo)
+
+# Check all configurations against Allen, sorted by TCK
+manifest_entries = sorted(manifest.values(), key=lambda v: v["TCK"])
+
+error = False
+
+for info in manifest_entries:
+    s, tck_info = sequence_from_git(tck_repo, info["TCK"])
+    tck_sequence = json.loads(s)
+    print('{release} {type:30s} {tck}'.format(**tck_info))
+    tck = info["TCK"]
+
+    cmd = [
+        "Allen",
+        "-g",  # Use default binary geometry, irrelavant in this mode, but needs to be found
+        os.path.expandvars(
+            "${ALLEN_PROJECT_ROOT}/input/detector_configuration"),
+        "--param",
+        os.path.expandvars("${PARAMFILESROOT}"),
+        "--mdf",  # No input file
+        '""',
+        "--sequence",  # Load configuration from TCK
+        f"config_json.git:{tck}",
+        "--write-configuration",  # Write configuration to config.json
+        "1",
+    ]
+
+    p = run(
+        cmd,
+        stdout=PIPE,
+        stderr=PIPE,
+    )
+    if p.returncode != 0:
+        print(f"Failed to write configuration from Allen for TCK {tck}")
+        print(" ".join(cmd))
+        print(p.stdout.decode())
+        print(p.stderr.decode())
+        error = True
+    else:
+        # Open the configuration JSON written by Allen
+        with open("config.json") as f:
+            allen_sequence = json.load(f)
+
+        # Compare configurations
+        if allen_sequence != tck_sequence:
+            error = True
+            diffs = print_sequence_differences(tck_sequence, allen_sequence)
+            print(
+                "Differences between input configuration from TCK and written by Allen:"
+            )
+            print(diffs)
+
+if error:
+    sys.exit(1)
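
print_sequence_differences comes from Allen.qmtest.utils and its
implementation is not part of this diff. Illustratively, such a
comparison could collect, per algorithm, the properties whose values
differ between the two configurations (a sketch, not the actual
implementation):

    def sequence_differences(expected: dict, actual: dict) -> dict:
        # Map algorithm name -> {property: (expected, actual)} for all
        # properties that differ between the two configurations
        diffs = {}
        for alg in expected.keys() | actual.keys():
            e, a = expected.get(alg, {}), actual.get(alg, {})
            changed = {k: (e.get(k), a.get(k))
                       for k in e.keys() | a.keys() if e.get(k) != a.get(k)}
            if changed:
                diffs[alg] = changed
        return diffs
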
diff --git a/Rec/Allen/tests/qmtest/compare_tck_allen_config.qmt b/Rec/Allen/tests/qmtest/compare_tck_allen_config.qmt
new file mode 100644
index 0000000000000000000000000000000000000000..f1ff251339655225ac2f6529aed67a72bbcb17dc
--- /dev/null
+++ b/Rec/Allen/tests/qmtest/compare_tck_allen_config.qmt
@@ -0,0 +1,24 @@
+<?xml version="1.0" ?><!DOCTYPE extension  PUBLIC '-//QM/2.3/Extension//EN'  'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
+<!--
+    (c) Copyright 2020 CERN for the benefit of the LHCb Collaboration
+-->
+<!--
+#######################################################
+# SUMMARY OF THIS TEST
+# ...................
+# Author: Roel Aaij
+# Purpose: Compare a number of TCKs to the output of Allen
+#          \-\-write-configuration 1
+#######################################################
+-->
+<extension class="GaudiTest.GaudiExeTest" kind="test">
+  <argument name="program"><text>python</text></argument>
+  <argument name="args"><set>
+    <text>${ALLENROOT}/tests/options/test_tck_allen_write_config.py</text>
+  </set></argument>
+  <argument name="prerequisites"><set>
+    <tuple><text>create_tcks</text><enumeral>PASS</enumeral></tuple>
+  </set></argument>
+  <argument name="timeout"><integer>600</integer></argument>
+  <argument name="use_temp_dir"><enumeral>per-test</enumeral></argument>
+</extension>
diff --git a/Rec/Allen/tests/qmtest/compare_tcks.qmt b/Rec/Allen/tests/qmtest/compare_tcks.qmt
new file mode 100644
index 0000000000000000000000000000000000000000..3ebd0969e646659fc0c57762ebe3f25202c05e72
--- /dev/null
+++ b/Rec/Allen/tests/qmtest/compare_tcks.qmt
@@ -0,0 +1,34 @@
+<?xml version="1.0" ?><!DOCTYPE extension  PUBLIC '-//QM/2.3/Extension//EN'  'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
+<!--
+    (c) Copyright 2020 CERN for the benefit of the LHCb Collaboration
+-->
+<!--
+#######################################################
+# SUMMARY OF THIS TEST
+# ...................
+# Author: Roel Aaij
+# Purpose: Compare TCKs created from Python configurations to TCKs
+#          created from the corresponding JSON files
+#######################################################
+-->
+<extension class="GaudiTest.GaudiExeTest" kind="test">
+  <argument name="program"><text>python</text></argument>
+  <argument name="args"><set>
+    <text>${ALLENROOT}/tests/options/compare_hlt1_tcks.py</text>
+  </set></argument>
+  <argument name="prerequisites"><set>
+    <tuple><text>create_tcks</text><enumeral>PASS</enumeral></tuple>
+  </set></argument>
+  <argument name="timeout"><integer>600</integer></argument>
+  <argument name="use_temp_dir"><enumeral>per-test</enumeral></argument>
+<argument name="validator"><text>
+
+# No real output validation for now: the exit code is checked, and the
+# generated JSON files are attached to the test result.
+
+import os
+import glob
+workdir = self._common_tmpdir
+for fn in glob.glob(workdir + "/*.json"):
+    result[os.path.basename(fn)] = open(fn).read()
+
+</text></argument>
+</extension>
diff --git a/Rec/Allen/tests/qmtest/create_tcks.qmt b/Rec/Allen/tests/qmtest/create_tcks.qmt
new file mode 100644
index 0000000000000000000000000000000000000000..7174b0a0a90bf02ec7ca2420b4aa8e156fd60346
--- /dev/null
+++ b/Rec/Allen/tests/qmtest/create_tcks.qmt
@@ -0,0 +1,21 @@
+<?xml version="1.0" ?><!DOCTYPE extension  PUBLIC '-//QM/2.3/Extension//EN'  'http://www.codesourcery.com/qm/dtds/2.3/-//qm/2.3/extension//en.dtd'>
+<!--
+    (c) Copyright 2020 CERN for the benefit of the LHCb Collaboration
+-->
+<!--
+#######################################################
+# SUMMARY OF THIS TEST
+# ...................
+# Author: Roel Aaij
+# Purpose: Create TCKs from Python configurations and their generated
+#          JSON files and store them in test-local git repositories
+#######################################################
+-->
+<extension class="GaudiTest.GaudiExeTest" kind="test">
+  <argument name="program"><text>python</text></argument>
+  <argument name="args"><set>
+    <text>${ALLENROOT}/tests/options/create_hlt1_tcks.py</text>
+  </set></argument>
+  <argument name="timeout"><integer>600</integer></argument>
+  <argument name="use_temp_dir"><enumeral>per-test</enumeral></argument>
+</extension>
diff --git a/Rec/Allen/tests/qmtest/mdf_input.qmt b/Rec/Allen/tests/qmtest/mdf_input.qmt
index 09658166ffa4420d9a680fc273083b84c937d3e8..254dd70d22a9995de49ea50dffc7fd1eb63576d7 100644
--- a/Rec/Allen/tests/qmtest/mdf_input.qmt
+++ b/Rec/Allen/tests/qmtest/mdf_input.qmt
@@ -20,7 +20,6 @@
     <text>--sequence</text><text>${ALLEN_INSTALL_DIR}/constants/hlt1_pp_matching.json</text>
     <text>--tags=detdesc:dddb-20220705,sim-20220705-vc-md100|dd4hep:trunk,master</text>
     <text>--events-per-slice</text><text>500</text>
-    <text>--monitoring-filename</text><text>""</text>
     <text>-m</text><text>600</text>
     <text>-s</text><text>3</text>
     <text>-t</text><text>2</text>
diff --git a/backend/CMakeLists.txt b/backend/CMakeLists.txt
index 0fc9807ed1d8d5693abdc194ce4e4715ad7ea892..728159868203f532ba883a0fe1e94ad88c49c626 100644
--- a/backend/CMakeLists.txt
+++ b/backend/CMakeLists.txt
@@ -15,17 +15,19 @@ elseif(TARGET_DEVICE STREQUAL "CUDA")
 elseif(TARGET_DEVICE STREQUAL "HIP")
   list(APPEND backend_sources src/HIPBackend.cpp)
   target_link_libraries(AllenRuntime INTERFACE ${HIP_RUNTIME_LIB})
+  target_include_directories(AllenRuntime INTERFACE ${HIP_PATH}/include ${ROCM_PATH}/hsa/include)
 endif()
 
 target_include_directories(AllenRuntime INTERFACE
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/Backend>
   $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
+target_compile_definitions(AllenRuntime INTERFACE ${TARGET_DEFINITION})
+target_link_libraries(AllenRuntime INTERFACE umesimd::umesimd)
 
 # Backend library
 allen_add_host_library(Backend STATIC ${backend_sources})
-target_link_libraries(Backend PUBLIC AllenRuntime Gear AllenCommon umesimd::umesimd)
-target_compile_definitions(Backend PUBLIC ${TARGET_DEFINITION})
+target_link_libraries(Backend PUBLIC AllenRuntime Gear AllenCommon)
 
 set(BackendHeaders)
 foreach(header
diff --git a/cmake/AllenDependencies.cmake b/cmake/AllenDependencies.cmake
index 1254d49d554f27222919e3049e7cb36d0386f6e4..48ec7eebc5251d85e9dd821f336f08efe078890f 100644
--- a/cmake/AllenDependencies.cmake
+++ b/cmake/AllenDependencies.cmake
@@ -104,10 +104,13 @@ find_package(umesimd REQUIRED)
 find_package(PkgConfig)
 pkg_check_modules(zmq libzmq REQUIRED IMPORTED_TARGET)
 pkg_check_modules(sodium libsodium REQUIRED IMPORTED_TARGET)
+if(NOT STANDALONE)
+  pkg_check_modules(git2 libgit2 REQUIRED IMPORTED_TARGET)  # for GitEntityResolver
+endif()
 
 if(WITH_Allen_PRIVATE_DEPENDENCIES)
   # We need a Python 3 interpreter
-  find_package(Python 3 REQUIRED Interpreter)
+  find_package(Python 3 REQUIRED Interpreter Development.Module)
 
   # Catch2 for tests
   find_package(Catch2 REQUIRED)
@@ -139,6 +142,13 @@ if(WITH_Allen_PRIVATE_DEPENDENCIES)
   if(NOT STANDALONE)
     find_package(Rangev3 REQUIRED)
     find_package(yaml-cpp REQUIRED)
+
+    # pybind11 is available in LCG, but it's installed with setup.py,
+    # so the CMake files are in a non-standard location and we have to
+    # make sure we can find them
+    execute_process(COMMAND ${Python_EXECUTABLE} -c "import pybind11; print(pybind11.get_cmake_dir(), end=\"\");" OUTPUT_VARIABLE PYBIND11_CMAKE_DIR)
+    list(APPEND CMAKE_PREFIX_PATH ${PYBIND11_CMAKE_DIR})
+    find_package(pybind11 CONFIG REQUIRED)
   endif()
 endif()
 
@@ -151,6 +161,8 @@ if (STANDALONE)
   elseif($ENV{ROOTSYS}) # ROOT was compiled with configure/make
     set(ALLEN_ROOT_CMAKE $ENV{ROOTSYS}/etc)
   endif()
+else()
+  set(Allen_PERSISTENT_OPTIONS TARGET_DEVICE)
 endif()
 
 find_package(ROOT REQUIRED HINTS ${ALLEN_ROOT_CMAKE} COMPONENTS RIO Core Cling Hist Tree)
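
The execute_process call above relies on pybind11.get_cmake_dir(),
available since pybind11 2.6, which returns the directory inside the
installed Python package that holds pybind11Config.cmake:

    import pybind11

    # Prints something like .../site-packages/pybind11/share/cmake/pybind11
    print(pybind11.get_cmake_dir())
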
diff --git a/cmake/GenerateConfiguration.cmake b/cmake/GenerateConfiguration.cmake
index d6f0736b8e6b3b73d2a143720dd8e381052d433b..caf41bd29745838d1d8108617ae682120ad282bc 100644
--- a/cmake/GenerateConfiguration.cmake
+++ b/cmake/GenerateConfiguration.cmake
@@ -15,6 +15,7 @@ set(ALLEN_PARSER_DIR ${PROJECT_SEQUENCE_DIR}/parser)
 set(ALGORITHMS_OUTPUTFILE ${ALLEN_ALGORITHMS_DIR}/allen_standalone_algorithms.py)
 set(PARSED_ALGORITHMS_OUTPUTFILE ${CODE_GENERATION_DIR}/parsed_algorithms.pickle)
 set(ALGORITHMS_GENERATION_SCRIPT ${PROJECT_SOURCE_DIR}/configuration/parser/ParseAlgorithms.py)
+set(DEFAULT_PROPERTIES_SRC ${PROJECT_SOURCE_DIR}/configuration/src/default_properties.cpp)
 
 include_guard(GLOBAL)
 
@@ -25,7 +26,7 @@ file(MAKE_DIRECTORY ${ALLEN_ALGORITHMS_DIR})
 
 # We will invoke the parser a few times, set its required environment in a variable
 # Add the scripts folder only if we are invoking with a CMAKE_TOOLCHAIN_FILE
-if(CMAKE_TOOLCHAIN_FILE) 
+if(CMAKE_TOOLCHAIN_FILE)
   set(PARSER_ENV PYTHONPATH=$ENV{PYTHONPATH}:${PROJECT_SOURCE_DIR}/scripts LD_LIBRARY_PATH=${LIBCLANG_LIBDIR}:$ENV{LD_LIBRARY_PATH})
 else()
   set(PARSER_ENV PYTHONPATH=$ENV{PYTHONPATH}:${LIBCLANG_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages LD_LIBRARY_PATH=${LIBCLANG_LIBDIR}:$ENV{LD_LIBRARY_PATH})
@@ -59,31 +60,40 @@ add_custom_command(
   DEPENDS "${PROJECT_SOURCE_DIR}/configuration/python/AllenConf" "${PROJECT_SOURCE_DIR}/configuration/python/AllenCore")
 add_custom_target(generate_conf_core DEPENDS "${SEQUENCE_DEFINITION_DIR}" "${ALLEN_CORE_DIR}")
 
+# Generate Allen AlgorithmDB
+add_custom_command(
+  OUTPUT "${CODE_GENERATION_DIR}/AlgorithmDB.cpp"
+  COMMENT "Generating AlgorithmDB"
+  COMMAND ${CMAKE_COMMAND} -E env ${PARSER_ENV} ${Python_EXECUTABLE} ${ALGORITHMS_GENERATION_SCRIPT} --generate db --filename "${CODE_GENERATION_DIR}/AlgorithmDB.cpp" --parsed_algorithms "${PARSED_ALGORITHMS_OUTPUTFILE}"
+  WORKING_DIRECTORY ${ALLEN_PARSER_DIR}
+  DEPENDS "${PARSED_ALGORITHMS_OUTPUTFILE}")
+add_custom_target(algorithm_db_generation DEPENDS "${CODE_GENERATION_DIR}/AlgorithmDB.cpp")
+add_library(algorithm_db OBJECT "${CODE_GENERATION_DIR}/AlgorithmDB.cpp")
+add_dependencies(algorithm_db algorithm_db_generation)
+target_link_libraries(algorithm_db
+  PUBLIC
+    EventModel
+    HostEventModel
+    Backend
+    AllenCommon
+    Gear)
+
+add_executable(default_properties ${DEFAULT_PROPERTIES_SRC})
+target_link_libraries(default_properties PRIVATE AllenLib HostEventModel EventModel)
+
 # Generate allen standalone algorithms file
 add_custom_command(
   OUTPUT "${ALGORITHMS_OUTPUTFILE}"
   COMMAND
-    ${CMAKE_COMMAND} -E env ${PARSER_ENV} ${Python_EXECUTABLE} ${ALGORITHMS_GENERATION_SCRIPT} --generate views --filename "${ALGORITHMS_OUTPUTFILE}" --parsed_algorithms "${PARSED_ALGORITHMS_OUTPUTFILE}" &&
+    ${CMAKE_COMMAND} -E env ${PARSER_ENV} ${Python_EXECUTABLE} ${ALGORITHMS_GENERATION_SCRIPT} --generate views --filename "${ALGORITHMS_OUTPUTFILE}" --parsed_algorithms "${PARSED_ALGORITHMS_OUTPUTFILE}" --default_properties $<TARGET_FILE:default_properties> &&
     ${CMAKE_COMMAND} -E touch ${ALLEN_ALGORITHMS_DIR}/__init__.py
   WORKING_DIRECTORY ${ALLEN_PARSER_DIR}
-  DEPENDS "${PARSED_ALGORITHMS_OUTPUTFILE}" "${SEQUENCE_DEFINITION_DIR}" "${ALLEN_CORE_DIR}")
+  DEPENDS "${PARSED_ALGORITHMS_OUTPUTFILE}" "${SEQUENCE_DEFINITION_DIR}" "${ALLEN_CORE_DIR}" default_properties)
 add_custom_target(generate_algorithms_view DEPENDS "${ALGORITHMS_OUTPUTFILE}")
 install(FILES "${ALGORITHMS_OUTPUTFILE}" DESTINATION python/AllenAlgorithms)
 
-# Generate Allen AlgorithmDB
-add_custom_command(
-  OUTPUT "${ALLEN_GENERATED_INCLUDE_FILES_DIR}/AlgorithmDB.h"
-  COMMENT "Generating AlgorithmDB"
-  COMMAND ${CMAKE_COMMAND} -E env ${PARSER_ENV} ${Python_EXECUTABLE} ${ALGORITHMS_GENERATION_SCRIPT} --generate db --filename "${ALLEN_GENERATED_INCLUDE_FILES_DIR}/AlgorithmDB.h" --parsed_algorithms "${PARSED_ALGORITHMS_OUTPUTFILE}"
-  WORKING_DIRECTORY ${ALLEN_PARSER_DIR}
-  DEPENDS "${PARSED_ALGORITHMS_OUTPUTFILE}")
-add_custom_target(algorithm_db_generation DEPENDS "${ALLEN_GENERATED_INCLUDE_FILES_DIR}/AlgorithmDB.h")
-add_library(algorithm_db INTERFACE)
-add_dependencies(algorithm_db algorithm_db_generation "${ALLEN_GENERATED_INCLUDE_FILES_DIR}/AlgorithmDB.h")
-target_include_directories(algorithm_db INTERFACE $<BUILD_INTERFACE:${ALLEN_GENERATED_INCLUDE_FILES_DIR}>)
-install(TARGETS algorithm_db
-      EXPORT Allen
-      LIBRARY DESTINATION lib)
+# Aggregate target that depends on the generation of all sequences
+add_custom_target(Sequences DEPENDS generate_algorithms_view)
 
 if(SEPARABLE_COMPILATION)
   add_custom_command(
@@ -198,6 +208,6 @@ function(generate_sequence sequence)
       WORKING_DIRECTORY ${sequence_dir})
   endif()
   add_custom_target(sequence_${sequence} DEPENDS "${PROJECT_BINARY_DIR}/${sequence}.json")
-  add_dependencies(Stream sequence_${sequence})
+  add_dependencies(Sequences sequence_${sequence})
   install(FILES "${PROJECT_BINARY_DIR}/${sequence}.json" DESTINATION constants)
 endfunction()
diff --git a/cmake/modules/FindPythonLibsNew.cmake b/cmake/modules/FindPythonLibsNew.cmake
deleted file mode 100644
index b29b287de72bc1249e51279a99643891a743e9a5..0000000000000000000000000000000000000000
--- a/cmake/modules/FindPythonLibsNew.cmake
+++ /dev/null
@@ -1,195 +0,0 @@
-# - Find python libraries
-# This module finds the libraries corresponding to the Python interpreter
-# FindPythonInterp provides.
-# This code sets the following variables:
-#
-#  PYTHONLIBS_FOUND           - have the Python libs been found
-#  PYTHON_PREFIX              - path to the Python installation
-#  PYTHON_LIBRARIES           - path to the python library
-#  PYTHON_INCLUDE_DIRS        - path to where Python.h is found
-#  PYTHON_MODULE_EXTENSION    - lib extension, e.g. '.so' or '.pyd'
-#  PYTHON_MODULE_PREFIX       - lib name prefix: usually an empty string
-#  PYTHON_SITE_PACKAGES       - path to installation site-packages
-#  PYTHON_IS_DEBUG            - whether the Python interpreter is a debug build
-#
-# Thanks to talljimbo for the patch adding the 'LDVERSION' config
-# variable usage.
-
-#=============================================================================
-# Copyright 2001-2009 Kitware, Inc.
-# Copyright 2012 Continuum Analytics, Inc.
-#
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# * Neither the names of Kitware, Inc., the Insight Software Consortium,
-# nor the names of their contributors may be used to endorse or promote
-# products derived from this software without specific prior written
-# permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#=============================================================================
-
-# Checking for the extension makes sure that `LibsNew` was found and not just `Libs`.
-if(PYTHONLIBS_FOUND AND PYTHON_MODULE_EXTENSION)
-    return()
-endif()
-
-# Use the Python interpreter to find the libs.
-if(PythonLibsNew_FIND_REQUIRED)
-    find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} REQUIRED)
-else()
-    find_package(PythonInterp ${PythonLibsNew_FIND_VERSION})
-endif()
-
-if(NOT PYTHONINTERP_FOUND)
-    set(PYTHONLIBS_FOUND FALSE)
-    return()
-endif()
-
-# According to http://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter
-# testing whether sys has the gettotalrefcount function is a reliable, cross-platform
-# way to detect a CPython debug interpreter.
-#
-# The library suffix is from the config var LDVERSION sometimes, otherwise
-# VERSION. VERSION will typically be like "2.7" on unix, and "27" on windows.
-execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
-    "from distutils import sysconfig as s;import sys;import struct;
-print('.'.join(str(v) for v in sys.version_info));
-print(sys.prefix);
-print(s.get_python_inc(plat_specific=True));
-print(s.get_python_lib(plat_specific=True));
-print(s.get_config_var('SO'));
-print(hasattr(sys, 'gettotalrefcount')+0);
-print(struct.calcsize('@P'));
-print(s.get_config_var('LDVERSION') or s.get_config_var('VERSION'));
-print(s.get_config_var('LIBDIR') or '');
-print(s.get_config_var('MULTIARCH') or '');
-"
-    RESULT_VARIABLE _PYTHON_SUCCESS
-    OUTPUT_VARIABLE _PYTHON_VALUES
-    ERROR_VARIABLE _PYTHON_ERROR_VALUE)
-
-if(NOT _PYTHON_SUCCESS MATCHES 0)
-    if(PythonLibsNew_FIND_REQUIRED)
-        message(FATAL_ERROR
-            "Python config failure:\n${_PYTHON_ERROR_VALUE}")
-    endif()
-    set(PYTHONLIBS_FOUND FALSE)
-    return()
-endif()
-
-# Convert the process output into a list
-string(REGEX REPLACE ";" "\\\\;" _PYTHON_VALUES ${_PYTHON_VALUES})
-string(REGEX REPLACE "\n" ";" _PYTHON_VALUES ${_PYTHON_VALUES})
-list(GET _PYTHON_VALUES 0 _PYTHON_VERSION_LIST)
-list(GET _PYTHON_VALUES 1 PYTHON_PREFIX)
-list(GET _PYTHON_VALUES 2 PYTHON_INCLUDE_DIR)
-list(GET _PYTHON_VALUES 3 PYTHON_SITE_PACKAGES)
-list(GET _PYTHON_VALUES 4 PYTHON_MODULE_EXTENSION)
-list(GET _PYTHON_VALUES 5 PYTHON_IS_DEBUG)
-list(GET _PYTHON_VALUES 6 PYTHON_SIZEOF_VOID_P)
-list(GET _PYTHON_VALUES 7 PYTHON_LIBRARY_SUFFIX)
-list(GET _PYTHON_VALUES 8 PYTHON_LIBDIR)
-list(GET _PYTHON_VALUES 9 PYTHON_MULTIARCH)
-
-# Make sure the Python has the same pointer-size as the chosen compiler
-# Skip if CMAKE_SIZEOF_VOID_P is not defined
-if(CMAKE_SIZEOF_VOID_P AND (NOT "${PYTHON_SIZEOF_VOID_P}" STREQUAL "${CMAKE_SIZEOF_VOID_P}"))
-    if(PythonLibsNew_FIND_REQUIRED)
-        math(EXPR _PYTHON_BITS "${PYTHON_SIZEOF_VOID_P} * 8")
-        math(EXPR _CMAKE_BITS "${CMAKE_SIZEOF_VOID_P} * 8")
-        message(FATAL_ERROR
-            "Python config failure: Python is ${_PYTHON_BITS}-bit, "
-            "chosen compiler is  ${_CMAKE_BITS}-bit")
-    endif()
-    set(PYTHONLIBS_FOUND FALSE)
-    return()
-endif()
-
-# The built-in FindPython didn't always give the version numbers
-string(REGEX REPLACE "\\." ";" _PYTHON_VERSION_LIST ${_PYTHON_VERSION_LIST})
-list(GET _PYTHON_VERSION_LIST 0 PYTHON_VERSION_MAJOR)
-list(GET _PYTHON_VERSION_LIST 1 PYTHON_VERSION_MINOR)
-list(GET _PYTHON_VERSION_LIST 2 PYTHON_VERSION_PATCH)
-
-# Make sure all directory separators are '/'
-string(REGEX REPLACE "\\\\" "/" PYTHON_PREFIX ${PYTHON_PREFIX})
-string(REGEX REPLACE "\\\\" "/" PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE_DIR})
-string(REGEX REPLACE "\\\\" "/" PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES})
-
-if(CMAKE_HOST_WIN32)
-    set(PYTHON_LIBRARY
-        "${PYTHON_PREFIX}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib")
-
-    # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the
-    # original python installation. They may be found relative to PYTHON_INCLUDE_DIR.
-    if(NOT EXISTS "${PYTHON_LIBRARY}")
-        get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY)
-        set(PYTHON_LIBRARY
-            "${_PYTHON_ROOT}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib")
-    endif()
-
-    # raise an error if the python libs are still not found.
-    if(NOT EXISTS "${PYTHON_LIBRARY}")
-        message(FATAL_ERROR "Python libraries not found")
-    endif()
-
-else()
-    if(PYTHON_MULTIARCH)
-        set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}")
-    else()
-        set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}")
-    endif()
-    #message(STATUS "Searching for Python libs in ${_PYTHON_LIBS_SEARCH}")
-    # Probably this needs to be more involved. It would be nice if the config
-    # information the python interpreter itself gave us were more complete.
-    find_library(PYTHON_LIBRARY
-        NAMES "python${PYTHON_LIBRARY_SUFFIX}"
-        PATHS ${_PYTHON_LIBS_SEARCH}
-        NO_DEFAULT_PATH)
-
-    # If all else fails, just set the name/version and let the linker figure out the path.
-    if(NOT PYTHON_LIBRARY)
-        set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX})
-    endif()
-endif()
-
-MARK_AS_ADVANCED(
-  PYTHON_LIBRARY
-  PYTHON_INCLUDE_DIR
-)
-
-# We use PYTHON_INCLUDE_DIR, PYTHON_LIBRARY and PYTHON_DEBUG_LIBRARY for the
-# cache entries because they are meant to specify the location of a single
-# library. We now set the variables listed by the documentation for this
-# module.
-SET(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}")
-SET(PYTHON_LIBRARIES "${PYTHON_LIBRARY}")
-SET(PYTHON_DEBUG_LIBRARIES "${PYTHON_DEBUG_LIBRARY}")
-
-find_package_message(PYTHON
-    "Found PythonLibs: ${PYTHON_LIBRARY}"
-    "${PYTHON_EXECUTABLE}${PYTHON_VERSION}")
-
-set(PYTHONLIBS_FOUND TRUE)
diff --git a/cmake/modules/Findpybind11.cmake b/cmake/modules/Findpybind11.cmake
deleted file mode 100644
index 31afb224a1a287133ee04fc05ac4280a762b8fb6..0000000000000000000000000000000000000000
--- a/cmake/modules/Findpybind11.cmake
+++ /dev/null
@@ -1,97 +0,0 @@
-# - Find the NumPy libraries
-
-# This module finds if Pybind11 is installed, and sets the following variables
-# indicating where it is.
-#
-# TODO: Update to provide the libraries and paths for linking npymath lib.
-#
-#  PYBIND11_FOUND               - was Pybind11 found
-#  PYBIND11_VERSION             - the version of Pybind11 found as a string
-#  PYBIND11_VERSION_MAJOR       - the major version number of Pybind11
-#  PYBIND11_VERSION_MINOR       - the minor version number of Pybind11
-#  PYBIND11_VERSION_PATCH       - the patch version number of Pybind11
-#  PYBIND11_VERSION_DECIMAL     - e.g. version 1.6.1 is 10601
-#  PYBIND11_INCLUDE_DIRS        - path to the Pybind11 include files
-
-#============================================================================
-# Copyright 2012 Continuum Analytics, Inc.
-#
-# MIT License
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to permit
-# persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#============================================================================
-
-# Finding Pybind11 involves calling the Python interpreter
-find_package(pybind11 CONFIG QUIET)
-if (pybind11_FOUND)
-  set(PYBIND11_FOUND TRUE)
-else()
-  if(Pybind11_FIND_REQUIRED)
-    find_package(PythonInterp REQUIRED)
-  else()
-    find_package(PythonInterp)
-  endif()
-
-  if(NOT PYTHONINTERP_FOUND)
-    set(PYBIND11_FOUND FALSE)
-  endif()
-
-  execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
-    "import pybind11 as pb; print(pb.__version__); print(pb.get_include());"
-    RESULT_VARIABLE _PYBIND11_SEARCH_SUCCESS
-    OUTPUT_VARIABLE _PYBIND11_VALUES
-    ERROR_VARIABLE _PYBIND11_ERROR_VALUE
-    OUTPUT_STRIP_TRAILING_WHITESPACE)
-
-  if(NOT _PYBIND11_SEARCH_SUCCESS MATCHES 0)
-    if(Pybind11_FIND_REQUIRED)
-      message(FATAL_ERROR
-        "pybind11 import failure:\n${_PYBIND11_ERROR_VALUE}")
-    endif()
-    set(PYBIND11_FOUND FALSE)
-  else()
-    set(PYBIND11_FOUND TRUE)
-  endif()
-
-  if (PYBIND11_FOUND)
-    # Convert the process output into a list
-    string(REGEX REPLACE ";" "\\\\;" _PYBIND11_VALUES ${_PYBIND11_VALUES})
-    string(REGEX REPLACE "\n" ";" _PYBIND11_VALUES ${_PYBIND11_VALUES})
-    list(GET _PYBIND11_VALUES 0 PYBIND11_VERSION)
-    list(GET _PYBIND11_VALUES 1 PYBIND11_INCLUDE_DIRS)
-
-    # Make sure all directory separators are '/'
-    string(REGEX REPLACE "\\\\" "/" PYBIND11_INCLUDE_DIRS ${PYBIND11_INCLUDE_DIRS})
-
-    # Get the major and minor version numbers
-    string(REGEX REPLACE "\\." ";" _PYBIND11_VERSION_LIST ${PYBIND11_VERSION})
-    list(GET _PYBIND11_VERSION_LIST 0 PYBIND11_VERSION_MAJOR)
-    list(GET _PYBIND11_VERSION_LIST 1 PYBIND11_VERSION_MINOR)
-    list(GET _PYBIND11_VERSION_LIST 2 PYBIND11_VERSION_PATCH)
-    string(REGEX MATCH "[0-9]*" PYBIND11_VERSION_PATCH ${PYBIND11_VERSION_PATCH})
-    math(EXPR PYBIND11_VERSION_DECIMAL
-      "(${PYBIND11_VERSION_MAJOR} * 10000) + (${PYBIND11_VERSION_MINOR} * 100) + ${PYBIND11_VERSION_PATCH}")
-
-    find_package_message(PYBIND11
-      "Found Pybind11: version \"${PYBIND11_VERSION}\" ${PYBIND11_INCLUDE_DIRS}"
-      "${PYBIND11_INCLUDE_DIRS}${PYBIND11_VERSION}")
-  endif()
-endif()
diff --git a/cmake/pybind11Tools.cmake b/cmake/pybind11Tools.cmake
deleted file mode 100644
index 52a70c23c409850378ec4f69719ad877b536feff..0000000000000000000000000000000000000000
--- a/cmake/pybind11Tools.cmake
+++ /dev/null
@@ -1,209 +0,0 @@
-# tools/pybind11Tools.cmake -- Build system for the pybind11 modules
-#
-# Copyright (c) 2015 Wenzel Jakob <wenzel@inf.ethz.ch>
-#
-# All rights reserved. Use of this source code is governed by a
-# BSD-style license that can be found in the LICENSE file.
-
-cmake_minimum_required(VERSION 2.8.12)
-
-# Add a CMake parameter for choosing a desired Python version
-if(NOT PYBIND11_PYTHON_VERSION)
-  set(PYBIND11_PYTHON_VERSION "" CACHE STRING "Python version to use for compiling modules")
-endif()
-
-set(Python_ADDITIONAL_VERSIONS 3.7 3.6 3.5 3.4)
-find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} REQUIRED)
-
-include(CheckCXXCompilerFlag)
-include(CMakeParseArguments)
-
-if(NOT PYBIND11_CPP_STANDARD AND NOT CMAKE_CXX_STANDARD)
-  if(NOT MSVC)
-    check_cxx_compiler_flag("-std=c++14" HAS_CPP14_FLAG)
-
-    if (HAS_CPP14_FLAG)
-      set(PYBIND11_CPP_STANDARD -std=c++14)
-    else()
-      check_cxx_compiler_flag("-std=c++11" HAS_CPP11_FLAG)
-      if (HAS_CPP11_FLAG)
-        set(PYBIND11_CPP_STANDARD -std=c++11)
-      else()
-        message(FATAL_ERROR "Unsupported compiler -- pybind11 requires C++11 support!")
-      endif()
-    endif()
-  elseif(MSVC)
-    set(PYBIND11_CPP_STANDARD /std:c++14)
-  endif()
-
-  set(PYBIND11_CPP_STANDARD ${PYBIND11_CPP_STANDARD} CACHE STRING
-      "C++ standard flag, e.g. -std=c++11, -std=c++14, /std:c++14.  Defaults to C++14 mode." FORCE)
-endif()
-
-# Checks whether the given CXX/linker flags can compile and link a cxx file.  cxxflags and
-# linkerflags are lists of flags to use.  The result variable is a unique variable name for each set
-# of flags: the compilation result will be cached base on the result variable.  If the flags work,
-# sets them in cxxflags_out/linkerflags_out internal cache variables (in addition to ${result}).
-function(_pybind11_return_if_cxx_and_linker_flags_work result cxxflags linkerflags cxxflags_out linkerflags_out)
-  set(CMAKE_REQUIRED_LIBRARIES ${linkerflags})
-  check_cxx_compiler_flag("${cxxflags}" ${result})
-  if (${result})
-    set(${cxxflags_out} "${cxxflags}" CACHE INTERNAL "" FORCE)
-    set(${linkerflags_out} "${linkerflags}" CACHE INTERNAL "" FORCE)
-  endif()
-endfunction()
-
-# Internal: find the appropriate link time optimization flags for this compiler
-function(_pybind11_add_lto_flags target_name prefer_thin_lto)
-  if (NOT DEFINED PYBIND11_LTO_CXX_FLAGS)
-    set(PYBIND11_LTO_CXX_FLAGS "" CACHE INTERNAL "")
-    set(PYBIND11_LTO_LINKER_FLAGS "" CACHE INTERNAL "")
-
-    if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang")
-      set(cxx_append "")
-      set(linker_append "")
-      if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND NOT APPLE)
-        # Clang Gold plugin does not support -Os; append -O3 to MinSizeRel builds to override it
-        set(linker_append ";$<$<CONFIG:MinSizeRel>:-O3>")
-      elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
-        set(cxx_append ";-fno-fat-lto-objects")
-      endif()
-
-      if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND prefer_thin_lto)
-        _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO_THIN
-          "-flto=thin${cxx_append}" "-flto=thin${linker_append}"
-          PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
-      endif()
-
-      if (NOT HAS_FLTO_THIN)
-        _pybind11_return_if_cxx_and_linker_flags_work(HAS_FLTO
-          "-flto${cxx_append}" "-flto${linker_append}"
-          PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
-      endif()
-    elseif (CMAKE_CXX_COMPILER_ID MATCHES "Intel")
-      # Intel equivalent to LTO is called IPO
-      _pybind11_return_if_cxx_and_linker_flags_work(HAS_INTEL_IPO
-      "-ipo" "-ipo" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
-    elseif(MSVC)
-      # cmake only interprets libraries as linker flags when they start with a - (otherwise it
-      # converts /LTCG to \LTCG as if it was a Windows path).  Luckily MSVC supports passing flags
-      # with - instead of /, even if it is a bit non-standard:
-      _pybind11_return_if_cxx_and_linker_flags_work(HAS_MSVC_GL_LTCG
-        "/GL" "-LTCG" PYBIND11_LTO_CXX_FLAGS PYBIND11_LTO_LINKER_FLAGS)
-    endif()
-
-    if (PYBIND11_LTO_CXX_FLAGS)
-      message(STATUS "LTO enabled")
-    else()
-      message(STATUS "LTO disabled (not supported by the compiler and/or linker)")
-    endif()
-  endif()
-
-  # Enable LTO flags if found, except for Debug builds
-  if (PYBIND11_LTO_CXX_FLAGS)
-    target_compile_options(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_CXX_FLAGS}>")
-  endif()
-  if (PYBIND11_LTO_LINKER_FLAGS)
-    target_link_libraries(${target_name} PRIVATE "$<$<NOT:$<CONFIG:Debug>>:${PYBIND11_LTO_LINKER_FLAGS}>")
-  endif()
-endfunction()
-
-# Build a Python extension module:
-# pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
-#                     [NO_EXTRAS] [THIN_LTO] source1 [source2 ...])
-#
-function(pybind11_add_module target_name)
-  set(options MODULE SHARED EXCLUDE_FROM_ALL NO_EXTRAS THIN_LTO)
-  cmake_parse_arguments(ARG "${options}" "" "" ${ARGN})
-
-  if(ARG_MODULE AND ARG_SHARED)
-    message(FATAL_ERROR "Can't be both MODULE and SHARED")
-  elseif(ARG_SHARED)
-    set(lib_type SHARED)
-  else()
-    set(lib_type MODULE)
-  endif()
-
-  if(ARG_EXCLUDE_FROM_ALL)
-    set(exclude_from_all EXCLUDE_FROM_ALL)
-  endif()
-
-  add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS})
-
-  target_include_directories(${target_name}
-    PRIVATE ${PYBIND11_INCLUDE_DIR}  # from project CMakeLists.txt
-    PRIVATE ${pybind11_INCLUDE_DIR}  # from pybind11Config
-    PRIVATE ${PYTHON_INCLUDE_DIRS})
-
-  # Python debug libraries expose slightly different objects
-  # https://docs.python.org/3.6/c-api/intro.html#debugging-builds
-  # https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
-  if(PYTHON_IS_DEBUG)
-    target_compile_definitions(${target_name} PRIVATE Py_DEBUG)
-  endif()
-
-  # The prefix and extension are provided by FindPythonLibsNew.cmake
-  set_target_properties(${target_name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}")
-  set_target_properties(${target_name} PROPERTIES SUFFIX "${PYTHON_MODULE_EXTENSION}")
-
-  # -fvisibility=hidden is required to allow multiple modules compiled against
-  # different pybind versions to work properly, and for some features (e.g.
-  # py::module_local).  We force it on everything inside the `pybind11`
-  # namespace; also turning it on for a pybind module compilation here avoids
-  # potential warnings or issues from having mixed hidden/non-hidden types.
-  set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden")
-
-  if(WIN32 OR CYGWIN)
-    # Link against the Python shared library on Windows
-    target_link_libraries(${target_name} PRIVATE ${PYTHON_LIBRARIES})
-  elseif(APPLE)
-    # It's quite common to have multiple copies of the same Python version
-    # installed on one's system. E.g.: one copy from the OS and another copy
-    # that's statically linked into an application like Blender or Maya.
-    # If we link our plugin library against the OS Python here and import it
-    # into Blender or Maya later on, this will cause segfaults when multiple
-    # conflicting Python instances are active at the same time (even when they
-    # are of the same version).
-
-    # Windows is not affected by this issue since it handles DLL imports
-    # differently. The solution for Linux and Mac OS is simple: we just don't
-    # link against the Python library. The resulting shared library will have
-    # missing symbols, but that's perfectly fine -- they will be resolved at
-    # import time.
-
-    target_link_libraries(${target_name} PRIVATE "-undefined dynamic_lookup")
-
-    if(ARG_SHARED)
-      # Suppress CMake >= 3.0 warning for shared libraries
-      set_target_properties(${target_name} PROPERTIES MACOSX_RPATH ON)
-    endif()
-  endif()
-
-  # Make sure C++11/14 are enabled
-  target_compile_options(${target_name} PUBLIC ${PYBIND11_CPP_STANDARD})
-
-  if(ARG_NO_EXTRAS)
-    return()
-  endif()
-
-  _pybind11_add_lto_flags(${target_name} ${ARG_THIN_LTO})
-
-  if (NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug)
-    # Strip unnecessary sections of the binary on Linux/Mac OS
-    if(CMAKE_STRIP)
-      if(APPLE)
-        add_custom_command(TARGET ${target_name} POST_BUILD
-                           COMMAND ${CMAKE_STRIP} -x $<TARGET_FILE:${target_name}>)
-      else()
-        add_custom_command(TARGET ${target_name} POST_BUILD
-                           COMMAND ${CMAKE_STRIP} $<TARGET_FILE:${target_name}>)
-      endif()
-    endif()
-  endif()
-
-  if(MSVC)
-    # /MP enables multithreaded builds (relevant when there are many files), /bigobj is
-    # needed for bigger binding projects due to the limit to 64k addressable sections
-    target_compile_options(${target_name} PRIVATE /MP /bigobj)
-  endif()
-endfunction()
diff --git a/configuration/CMakeLists.txt b/configuration/CMakeLists.txt
index 5494f64a93b15194fb5e75f9d05e46c249d45891..aab142e0bab1e8d41349960030cf818c3e400eb9 100644
--- a/configuration/CMakeLists.txt
+++ b/configuration/CMakeLists.txt
@@ -12,8 +12,41 @@ if (NOT STANDALONE)
     add_test(NAME Allen:pytest
       COMMAND ${CMAKE_BINARY_DIR}/run bash ${CMAKE_CURRENT_SOURCE_DIR}/tests/test_configuration.sh ${CMAKE_CURRENT_SOURCE_DIR})
     gaudi_install(PYTHON)
-endif()
 
-# Configuration interface library
-add_library(Configuration INTERFACE)
-target_include_directories(Configuration INTERFACE ${PROJECT_BINARY_DIR}/configuration/sequences)
+    # Configuration library
+    allen_add_host_library(Configuration
+      STATIC
+        src/TCK.cpp)
+    target_include_directories(Configuration
+      PUBLIC
+      $<BUILD_INTERFACE:${DETECTOR_PREFIX_DIR}/include>  # for DETECTOR_VERSION.h
+      $<BUILD_INTERFACE:${LBCOM_PREFIX_DIR}/include>  # for LBCOM_VERSION.h
+      $<BUILD_INTERFACE:${REC_PREFIX_DIR}/include>  # for REC_VERSION.h
+      $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>  # for ALLEN_VERSION.h
+      include)
+
+    # Link to HltServicesLib here; the dependency is propagated further
+    # because AllenLib links against this library as well
+    target_link_libraries(Configuration
+      PUBLIC
+        AllenCommon
+        Boost::headers
+        LHCb::HltServicesLib)
+
+    # Install the header so it can be used in AllenOnline
+    install(FILES include/TCK.h
+      DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/Allen)
+
+    pybind11_add_module(TCK NO_EXTRAS src/bindings.cpp)
+    target_link_libraries(TCK
+      PRIVATE
+        Configuration
+      PUBLIC
+        pybind11::pybind11
+        ${Python_LIBRARIES})
+    install(TARGETS TCK
+      EXPORT
+        Allen
+      LIBRARY DESTINATION
+        ${GAUDI_INSTALL_PYTHONDIR}/Allen)
+endif()
diff --git a/configuration/include/TCK.h b/configuration/include/TCK.h
new file mode 100644
index 0000000000000000000000000000000000000000..da417ed03dda3c476a8912036e2d670ab65277eb
--- /dev/null
+++ b/configuration/include/TCK.h
@@ -0,0 +1,33 @@
+/*****************************************************************************\
+* (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           *
+*                                                                             *
+* This software is distributed under the terms of the Apache License          *
+* version 2 (Apache-2.0), copied verbatim in the file "COPYING".              *
+*                                                                             *
+* In applying this licence, CERN does not waive the privileges and immunities *
+* granted to it by virtue of its status as an Intergovernmental Organization  *
+* or submit itself to any jurisdiction.                                       *
+\*****************************************************************************/
+#pragma once
+
+#include <string>
+#include <iostream>
+#include <HltServices/TCKUtils.h>
+
+namespace Allen {
+
+  namespace TCK {
+    static constexpr unsigned config_version = 1u;
+
+    std::map<std::string, std::string> project_dependencies();
+
+    std::tuple<bool, std::string> check_projects(nlohmann::json metadata);
+
+    void create_git_repository(std::string repo);
+  } // namespace TCK
+
+  std::tuple<std::string, LHCb::TCK::Info> tck_from_git(std::string repo, std::string tck);
+
+  std::tuple<std::string, LHCb::TCK::Info> sequence_from_git(std::string repo, std::string tck);
+
+} // namespace Allen
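
The tests above access TCK repositories from Python through Allen.tck
(see test_tck_allen_write_config.py), which presumably builds on the
pybind11 TCK module added in configuration/CMakeLists.txt. A
hypothetical round trip over a repository created by create_hlt1_tck.py:

    from Allen.tck import manifest_from_git, sequence_from_git

    repo = "config_json.git"  # bare repository created by create_hlt1_tck.py
    for entry in manifest_from_git(repo).values():
        sequence, info = sequence_from_git(repo, entry["TCK"])
        print("{release} {type} {tck}".format(**info))
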
diff --git a/configuration/parser/ParseAlgorithms.py b/configuration/parser/ParseAlgorithms.py
index 0470d92c799df9ea83d4720449c183465d17b812..5aaf2fb0fd7a6d27cfcb17419271c45581ac0676 100755
--- a/configuration/parser/ParseAlgorithms.py
+++ b/configuration/parser/ParseAlgorithms.py
@@ -11,6 +11,7 @@ from collections import OrderedDict
 from AlgorithmTraversalLibClang import AlgorithmTraversal
 import argparse
 import pickle
+import json
+import sys
 
 
 def get_clang_so_location():
@@ -30,7 +31,8 @@ class Parser():
 
     # Pattern sought in every file, prior to parsing the file for an algorithm
     __algorithm_pattern_compiled = re.compile(
-        "(?P<scope>Host|Device|Selection|Validation|Provider|Barrier)Algorithm")
+        "(?P<scope>Host|Device|Selection|Validation|Provider|Barrier)Algorithm"
+    )
 
     # File extensions considered
     __sought_extensions_compiled = [
@@ -72,9 +74,9 @@ class Parser():
 
         return algorithm_files
 
-
     @staticmethod
-    def parse_all(algorithm_files, prefix_project_folder,
+    def parse_all(algorithm_files,
+                  prefix_project_folder,
                   algorithm_parser=AlgorithmTraversal()):
         """Parses all files and traverses algorithm definitions."""
         algorithms = []
@@ -82,7 +84,7 @@ class Parser():
         for algorithm_file in algorithm_files:
             try:
                 parsed_algorithms = algorithm_parser.traverse(
-                            algorithm_file, prefix_project_folder)
+                    algorithm_file, prefix_project_folder)
                 if parsed_algorithms:
                     algorithms += parsed_algorithms
             except:
@@ -114,7 +116,7 @@ class AllenCore():
         return s
 
     @staticmethod
-    def write_algorithm_code(algorithm, i=0):
+    def write_algorithm_code(algorithm, default_properties, i=0):
         s = AllenCore.prefix(
             i) + "class " + algorithm.name + "(AllenAlgorithm):\n"
         i += 1
@@ -132,9 +134,23 @@ class AllenCore():
             s += AllenCore.prefix(i) + param.typename + " = AllenDataHandle(\"" + param.scope + "\", " + dependencies + ", \"" + param.typename + "\", \"" \
                 + AllenCore.create_var_type(param.kind) + \
                 "\", \"" + str(param.typedef) + "\"),\n"
+
+        # Properties
         for prop in algorithm.properties:
-            s += AllenCore.prefix(i) + prop.name[1:-1] + " = \"\",\n"
-        s = s[:-2]
+            # Use the python JSON parser to turn the JSON
+            # representation of default values into appropriate Python
+            # objects
+            pn = prop.name[1:-1]
+            dv = json.loads(default_properties[pn])
+
+            # Quotes have to be added for properties that hold a string
+            if type(dv) is str:
+                dv = f'"{dv}"'
+
+            # Write the code for the property and include the C++ type
+            # as a comment
+            s += f'{AllenCore.prefix(i)}{pn} = {dv}, # {prop.typedef}\n'
+        s = s[:-1]
         i -= 1
         s += "\n" + AllenCore.prefix(i) + ")\n"
 
@@ -382,8 +398,7 @@ class AllenCore():
             "std::tuple<" + ",".join(output_types) + "> output_container {};",
             "// TES wrappers", f"{tes_wrappers}",
             "// Inputs to set_arguments_size and operator()",
-            f"{tes_wrappers_reference}",
-            f"Allen::Context context{{}};",
+            f"{tes_wrappers_reference}", f"Allen::Context context{{}};",
             f"const auto argument_references = ArgumentReferences<{algorithm.namespace}::Parameters>{{tes_wrappers_references, input_aggregates_tuple}};",
             f"// set arguments size invocation",
             f"m_algorithm.set_arguments_size(argument_references, runtime_options, *constants);",
@@ -587,10 +602,32 @@ class AllenCore():
         return code
 
     @staticmethod
-    def write_algorithms_view(algorithms, filename):
+    def write_algorithms_view(algorithms, filename, default_properties):
+        from subprocess import (PIPE, run)
+
+        # Run the default_properties executable to get a JSON
+        # representation of the default values of all properties of
+        # all algorithms
+        p = run(
+            [default_properties],
+            stdout=PIPE,
+            input=';'.join([
+                "{}::{}".format(a.namespace, a.name) for a in parsed_algorithms
+            ]),
+            encoding='ascii')
+
+        if p.returncode != 0:
+            print("Failed to obtain default property values")
+            sys.exit(1)
+        default_properties = json.loads(p.stdout)
+
         s = AllenCore.write_preamble()
         for algorithm in parsed_algorithms:
-            s += AllenCore.write_algorithm_code(algorithm)
+            tn = "{}::{}".format(algorithm.namespace, algorithm.name)
+            s += AllenCore.write_algorithm_code(algorithm,
+                                                default_properties[tn])
         with open(filename, "w") as f:
             f.write(s)
 
@@ -624,8 +661,7 @@ class AllenCore():
 
     @staticmethod
     def write_algorithms_db(algorithms, filename):
-        code = "\n".join(("#pragma once", "", "#include <Configuration.h>",
-                          "\n"))
+        code = "\n".join(("#include <AlgorithmDB.h>", "\n"))
         for alg in algorithms:
             code += f"namespace {alg.namespace} {{ struct {alg.name}; }}\n"
         code += "\nAllen::TypeErasedAlgorithm instantiate_allen_algorithm(const ConfiguredAlgorithm& alg) {\n"
@@ -712,6 +748,12 @@ if __name__ == '__main__':
         type=str,
         default="",
         help="location of parsed algorithms")
+    parser.add_argument(
+        "--default_properties",
+        nargs="?",
+        type=str,
+        default="",
+        help="location of default_properties executable")
     parser.add_argument(
         "--generate",
         nargs="?",
@@ -733,7 +775,8 @@ if __name__ == '__main__':
     elif args.generate == "algorithm_headers_list":
         # Write list of files including algorithm definitions
         algorithm_headers_list = Parser().find_algorithm_files(prefix_folder)
-        AllenCore.write_algorithm_filename_list(algorithm_headers_list, args.filename)
+        AllenCore.write_algorithm_filename_list(algorithm_headers_list,
+                                                args.filename)
     else:
 
         if args.parsed_algorithms:
@@ -743,11 +786,13 @@ if __name__ == '__main__':
         else:
             # Otherwise generate parsed_algorithms on the fly
             algorithm_files = Parser().find_algorithm_files(prefix_folder)
-            parsed_algorithms = Parser().parse_all(algorithm_files, prefix_folder)
+            parsed_algorithms = Parser().parse_all(algorithm_files,
+                                                   prefix_folder)
 
         if args.generate == "views":
             # Generate algorithm python views
-            AllenCore.write_algorithms_view(parsed_algorithms, args.filename)
+            AllenCore.write_algorithms_view(parsed_algorithms, args.filename,
+                                            args.default_properties)
         elif args.generate == "wrapperlist":
             # Generate Gaudi wrapper filenames
             gaudi_wrapper_filenames = AllenCore.write_gaudi_algorithms(
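
As wired up in GenerateConfiguration.cmake, --default_properties
receives the path of the default_properties executable;
write_algorithms_view sends it ';'-separated "namespace::name" type
names on stdin and reads back JSON mapping each type name to the
JSON-encoded default values of its properties. A standalone sketch of
that exchange (the executable path and algorithm name are placeholders):

    import json
    from subprocess import PIPE, run

    p = run(["./default_properties"], stdout=PIPE,
            input="host_init_event_list::host_init_event_list_t",
            encoding="ascii")
    # {type_name: {property_name: JSON-encoded default value}}
    defaults = json.loads(p.stdout)
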
diff --git a/configuration/python/AllenConf/velo_reconstruction.py b/configuration/python/AllenConf/velo_reconstruction.py
index 5388e365e7aede855b77689bd28281f04566c370..395e7f4e800d8cc145d0709f45dbc68ef116d483 100644
--- a/configuration/python/AllenConf/velo_reconstruction.py
+++ b/configuration/python/AllenConf/velo_reconstruction.py
@@ -20,7 +20,7 @@ def decode_velo(retina_decoding=True):
 
     if retina_decoding:
         velo_banks = make_algorithm(
-            data_provider_t, name="velo_banks", bank_type="VPRetinaCluster")
+            data_provider_t, name="velo_banks", bank_type="VP")
 
         calculate_number_of_retinaclusters_each_sensor_pair = make_algorithm(
             calculate_number_of_retinaclusters_each_sensor_pair_t,
diff --git a/configuration/python/AllenCore/AllenSequenceGenerator.py b/configuration/python/AllenCore/AllenSequenceGenerator.py
index 58e89e94e5cedb57d20fc83093bf937c483db34e..6510154af4d7c26e2dfea87e44d66e370891a37d 100644
--- a/configuration/python/AllenCore/AllenSequenceGenerator.py
+++ b/configuration/python/AllenCore/AllenSequenceGenerator.py
@@ -1,9 +1,7 @@
 ###############################################################################
 # (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration           #
 ###############################################################################
-from collections import OrderedDict
 from PyConf.dataflow import GaudiDataHandle
-from json import dump
 
 
 def clean_prefix(s):
@@ -22,13 +20,17 @@ def add_deps_and_transitive_deps(dep, arg_deps, parameter_dependencies_set):
             parameter_dependencies_set.add(transitive_dep)
 
 
-def generate_json_configuration(algorithms, filename):
+def generate_json_configuration(algorithms):
     """Generates runtime configuration (JSON)."""
     sequence_json = {}
     # Add properties for each algorithm
     for algorithm in algorithms:
+        sequence_json[algorithm.name] = {
+            str(k): v
+            for k, v in algorithm.type.getDefaultProperties().items()
+            if not isinstance(v, GaudiDataHandle)
+        }
         if len(algorithm.properties):
-            sequence_json[algorithm.name] = {}
             for k, v in algorithm.properties.items():
                 sequence_json[algorithm.name][str(k)] = v
 
@@ -118,15 +120,4 @@ def generate_json_configuration(algorithms, filename):
         "configured_sequence_arguments": configured_sequence_arguments,
         "argument_dependencies": argument_dependencies
     }
-    with open(filename, 'w') as outfile:
-        dump(sequence_json, outfile, indent=4, sort_keys=True)
-
-
-def generate_allen_sequence(algorithms,
-                            json_configuration_filename="Sequence.json"):
-    """Generates an Allen valid sequence.
-
-    * json_configuration_filename: JSON configuration that can be changed at runtime to change
-                                   values of properties.
-    """
-    generate_json_configuration(algorithms, json_configuration_filename)
+    return sequence_json
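
generate_json_configuration now returns the configuration as a dict
instead of writing a file, merging each algorithm's default properties
with any explicitly configured ones and adding the scheduler
information under the "sequence" key. Roughly, with made-up algorithm
and property names:

    sequence_json = {
        # one entry per algorithm: defaults overridden by explicit values
        "velo_banks": {"bank_type": "VP"},
        "sequence": {
            "configured_algorithms": ["velo_banks"],
            "configured_arguments": [],
            "configured_sequence_arguments": [],
            "argument_dependencies": {},
        },
    }
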
diff --git a/configuration/python/AllenCore/allen_standalone_generator.py b/configuration/python/AllenCore/allen_standalone_generator.py
index fee5a3041bb7b8a7be5059a3458de0071e87d8e9..32059a23cd3062be0b9f78c6a051abfb41c48187 100644
--- a/configuration/python/AllenCore/allen_standalone_generator.py
+++ b/configuration/python/AllenCore/allen_standalone_generator.py
@@ -10,13 +10,13 @@
 ###############################################################################
 from AllenCore.cftree_ops import get_execution_list_for, BoolNode
 from AllenCore.event_list_utils import add_event_list_combiners
-from AllenCore.AllenSequenceGenerator import generate_allen_sequence
+from AllenCore.AllenSequenceGenerator import generate_json_configuration
 from AllenCore.allen_benchmarks import benchmark_weights, benchmark_efficiencies
 from AllenCore.algorithms import host_init_event_list_t
 from PyConf.components import Algorithm
 from PyConf.filecontent_metadata import flush_key_registry
-from os.path import exists
-import contextlib
+from PyConf.tonic import configurable
+from json import dump
 
 
 def make_algorithm(alg_type, name, **kwargs):
@@ -59,16 +59,16 @@ def initialize_event_lists(**kwargs):
     return initialize_lists
 
 
-def generate(node, json_configuration_filename="Sequence.json", verbose=True):
-    """Generates an Allen sequence out of a root node."""
-    if type(node) == dict:
-        node = node['control_flow_node']
-    with flush_key_registry() :
-        best_order, score = get_execution_list_for(node)
+def build_sequence(root, verbose=True):
+    if type(root) == dict:
+        root = root['control_flow_node']
+
+    with flush_key_registry():
+        best_order, score = get_execution_list_for(root)
         final_seq = add_event_list_combiners(best_order)
 
-        if verbose:
-            print("Generated sequence represented as algorithms with execution masks:")
+    if verbose:
+        print("Generated sequence represented as algorithms with execution masks:")
         for alg, mask_in in final_seq:
             if mask_in == None:
                 mask_in_str = ""
@@ -76,6 +76,19 @@ def generate(node, json_configuration_filename="Sequence.json", verbose=True):
                 mask_in_str = f" in:{str(mask_in).split('/')[1]}"
             elif isinstance(mask_in, BoolNode):
                 mask_in_str = f" in:{mask_in}"
-            if verbose:
-                print(f"  {alg}{mask_in_str}")
-        return generate_allen_sequence([alg for (alg, _) in final_seq], json_configuration_filename)
+            print(f"  {alg}{mask_in_str}")
+
+    return [alg for (alg, _) in final_seq]
+
+
+@configurable
+def generate(root, json_configuration_filename="Sequence.json", noop=False, verbose=True):
+    """Generates an Allen sequence out of a root node."""
+    if noop:
+        return
+
+    algorithms = build_sequence(root, verbose=verbose)
+
+    sequence_json = generate_json_configuration(algorithms)
+    with open(json_configuration_filename, 'w') as outfile:
+        dump(sequence_json, outfile, indent=4, sort_keys=True)
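
Since generate is now wrapped in PyConf's @configurable, its keyword
arguments can be overridden from outside the call chain with the usual
tonic bind mechanism; for example, to turn sequence generation into a
no-op (root_node is a placeholder for a real control flow node):

    from AllenCore.allen_standalone_generator import generate

    # Within this context generate() returns immediately and writes nothing
    with generate.bind(noop=True):
        generate(root_node)
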
diff --git a/configuration/python/AllenCore/gaudi_allen_generator.py b/configuration/python/AllenCore/gaudi_allen_generator.py
index a0ee924611a88ba821878ea553c460693858aff9..f5229c0e5323f7c81d610d9397a9c59eef1c4cc3 100644
--- a/configuration/python/AllenCore/gaudi_allen_generator.py
+++ b/configuration/python/AllenCore/gaudi_allen_generator.py
@@ -60,6 +60,8 @@ def make_algorithm(algorithm, name, *args, **kwargs):
         rawbank_list = ["ODIN"]
     elif bank_type == "ECal":
         rawbank_list = ["Calo", "EcalPacked"]
+    elif bank_type == "VP":
+        rawbank_list = ["VP", "VPRetinaCluster"]
     elif bank_type:
         rawbank_list = [bank_type]
 
diff --git a/configuration/src/TCK.cpp b/configuration/src/TCK.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a9aa72916235a2215125027a74af423152adb053
--- /dev/null
+++ b/configuration/src/TCK.cpp
@@ -0,0 +1,180 @@
+/*****************************************************************************\
+* (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           *
+*                                                                             *
+* This software is distributed under the terms of the Apache License          *
+* version 2 (Apache-2.0), copied verbatim in the file "COPYING".              *
+*                                                                             *
+* In applying this licence, CERN does not waive the privileges and immunities *
+* granted to it by virtue of its status as an Intergovernmental Organization  *
+* or submit itself to any jurisdiction.                                       *
+\*****************************************************************************/
+#include <dlfcn.h>
+
+#include <boost/algorithm/string/classification.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <boost/property_tree/ptree.hpp>
+#include <boost/property_tree/xml_parser.hpp>
+#include <boost/property_tree/detail/file_parser_error.hpp>
+#include <FileSystem.h>
+
+#include <git2.h>
+#include <string>
+#include <nlohmann/json.hpp>
+
+#include <HltServices/TCKUtils.h>
+#include <TCK.h>
+
+// Version headers of dependent projects
+
+namespace {
+  namespace pt = boost::property_tree;
+}
+
+std::map<std::string, std::string> Allen::TCK::project_dependencies()
+{
+  Dl_info dl_info;
+  if (!dladdr(reinterpret_cast<void*>(Allen::TCK::create_git_repository), &dl_info)) {
+    throw std::runtime_error {"Failed to obtain path for this shared library"};
+  }
+  auto manifest_path = fs::absolute(fs::path {dl_info.dli_fname}.parent_path() / ".." / "manifest.xml");
+
+  // Create an empty property tree object
+  pt::ptree manifest;
+
+  try {
+    read_xml(manifest_path.string(), manifest);
+  } catch (pt::xml_parser_error& e) {
+    std::cout << "Failed to parse manifest " << manifest_path.string() << ": " << e.what() << "\n";
+  } catch (...) {
+    std::cout << "Failed to parse manifest " << manifest_path.string() << " for an unknown reason\n";
+  }
+
+  std::map<std::string, std::string> deps;
+
+  auto add_project_dep = [&deps](auto const& project) {
+    deps[project.template get<std::string>("<xmlattr>.name")] = project.template get<std::string>("<xmlattr>.version");
+  };
+
+  deps["LCG"] = manifest.get<std::string>("manifest.heptools.version");
+  for (auto& [_, project] : manifest.get_child("manifest.used_projects")) {
+    add_project_dep(project);
+  }
+  add_project_dep(manifest.get_child("manifest.project"));
+  return deps;
+}
+
+std::tuple<bool, std::string> Allen::TCK::check_projects(nlohmann::json metadata)
+{
+  auto projects_str = [](auto const& projects) {
+    std::stringstream ps;
+    for (auto [p, v] : projects) {
+      ps << p << " " << v << "\n";
+    }
+    return ps.str();
+  };
+
+  auto projects = metadata.at("stack").at("projects").get<std::map<std::string, std::string>>();
+  auto deps = Allen::TCK::project_dependencies();
+
+  auto check = projects == deps;
+  std::string error_msg;
+  if (!check) {
+    error_msg = ("TCK dependencies " + projects_str(projects) + " are incompatible with current dependencies " +
+                 projects_str(deps));
+  }
+  return std::tuple {check, error_msg};
+}
+
+void Allen::TCK::create_git_repository(std::string repo_name)
+{
+  git_libgit2_init();
+
+  auto [repo, sig] = LHCb::TCK::Git::create_git_repository(repo_name);
+
+  git_signature_free(sig);
+  git_repository_free(repo);
+  git_libgit2_shutdown();
+}
+
+std::tuple<std::string, LHCb::TCK::Info> Allen::tck_from_git(std::string repo, std::string tck)
+{
+  using LHCb::TCK::Git::check;
+
+  git_libgit2_init();
+  git_repository* git_repo = nullptr;
+  check(git_repository_open_bare(&git_repo, repo.c_str()));
+  try {
+    auto tck_config = LHCb::TCK::Git::extract_json(git_repo, tck);
+    auto tck_info = LHCb::TCK::Git::tck_info(git_repo, tck);
+    git_libgit2_shutdown();
+    return {std::move(tck_config), std::move(tck_info)};
+  } catch (std::runtime_error const& e) {
+    git_libgit2_shutdown();
+    throw std::runtime_error {"Failed to extract JSON configuration for TCK " + tck + " from " + repo + ": " +
+                              e.what()};
+  }
+}
+
+std::tuple<std::string, LHCb::TCK::Info> Allen::sequence_from_git(std::string repo, std::string tck)
+{
+  auto [tck_config, tck_info] = tck_from_git(repo, tck);
+  if (tck_config.empty()) {
+    return {tck_config, {}};
+  }
+
+  auto tck_db = nlohmann::json::parse(tck_config);
+  nlohmann::json manifest = tck_db["manifest"];
+
+  // The configuration JSON is keyed by digest. The "manifest" entry is
+  // also indexed by digest, so loop over the manifests until the one
+  // with the matching TCK entry is found.
+  auto items = tck_db.items();
+  auto json_tck = std::find_if(items.begin(), items.end(), [&manifest, tck](auto const& e) {
+    return e.key() != "manifest" && manifest.count(e.key()) && manifest[e.key()]["TCK"] == tck;
+  });
+  if (json_tck == items.end()) {
+    throw std::runtime_error {"Configuration for TCK " + tck + " not found in " + repo};
+  }
+
+  nlohmann::json sequence;
+
+  std::vector<std::string> tokens;
+
+  for (auto const& [entry, config] : json_tck.value().items()) {
+    tokens.clear();
+    boost::algorithm::split(tokens, entry, boost::algorithm::is_any_of("/"));
+    if (tokens[0] == "Scheduler") {
+      // Put special "sequence" items where they are expected
+      sequence["sequence"][tokens[1]] = config;
+    }
+    else if (tokens.size() == 3) {
+      // The rest is algorithm configuration. In the TCK all property
+      // values are stored as strings, but Allen expects parsed JSON,
+      // so convert between the two representations here. Some
+      // properties are strings and won't parse, so we have to check
+      // that.
+      auto props = config["Properties"];
+      nlohmann::json sequence_props;
+
+      for (auto const& [prop_key, prop_val] : props.items()) {
+        auto s = prop_val.get<std::string>();
+        // Disable exceptions when parsing and test is_discarded to
+        // check if the json is valid. If it's not valid, store as a
+        // string
+        auto j = nlohmann::json::parse(s, nullptr, false);
+        if (j.is_discarded()) {
+          sequence_props[prop_key] = s;
+        }
+        else {
+          sequence_props[prop_key] = j;
+        }
+      }
+
+      std::string const& alg_name = tokens[2];
+      sequence[alg_name] = sequence_props;
+    }
+  }
+
+  return {sequence.dump(), tck_info};
+}
diff --git a/configuration/src/bindings.cpp b/configuration/src/bindings.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f6bd482ad9076e05b623884959e8aa8f1bfffa55
--- /dev/null
+++ b/configuration/src/bindings.cpp
@@ -0,0 +1,60 @@
+/*****************************************************************************\
+* (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           *
+*                                                                             *
+* This software is distributed under the terms of the Apache License          *
+* version 2 (Apache-2.0), copied verbatim in the file "COPYING".              *
+*                                                                             *
+* In applying this licence, CERN does not waive the privileges and immunities *
+* granted to it by virtue of its status as an Intergovernmental Organization  *
+* or submit itself to any jurisdiction.                                       *
+\*****************************************************************************/
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/iostream.h>
+
+#include <iostream>
+#include <numeric>
+#include <cmath>
+
+#include <TCK.h>
+
+namespace {
+  namespace py = pybind11;
+}
+
+// Python Module and Docstrings
+PYBIND11_MODULE(TCK, m)
+{
+  py::class_<LHCb::TCK::Info>(m, "TCKInfo")
+    .def(py::init<>())
+    .def_readwrite("digest", &LHCb::TCK::Info::digest)
+    .def_readwrite("tck", &LHCb::TCK::Info::tck)
+    .def_readwrite("release", &LHCb::TCK::Info::release)
+    .def_readwrite("type", &LHCb::TCK::Info::type)
+    .def_readwrite("label", &LHCb::TCK::Info::label)
+    .def_readwrite("metadata", &LHCb::TCK::Info::metadata);
+
+  m.doc() = R"pbdoc(
+    Utility functions to interact with a git repository that contains
+    persisted configurations identified by a so-called TCK (Trigger
+    Configuration Key)
+
+    .. currentmodule:: TCK
+
+    .. autosummary::
+       :toctree: _generate
+
+    TCKInfo
+    tck_from_git
+    sequence_from_git
+    create_git_repository
+    )pbdoc";
+
+  m.attr("config_version") = py::int_(Allen::TCK::config_version);
+
+  m.def("create_git_repository", &Allen::TCK::create_git_repository, "Create a git repository that can store TCKs");
+  m.def("tck_from_git", &Allen::tck_from_git, "Get the TCK as it is in the git repository");
+  m.def(
+    "sequence_from_git",
+    &Allen::sequence_from_git,
+    "Get the TCK and TCK information in a format that can be used to "
+    "configure Allen");
+}
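+
+// Example use from Python (sketch; repository path and TCK are illustrative):
+//
+//   from TCK import sequence_from_git
+//   sequence, info = sequence_from_git("config.git", "0x10000001")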
diff --git a/configuration/src/default_properties.cpp b/configuration/src/default_properties.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..01bf693ea347b69afbb076ab02e52b07704c8f89
--- /dev/null
+++ b/configuration/src/default_properties.cpp
@@ -0,0 +1,55 @@
+/*****************************************************************************\
+* (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           *
+*                                                                             *
+* This software is distributed under the terms of the Apache License          *
+* version 2 (Apache-2.0), copied verbatim in the file "COPYING".              *
+*                                                                             *
+* In applying this licence, CERN does not waive the privileges and immunities *
+* granted to it by virtue of its status as an Intergovernmental Organization  *
+* or submit itself to any jurisdiction.                                       *
+\*****************************************************************************/
+#include <iostream>
+#include <iomanip>
+#include <iterator>
+#include <string>
+#include <algorithm>
+#include <boost/algorithm/string.hpp>
+#include <nlohmann/json.hpp>
+#include <AlgorithmDB.h>
+
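+// Usage (sketch): pipe a semicolon-separated list of algorithm types on
+// stdin and read the default-property JSON from stdout; the binary name and
+// algorithm type below are illustrative:
+//
+//   echo -n "dec_reporter::dec_reporter_t" | ./default_properties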
+int main()
+{
+  // Read the semicolon-separated list of algorithms from stdin
+  std::istreambuf_iterator<char> begin(std::cin), end;
+  std::string input(begin, end);
+  if (!input.empty() && input[input.size() - 1] == '\n') {
+    input.erase(input.size() - 1);
+  }
+
+  // Split the list into algorithm namespace::type
+  std::vector<std::string> algorithms;
+  boost::split(algorithms, input, boost::is_any_of(";"));
+
+  // Use non-default JSON parser to parse all floating point numbers
+  // as floats and integers as 32 bits. This aligns with what is used
+  // in Allen
+  using json_float = nlohmann::basic_json<std::map, std::vector, std::string, bool, std::int32_t, std::uint32_t, float>;
+  json_float default_properties;
+
+  // Loop over the algorithms, instantiate each algorithm and get its
+  // (default valued) properties.
+  for (auto const& alg : algorithms) {
+    auto allen_alg = instantiate_allen_algorithm({alg, "algorithm", ""});
+    std::map<std::string, std::string> string_props;
+    for (auto [k, j] : allen_alg.get_properties()) {
+      // Assign to our JSON type to get the desired parsing behaviour,
+      // and use to_string so that the Python JSON parser can turn the
+      // values into Python objects.
+      json_float jf = j;
+      string_props[k] = to_string(jf);
+    }
+    // Store the string representations in the output JSON, keyed by algorithm.
+    default_properties[alg] = string_props;
+  }
+  std::cout << std::setw(4) << default_properties;
+}
diff --git a/device/lumi/include/CaloLumiCounters.cuh b/device/lumi/include/CaloLumiCounters.cuh
index 258e5143bbffbf26d1187c012e499b4ed5259f61..88aca92d28a5e9c48c47de90e663b7924d2d07e8 100644
--- a/device/lumi/include/CaloLumiCounters.cuh
+++ b/device/lumi/include/CaloLumiCounters.cuh
@@ -39,21 +39,17 @@ namespace calo_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      calo_offsets_and_sizes_t,
-      "calo_offsets_and_sizes",
-      "offsets and sizes in bits for the calo counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_calo_counters>)
-    calo_offsets_and_sizes;
-    PROPERTY(
-      calo_shifts_and_scales_t,
-      "calo_shifts_and_scales",
-      "shifts and scales for the calo counters",
-      std::array<float, 2 * Lumi::Constants::n_calo_counters>)
-    calo_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void calo_lumi_counters(Parameters, const unsigned number_of_events, const char* raw_ecal_geometry);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_calo_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_calo_counters>;
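+
+  // The offsets/sizes and shifts/scales are passed to the kernel by value as
+  // kernel arguments, so they no longer need to be algorithm Properties.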
+
+  __global__ void calo_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales,
+    const char* raw_ecal_geometry);
 
   struct calo_lumi_counters_t : public DeviceAlgorithm, Parameters {
 
@@ -71,12 +67,9 @@ namespace calo_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<calo_offsets_and_sizes_t> m_calo_offsets_and_sizes {
-      this,
-      {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<calo_shifts_and_scales_t> m_calo_shifts_and_scales {
-      this,
-      {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
 
   }; // struct calo_lumi_counters_t
 } // namespace calo_lumi_counters
diff --git a/device/lumi/include/MakeLumiSummary.cuh b/device/lumi/include/MakeLumiSummary.cuh
index ae9689e22a387e5275e6ad939d2cbae76b586739..54f35cbcfc8a3c48bc297c764619e6553debf56f 100644
--- a/device/lumi/include/MakeLumiSummary.cuh
+++ b/device/lumi/include/MakeLumiSummary.cuh
@@ -43,18 +43,15 @@ namespace make_lumi_summary {
       "lumi_counter_schema",
       "schema for lumi counters",
       std::map<std::string, std::pair<unsigned, unsigned>>);
-    PROPERTY(
-      basic_offsets_and_sizes_t,
-      "basic_offsets_and_sizes",
-      "offsets and sizes in bits for the ODIN and GEC counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_basic_counters>)
-    basic_offsets_and_sizes;
   }; // struct Parameters
 
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_basic_counters>;
+
   __global__ void make_lumi_summary(
     Parameters,
     const unsigned number_of_events,
     const unsigned number_of_events_passed_gec,
+    const offsets_and_sizes_t offsets_and_sizes,
     std::array<const Lumi::LumiInfo*, Lumi::Constants::n_sub_infos> lumiInfos,
     std::array<unsigned, Lumi::Constants::n_sub_infos> spanSize,
     const unsigned size_of_aggregate);
@@ -78,7 +75,8 @@ namespace make_lumi_summary {
     Property<encoding_key_full_t> m_key_full {this, 0};
     Property<lumi_sum_length_t> m_lumi_sum_length {this, 0u};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
-    Property<basic_offsets_and_sizes_t> m_basic_offsets_and_sizes {this,
-                                                                   {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+
   }; // struct make_lumi_summary_t
 } // namespace make_lumi_summary
diff --git a/device/lumi/include/MuonLumiCounters.cuh b/device/lumi/include/MuonLumiCounters.cuh
index 459f5aa23f453adf764b666349c372d2977682ba..82ea4c36d6dc4045e943d3595d295bdd3d5a6ac4 100644
--- a/device/lumi/include/MuonLumiCounters.cuh
+++ b/device/lumi/include/MuonLumiCounters.cuh
@@ -39,21 +39,16 @@ namespace muon_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      muon_offsets_and_sizes_t,
-      "muon_offsets_and_sizes",
-      "offsets and sizes in bits for the muon counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_muon_counters>)
-    muon_offsets_and_sizes;
-    PROPERTY(
-      muon_shifts_and_scales_t,
-      "muon_shifts_and_scales",
-      "shifts and scales for the muon counters",
-      std::array<float, 2 * Lumi::Constants::n_muon_counters>)
-    muon_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void muon_lumi_counters(Parameters, const unsigned number_of_events);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_muon_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_muon_counters>;
+
+  __global__ void muon_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales);
 
   struct muon_lumi_counters_t : public DeviceAlgorithm, Parameters {
     void set_arguments_size(ArgumentReferences<Parameters> arguments, const RuntimeOptions&, const Constants&) const;
@@ -70,11 +65,10 @@ namespace muon_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<muon_offsets_and_sizes_t> m_muon_offsets_and_sizes {
-      this,
-      {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<muon_shifts_and_scales_t> m_muon_shifts_and_scales {this, {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f,
-                                                                         0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f,
-                                                                         0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
+                                               0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f,
+                                               0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
   }; // struct muon_lumi_counters_t
 } // namespace muon_lumi_counters
diff --git a/device/lumi/include/PVLumiCounters.cuh b/device/lumi/include/PVLumiCounters.cuh
index ebc47a4a97bb9f119b8fffe372bbcf80e82219b5..1b240b844166a7a95fdcfea2af7f266ad11d40fa 100644
--- a/device/lumi/include/PVLumiCounters.cuh
+++ b/device/lumi/include/PVLumiCounters.cuh
@@ -39,21 +39,16 @@ namespace pv_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      pv_offsets_and_sizes_t,
-      "pv_offsets_and_sizes",
-      "offsets and sizes in bits for the PV counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_pv_counters>)
-    pv_offsets_and_sizes;
-    PROPERTY(
-      pv_shifts_and_scales_t,
-      "pv_shifts_and_scales",
-      "shifts and scales for the PV counters",
-      std::array<float, 2 * Lumi::Constants::n_pv_counters>)
-    pv_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void pv_lumi_counters(Parameters, const unsigned number_of_events);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_pv_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_pv_counters>;
+
+  __global__ void pv_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales);
 
   struct pv_lumi_counters_t : public DeviceAlgorithm, Parameters {
     void set_arguments_size(ArgumentReferences<Parameters> arguments, const RuntimeOptions&, const Constants&) const;
@@ -70,8 +65,8 @@ namespace pv_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<pv_offsets_and_sizes_t> m_pv_offsets_and_sizes {this, {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<pv_shifts_and_scales_t> m_pv_shifts_and_scales {this,
-                                                             {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
   }; // struct pv_lumi_counters_t
 } // namespace pv_lumi_counters
diff --git a/device/lumi/include/PlumeLumiCounters.cuh b/device/lumi/include/PlumeLumiCounters.cuh
index d8794d0f60bb5f2f0ec87e3c3901923ae5a291db..99f04ad5baff022f321e506ba54f40ca7c4e6c66 100644
--- a/device/lumi/include/PlumeLumiCounters.cuh
+++ b/device/lumi/include/PlumeLumiCounters.cuh
@@ -38,21 +38,16 @@ namespace plume_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      plume_offsets_and_sizes_t,
-      "plume_offsets_and_sizes",
-      "offsets and sizes in bits for the PLUME counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_plume_counters>)
-    plume_offsets_and_sizes;
-    PROPERTY(
-      plume_shifts_and_scales_t,
-      "plume_shifts_and_scales",
-      "shifts and scales for the PLUME counters",
-      std::array<float, 2 * Lumi::Constants::n_plume_counters>)
-    plume_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void plume_lumi_counters(Parameters, const unsigned number_of_events);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_plume_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_plume_counters>;
+
+  __global__ void plume_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales);
 
   struct plume_lumi_counters_t : public DeviceAlgorithm, Parameters {
     void set_arguments_size(ArgumentReferences<Parameters> arguments, const RuntimeOptions&, const Constants&) const;
@@ -69,7 +64,8 @@ namespace plume_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<plume_offsets_and_sizes_t> m_plume_offsets_and_sizes {this, {{0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<plume_shifts_and_scales_t> m_plume_shifts_and_scales {this, {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
   }; // struct plume_lumi_counters_t
 } // namespace plume_lumi_counters
diff --git a/device/lumi/include/SciFiLumiCounters.cuh b/device/lumi/include/SciFiLumiCounters.cuh
index 65c58b903aeb6346108f83a11af9be8f7e8efa57..b8d02eed6309ac5081f53de37b734779aa5d3ff0 100644
--- a/device/lumi/include/SciFiLumiCounters.cuh
+++ b/device/lumi/include/SciFiLumiCounters.cuh
@@ -37,21 +37,17 @@ namespace scifi_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      scifi_offsets_and_sizes_t,
-      "scifi_offsets_and_sizes",
-      "offsets and sizes in bits for the SciFi counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_scifi_counters>)
-    scifi_offsets_and_sizes;
-    PROPERTY(
-      scifi_shifts_and_scales_t,
-      "scifi_shifts_and_scales",
-      "shifts and scales for the SciFi counters",
-      std::array<float, 2 * Lumi::Constants::n_scifi_counters>)
-    scifi_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void scifi_lumi_counters(Parameters, const unsigned number_of_events, const char* scifi_geometry);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_scifi_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_scifi_counters>;
+
+  __global__ void scifi_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales,
+    const char* scifi_geometry);
 
   struct scifi_lumi_counters_t : public DeviceAlgorithm, Parameters {
     void set_arguments_size(ArgumentReferences<Parameters> arguments, const RuntimeOptions&, const Constants&) const;
@@ -68,10 +64,8 @@ namespace scifi_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<scifi_offsets_and_sizes_t> m_scifi_offsets_and_sizes {this,
-                                                                   {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<scifi_shifts_and_scales_t> m_scifi_shifts_and_scales {
-      this,
-      {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
   }; // struct scifi_lumi_counters_t
 } // namespace scifi_lumi_counters
diff --git a/device/lumi/include/VeloLumiCounters.cuh b/device/lumi/include/VeloLumiCounters.cuh
index 5299844063463386cc72fd8cd18c1b7cf25ca482..d9205c66137a410a1e4cc144f2ddc295482bed8a 100644
--- a/device/lumi/include/VeloLumiCounters.cuh
+++ b/device/lumi/include/VeloLumiCounters.cuh
@@ -48,21 +48,16 @@ namespace velo_lumi_counters {
       "shifts and scales extracted from the schema for lumi counters",
       std::map<std::string, std::pair<float, float>>)
     lumi_counter_shifts_and_scales;
-    PROPERTY(
-      velo_offsets_and_sizes_t,
-      "velo_offsets_and_sizes",
-      "offsets and sizes in bits for the VELO counters",
-      std::array<unsigned, 2 * Lumi::Constants::n_velo_counters>)
-    velo_offsets_and_sizes;
-    PROPERTY(
-      velo_shifts_and_scales_t,
-      "velo_shifts_and_scales",
-      "shifts and scales for the VELO counters",
-      std::array<float, 2 * Lumi::Constants::n_velo_counters>)
-    velo_shifts_and_scales;
   }; // struct Parameters
 
-  __global__ void velo_lumi_counters(Parameters, const unsigned number_of_events);
+  using offsets_and_sizes_t = std::array<unsigned, 2 * Lumi::Constants::n_velo_counters>;
+  using shifts_and_scales_t = std::array<float, 2 * Lumi::Constants::n_velo_counters>;
+
+  __global__ void velo_lumi_counters(
+    Parameters,
+    const unsigned number_of_events,
+    const offsets_and_sizes_t offsets_and_sizes,
+    const shifts_and_scales_t shifts_and_scales);
 
   // doca and eta copied from device/event_model/common/include/CopyTrackParameters.cuh
   // to avoid extra header files requirements
@@ -99,10 +94,10 @@ namespace velo_lumi_counters {
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<lumi_counter_schema_t> m_lumi_counter_schema {this, {}};
     Property<lumi_counter_shifts_and_scales_t> m_lumi_counter_shifts_and_scales {this, {}};
-    Property<velo_offsets_and_sizes_t> m_velo_offsets_and_sizes {this, {{0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
-                                                                         0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u}}};
-    Property<velo_shifts_and_scales_t> m_velo_shifts_and_scales {
-      this,
-      {{0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f}}};
+
+    offsets_and_sizes_t m_offsets_and_sizes = {0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
+                                               0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u};
+    shifts_and_scales_t m_shifts_and_scales = {0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f,
+                                               0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f, 0.f, 1.f};
   }; // struct velo_lumi_counters_t
 } // namespace velo_lumi_counters
diff --git a/device/lumi/src/CaloLumiCounters.cu b/device/lumi/src/CaloLumiCounters.cu
index d12805c9be0d6bdd69106a6c3cef6af6610d493c..5e4a828826a648ef511f972cdeea2bc1ed179630 100644
--- a/device/lumi/src/CaloLumiCounters.cu
+++ b/device/lumi/src/CaloLumiCounters.cu
@@ -29,31 +29,26 @@ void calo_lumi_counters::calo_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_calo_counters> calo_offsets_and_sizes =
-    property<calo_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_calo_counters> calo_shifts_and_scales = property<calo_shifts_and_scales_t>();
 
-  unsigned c_idx(0u);
+  unsigned c_idx = 0u;
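+  // Pack the per-counter (offset, size) and (shift, scale) pairs into flat
+  // arrays: element 2*i holds the offset/shift, element 2*i+1 the size/scale.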
   for (auto counter_name : Lumi::Constants::calo_counter_names) {
     if (schema.find(counter_name) == schema.end()) {
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      calo_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      calo_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      calo_shifts_and_scales[2 * c_idx] = 0.f;
-      calo_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      calo_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      calo_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<calo_offsets_and_sizes_t>(calo_offsets_and_sizes);
-  set_property_value<calo_shifts_and_scales_t>(calo_shifts_and_scales);
 }
 
 void calo_lumi_counters::calo_lumi_counters_t::operator()(
@@ -66,12 +61,18 @@ void calo_lumi_counters::calo_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(calo_lumi_counters)(dim3(2), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments), constants.dev_ecal_geometry);
+    arguments,
+    first<host_number_of_events_t>(arguments),
+    m_offsets_and_sizes,
+    m_shifts_and_scales,
+    constants.dev_ecal_geometry);
 }
 
 __global__ void calo_lumi_counters::calo_lumi_counters(
   calo_lumi_counters::Parameters parameters,
   const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales,
   const char* raw_ecal_geometry)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
@@ -115,11 +116,11 @@ __global__ void calo_lumi_counters::calo_lumi_counters(
     for (unsigned i = 0; i < Lumi::Constants::n_calo_counters; ++i) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + i],
-        parameters.calo_offsets_and_sizes.get()[2 * i],
-        parameters.calo_offsets_and_sizes.get()[2 * i + 1],
+        offsets_and_sizes[2 * i],
+        offsets_and_sizes[2 * i + 1],
         E_vals[i],
-        parameters.calo_shifts_and_scales.get()[2 * i],
-        parameters.calo_shifts_and_scales.get()[2 * i + 1]);
+        shifts_and_scales[2 * i],
+        shifts_and_scales[2 * i + 1]);
     }
   }
 }
diff --git a/device/lumi/src/MakeLumiSummary.cu b/device/lumi/src/MakeLumiSummary.cu
index c96583970d46261407f6bbbaee4699eac2be8a06..6798228781225b289b012c9db6d17ecc807e674a 100644
--- a/device/lumi/src/MakeLumiSummary.cu
+++ b/device/lumi/src/MakeLumiSummary.cu
@@ -28,8 +28,6 @@ void make_lumi_summary::make_lumi_summary_t::set_arguments_size(
 void make_lumi_summary::make_lumi_summary_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_basic_counters> basic_offsets_and_sizes =
-    property<basic_offsets_and_sizes_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::basic_counter_names) {
@@ -37,12 +35,11 @@ void make_lumi_summary::make_lumi_summary_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      basic_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      basic_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<basic_offsets_and_sizes_t>(basic_offsets_and_sizes);
 }
 
 void make_lumi_summary::make_lumi_summary_t::operator()(
@@ -87,6 +84,7 @@ void make_lumi_summary::make_lumi_summary_t::operator()(
     arguments,
     first<host_number_of_events_t>(arguments),
     size<dev_event_list_t>(arguments),
+    m_offsets_and_sizes,
     lumiInfos,
     infoSize,
     size_of_aggregate);
@@ -125,6 +123,7 @@ __global__ void make_lumi_summary::make_lumi_summary(
   make_lumi_summary::Parameters parameters,
   const unsigned number_of_events,
   const unsigned number_of_events_passed_gec,
+  const offsets_and_sizes_t offsets_and_sizes,
   std::array<const Lumi::LumiInfo*, Lumi::Constants::n_sub_infos> lumiInfos,
   std::array<unsigned, Lumi::Constants::n_sub_infos> infoSize,
   const unsigned size_of_aggregate)
@@ -152,36 +151,23 @@ __global__ void make_lumi_summary::make_lumi_summary(
     uint64_t t0 = static_cast<uint64_t>(odin.gpsTime()) - new_bcid * 1000 / 40078;
     // event time
     setField(
-      parameters.basic_offsets_and_sizes.get()[0],
-      parameters.basic_offsets_and_sizes.get()[1],
-      lumi_summary,
-      static_cast<unsigned>(t0 & 0xffffffff),
-      sum_length);
-    setField(
-      parameters.basic_offsets_and_sizes.get()[2],
-      parameters.basic_offsets_and_sizes.get()[3],
-      lumi_summary,
-      static_cast<unsigned>(t0 >> 32),
-      sum_length);
+      offsets_and_sizes[0], offsets_and_sizes[1], lumi_summary, static_cast<unsigned>(t0 & 0xffffffff), sum_length);
+    setField(offsets_and_sizes[2], offsets_and_sizes[3], lumi_summary, static_cast<unsigned>(t0 >> 32), sum_length);
 
     // gps time offset
     setField(
-      parameters.basic_offsets_and_sizes.get()[4],
-      parameters.basic_offsets_and_sizes.get()[5],
+      offsets_and_sizes[4],
+      offsets_and_sizes[5],
       lumi_summary,
       static_cast<unsigned>(new_bcid & 0xffffffff),
       sum_length);
     setField(
-      parameters.basic_offsets_and_sizes.get()[6],
-      parameters.basic_offsets_and_sizes.get()[7],
-      lumi_summary,
-      static_cast<unsigned>(new_bcid >> 32),
-      sum_length);
+      offsets_and_sizes[6], offsets_and_sizes[7], lumi_summary, static_cast<unsigned>(new_bcid >> 32), sum_length);
 
     // bunch crossing type
     setField(
-      parameters.basic_offsets_and_sizes.get()[8],
-      parameters.basic_offsets_and_sizes.get()[9],
+      offsets_and_sizes[8],
+      offsets_and_sizes[9],
       lumi_summary,
       static_cast<unsigned>(odin.bunchCrossingType()),
       sum_length);
@@ -194,12 +180,7 @@ __global__ void make_lumi_summary::make_lumi_summary(
         break;
       }
     }
-    setField(
-      parameters.basic_offsets_and_sizes.get()[10],
-      parameters.basic_offsets_and_sizes.get()[11],
-      lumi_summary,
-      passedGEC,
-      sum_length);
+    setField(offsets_and_sizes[10], offsets_and_sizes[11], lumi_summary, passedGEC, sum_length);
 
     /// write lumi infos to the summary
     for (unsigned i = 0; i < size_of_aggregate; ++i) {
diff --git a/device/lumi/src/MuonLumiCounters.cu b/device/lumi/src/MuonLumiCounters.cu
index 95cf4d10722e283f6f4a35c87be4b20c4c638ed7..8ffa2563538e85f6fbb38e507993a25bb58d1a65 100644
--- a/device/lumi/src/MuonLumiCounters.cu
+++ b/device/lumi/src/MuonLumiCounters.cu
@@ -27,9 +27,6 @@ void muon_lumi_counters::muon_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_muon_counters> muon_offsets_and_sizes =
-    property<muon_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_muon_counters> muon_shifts_and_scales = property<muon_shifts_and_scales_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::muon_counter_names) {
@@ -37,21 +34,19 @@ void muon_lumi_counters::muon_lumi_counters_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      muon_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      muon_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      muon_shifts_and_scales[2 * c_idx] = 0.f;
-      muon_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      muon_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      muon_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<muon_offsets_and_sizes_t>(muon_offsets_and_sizes);
-  set_property_value<muon_shifts_and_scales_t>(muon_shifts_and_scales);
 }
 
 void muon_lumi_counters::muon_lumi_counters_t::operator()(
@@ -64,12 +59,14 @@ void muon_lumi_counters::muon_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(muon_lumi_counters)(dim3(4u), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments));
+    arguments, first<host_number_of_events_t>(arguments), m_offsets_and_sizes, m_shifts_and_scales);
 }
 
 __global__ void muon_lumi_counters::muon_lumi_counters(
   muon_lumi_counters::Parameters parameters,
-  const unsigned number_of_events)
+  const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
        event_number += blockDim.x * gridDim.x) {
@@ -100,19 +97,19 @@ __global__ void muon_lumi_counters::muon_lumi_counters(
     for (unsigned i = 0; i < Lumi::Constants::n_muon_station_regions; ++i) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + i],
-        parameters.muon_offsets_and_sizes.get()[2 * i],
-        parameters.muon_offsets_and_sizes.get()[2 * i + 1],
+        offsets_and_sizes[2 * i],
+        offsets_and_sizes[2 * i + 1],
         muon_hits_offsets[muon_offsets[i + 1]] - muon_hits_offsets[muon_offsets[i]],
-        parameters.muon_shifts_and_scales.get()[2 * i],
-        parameters.muon_shifts_and_scales.get()[2 * i + 1]);
+        shifts_and_scales[2 * i],
+        shifts_and_scales[2 * i + 1]);
     }
 
     fillLumiInfo(
       parameters.dev_lumi_infos[info_offset + Lumi::Constants::n_muon_station_regions],
-      parameters.muon_offsets_and_sizes.get()[2 * Lumi::Constants::n_muon_station_regions],
-      parameters.muon_offsets_and_sizes.get()[2 * Lumi::Constants::n_muon_station_regions + 1],
+      offsets_and_sizes[2 * Lumi::Constants::n_muon_station_regions],
+      offsets_and_sizes[2 * Lumi::Constants::n_muon_station_regions + 1],
       parameters.dev_muon_number_of_tracks[event_number],
-      parameters.muon_shifts_and_scales.get()[2 * Lumi::Constants::n_muon_station_regions],
-      parameters.muon_shifts_and_scales.get()[2 * Lumi::Constants::n_muon_station_regions + 1]);
+      shifts_and_scales[2 * Lumi::Constants::n_muon_station_regions],
+      shifts_and_scales[2 * Lumi::Constants::n_muon_station_regions + 1]);
   }
 }
diff --git a/device/lumi/src/PVLumiCounters.cu b/device/lumi/src/PVLumiCounters.cu
index 156fc18a15a9f476bcc9547737109e6dd8809cd9..c80f24c88ae1aabb15fe1110853df8351a203abf 100644
--- a/device/lumi/src/PVLumiCounters.cu
+++ b/device/lumi/src/PVLumiCounters.cu
@@ -26,8 +26,6 @@ void pv_lumi_counters::pv_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_pv_counters> pv_offsets_and_sizes = property<pv_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_pv_counters> pv_shifts_and_scales = property<pv_shifts_and_scales_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::pv_counter_names) {
@@ -35,21 +33,19 @@ void pv_lumi_counters::pv_lumi_counters_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      pv_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      pv_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      pv_shifts_and_scales[2 * c_idx] = 0.f;
-      pv_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      pv_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      pv_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<pv_offsets_and_sizes_t>(pv_offsets_and_sizes);
-  set_property_value<pv_shifts_and_scales_t>(pv_shifts_and_scales);
 }
 
 void pv_lumi_counters::pv_lumi_counters_t::operator()(
@@ -62,12 +58,14 @@ void pv_lumi_counters::pv_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(pv_lumi_counters)(dim3(4u), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments));
+    arguments, first<host_number_of_events_t>(arguments), m_offsets_and_sizes, m_shifts_and_scales);
 }
 
 __global__ void pv_lumi_counters::pv_lumi_counters(
   pv_lumi_counters::Parameters parameters,
-  const unsigned number_of_events)
+  const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
        event_number += blockDim.x * gridDim.x) {
@@ -102,11 +100,11 @@ __global__ void pv_lumi_counters::pv_lumi_counters(
     for (unsigned i = 0; i < Lumi::Constants::n_pv_counters; ++i) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + i],
-        parameters.pv_offsets_and_sizes.get()[2 * i],
-        parameters.pv_offsets_and_sizes.get()[2 * i + 1],
+        offsets_and_sizes[2 * i],
+        offsets_and_sizes[2 * i + 1],
         pv_counters[i],
-        parameters.pv_shifts_and_scales.get()[2 * i],
-        parameters.pv_shifts_and_scales.get()[2 * i + 1]);
+        shifts_and_scales[2 * i],
+        shifts_and_scales[2 * i + 1]);
     }
   }
 }
diff --git a/device/lumi/src/PlumeLumiCounters.cu b/device/lumi/src/PlumeLumiCounters.cu
index d612a851e0f043342f4819442a7d0072f206de72..3672969fca81adbf0fa412041f10343db5811859 100644
--- a/device/lumi/src/PlumeLumiCounters.cu
+++ b/device/lumi/src/PlumeLumiCounters.cu
@@ -27,10 +27,6 @@ void plume_lumi_counters::plume_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_plume_counters> plume_offsets_and_sizes =
-    property<plume_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_plume_counters> plume_shifts_and_scales =
-    property<plume_shifts_and_scales_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::plume_counter_names) {
@@ -38,21 +34,19 @@ void plume_lumi_counters::plume_lumi_counters_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      plume_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      plume_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      plume_shifts_and_scales[2 * c_idx] = 0.f;
-      plume_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      plume_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      plume_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<plume_offsets_and_sizes_t>(plume_offsets_and_sizes);
-  set_property_value<plume_shifts_and_scales_t>(plume_shifts_and_scales);
 }
 
 void plume_lumi_counters::plume_lumi_counters_t::operator()(
@@ -65,12 +59,14 @@ void plume_lumi_counters::plume_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(plume_lumi_counters)(dim3(4u), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments));
+    arguments, first<host_number_of_events_t>(arguments), m_offsets_and_sizes, m_shifts_and_scales);
 }
 
 __global__ void plume_lumi_counters::plume_lumi_counters(
   plume_lumi_counters::Parameters parameters,
-  const unsigned number_of_events)
+  const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
        event_number += blockDim.x * gridDim.x) {
@@ -101,11 +97,11 @@ __global__ void plume_lumi_counters::plume_lumi_counters(
     for (unsigned i = 0u; i < Lumi::Constants::n_plume_counters; ++i) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + i],
-        parameters.plume_offsets_and_sizes.get()[2 * i],
-        parameters.plume_offsets_and_sizes.get()[2 * i + 1],
+        offsets_and_sizes[2 * i],
+        offsets_and_sizes[2 * i + 1],
         plume_counters[i],
-        parameters.plume_shifts_and_scales.get()[2 * i],
-        parameters.plume_shifts_and_scales.get()[2 * i + 1]);
+        shifts_and_scales[2 * i],
+        shifts_and_scales[2 * i + 1]);
     }
   }
 }
diff --git a/device/lumi/src/SciFiLumiCounters.cu b/device/lumi/src/SciFiLumiCounters.cu
index 2879ca650aafc083c20ff705ee58b63387abee66..17f67c4b5b809e9063ba513ba4d819329cdc36e2 100644
--- a/device/lumi/src/SciFiLumiCounters.cu
+++ b/device/lumi/src/SciFiLumiCounters.cu
@@ -30,10 +30,6 @@ void scifi_lumi_counters::scifi_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_scifi_counters> scifi_offsets_and_sizes =
-    property<scifi_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_scifi_counters> scifi_shifts_and_scales =
-    property<scifi_shifts_and_scales_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::scifi_counter_names) {
@@ -41,21 +37,19 @@ void scifi_lumi_counters::scifi_lumi_counters_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      scifi_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      scifi_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      scifi_shifts_and_scales[2 * c_idx] = 0.f;
-      scifi_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      scifi_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      scifi_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<scifi_offsets_and_sizes_t>(scifi_offsets_and_sizes);
-  set_property_value<scifi_shifts_and_scales_t>(scifi_shifts_and_scales);
 }
 
 void scifi_lumi_counters::scifi_lumi_counters_t::operator()(
@@ -68,12 +62,18 @@ void scifi_lumi_counters::scifi_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(scifi_lumi_counters)(dim3(4u), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments), constants.dev_scifi_geometry);
+    arguments,
+    first<host_number_of_events_t>(arguments),
+    m_offsets_and_sizes,
+    m_shifts_and_scales,
+    constants.dev_scifi_geometry);
 }
 
 __global__ void scifi_lumi_counters::scifi_lumi_counters(
   scifi_lumi_counters::Parameters parameters,
   const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales,
   const char* scifi_geometry)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
@@ -111,11 +111,11 @@ __global__ void scifi_lumi_counters::scifi_lumi_counters(
     for (unsigned i = 0; i < Lumi::Constants::n_scifi_counters; ++i) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + i],
-        parameters.scifi_offsets_and_sizes.get()[2 * i],
-        parameters.scifi_offsets_and_sizes.get()[2 * i + 1],
+        offsets_and_sizes[2 * i],
+        offsets_and_sizes[2 * i + 1],
         SciFiCounters[i],
-        parameters.scifi_shifts_and_scales.get()[2 * i],
-        parameters.scifi_shifts_and_scales.get()[2 * i + 1]);
+        shifts_and_scales[2 * i],
+        shifts_and_scales[2 * i + 1]);
     }
   }
 }
diff --git a/device/lumi/src/VeloLumiCounters.cu b/device/lumi/src/VeloLumiCounters.cu
index a0cfffe99333aa01b9af3928280d891fa67851b4..68ae22ce88968ebf252a6b8abe98d6e3846934d5 100644
--- a/device/lumi/src/VeloLumiCounters.cu
+++ b/device/lumi/src/VeloLumiCounters.cu
@@ -27,9 +27,6 @@ void velo_lumi_counters::velo_lumi_counters_t::init()
 {
   std::map<std::string, std::pair<unsigned, unsigned>> schema = property<lumi_counter_schema_t>();
   std::map<std::string, std::pair<float, float>> shifts_and_scales = property<lumi_counter_shifts_and_scales_t>();
-  std::array<unsigned, 2 * Lumi::Constants::n_velo_counters> velo_offsets_and_sizes =
-    property<velo_offsets_and_sizes_t>();
-  std::array<float, 2 * Lumi::Constants::n_velo_counters> velo_shifts_and_scales = property<velo_shifts_and_scales_t>();
 
   unsigned c_idx(0u);
   for (auto counter_name : Lumi::Constants::velo_counter_names) {
@@ -37,21 +34,19 @@ void velo_lumi_counters::velo_lumi_counters_t::init()
       std::cout << "LumiSummary schema does not use " << counter_name << std::endl;
     }
     else {
-      velo_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
-      velo_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
+      m_offsets_and_sizes[2 * c_idx] = schema[counter_name].first;
+      m_offsets_and_sizes[2 * c_idx + 1] = schema[counter_name].second;
     }
     if (shifts_and_scales.find(counter_name) == shifts_and_scales.end()) {
-      velo_shifts_and_scales[2 * c_idx] = 0.f;
-      velo_shifts_and_scales[2 * c_idx + 1] = 1.f;
+      m_shifts_and_scales[2 * c_idx] = 0.f;
+      m_shifts_and_scales[2 * c_idx + 1] = 1.f;
     }
     else {
-      velo_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
-      velo_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
+      m_shifts_and_scales[2 * c_idx] = shifts_and_scales[counter_name].first;
+      m_shifts_and_scales[2 * c_idx + 1] = shifts_and_scales[counter_name].second;
     }
     ++c_idx;
   }
-  set_property_value<velo_offsets_and_sizes_t>(velo_offsets_and_sizes);
-  set_property_value<velo_shifts_and_scales_t>(velo_shifts_and_scales);
 }
 
 void velo_lumi_counters::velo_lumi_counters_t::operator()(
@@ -64,12 +59,14 @@ void velo_lumi_counters::velo_lumi_counters_t::operator()(
   if (first<host_lumi_summaries_count_t>(arguments) == 0) return;
 
   global_function(velo_lumi_counters)(dim3(4u), property<block_dim_t>(), context)(
-    arguments, first<host_number_of_events_t>(arguments));
+    arguments, first<host_number_of_events_t>(arguments), m_offsets_and_sizes, m_shifts_and_scales);
 }
 
 __global__ void velo_lumi_counters::velo_lumi_counters(
   velo_lumi_counters::Parameters parameters,
-  const unsigned number_of_events)
+  const unsigned number_of_events,
+  const offsets_and_sizes_t offsets_and_sizes,
+  const shifts_and_scales_t shifts_and_scales)
 {
   for (unsigned event_number = blockIdx.x * blockDim.x + threadIdx.x; event_number < number_of_events;
        event_number += blockDim.x * gridDim.x) {
@@ -114,11 +111,11 @@ __global__ void velo_lumi_counters::velo_lumi_counters(
     for (unsigned info_index = 0u; info_index < Lumi::Constants::n_velo_counters; ++info_index) {
       fillLumiInfo(
         parameters.dev_lumi_infos[info_offset + info_index],
-        parameters.velo_offsets_and_sizes.get()[info_index * 2],
-        parameters.velo_offsets_and_sizes.get()[info_index * 2 + 1],
+        offsets_and_sizes[info_index * 2],
+        offsets_and_sizes[info_index * 2 + 1],
         velo_counters[info_index],
-        parameters.velo_shifts_and_scales.get()[2 * info_index],
-        parameters.velo_shifts_and_scales.get()[2 * info_index + 1]);
+        shifts_and_scales[2 * info_index],
+        shifts_and_scales[2 * info_index + 1]);
     }
   }
 }
diff --git a/device/selections/CMakeLists.txt b/device/selections/CMakeLists.txt
index f70a09fe11824cd93932ae1ee8b1cdcf18b6f16d..eeeaea091da2db89b0be67719f640892347b216a 100755
--- a/device/selections/CMakeLists.txt
+++ b/device/selections/CMakeLists.txt
@@ -19,21 +19,26 @@ else()
   )
 endif()
 
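+# Header-only INTERFACE target so the Hlt1 and line-type headers can be used
+# (and installed with the Allen export) without linking against Selections.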
+add_library(SelectionsHeaders INTERFACE)
+target_include_directories(SelectionsHeaders INTERFACE
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/Hlt1/include>
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/line_types/include>)
+install(TARGETS SelectionsHeaders EXPORT Allen)
+
 target_link_libraries(Selections PRIVATE
   AllenCommon
   Backend
   HostEventModel
-  HostCommon
   EventModel
   Utils
   LHCbEvent
   Kalman
   VertexFitter
-  extern_lines)
+  extern_lines
+  SelectionsHeaders)
 
 target_include_directories(Selections PUBLIC
   $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/configuration/sequences>
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/Hlt1/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/filters/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/calibration/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/monitoring/include>
@@ -43,5 +48,4 @@ target_include_directories(Selections PUBLIC
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/electron/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/photon/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/SMOG2/include>
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/heavy_ions/include>
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/line_types/include>)
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/lines/heavy_ions/include>)
diff --git a/device/selections/Hlt1/include/DecReporter.cuh b/device/selections/Hlt1/include/DecReporter.cuh
index 207e10c33f3823f7aaca52fb81335c60b7952a9a..5eef43d92434011b55220ad1e546555bb9dcca12 100644
--- a/device/selections/Hlt1/include/DecReporter.cuh
+++ b/device/selections/Hlt1/include/DecReporter.cuh
@@ -35,7 +35,7 @@ namespace dec_reporter {
   private:
     Property<block_dim_t> m_block_dim {this, {{64, 1, 1}}};
     Property<tck_t> m_tck {this, 0};
-    Property<encoding_key_t> m_key {this, 0xDEADBEEF}; // FIXME
+    Property<encoding_key_t> m_key {this, 0xDEADBEEF};
     Property<task_id_t> m_taskID {this, 1};
   };
 } // namespace dec_reporter
diff --git a/device/utils/CMakeLists.txt b/device/utils/CMakeLists.txt
index ef109fab9564cde554f20ff50012a891fbbaa0a2..7405eea0c08bf24300c8b4f46d6b13b110f82554 100644
--- a/device/utils/CMakeLists.txt
+++ b/device/utils/CMakeLists.txt
@@ -4,19 +4,36 @@
 file(GLOB prefix_sum "prefix_sum/src/*cu")
 file(GLOB get_type_id "get_type_id/src/*cu")
 
-allen_add_device_library(Utils STATIC
-  ${prefix_sum}
-  ${get_type_id}
-)
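+# The header-only device utilities move to an INTERFACE target so that host
+# code can use them without linking the compiled Utils device library.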
+add_library(UtilsHeaders INTERFACE)
 
-target_link_libraries(Utils PRIVATE Backend EventModel HostEventModel)
+target_link_libraries(UtilsHeaders
+  INTERFACE
+    Backend
+    EventModel
+    HostEventModel)
 
-target_include_directories(Utils PUBLIC
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/prefix_sum/include>
+target_include_directories(UtilsHeaders INTERFACE
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/binary_search/include>
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/get_type_id/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/sorting/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/float_operations/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/memory/include>
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/scaler/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/tuple_operations/include>
   $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/warp/include>)
+
+allen_add_device_library(Utils STATIC
+  ${prefix_sum}
+  ${get_type_id}
+)
+
+target_link_libraries(Utils
+  PUBLIC
+    UtilsHeaders
+  PRIVATE
+    Backend
+    EventModel
+    HostEventModel)
+
+target_include_directories(Utils PUBLIC
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/prefix_sum/include>
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/get_type_id/include>)
diff --git a/device/selections/Hlt1/include/DeterministicScaler.cuh b/device/utils/scaler/include/DeterministicScaler.cuh
similarity index 100%
rename from device/selections/Hlt1/include/DeterministicScaler.cuh
rename to device/utils/scaler/include/DeterministicScaler.cuh
diff --git a/doc/setup/run_allen.rst b/doc/setup/run_allen.rst
index 5474aa95c968056bb0ba368f3c37fc0f43f8ea39..0ace1700c38a6ba78d1407b9649435d59cc0a2eb 100644
--- a/doc/setup/run_allen.rst
+++ b/doc/setup/run_allen.rst
@@ -33,7 +33,6 @@ A run of the Allen program with the help option `-h` will let you know the basic
      -v, --verbosity {verbosity [0-5]}=3 (info)
      -p, --print-memory {print memory usage}=0
      --sequence {sequence to run}
-     --run-from-json {run from json configuration file}=0
      --output-file {Write selected event to output file}
      --device {select device to use}=0
      --non-stop {Runs the program indefinitely}=0
@@ -94,7 +93,7 @@ Use Gaudi to update non-event data such as alignment and configuration constants
 When using MDF files as input, call from the Allen environment::
 
   ./Allen/build.${ARCHITECTURE}/run python Dumpers/BinaryDumpers/options/allen.py --mdf Allen/input/minbias/mdf/MiniBrunel_2018_MinBias_FTv4_DIGI_retinacluster_v1.mdf
- 
+
 When using MEP files as input, call from the MooreOnline environment, as MEP handling is implemented there::
 
   ./MooreOnline/build.${ARCHITECTURE}/run python Allen/Dumpers/BinaryDumpers/options/allen.py --sequence=Allen/InstallArea/${ARCHITECTURE}/constants/hlt1_pp_default.json --tags="dddb_tag,simcond_tag" --mep mep_file.mep
diff --git a/host/dummy_maker/CMakeLists.txt b/host/dummy_maker/CMakeLists.txt
index 1386ccdee5984a3e9318bc03e5230e3a32db229a..2d2f6c1e9b3e19f4be436ccb8bd9c693406f8cf4 100644
--- a/host/dummy_maker/CMakeLists.txt
+++ b/host/dummy_maker/CMakeLists.txt
@@ -7,6 +7,6 @@ allen_add_host_library(HostDummyMaker STATIC
   ${host_dummy_maker}
 )
 
-target_link_libraries(HostDummyMaker PUBLIC Selections Lumi HostEventModel EventModel Gear AllenCommon Backend)
+target_link_libraries(HostDummyMaker PRIVATE UtilsHeaders Selections Lumi HostEventModel EventModel Gear AllenCommon Backend)
 
-target_include_directories(HostDummyMaker PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
+target_include_directories(HostDummyMaker PRIVATE include)
diff --git a/integration/non_event_data/CMakeLists.txt b/integration/non_event_data/CMakeLists.txt
index ebdeca5cf5d3149643a0d9db76b6f90baebcb602..85c6642e2d53150865c1b8bf0170bd260ab1c3af 100644
--- a/integration/non_event_data/CMakeLists.txt
+++ b/integration/non_event_data/CMakeLists.txt
@@ -4,18 +4,23 @@
 file(GLOB SOURCES "src/*.cpp")
 
 allen_add_host_library(NonEventData STATIC ${SOURCES})
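+# INTERFACE target for the NonEventData and BinaryDumpers headers, exported so
+# dependents can use the headers without the static NonEventData library.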
+add_library(NonEventDataHeaders INTERFACE)
+target_include_directories(NonEventDataHeaders INTERFACE
+  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/Dumpers/BinaryDumpers/include>
+  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>)
+install(TARGETS NonEventDataHeaders EXPORT Allen)
 
-target_link_libraries(NonEventData PRIVATE
-  HostClustering
-  Backend
-  AllenCommon
-  EventModel
-  Gear
-  Backend
-  MuonCommon
-  UTCommon
-  nlohmann_json::nlohmann_json)
-
-target_include_directories(NonEventData PUBLIC
-  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
-  $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/Dumpers/BinaryDumpers/include>)
+target_link_libraries(NonEventData
+  PRIVATE
+    HostClustering
+    AllenRuntime
+    AllenCommon
+    HostCommon
+    EventModel
+    Gear
+    MuonCommon
+    UTCommon
+    nlohmann_json::nlohmann_json
+  PUBLIC
+    NonEventDataHeaders
+  )
diff --git a/main/include/Allen.h b/main/include/Allen.h
index d4576d3fb2144d5d9e491b84be2fadb760d0f3a9..234ba14adf3bec2977b7df25210d2ef30708537b 100644
--- a/main/include/Allen.h
+++ b/main/include/Allen.h
@@ -15,6 +15,7 @@ struct Constants;
 
 int allen(
   std::map<std::string, std::string> options,
+  std::string_view configuration,
   Allen::NonEventData::IUpdater* updater,
   std::shared_ptr<IInputProvider> input_provider,
   OutputHandler* output_handler,
diff --git a/main/include/Configuration.h b/main/include/Configuration.h
index 223e7d6497ff70e6a1c7ba42a3dae2816b9e44a1..fe3486b286941751e0d51ba3c8a32f2ddb94c714 100644
--- a/main/include/Configuration.h
+++ b/main/include/Configuration.h
@@ -13,6 +13,7 @@
 #include <stdexcept>
 #include <vector>
 #include <string>
+#include <map>
 
 struct ConfiguredAlgorithm {
   std::string id;
diff --git a/main/include/InputReader.h b/main/include/InputReader.h
index 1cfb3a53d5cef2ae3847d51305ac3ef9b8f1f59b..f52979a0e11efb66f263bf6f1edf5057a3e152e0 100644
--- a/main/include/InputReader.h
+++ b/main/include/InputReader.h
@@ -76,7 +76,7 @@ private:
 };
 
 struct ConfigurationReader {
-  ConfigurationReader(const std::string& file_name);
+  ConfigurationReader(std::string_view configuration);
   ConfigurationReader(const std::map<std::string, std::map<std::string, nlohmann::json>>& params) : m_params(params) {}
 
   std::map<std::string, nlohmann::json> params(std::string key) const
diff --git a/main/include/Provider.h b/main/include/Provider.h
index 62b800907def94c9fe02695e8a9d8c1b8e594968..97c791b3bf49ac47f16c6118ae32f971aeaf3a4f 100644
--- a/main/include/Provider.h
+++ b/main/include/Provider.h
@@ -34,14 +34,17 @@ namespace Allen {
 
   std::tuple<bool, bool> velo_decoding_type(const ConfigurationReader& configuration_reader);
 
-  std::tuple<std::string, bool> sequence_conf(std::map<std::string, std::string> const& options);
+  std::string sequence_conf(std::map<std::string, std::string> const& options);
 
-  std::shared_ptr<IInputProvider> make_provider(std::map<std::string, std::string> const& options);
+  std::shared_ptr<IInputProvider> make_provider(
+    std::map<std::string, std::string> const& options,
+    std::string_view configuration);
 
   std::unique_ptr<OutputHandler> output_handler(
     IInputProvider* input_provider,
     IZeroMQSvc* zmq_svc,
-    std::map<std::string, std::string> const& options);
+    std::map<std::string, std::string> const& options,
+    std::string_view configuration);
 
   Allen::IOConf io_configuration(
     unsigned number_of_slices,
diff --git a/main/src/Allen.cpp b/main/src/Allen.cpp
index 700f4ff8294089b4a552445f6d7123d701a1a6b1..1f1f87d0cc912e058129ffb313f5c6dabfd6ea83 100644
--- a/main/src/Allen.cpp
+++ b/main/src/Allen.cpp
@@ -85,6 +85,7 @@ namespace {
  */
 int allen(
   std::map<std::string, std::string> options,
+  std::string_view config,
   Allen::NonEventData::IUpdater* updater,
   std::shared_ptr<IInputProvider> input_provider,
   OutputHandler* output_handler,
@@ -207,7 +208,6 @@ int allen(
   logger::setVerbosity(verbosity);
 
   auto io_conf = Allen::io_configuration(n_slices, n_repetitions, number_of_threads);
-  auto const [json_configuration_file, run_from_json] = Allen::sequence_conf(options);
 
   // Set device for main thread
   auto [device_set, device_name, device_memory_alignment] = Allen::set_device(device_id, 0);
@@ -240,7 +240,7 @@ int allen(
   }
   //
   // Load constant parameters from JSON
-  configuration_reader = std::make_unique<ConfigurationReader>(json_configuration_file);
+  configuration_reader = std::make_unique<ConfigurationReader>(config);
 
   // Get the path to the parameter folder: different for standalone and Gaudi build
   // Only in the standalone gitlab CI pipeline is the parameters folder path passed as a runtime argument
@@ -345,10 +345,8 @@ int allen(
     sequence->configure_algorithms(configuration);
   }
 
-  if (run_from_json) {
-    // Print configured sequence
-    streams.front()->print_configured_sequence();
-  }
+  // Print configured sequence
+  streams.front()->print_configured_sequence();
 
   // Interrogate stream configured sequence for validation algorithms
   const auto sequence_contains_validation_algorithms = streams.front()->contains_validation_algorithms();
diff --git a/main/src/BankTypes.cpp b/main/src/BankTypes.cpp
index 57690c30aa64159b9ca1dcba1328469cc57dadd4..9b503915e814f355bff6ced0abfc93571b95a928 100644
--- a/main/src/BankTypes.cpp
+++ b/main/src/BankTypes.cpp
@@ -10,7 +10,6 @@
 
 namespace {
   const std::map<std::string, BankTypes> BankNames = {{"VP", BankTypes::VP},
-                                                      {"VPRetinaCluster", BankTypes::VP},
                                                       {"UT", BankTypes::UT},
                                                       {"FTCluster", BankTypes::FT},
                                                       {"Muon", BankTypes::MUON},
diff --git a/main/src/InputReader.cpp b/main/src/InputReader.cpp
index d7d22700873165addbfdd57d466f6358fcb31010..91487e6a8479966242862fef6054175ef0536793 100644
--- a/main/src/InputReader.cpp
+++ b/main/src/InputReader.cpp
@@ -117,14 +117,9 @@ TwoTrackMVAModelReader::TwoTrackMVAModelReader(const std::string& file_name)
   m_n_layers = m_layer_sizes.size();
 }
 
-ConfigurationReader::ConfigurationReader(const std::string& file_name)
+ConfigurationReader::ConfigurationReader(std::string_view configuration)
 {
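+  // The configuration arrives as an in-memory JSON string (see
+  // Allen::sequence_conf) instead of a path to a JSON file.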
-  if (!exists_test(file_name)) {
-    throw StrException("Configuration JSON file " + file_name + " does not exist.");
-  }
-  std::ifstream i(file_name);
-  nlohmann::json j;
-  i >> j;
+  nlohmann::json j = nlohmann::json::parse(configuration);
   for (auto& el : j.items()) {
     std::string component = el.key();
     if (component == "sequence") {
@@ -174,9 +169,15 @@ std::map<std::string, nlohmann::json> ConfigurationReader::get_sequence() const
 
 void ConfigurationReader::save(std::string file_name)
 {
-  nlohmann::json j(m_params);
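+  // Use float (rather than the default double) as the JSON floating-point
+  // type so floating-point properties are dumped at single precision.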
+  using json_float = nlohmann::basic_json<std::map, std::vector, std::string, bool, std::int32_t, std::uint32_t, float>;
+  json_float j;
+  for (auto const& [alg, props] : m_params) {
+    for (auto const& [k, v] : props) {
+      j[alg][k] = v;
+    }
+  }
   std::ofstream o(file_name);
-  o << j.dump(4);
+  o << std::setw(4) << j;
   o.close();
 }
 
diff --git a/main/src/ProgramOptions.cpp b/main/src/ProgramOptions.cpp
index b2327f6741640700cddc53e600f3eff352c3275d..3e65b953585c946f00ccc7b3f5f3984d07e3f65d 100644
--- a/main/src/ProgramOptions.cpp
+++ b/main/src/ProgramOptions.cpp
@@ -53,7 +53,6 @@ std::vector<ProgramOption> allen_program_options()
     {{"v", "verbosity"}, "verbosity [0-5]", "3", "info"},
     {{"p", "print-memory"}, "print memory usage", "0"},
     {{"sequence"}, "sequence to run", ""},
-    {{"run-from-json"}, "run from json configuration file", "0"},
     {{"output-file"}, "Write selected event to output file", ""},
     {{"output-batch-size"}, "Write output in batches of N events", "10"},
     {{"device"}, "select device to use", "0"},
diff --git a/main/src/Provider.cpp b/main/src/Provider.cpp
index 52c5f5a61d264bb1f7f56341f674171fd1bdd141..5e9cb433ca5b3688d0419830dfc38318df9d8158 100644
--- a/main/src/Provider.cpp
+++ b/main/src/Provider.cpp
@@ -2,6 +2,9 @@
  * (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration      *
 \*****************************************************************************/
 #include <string>
+#include <iostream>
+#include <fstream>
+#include <regex>
 
 #include <MDFProvider.h>
 #include <Provider.h>
@@ -14,6 +17,10 @@
 #include <FileSystem.h>
 #include <InputReader.h>
 
+#ifndef ALLEN_STANDALONE
+#include <TCK.h>
+#endif
+
 std::tuple<bool, bool> Allen::velo_decoding_type(const ConfigurationReader& configuration_reader)
 {
   bool veloSP = false;
@@ -32,53 +39,82 @@ std::tuple<bool, bool> Allen::velo_decoding_type(const ConfigurationReader& conf
   return {veloSP, retina};
 }
 
-std::tuple<std::string, bool> Allen::sequence_conf(std::map<std::string, std::string> const& options)
+std::string Allen::sequence_conf(std::map<std::string, std::string> const& options)
 {
   static bool generated = false;
   std::string json_configuration_file = "Sequence.json";
   // Sequence to run
   std::string sequence = "hlt1_pp_default";
 
-  bool run_from_json = false;
-
   for (auto const& entry : options) {
     auto [flag, arg] = entry;
     if (flag_in(flag, {"sequence"})) {
       sequence = arg;
     }
-    else if (flag_in(flag, {"run-from-json"})) {
-      run_from_json = atoi(arg.c_str());
-    }
   }
 
-  // Determine configuration
-  if (run_from_json) {
-    if (fs::exists(sequence)) {
-      json_configuration_file = sequence;
+  std::regex tck_option {"([^:]+):(0x[a-fA-F0-9]{8})"};
+  std::smatch tck_match;
+  if (std::regex_match(sequence, tck_match, tck_option)) {
+#ifndef ALLEN_STANDALONE
+
+    auto repo = tck_match.str(1);
+    auto tck = tck_match.str(2);
+    std::string config;
+    LHCb::TCK::Info info;
+    try {
+      std::tie(config, info) = Allen::sequence_from_git(repo, tck);
+    } catch (std::runtime_error const& e) {
+      throw std::runtime_error {"Failed to obtain sequence for TCK " + tck + " from repository at " + repo + ":" +
+                                e.what()};
+    }
+
+    auto [check, check_error] = Allen::TCK::check_projects(nlohmann::json::parse(info.metadata));
+
+    if (config.empty()) {
+      throw std::runtime_error {"Failed to obtain sequence for TCK " + tck + " from repository at " + repo};
     }
-    else {
-      json_configuration_file = sequence + ".json";
+    else if (!check) {
+      throw std::runtime_error {std::string {"TCK "} + tck + ": " + check_error};
     }
+    info_cout << "TCK " << tck << " loaded " << info.type << " sequence from git with label " << info.label << "\n";
+    return config;
+#else
+    throw std::runtime_error {"Loading configuration from TCK is not supported in standalone builds"};
+#endif
   }
-  else if (!generated) {
+  else {
+    // Determine configuration
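+    // A sequence that already ends in ".json" is used directly as the
+    // configuration file; otherwise the JSON is generated from the Python
+    // sequence definition.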
+    if (sequence.size() > 5 && sequence.substr(sequence.size() - 5, std::string::npos) == ".json") {
+      json_configuration_file = sequence;
+    }
+    else if (!generated) {
 #ifdef ALLEN_STANDALONE
-    const std::string allen_configuration_options = "--no-register-keys";
+      const std::string allen_configuration_options = "--no-register-keys";
 #else
-    const std::string allen_configuration_options = "";
+      const std::string allen_configuration_options = "";
 #endif
 
-    int error = system(
-      ("PYTHONPATH=code_generation/sequences:$PYTHONPATH python3 ../configuration/python/AllenCore/gen_allen_json.py " +
-       allen_configuration_options + " --seqpath ../configuration/python/AllenSequences/" + sequence + ".py ")
-        .c_str());
-    if (error) {
-      throw std::runtime_error("sequence generation failed");
+      int error = system(("PYTHONPATH=code_generation/sequences:$PYTHONPATH python3 "
+                          "../configuration/python/AllenCore/gen_allen_json.py " +
+                          allen_configuration_options + " --seqpath ../configuration/python/AllenSequences/" +
+                          sequence + ".py > /dev/null")
+                           .c_str());
+      if (error) {
+        throw std::runtime_error {"sequence generation failed"};
+      }
+      info_cout << "\n";
+      generated = true;
     }
-    info_cout << "\n";
-    generated = true;
-  }
 
-  return {json_configuration_file, run_from_json};
+    std::ifstream config_file {json_configuration_file};
+    if (!config_file.is_open()) {
+      throw std::runtime_error {"failed to open sequence configuration file " + json_configuration_file};
+    }
+
+    return std::string {std::istreambuf_iterator<char> {config_file}, std::istreambuf_iterator<char> {}};
+  }
 }
 
 Allen::IOConf Allen::io_configuration(
@@ -118,7 +154,9 @@ Allen::IOConf Allen::io_configuration(
   return io_conf;
 }
 
-std::shared_ptr<IInputProvider> Allen::make_provider(std::map<std::string, std::string> const& options)
+std::shared_ptr<IInputProvider> Allen::make_provider(
+  std::map<std::string, std::string> const& options,
+  std::string_view configuration)
 {
 
   unsigned number_of_slices = 0;
@@ -193,8 +231,7 @@ std::shared_ptr<IInputProvider> Allen::make_provider(std::map<std::string, std::
   setenv("CUDA_DEVICE_MAX_CONNECTIONS", std::to_string(cuda_device_max_connections).c_str(), 1);
 #endif
 
-  auto const [json_file, run_from_json] = Allen::sequence_conf(options);
-  ConfigurationReader configuration_reader {json_file};
+  ConfigurationReader configuration_reader {configuration};
 
   auto io_conf = io_configuration(number_of_slices, n_repetitions, number_of_threads, true);
 
@@ -254,11 +291,11 @@ std::shared_ptr<IInputProvider> Allen::make_provider(std::map<std::string, std::
 std::unique_ptr<OutputHandler> Allen::output_handler(
   IInputProvider* input_provider,
   IZeroMQSvc* zmq_svc,
-  std::map<std::string, std::string> const& options)
+  std::map<std::string, std::string> const& options,
+  std::string_view config)
 {
   std::string output_file;
   size_t output_batch_size = 10;
-  auto const [json_file, run_from_json] = Allen::sequence_conf(options);
 
   for (auto const& entry : options) {
     auto const [flag, arg] = entry;
@@ -277,7 +314,7 @@ std::unique_ptr<OutputHandler> Allen::output_handler(
 
   // Load constant parameters from JSON
   size_t n_lines = 0;
-  ConfigurationReader configuration_reader {json_file};
+  ConfigurationReader configuration_reader {config};
   auto const& configuration = configuration_reader.params();
   auto conf_it = configuration.find("gather_selections");
   if (conf_it != configuration.end()) {
diff --git a/main/src/RegisterConsumers.cpp b/main/src/RegisterConsumers.cpp
index c3983fac8a29213c09d17f80589608480854b527..265f5340fe01b9e1b20202b28e8a2b36280b4b01 100644
--- a/main/src/RegisterConsumers.cpp
+++ b/main/src/RegisterConsumers.cpp
@@ -1,8 +1,9 @@
 /*****************************************************************************\
 * (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration      *
 \*****************************************************************************/
-#include "RegisterConsumers.h"
-#include "Common.h"
+#include <RegisterConsumers.h>
+#include <Common.h>
+#include <Updater.h>
 
 /**
  * @brief      Register all consumers of non-event data
@@ -86,3 +87,12 @@ void register_consumers(
     updater->registerConsumer<id_t>(std::get<1>(c)());
   });
 }
+
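+/**
+ * @brief      Hand out a lazily-constructed, process-wide binary (file-based)
+ *             non-event-data updater; repeated calls return the same instance.
+ */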
+Allen::NonEventData::IUpdater* binary_updater(std::map<std::string, std::string> const& options)
+{
+  static std::unique_ptr<Allen::NonEventData::IUpdater> updater;
+  if (!updater) {
+    updater = std::make_unique<Allen::NonEventData::Updater>(options);
+  }
+  return updater.get();
+}
diff --git a/main/src/main.cpp b/main/src/main.cpp
index dfb79e146407b64c5b6d4b4815f95ce560e428d7..1e50b7c821ed35da38a3bb600ff8e52fe950473a 100644
--- a/main/src/main.cpp
+++ b/main/src/main.cpp
@@ -2,7 +2,7 @@
 * (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration      *
 \*****************************************************************************/
 /**
- *      CUDA HLT1
+ *      LHCb GPU HLT1 Demonstrator
  *
  *      author  -  GPU working group
  *      e-mail  -  lhcb-parallelization@cern.ch
@@ -122,9 +122,15 @@ int main(int argc, char* argv[])
 
   auto zmqSvc = makeZmqSvc();
 
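+  // Resolve the --sequence option (JSON file, generated sequence, or TCK) to
+  // a single JSON configuration string shared by all components below.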
+  auto configuration = Allen::sequence_conf(allen_options);
+
   Allen::NonEventData::Updater updater {allen_options};
-  auto input_provider = Allen::make_provider(allen_options);
-  auto output_handler = Allen::output_handler(input_provider.get(), zmqSvc, allen_options);
+
+  auto input_provider = Allen::make_provider(allen_options, configuration);
   if (!input_provider) return -1;
-  return allen(std::move(allen_options), &updater, std::move(input_provider), output_handler.get(), zmqSvc, "");
+
+  auto output_handler = Allen::output_handler(input_provider.get(), zmqSvc, allen_options, configuration);
+
+  return allen(
+    std::move(allen_options), configuration, &updater, std::move(input_provider), output_handler.get(), zmqSvc, "");
 }
diff --git a/mdf/CMakeLists.txt b/mdf/CMakeLists.txt
index 69248dfd6fbfd124cb3565a2d40ddbef058e2629..b387303b207a688e5372d5ae070ce52003e088ac 100644
--- a/mdf/CMakeLists.txt
+++ b/mdf/CMakeLists.txt
@@ -41,7 +41,11 @@ target_include_directories (mdf PUBLIC
   $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/main/include>)
 target_link_libraries(mdf PUBLIC LHCbEvent)
 
-set(lhcb_public_headers include/write_mdf.hpp include/read_mdf.hpp include/mdf_header.hpp daq40/sourceid.h)
+set(lhcb_public_headers
+  include/write_mdf.hpp
+  include/read_mdf.hpp
+  include/mdf_header.hpp
+  daq40/sourceid.h)
 target_include_directories(LHCbEvent INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/daq40>)
 
 if (STANDALONE)
@@ -52,16 +56,28 @@ if (STANDALONE)
     ${PROJECT_BINARY_DIR}/sequences/Gaudi/GaudiKernel/include)
   target_compile_definitions(LHCbEvent INTERFACE ODIN_WITHOUT_GAUDI)
 else()
-  target_link_libraries(mdf PUBLIC Boost::filesystem Boost::thread Boost::regex)
-  target_link_libraries(LHCbEvent INTERFACE Gaudi::GaudiKernel LHCb::DAQEventLib LHCb::LumiEventLib)
+  target_link_libraries(mdf
+    PUBLIC
+      Boost::filesystem
+      Boost::thread
+      Boost::regex)
+  target_link_libraries(LHCbEvent
+    INTERFACE
+      Gaudi::GaudiKernel
+      LHCb::DAQEventLib
+      LHCb::LumiEventLib)
 endif()
 
 set_property(TARGET LHCbEvent PROPERTY PUBLIC_HEADER ${lhcb_public_headers})
 install(TARGETS LHCbEvent EXPORT Allen
         PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/Allen)
 
-target_link_libraries(
-  mdf PRIVATE Gear Backend AllenCommon LHCbEvent)
+target_link_libraries(mdf
+  PRIVATE
+    Gear
+    Backend
+    AllenCommon
+    LHCbEvent)
 
 # These tests don't need CUDA
 remove_definitions(-DTARGET_DEVICE_CUDA)
@@ -74,7 +90,14 @@ function(test_program)
 
   find_package(Threads REQUIRED)
 
-  target_link_libraries(${test_name} PRIVATE Transpose Threads::Threads EventModel AllenZMQ Catch2::Catch2 Boost::program_options)
+  target_link_libraries(${test_name}
+    PRIVATE
+      Transpose
+      Threads::Threads
+      EventModel
+      AllenZMQ
+      Catch2::Catch2
+      Boost::program_options)
   target_compile_definitions(${test_name} PRIVATE ODIN_WITHOUT_GAUDI)
   if (NOT STANDALONE)
     find_package(fmt REQUIRED)
@@ -90,7 +113,15 @@ if (BUILD_TESTING)
     ${PROJECT_SOURCE_DIR}/main/src/Provider.cpp
     ${PROJECT_SOURCE_DIR}/main/src/ZMQOutputSender.cpp)
   target_compile_definitions(Transpose PRIVATE ODIN_WITHOUT_GAUDI)
-  target_link_libraries(Transpose PUBLIC HostCommon Backend mdf LHCbEvent AllenZMQ EventModel AllenFS)
+  target_link_libraries(Transpose
+    PUBLIC
+      HostCommon
+      Backend
+      mdf
+      LHCbEvent
+      AllenZMQ
+      EventModel
+      AllenFS)
 
   test_program(NAME mdf_test_read SOURCE test/test_read.cpp)
   test_program(NAME mdf_bench_read SOURCE test/bench_read.cpp)
@@ -100,6 +131,7 @@ if (BUILD_TESTING)
   if (NOT STANDALONE)
     test_program(NAME mep_test_banks SOURCE test/test_mep_banks.cpp)
     target_link_libraries(allen_mep_test_banks PRIVATE Gaudi::GaudiKernel)
+    target_link_libraries(Transpose PUBLIC Configuration)
   endif()
 
 endif()
diff --git a/mdf/test/test_mep_banks.cpp b/mdf/test/test_mep_banks.cpp
index 7905cd4aa47911a02cc236774a40ed07f86aa96a..dce64373fd6ce30a17c73b26601cabb67461a4af 100644
--- a/mdf/test/test_mep_banks.cpp
+++ b/mdf/test/test_mep_banks.cpp
@@ -77,7 +77,7 @@ namespace Allen {
   }
 } // namespace Allen
 
-fs::path write_json(std::unordered_set<BankTypes> const& bank_types, bool velo_sp)
+fs::path write_json(std::unordered_set<BankTypes> const& bank_types, bool velo_sp, bool transpose)
 {
 
   // Write a JSON file that can be fed to AllenConfiguration to
@@ -96,7 +96,7 @@ fs::path write_json(std::unordered_set<BankTypes> const& bank_types, bool velo_s
   }
   bank_types_json["sequence"]["configured_algorithms"] = configured_algorithms;
 
-  auto bt_filename = fs::canonical(fs::current_path()) / "bank_types.json";
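+  // Suffix the file name per transpose mode so the transposed and
+  // non-transposed configurations do not overwrite each other.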
+  auto bt_filename = fs::canonical(fs::current_path()) / ("bank_types"s + (transpose ? "_transpose" : "") + ".json");
   std::ofstream bt_json(bt_filename.string());
   if (!bt_json.is_open()) {
     std::cerr << "Failed to open json file for bank types configuration"
@@ -204,7 +204,7 @@ int main(int argc, char* argv[])
         s_config.sds.emplace(bt);
       }
     }
-    auto json_file = write_json(s_config.sds, velo_sp);
+    auto json_file = write_json(s_config.sds, velo_sp, s_config.transpose_mep);
 
     // Allocate providers and get slices
     std::map<std::string, std::string> options = {{"s", std::to_string(s_config.n_slices)},
@@ -212,11 +212,11 @@ int main(int argc, char* argv[])
                                                   {"v", std::to_string(s_config.debug ? 4 : 3)},
                                                   {"mdf", s_config.mdf_files},
                                                   {"sequence", json_file.string()},
-                                                  {"run-from-json", "1"},
                                                   {"events-per-slice", std::to_string(s_config.eps)},
                                                   {"disable-run-changes", "1"}};
 
-    mdf = Allen::make_provider(options);
+    auto configuration = Allen::sequence_conf(options);
+    mdf = Allen::make_provider(options, configuration);
     if (!mdf) {
       std::cerr << "Failed to obtain MDFProvider\n";
       return 1;
diff --git a/scripts/ci/config/common-build.yaml b/scripts/ci/config/common-build.yaml
index 03c3f550b3bab40855555afc6739589a46d273c8..99576dde6f3d77cd605b41fb0087ac864a8ec241 100644
--- a/scripts/ci/config/common-build.yaml
+++ b/scripts/ci/config/common-build.yaml
@@ -18,11 +18,11 @@
       # build jobs (with tests)
       - LCG_SYSTEM:
           - "x86_64_v3-el9-gcc12" # FIXME gcc12->clang12 (?)
-        LCG_OPTIMIZATION: 
+        LCG_OPTIMIZATION:
           - "opt+g"
         OPTIONS:
           - BUILD_TESTING+ENABLE_CONTRACTS+TREAT_WARNINGS_AS_ERRORS
-      
+
       - LCG_SYSTEM: "x86_64_v3-el9-gcc12"
         LCG_OPTIMIZATION: "dbg"
         LCG_QUALIFIER: "cuda12_1"
@@ -50,7 +50,9 @@
       - input
       - build*/external/ParamFiles/*
       - build*/*Allen*
-      - build*/sequences/libStream_*.so
+      - build*/libHostCommon.so
+      - build*/zmq/libAllenZMQ.so
+      - build*/integration/non_event_data/libNonEventData.so
       - build*/*.json
       - build*/CTestTestfile.cmake
       - build*/test/unit_tests/unit_tests
diff --git a/scripts/ci/jobs/run_efficiency_throughput.sh b/scripts/ci/jobs/run_efficiency_throughput.sh
index a0cd60d7002c100280575f6de7a9fd6cd11a790e..c95528df3091044cf9813a7d7c80ea8bc0993337 100644
--- a/scripts/ci/jobs/run_efficiency_throughput.sh
+++ b/scripts/ci/jobs/run_efficiency_throughput.sh
@@ -26,14 +26,14 @@ if [ "${RUN_THROUGHPUT}" != "NO_THROUGHPUT" ]; then
     fi
     # overwrite GEOMETRY if RUN_THROUGHPUT_GEOMETRY defined
     if [ ! -z ${RUN_THROUGHPUT_GEOMETRY+x} ]; then
-        GEOMETRY="${RUN_THROUGHPUT_GEOMETRY}"   
+        GEOMETRY="${RUN_THROUGHPUT_GEOMETRY}"
     # else
     #     echo "RUN_THROUGHPUT_GEOMETRY not set - abort throughput test"
     #     exit 1
     fi
     # overwrite DATA_TAG if RUN_THROUGHPUT_DATA_TAG defined
     if [ ! -z ${RUN_THROUGHPUT_DATA_TAG+x} ]; then
-        DATA_TAG="${RUN_THROUGHPUT_DATA_TAG}"   
+        DATA_TAG="${RUN_THROUGHPUT_DATA_TAG}"
     else
         echo "RUN_THROUGHPUT_DATA_TAG not set - abort throughput test"
         exit 1
@@ -50,7 +50,7 @@ if [ "${RUN_THROUGHPUT}" != "NO_THROUGHPUT" ]; then
     RUN_OPTIONS="$RUN_OPTIONS -g /scratch/allen_geometries/${GEOMETRY}"
     fi
 
-    RUN_OPTIONS="--mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}  --run-from-json 1 --params external/ParamFiles/ ${RUN_OPTIONS}"
+    RUN_OPTIONS="--mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}.json --params external/ParamFiles/ ${RUN_OPTIONS}"
 
     set -euxo pipefail
     OUTPUT_FOLDER_REL="${TEST_NAME}_output_${SEQUENCE}_${DATA_TAG}${OPTIONS}/${DEVICE_ID}"
@@ -164,7 +164,7 @@ if [ "${RUN_THROUGHPUT}" != "NO_THROUGHPUT" ]; then
     # write metric to display on MR
     echo "throughput_kHz{device=\"${DEVICE_ID}\",sequence=\"${SEQUENCE}\",dataset=\"${DATA_TAG}\"} ${THROUGHPUT_KHZ}" >> "${OUTPUT_FOLDER}/metrics.txt"
 
-    if [ "${TPUT_REPORT}" = "NO_REPORT" ]; then 
+    if [ "${TPUT_REPORT}" = "NO_REPORT" ]; then
     echo "TPUT_REPORT is set to ${TPUT_REPORT} - throughput will not be reported."
 
     touch "${OUTPUT_FOLDER}/no_throughput_report.txt"
@@ -182,28 +182,28 @@ if [ "${RUN_EFFICIENCY}" != "NO_EFFICIENCY" ]; then
 
     check_build_exists
 
-    EFF_RUN_OPTIONS="-n 10000 -m 1100 --run-from-json 1"
+    EFF_RUN_OPTIONS="-n 10000 -m 1100"
 
     # Configure the input files (--mdf) and geometry (-g)
     set +x; set +u
 
     # overwrite SEQUENCE if RUN_EFFICIENCY_SEQUENCE defined
     if [ ! -z ${RUN_EFFICIENCY_SEQUENCE+x} ]; then
-        SEQUENCE="${RUN_EFFICIENCY_SEQUENCE}"   
+        SEQUENCE="${RUN_EFFICIENCY_SEQUENCE}"
     else
         echo "RUN_EFFICIENCY_SEQUENCE not set - abort efficiency test"
         exit 1
     fi
     # overwrite GEOMETRY if RUN_EFFICIENCY_GEOMETRY defined
     if [ ! -z ${RUN_EFFICIENCY_GEOMETRY+x} ]; then
-        GEOMETRY="${RUN_EFFICIENCY_GEOMETRY}"   
+        GEOMETRY="${RUN_EFFICIENCY_GEOMETRY}"
     # else
     #     echo "RUN_EFFICIENCY_GEOMETRY not set - abort efficiency test"
     #     exit 1
     fi
     # overwrite DATA_TAG if RUN_EFFICIENCY_DATA_TAG defined
     if [ ! -z ${RUN_EFFICIENCY_DATA_TAG+x} ]; then
-        DATA_TAG="${RUN_EFFICIENCY_DATA_TAG}"   
+        DATA_TAG="${RUN_EFFICIENCY_DATA_TAG}"
     else
         echo "RUN_EFFICIENCY_DATA_TAG not set - abort efficiency test"
         exit 1
@@ -215,7 +215,7 @@ if [ "${RUN_EFFICIENCY}" != "NO_EFFICIENCY" ]; then
 
     set -euxo pipefail
 
-    EFF_RUN_OPTIONS=" --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE} --params external/ParamFiles/ ${EFF_RUN_OPTIONS}"
+    EFF_RUN_OPTIONS=" --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}.json --params external/ParamFiles/ ${EFF_RUN_OPTIONS}"
 
     OUTPUT_FOLDER="${TEST_NAME}_output_${SEQUENCE}"
 
diff --git a/scripts/ci/jobs/run_physics_efficiency.sh b/scripts/ci/jobs/run_physics_efficiency.sh
index 71c4f1bb077ec532e50ddba1571b4324c464e472..566997637c3a6dcbccc3e27610b0c19db42e63a2 100755
--- a/scripts/ci/jobs/run_physics_efficiency.sh
+++ b/scripts/ci/jobs/run_physics_efficiency.sh
@@ -11,7 +11,7 @@ fi
 check_build_exists
 
 
-RUN_OPTIONS="-n 10000 -m 1100 --run-from-json 1"
+RUN_OPTIONS="-n 10000 -m 1100"
 
 # Configure the input files (--mdf) and geometry (-g)
 set +x; set +u
@@ -21,7 +21,7 @@ fi
 
 set -euxo pipefail
 
-RUN_OPTIONS=" --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE} --params external/ParamFiles/ ${RUN_OPTIONS}"
+RUN_OPTIONS=" --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}.json --params external/ParamFiles/ ${RUN_OPTIONS}"
 
 OUTPUT_FOLDER="${TEST_NAME}_output_${SEQUENCE}"
 
diff --git a/scripts/ci/jobs/run_throughput.sh b/scripts/ci/jobs/run_throughput.sh
index b1115bdab4a0130ddfdbb8404549bd4f785fb95c..a77ad609f762afa5fc018062e5daa1c280e571e4 100755
--- a/scripts/ci/jobs/run_throughput.sh
+++ b/scripts/ci/jobs/run_throughput.sh
@@ -17,10 +17,10 @@ if [ ! -z ${GEOMETRY+x} ]; then
   RUN_OPTIONS="$RUN_OPTIONS -g /scratch/allen_geometries/${GEOMETRY}"
 fi
 
-RUN_OPTIONS="--mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}  --run-from-json 1 --params external/ParamFiles/ ${RUN_OPTIONS}"
+RUN_OPTIONS="--mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}.json --params external/ParamFiles/ ${RUN_OPTIONS}"
 
 
-if [ "${AVOID_HIP}" = "1" ]; then 
+if [ "${AVOID_HIP}" = "1" ]; then
   if [ "${TARGET}" = "HIP" ]; then
     echo "***** Variable TARGET is set to HIP, and AVOID_HIP is set to 1 - quit."
     exit 0
@@ -87,7 +87,7 @@ else
     NUMA_NODE=${CI_RUNNER_DESCRIPTION_SPLIT[2]}
     THREADS=$((${TOTAL_THREADS} / ${TOTAL_NUMA_NODES}))
     RUN_OPTIONS="${RUN_OPTIONS} ${RUN_THROUGHPUT_OPTIONS_CPU} -t ${THREADS}"
-    
+
     ALLEN="numactl --cpunodebind=${NUMA_NODE} --membind=${NUMA_NODE} ./toolchain/wrapper ./Allen ${RUN_OPTIONS}"
 
   elif [ "${TARGET}" = "CUDA" ]; then
@@ -96,7 +96,7 @@ else
     GPU_NUMBER=`nvidia-smi -L | grep ${GPU_UUID} | awk '{ print $2; }' | sed -e 's/://'`
     NUMA_NODE=`nvidia-smi topo -m | grep GPU${GPU_NUMBER} | tail -1 | awk '{ print $NF; }'`
     RUN_OPTIONS="${RUN_OPTIONS} ${RUN_THROUGHPUT_OPTIONS_CUDA}"
-    
+
     ALLEN="CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=${GPU_NUMBER} numactl --cpunodebind=${NUMA_NODE} --membind=${NUMA_NODE} ./toolchain/wrapper ./Allen ${RUN_OPTIONS}"
 
     nvidia-smi
@@ -141,7 +141,7 @@ echo "${CI_COMMIT_SHORT_SHA}" > "${OUTPUT_FOLDER}/revision.txt"
 echo "throughput_kHz{device=\"${DEVICE_ID}\",sequence=\"${SEQUENCE}\",dataset=\"${DATA_TAG}\"} ${THROUGHPUT_KHZ}" >> "${OUTPUT_FOLDER}/metrics.txt"
 
 
-if [ "${TPUT_REPORT}" = "NO_REPORT" ]; then 
+if [ "${TPUT_REPORT}" = "NO_REPORT" ]; then
   echo "TPUT_REPORT is set to ${TPUT_REPORT} - throughput will not be reported."
 
   touch "${OUTPUT_FOLDER}/no_throughput_report.txt"
diff --git a/scripts/ci/jobs/run_toggle_run_changes.sh b/scripts/ci/jobs/run_toggle_run_changes.sh
index 79f87b855ff49a4422ba2e8939705b4e0b1d8c22..201303c291488fff7d7df26d9d7a87f8ff955134 100755
--- a/scripts/ci/jobs/run_toggle_run_changes.sh
+++ b/scripts/ci/jobs/run_toggle_run_changes.sh
@@ -7,8 +7,8 @@ set -euxo pipefail
 
 check_build_exists
 
-RUN_OPTIONS="-n 1000 -m 1000 --run-from-json 1 --params external/ParamFiles/"
-JOB="./toolchain/wrapper ./Allen --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE} ${RUN_OPTIONS}"
+RUN_OPTIONS="-n 1000 -m 1000 --params external/ParamFiles/"
+JOB="./toolchain/wrapper ./Allen --mdf ${ALLEN_DATA}/mdf_input/${DATA_TAG}.mdf --sequence ${SEQUENCE}.json ${RUN_OPTIONS}"
 
 for RUN_CHANGES in ON OFF; do
   echo "RUN_CHANGES: $RUN_CHANGES"
diff --git a/scripts/ci/test_config.yaml b/scripts/ci/test_config.yaml
index 3e6907e68a72b1412bdbe9f224ebe1ff7096b36e..cd788514b20eb09e16c5bd3c8635dd8334c30068 100644
--- a/scripts/ci/test_config.yaml
+++ b/scripts/ci/test_config.yaml
@@ -9,13 +9,13 @@ config:
   # args added for specific test keys
   args:
     # Added to Allen command always
-    base: "--run-from-json 1 --params external/ParamFiles/"
+    base: "--params external/ParamFiles/"
 
     # added if "dataset:" specified
     dataset: "--mdf /scratch/allen_data/mdf_input/{dataset}.mdf"
 
     # added if "sequence:" specified
-    sequence: "--sequence {sequence}"
+    sequence: "--sequence {sequence}.json"
 
     # added if "geometry:" specified
     geometry: "-g /scratch/allen_geometries/{geometry}"
diff --git a/stream/CMakeLists.txt b/stream/CMakeLists.txt
index 1aad5aa3829cd42e0527b9b62509fba073870621..207994d4a482ab1c85fb48201ede3393895a9526 100644
--- a/stream/CMakeLists.txt
+++ b/stream/CMakeLists.txt
@@ -1,8 +1,6 @@
 ###############################################################################
 # (c) Copyright 2018-2020 CERN for the benefit of the LHCb Collaboration      #
 ###############################################################################
-include(GenerateConfiguration)
-
 # Gear interface library
 add_library(Gear INTERFACE)
 target_include_directories(Gear INTERFACE
@@ -12,45 +10,23 @@ target_include_directories(Gear INTERFACE
 target_link_libraries(Gear INTERFACE Boost::boost)
 install(TARGETS Gear EXPORT Allen)
 
+include(GenerateConfiguration)
+
 file(GLOB stream_src "sequence/src/*cpp")
 
 allen_add_host_library(Stream STATIC ${stream_src})
 
 target_link_libraries(Stream
   PRIVATE
-    HostClustering
-    HostDataProvider
-    HostDummyMaker
-    HostErrorBanks
-    HostGEC
-    HostInitEventList
-    HostPrefixSum
-    HostRoutingBits
-    HostTAEFilter
-    AllenCommon
-    Associate
+    HostEventModel
+    EventModel
     Backend
-    Calo
-    Combiners
-    DeviceValidators
-    Examples
-    Kalman
-    Lumi
-    Muon
-    PV_beamline
-    Plume
-    SciFi
-    UT
-    Validators
-    Velo
-    VertexFitter
-    algorithm_db
+    AllenCommon
+    Gear
     track_matching
-  PUBLIC
-    Utils
-    Selections)
+    MuonCommon
+  )
 
-add_dependencies(Stream generate_algorithms_view)
 if(STANDALONE)
   add_dependencies(Stream checkout_lhcb checkout_gaudi)
 endif()
diff --git a/stream/gear/include/AlgorithmDB.h b/stream/gear/include/AlgorithmDB.h
new file mode 100644
index 0000000000000000000000000000000000000000..3100dd026101baf241a7d05fdfbd39f2770476ca
--- /dev/null
+++ b/stream/gear/include/AlgorithmDB.h
@@ -0,0 +1,16 @@
+/*****************************************************************************\
+* (c) Copyright 2023 CERN for the benefit of the LHCb Collaboration           *
+*                                                                             *
+* This software is distributed under the terms of the Apache License          *
+* version 2 (Apache-2.0), copied verbatim in the file "COPYING".              *
+*                                                                             *
+* In applying this licence, CERN does not waive the privileges and immunities *
+* granted to it by virtue of its status as an Intergovernmental Organization  *
+* or submit itself to any jurisdiction.                                       *
+\*****************************************************************************/
+#pragma once
+
+#include "Configuration.h"
+#include "Algorithm.cuh"
+
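+// Look up and instantiate the type-erased Allen algorithm that corresponds to
+// the given configured algorithm.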
+Allen::TypeErasedAlgorithm instantiate_allen_algorithm(const ConfiguredAlgorithm& alg);
diff --git a/zmq/CMakeLists.txt b/zmq/CMakeLists.txt
index 6aeebc607686306dbda6a14f5827bb9d4e2b4d53..71f6200ebf3a37de952e9a772d146b158879c607 100644
--- a/zmq/CMakeLists.txt
+++ b/zmq/CMakeLists.txt
@@ -13,7 +13,6 @@ if (NOT STANDALONE)
     Gaudi::GaudiKernel
     LHCb::ZMQLib
     LHCbEvent
-    ${ALLEN_ROOT_LIBRARIES}
     PRIVATE
     EventModel
     AllenCommon)
@@ -22,7 +21,7 @@ if (NOT STANDALONE)
   install(TARGETS AllenZMQ EXPORT Allen)
   target_link_libraries(AllenZMQ INTERFACE ZMQSvc LHCb::ZMQLib)
 else()
-  allen_add_host_library(AllenZMQ src/functions.cpp src/svc.cpp)
+  allen_add_host_library(AllenZMQ SHARED src/functions.cpp src/svc.cpp)
   target_include_directories(AllenZMQ PUBLIC
     $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
     $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/serialize>
@@ -30,8 +29,7 @@ else()
     $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/svc>
     ${PROJECT_SOURCE_DIR}/main/include)
   target_compile_definitions(AllenZMQ PUBLIC STANDALONE)
-  target_link_libraries(AllenZMQ PUBLIC PkgConfig::zmq PkgConfig::sodium Boost::headers LHCbEvent)
-  target_include_directories(AllenZMQ SYSTEM PUBLIC ${ROOT_INCLUDE_DIRS})
+  target_link_libraries(AllenZMQ PUBLIC AllenCommon PkgConfig::zmq PkgConfig::sodium Boost::headers LHCbEvent)
 endif()
 
 function(zmq_program)