diff --git a/DaVinciExamples/python/DaVinciExamples/debugging/example-PrintDecayTree.py b/DaVinciExamples/python/DaVinciExamples/debugging/example-PrintDecayTree.py
index a60d57fbf505f30b19af4ff5c6f5bf74962ad4af..7b9d4f011294d7d99bd554751c3424e85ad09727 100644
--- a/DaVinciExamples/python/DaVinciExamples/debugging/example-PrintDecayTree.py
+++ b/DaVinciExamples/python/DaVinciExamples/debugging/example-PrintDecayTree.py
@@ -20,14 +20,15 @@ from PyConf.control_flow import CompositeNode, NodeLogic
 from PyConf.Algorithms import PrintDecayTree
 
 from DaVinci import options
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
-from DaVinci.common_particles import make_std_loose_jpsi2mumu
+from DaVinci.reco_objects_from_file import upfront_reconstruction
+from DaVinci.common_particles_from_file import make_std_loose_jpsi2mumu
 
 # Following line sets options data_type, simulation and DB tags, and the inputs
 options.set_input_and_conds_from_testfiledb("Upgrade_Bd2KstarMuMu")
 options.evt_max = 100
 options.print_freq = 1
 options.msg_svc_format = "% F%40W%S%7W%R%T %0W%M"
+options.process = 'Spruce'
 
 jpsis = make_std_loose_jpsi2mumu()
 
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-AllFunctors.py b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-AllFunctors.py
index 87bc37b1ff76c10193b708262a289aa670d8ebf9..85a601e6de6b62455571c9e0fb1a25faa50a2afb 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-AllFunctors.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-AllFunctors.py
@@ -20,18 +20,18 @@ __date__ = "2021-11-23"
 import Functors as F
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
-from PyConf.application import make_data_with_FetchDataFromFile
-from DaVinci.reco_objects import make_pvs_for
+from PyConf.components import force_location
+from DaVinci.reco_objects import make_pvs_v2
 from DaVinci.algorithms import add_filter
-from DaVinci import options
 from DecayTreeFitter import DTFAlg
 
+from DaVinci import options
+
 #
 # Definition of strucing line
 #
 bd2dsk_line = "SpruceB2OC_BdToDsmK_DsmToHHH_FEST_Line"
-bd2dsk_data = make_data_with_FetchDataFromFile(
-    f"/Event/Spruce/{bd2dsk_line}/Particles")
+bd2dsk_data = force_location(f"/Event/Spruce/{bd2dsk_line}/Particles")
 
 _basic = 'basic'
 _composite = 'composite'
@@ -160,7 +160,7 @@ def alg_config():
     #
     # DecayTreeFitter Algorithm
     #
-    v2_pvs = make_pvs_for(process=options.process, data_type=options.data_type)
+    v2_pvs = make_pvs_v2()
 
     #
     # DecayTreeFitter Algorithm
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-filtered.yaml b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-filtered.yaml
index 6cd8c7642e95f8d7aaa92acb4c0712ef826fca6b..be2a9426b04820d07335fefd4488ea6bde935ade 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-filtered.yaml
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-filtered.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime.tck.json' 
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime_newPacking.tck.json' 
 evt_max: 10
 histo_file: 'DV-example-tupling-DTF-his-filtered.root'
 ntuple_file: 'DV-example-tupling-DTF-ntp-filtered.root'
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-run-mc.py b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-run-mc.py
index d1d7558606c9fd370299bc0a66e9886a8cfac0e9..2c45b7af62f04236fc57ce1ef3ed3b1c1faa703a 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-run-mc.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-DTF-run-mc.py
@@ -23,9 +23,9 @@ __author__ = "P. Koppenburg"
 __date__ = "2021-11-23"
 
 import Functors as F
-from DaVinci.standard_particles import make_detached_mumu
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
-from DaVinci.reco_objects import make_pvs
+from DaVinci.standard_particles_from_file import make_detached_mumu
+from DaVinci.reco_objects_from_file import upfront_reconstruction
+from DaVinci.reco_objects_from_file import make_pvs
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
 from DecayTreeFitter import DTFAlg, DTF_functors
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-advanced-run-mc.py b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-advanced-run-mc.py
index 29315b9616becb5345d886edd14216ad74d3e989..5c92a6675292f5931936fe4173cfbff16e971ce5 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-advanced-run-mc.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-advanced-run-mc.py
@@ -21,8 +21,8 @@ __author__ = "Davide Fazzini, Abhijit Mathad"
 __date__ = "2021-06-18"
 
 import Functors as F
-from DaVinci.standard_particles import make_detached_mumu, make_KsDD
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
+from DaVinci.standard_particles_from_file import make_detached_mumu, make_KsDD
+from DaVinci.reco_objects_from_file import upfront_reconstruction
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
 from FunTuple.functorcollections import Kinematics
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic-run-mc.py b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic-run-mc.py
index 7aecaa6e469cf7086895af7309472d6d9c2d8d09..a67b5cfcf966cc64bc68240fbe647ee646b08e65 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic-run-mc.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic-run-mc.py
@@ -21,8 +21,8 @@ __author__ = "Maurizio Martinelli, Abhijit Mathad"
 __date__ = "2021-05-03"
 
 import Functors as F
-from DaVinci.standard_particles import make_detached_mumu
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
+from DaVinci.standard_particles_from_file import make_detached_mumu
+from DaVinci.reco_objects_from_file import upfront_reconstruction
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
 
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic.py b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic.py
index 5fb3a8b61107e03a20a1cdf2a21203c08254f404..72702dccbae831fe55d100e1e0a3f5af4a69c19a 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/example-tupling-basic.py
@@ -19,8 +19,8 @@ __author__ = "Maurizio Martinelli, Abhijit Mathad"
 __date__ = "2021-03-16"
 
 from DaVinci import options, run_davinci
-from DaVinci.standard_particles import make_detached_mumu, make_KsDD
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
+from DaVinci.standard_particles_from_file import make_detached_mumu, make_KsDD
+from DaVinci.reco_objects_from_file import upfront_reconstruction
 import Functors as F
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_configFuntuple.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_configFuntuple.py
index 2fb622c9322574994df37819c4a3eaeb78f885f0..a20137d76c4f95cf97fd19654485f420c40027c1 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_configFuntuple.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_configFuntuple.py
@@ -53,12 +53,16 @@ def main():
 
     config = {
         "location":
-        "/Event/Spruce/SpruceB2OC_BdToDsmPi_DsmToHHH_Line/Particles",
-        "filters": ["HLT_PASS('SpruceB2OC_BdToDsmPi_DsmToHHH_LineDecision')"],
+        "/Event/Spruce/SpruceB2OC_BdToDsmPi_DsmToKpKmPim_Line/Particles",
+        "filters":
+        ["HLT_PASS('SpruceB2OC_BdToDsmPi_DsmToKpKmPim_LineDecision')"],
         "preamble": ['TRACK_MAX_PT = MAXTREE(ISBASIC & HASTRACK, PT, -1)'],
-        "tuple": "DecayTree",
-        "fields": fields,
-        "variables": variables,
+        "tuple":
+        "DecayTree",
+        "fields":
+        fields,
+        "variables":
+        variables,
     }
 
     tools = []
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_array_taggers.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_array_taggers.py
index 2f05c38f45aa00e4a2b62f7fb70dac0643fad0b7..582c39a760fce131f1847f6c2a498bfb073f6420 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_array_taggers.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_array_taggers.py
@@ -21,16 +21,21 @@
 """
 
 import Functors as F
-from PyConf.application import make_data_with_FetchDataFromFile
+from PyConf.components import force_location
 from PyConf.Algorithms import ParticleTaggerAlg, ParticleContainerMerger
 from FunTuple import FunctorCollection, FunTuple_Particles as Funtuple
 from DaVinci.algorithms import add_filter
 from DaVinci.standard_particles import make_long_pions_from_spruce
+from DaVinci.reco_objects import reconstruction
+
+from DaVinci import options
+options.process = 'Spruce'
 
 bd2dsk_line = "SpruceB2OC_BdToDsmK_DsmToHHH_FEST_Line"
-bd2dsk_data = make_data_with_FetchDataFromFile(
-    f"/Event/Spruce/{bd2dsk_line}/Particles")
-pions = make_long_pions_from_spruce()
+bd2dsk_data = force_location(f"/Event/Spruce/{bd2dsk_line}/Particles")
+
+with reconstruction.bind(process=options.process):
+    pions = make_long_pions_from_spruce()
 
 tagging_container = ParticleContainerMerger(
     InputContainers=[pions]).OutputContainer
@@ -77,8 +82,7 @@ tuple_B0DsK = Funtuple(
 filter_B0DsK = add_filter("HDRFilter_B0DsK",
                           f"HLT_PASS('{bd2dsk_line}Decision')")
 
-from DaVinci import options
-options.annsvc_config = 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime.tck.json'
+options.annsvc_config = 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime_newPacking.tck.json'
 options.histo_file = 'DV-example-tagger-his.root'
 options.ntuple_file = 'DV-example-tagger-ntp.root'
 
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.py
index 25cfa9da4d0c7371e092f92ba77a9f2eda527e40..32b6ce43285eac78b7ccdffcb05e431fcb0cbb93 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.py
@@ -12,7 +12,7 @@
 Read an HLT2 file and create an ntuple using pre-defined Functor collections.
 """
 
-from PyConf.application import make_data_with_FetchDataFromFile
+from PyConf.components import force_location
 from FunTuple import FunctorCollection, functorcollections
 from FunTuple import FunTuple_Particles as Funtuple
 from DaVinci.algorithms import add_filter
@@ -20,8 +20,7 @@ from DaVinci import options
 from DaVinci.truth_matching import configured_MCTruthAndBkgCatAlg
 from DaVinci.algorithms import get_odin, get_decreports
 
-d02kpi_data = make_data_with_FetchDataFromFile(
-    "/Event/HLT2/Hlt2CharmD0ToKmPipLine/Particles")
+d02kpi_data = force_location("/Event/HLT2/Hlt2CharmD0ToKmPipLine/Particles")
 
 #get configured "MCTruthAndBkgCatAlg" algorithm for HLT2 output
 mctruth = configured_MCTruthAndBkgCatAlg(inputs=d02kpi_data)
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.yaml b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.yaml
index ebcafd5995f81d55568a19b66e489852309abfa3..d549724914b41761dbb84873a0ecad3c97ff730b 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.yaml
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts.tck.json'
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts_newPacking.tck.json'
 evt_max: -1
 histo_file: 'tuple_D0_Kpi_10evts_collections.root'
 input_raw_format: 0.3
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.py
index 9053f88015490b70cb37f4ad0ed889ff85c8ebc0..54152995a7eca6aca61f9ca7f97799abfbe2fe06 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.py
@@ -14,13 +14,15 @@ Read an HLT2 file and create an ntuple with the new DaVinci configuration.
 import Functors as F
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
-from PyConf.application import make_data_with_FetchDataFromFile
-from DaVinci.reco_objects import make_pvs_for
+from PyConf.components import force_location
+from DaVinci.reco_objects import make_pvs_v2, reconstruction
 from DaVinci.algorithms import add_filter
 from DaVinci import options
 from DaVinci.truth_matching import configured_MCTruthAndBkgCatAlg
 from DaVinci.algorithms import get_odin, get_decreports
 
+options.process = 'Hlt2'
+
 fields = {
     "D0": "[D0 -> K- pi+]CC",
     "Kminus": "[D0 -> ^K- pi+]CC",
@@ -28,7 +30,9 @@ fields = {
 }
 
 # Creating v2 reconstructed vertices to be used in the following functor
-v2_pvs = make_pvs_for(process="Hlt2", data_type="Upgrade")
+with reconstruction.bind(process=options.process):
+    v2_pvs = make_pvs_v2()
+
 d0_variables = FunctorCollection({
     "ID": F.PARTICLE_ID,
     "KEY": F.OBJECT_KEY,
@@ -63,7 +67,7 @@ variables = {
 
 
 def main():
-    d02kpi_data = make_data_with_FetchDataFromFile(
+    d02kpi_data = force_location(
         "/Event/HLT2/Hlt2CharmD0ToKmPipLine/Particles")
 
     my_filter = add_filter("HDRFilter_D0Kpi",
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.yaml b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.yaml
index 1268f47bf9b6220635b6e28c3c865773d19476fe..66c620cfc7080ef41f280fd794c3d549d1b2c924 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.yaml
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts.tck.json'
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts_newPacking.tck.json'
 evt_max: -1
 histo_file: 'tuple_D0_Kpi_10evts_fromHlt2.root'
 lumi: false
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2_gaudirun.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2_gaudirun.py
index 18bdbed6509cec943ca96f10458714dea263e8b1..6a6775af54740fff7ccd34d770815bd52cb33f18 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2_gaudirun.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2_gaudirun.py
@@ -14,11 +14,13 @@ Read an HLT2 file and create an ntuple with the new DaVinci runned with gaudirun
 import Functors as F
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
-from PyConf.application import make_data_with_FetchDataFromFile
+#from PyConf.application import make_data_with_FetchDataFromFile
+from PyConf.components import force_location
 from DaVinci.Configuration import run_davinci_app
-from DaVinci.reco_objects import make_pvs_for
+from DaVinci.reco_objects import make_pvs_v2, reconstruction
 from DaVinci.algorithms import add_filter
 from DaVinci import options
+options.process = 'Hlt2'
 
 fields = {
     "D0": "[D0 -> K- pi+]CC",
@@ -27,7 +29,9 @@ fields = {
 }
 
 # Creating v2 reconstructed vertices to be used in the following functor
-v2_pvs = make_pvs_for(process='Hlt2', data_type="Upgrade")
+with reconstruction.bind(process=options.process):
+    v2_pvs = make_pvs_v2()
+
 d0_variables = FunctorCollection({
     "PT": F.PT,
     "BPVDIRA": F.BPVDIRA(v2_pvs),
@@ -47,7 +51,7 @@ variables = {
 
 
 def main():
-    d02kpi_data = make_data_with_FetchDataFromFile(
+    d02kpi_data = force_location(
         "/Event/HLT2/Hlt2CharmD0ToKmPipLine/Particles")
 
     my_filter = add_filter("HDRFilter_D0Kpi",
@@ -63,12 +67,13 @@ def main():
 
 
 options.ntuple_file = "tuple_D0_Kpi_10evts_gaudirun.root"
-options.annsvc_config = "root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts.tck.json"
+options.annsvc_config = "root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts_newPacking.tck.json"
 options.process = 'Hlt2'
 options.input_raw_format = 0.3
 options.user_algorithms = "$DAVINCIEXAMPLESROOT/python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2_gaudirun:main"
 options.write_fsr = False
-
-fileDB_key = "FEST_November_2021_dst"
+options.enable_unpack = True
+fileDB_key = "FEST_November_2021_dst_newPacking"
 fileDB_path = "$DAVINCIROOT/options/DaVinciDB-Example.yaml"
+
 run_davinci_app(fileDB_key, fileDB_path)
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.py
index b6a9259d66cec3a1397bb9a7cc359efe753df296..33056b000d22ef092699ec01b7ecdf156991fc2e 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.py
@@ -14,13 +14,14 @@ Read the output of an Sprucing job with the new DaVinci configuration.
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
 from FunTuple.functorcollections import Kinematics
-from PyConf.application import make_data_with_FetchDataFromFile
+from PyConf.components import force_location
 from DaVinci.algorithms import add_filter
+from DaVinci import options
 
-bd2dsk_line = make_data_with_FetchDataFromFile(
+bd2dsk_line = force_location(
     "/Event/Spruce/SpruceB2OC_BdToDsmK_DsmToHHH_FEST_Line/Particles")
-bd2dspi_line = make_data_with_FetchDataFromFile(
-    "/Event/Spruce/SpruceB2OC_BdToDsmPi_DsmToHHH_Line/Particles")
+bd2dspi_line = force_location(
+    "/Event/Spruce/SpruceB2OC_BdToDsmPi_DsmToKpKmPim_Line/Particles")
 
 fields_dsk = {
     'B0': "[B0 -> D_s- K+]CC",
@@ -86,7 +87,6 @@ evt_filter = "HLT_PASS('Hlt2Topo2BodyLineDecision')"
 
 
 def main():
-    from DaVinci import options
     options.evt_pre_filters = {"Hlt2TopoLineFilter": evt_filter}
     options.ntuple_file = "DV_example_sprucing_ntp.root"
     options.histo_file = "DV_example_sprucing_his.root"
@@ -96,7 +96,7 @@ def main():
         "HLT_PASS('SpruceB2OC_BdToDsmK_DsmToHHH_FEST_LineDecision')")
     filter_B0Dspi = add_filter(
         "HDRFilter_B0Dspi",
-        "HLT_PASS('SpruceB2OC_BdToDsmPi_DsmToHHH_LineDecision')")
+        "HLT_PASS('SpruceB2OC_BdToDsmPi_DsmToKpKmPim_LineDecision')")
 
     tools = []
     algs = {
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.yaml b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.yaml
index be289787934821e5e7d7704f61beddf1be201471..9182bce43f3666b078eb862073664c9bb5aaa8ea 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.yaml
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime.tck.json'
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime_newPacking.tck.json'
 evt_max: -1
 histo_file: 'sprucing_histos.root'
 input_raw_format: 0.3
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.py b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.py
index d03214468a8c38f9ace34cec08c78915b2249aac..577577d38c93486b191cd238b47dc0180d80980e 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.py
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.py
@@ -14,7 +14,7 @@ from FunTuple import FunTuple_Particles as Funtuple
 from FunTuple.functorcollections import Kinematics
 from DaVinci.truth_matching import configured_MCTruthAndBkgCatAlg
 from DaVinci.algorithms import add_filter
-from PyConf.application import make_data_with_FetchDataFromFile
+from PyConf.components import force_location
 
 #FunTuple: define branches.
 fields = {
@@ -34,8 +34,7 @@ variables = {
 
 def main():
 
-    B_data = make_data_with_FetchDataFromFile(
-        "/Event/Spruce/Spruce_Test_line/Particles")
+    B_data = force_location("/Event/Spruce/Spruce_Test_line/Particles")
 
     my_filter = add_filter("HDRFilter_B",
                            "HLT_PASS('Spruce_Test_lineDecision')")
diff --git a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.yaml b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.yaml
index 65c1860ebc0c0152d14db007fd4c475b529286e0..bc4fe130987a436f789f5496b93f131df2593ea0 100644
--- a/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.yaml
+++ b/DaVinciExamples/python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce_mc.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_MCtestfile.tck.json'
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_example_realtime_dstinput.tck.json'
 evt_max: -1
 histo_file: 'sprucing_mc_histos.root'
 input_raw_format: 0.3
diff --git a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_configFuntuple.qmt b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_configFuntuple.qmt
index 929b933c0ce272d178418bd9b32a229e22cba62c..fbfb35697b2c8568d97bfb17d7a503b12bf16128 100644
--- a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_configFuntuple.qmt
+++ b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_configFuntuple.qmt
@@ -27,6 +27,7 @@
 -->
 <extension class="GaudiTest.GaudiExeTest" kind="test">
   <argument name="program"><text>davinci</text></argument>
+  <argument name="timeout"><integer>1000</integer></argument>
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
@@ -37,31 +38,11 @@
   <text>--user_algorithms</text>
   <text>../../python/DaVinciExamples/tupling/option_davinci_configFuntuple:main</text>
   </set></argument>
-  <argument name="validator"><text>
-findReferenceBlock("""LAZY_AND: DaVinci                                         #=110     Sum=4           Eff=|( 3.636364 +- 1.78482 )%|
- NONLAZY_OR: FileSummaryRecords                           #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-  LAZY_AND: GenFSR                                        #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RecordStream/FSROutputStreamDstWriter                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
- NONLAZY_OR: UserAnalysis                                 #=110     Sum=4           Eff=|( 3.636364 +- 1.78482 )%|
-  LAZY_AND: B0Dspi                                        #=110     Sum=4           Eff=|( 3.636364 +- 1.78482 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/Filter_B0Dspi                          #=110     Sum=4           Eff=|( 3.636364 +- 1.78482 )%|
-   FunTupleBase_Particles/Tuple_B0Dspi                    #=4       Sum=4           Eff=|( 100.0000 +- 0.00000 )%|
-""", stdout, result, causes, signature_offset = 0, id = "Stream3")
+  <argument name="reference"><text>../refs/test_davinci_configFuntuple.ref</text></argument>
+  <argument name="error_reference"><text>../refs/empty.ref</text></argument>
+  <argument name="validator"><text>                                                                                       
+from DaVinciTests.QMTest.DaVinciExclusions import preprocessor                                                           
+validateWithReference(preproc = preprocessor) 
 countErrorLines({"FATAL":0, "ERROR":0})
-</text></argument>
+  </text></argument>
 </extension>
diff --git a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_All.qmt b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_All.qmt
index 3853fae62785147e90ddc233f416980157f270e8..885db9312ac9ab67687660633dc3342c904176fd 100755
--- a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_All.qmt
+++ b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_All.qmt
@@ -23,6 +23,7 @@
 -->
 <extension class="GaudiTest.GaudiExeTest" kind="test">
   <argument name="program"><text>davinci</text></argument>
+  <argument name="timeout"><integer>1000</integer></argument>
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
diff --git a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_collections.qmt b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_collections.qmt
index bfa5603d323f5bdda2130636e8aa1384287d7c74..6f7d6d798278a29bbcd2750ea1449f8a86e83970 100644
--- a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_collections.qmt
+++ b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_collections.qmt
@@ -28,7 +28,7 @@
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
-  <text>FEST_November_2021_dst</text>
+  <text>FEST_November_2021_dst_newPacking</text>
   <text>$DAVINCIROOT/options/DaVinciDB-Example.yaml</text>
   <text>--joboptfile</text>
   <text>../../python/DaVinciExamples/tupling/option_davinci_tupling_from_collections.yaml</text>
diff --git a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_hlt2.qmt b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_hlt2.qmt
index 8d3af8ae799e09503980b06cff02a7ca7136b380..edffbbffce6a8dc46d7ec095dc3c5fe4367aaba0 100644
--- a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_hlt2.qmt
+++ b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_hlt2.qmt
@@ -28,7 +28,7 @@
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
-  <text>FEST_November_2021_dst</text>
+  <text>FEST_November_2021_dst_newPacking</text>
   <text>$DAVINCIROOT/options/DaVinciDB-Example.yaml</text>
   <text>--joboptfile</text>
   <text>../../python/DaVinciExamples/tupling/option_davinci_tupling_from_hlt2.yaml</text>
diff --git a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_spruce.qmt b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_spruce.qmt
index 3f746f4558ff12fb61594706792b393e015c3e12..af29c24987e6890cead041f627635ca998d2c665 100644
--- a/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_spruce.qmt
+++ b/DaVinciExamples/tests/qmtest/tupling.qms/test_davinci_tupling_from_spruce.qmt
@@ -26,6 +26,7 @@
 -->
 <extension class="GaudiTest.GaudiExeTest" kind="test">
   <argument name="program"><text>davinci</text></argument>
+  <argument name="timeout"><integer>1000</integer></argument>
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
@@ -36,51 +37,11 @@
   <text>--user_algorithms</text>
   <text>../../python/DaVinciExamples/tupling/option_davinci_tupling_from_spruce:main</text>
   </set></argument>
-  <argument name="validator"><text>
-findReferenceBlock("""LAZY_AND: DaVinci                                         #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
- NONLAZY_OR: FileSummaryRecords                           #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-  LAZY_AND: GenFSR                                        #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RecordStream/FSROutputStreamDstWriter                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
- NONLAZY_OR: UserAnalysis                                 #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
-  LAZY_AND: B0DsK                                         #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
-   LoKi__HDRFilter/Hlt2TopoLineFilter                     #=110     Sum=70          Eff=|( 63.63636 +- 4.58659 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/HDRFilter_B0DsK                        #=70      Sum=55          Eff=|( 78.57143 +- 4.90433 )%|
-   FunTupleBase_Particles/B0DsK_Tuple                     #=55      Sum=55          Eff=|( 100.0000 +- 0.00000 )%|
-  LAZY_AND: B0Dspi                                        #=110     Sum=1           Eff=|(0.9090909 +- 0.904949)%|
-   LoKi__HDRFilter/Hlt2TopoLineFilter                     #=110     Sum=70          Eff=|( 63.63636 +- 4.58659 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/HDRFilter_B0Dspi                       #=70      Sum=1           Eff=|( 1.428571 +- 1.41833 )%|
-   FunTupleBase_Particles/B0Dspi_Tuple                    #=1       Sum=1           Eff=|( 100.0000 +- 0.00000 )%|
-   """, stdout, result, causes, signature_offset = 0, id = "Stream3")
+  <argument name="reference"><text>../refs/test_davinci_tupling_from_spruce.ref</text></argument>
+  <argument name="error_reference"><text>../refs/empty.ref</text></argument>
+  <argument name="validator"><text>
+from DaVinciTests.QMTest.DaVinciExclusions import preprocessor
+validateWithReference(preproc = preprocessor)
 countErrorLines({"FATAL":0, "ERROR":0})
-</text></argument>
+  </text></argument>
 </extension>
diff --git a/DaVinciTests/python/DaVinciTests/QMTest/DaVinciExclusions.py b/DaVinciTests/python/DaVinciTests/QMTest/DaVinciExclusions.py
index 0c9be31cdfcf8f9557b64f453447fe3e9c15af7b..fe4a6d1daed58259f979df7c9df204e7faf2d45a 100755
--- a/DaVinciTests/python/DaVinciTests/QMTest/DaVinciExclusions.py
+++ b/DaVinciTests/python/DaVinciTests/QMTest/DaVinciExclusions.py
@@ -32,8 +32,8 @@ remove_known_warnings = LineSkipper(regexps=[
     # expected WARNINGs from the data broker
     r"HiveDataBrokerSvc +WARNING non-reentrant algorithm: .*",
     # Until tck is implemented HltPackedDataDecoder/HltDecReportsDecoder will raise warning
-    r"HltPackedDataDecoder +WARNING TCK in rawbank seems to be 0 .*",
-    r"HltPackedDataDe...WARNING TCK in rawbank seems to be 0 .*",
+    r"HltPackedBufferDecoder +WARNING TCK in rawbank seems to be 0 .*",
+    r"HltPackedBufferDe...WARNING TCK in rawbank seems to be 0 .*",
     r"HltDecReportsDecoder +WARNING TCK obtained from rawbank seems to be 0 .*",
     r"HLT2 +WARNING TCK obtained from rawbank seems to be 0 .*",
     r"Hlt2DecReports +WARNING TCK obtained from rawbank seems to be 0 .*",
diff --git a/DaVinciTests/tests/options/DVTestFunctors.py b/DaVinciTests/tests/options/DVTestFunctors.py
index d061d527665f0ab51a49b0d98cbd59994009f7b3..8d41e4cb589fdebed8c8d1f517df6d63fb102d85 100644
--- a/DaVinciTests/tests/options/DVTestFunctors.py
+++ b/DaVinciTests/tests/options/DVTestFunctors.py
@@ -17,7 +17,7 @@ import Functors as F
 from Functors.math import in_range
 from PyConf.Algorithms import ParticleRangeFilter, TwoBodyCombiner
 
-from DaVinci.common_particles import make_long_kaons
+from DaVinci.common_particles_from_file import make_long_kaons
 
 
 def make_tight_d2kk():
@@ -56,8 +56,8 @@ from PyConf.control_flow import CompositeNode, NodeLogic
 from PyConf.Algorithms import PrintDecayTree
 
 from DaVinci import options
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
-from DaVinci.common_particles import make_std_loose_d2kk
+from DaVinci.reco_objects_from_file import upfront_reconstruction
+from DaVinci.common_particles_from_file import make_std_loose_d2kk
 
 options.evt_max = -1
 options.print_freq = 1
diff --git a/DaVinciTests/tests/options/DVTestReadMooreOutput.py b/DaVinciTests/tests/options/DVTestReadMooreOutput.py
index bd28275c2b46aee299e938c9b081fd897fd7f041..735165f05521a562df0668b500b68ce98240a0dd 100644
--- a/DaVinciTests/tests/options/DVTestReadMooreOutput.py
+++ b/DaVinciTests/tests/options/DVTestReadMooreOutput.py
@@ -17,8 +17,8 @@ __date__ = "2021-03-22"
 
 from PyConf.Algorithms import PrintDecayTree
 from DaVinci import options
-from DaVinci.reco_objects import upfront_reconstruction_from_file as upfront_reconstruction
-from DaVinci.common_particles import make_std_loose_d2kk
+from DaVinci.reco_objects_from_file import upfront_reconstruction
+from DaVinci.common_particles_from_file import make_std_loose_d2kk
 
 options.evt_max = -1
 options.print_freq = 1
diff --git a/DaVinciTests/tests/options/option_davinci_recVertices.py b/DaVinciTests/tests/options/option_davinci_recVertices.py
index e72f8c24039583ec2e9a24b7fcdb647cf48e34e4..a749abb9bd725873c14aac40b23a20cdcf936d9d 100644
--- a/DaVinciTests/tests/options/option_davinci_recVertices.py
+++ b/DaVinciTests/tests/options/option_davinci_recVertices.py
@@ -14,14 +14,15 @@ Test for the function returning DataHandle for v2 RecVertices for usage in ThOr
 import Functors as F
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
-from PyConf.application import make_data_with_FetchDataFromFile
-from DaVinci.reco_objects import make_pvs_for
+from DaVinci.reco_objects import make_pvs_v2
 from DaVinci.algorithms import add_filter
+from PyConf.components import force_location
+
 from DaVinci import options
 
 bd2dsk_line = "SpruceB2OC_BdToDsmK_DsmToHHH_FEST_Line"
-bd2dsk_data = make_data_with_FetchDataFromFile(
-    f"/Event/Spruce/{bd2dsk_line}/Particles")
+bd2dsk_data = force_location(f"/Event/Spruce/{bd2dsk_line}/Particles")
+
 fields_dsk = {
     'B0': "[B0 -> D_s- K+]CC",
 }
@@ -30,7 +31,8 @@ options.ntuple_file = "test_recVertices.root"
 
 
 def alg_config():
-    v2_pvs = make_pvs_for(process=options.process, data_type=options.data_type)
+    v2_pvs = make_pvs_v2()
+
     variables_pvs = FunctorCollection({
         "BPVDIRA": F.BPVDIRA(v2_pvs),
         "BPVFDCHI2": F.BPVFDCHI2(v2_pvs),
diff --git a/DaVinciTests/tests/options/option_davinci_sprucing.py b/DaVinciTests/tests/options/option_davinci_sprucing.py
index cb5eb947de968586056f182023aa42117d40e6eb..53e8ab6c1bc881b2938452081f6d151f34a23f8b 100644
--- a/DaVinciTests/tests/options/option_davinci_sprucing.py
+++ b/DaVinciTests/tests/options/option_davinci_sprucing.py
@@ -14,10 +14,10 @@ Read the output of an Sprucing job with the new DaVinci configuration.
 from FunTuple import FunctorCollection
 from FunTuple import FunTuple_Particles as Funtuple
 from FunTuple.functorcollections import Kinematics
-from PyConf.application import make_data_with_FetchDataFromFile
 from DaVinci.algorithms import add_filter
+from PyConf.components import force_location
 
-bd2dsk_line = make_data_with_FetchDataFromFile(
+bd2dsk_line = force_location(
     "/Event/Spruce/SpruceB2OC_BdToDsmK_DsmToHHH_FEST_Line/Particles")
 
 fields = {
diff --git a/DaVinciTests/tests/options/option_davinci_sprucing.yaml b/DaVinciTests/tests/options/option_davinci_sprucing.yaml
index e710902ea86bd6195a31299d70331d46da639a66..a47dbdce795f591451c81427dddd13cb95f3548d 100644
--- a/DaVinciTests/tests/options/option_davinci_sprucing.yaml
+++ b/DaVinciTests/tests/options/option_davinci_sprucing.yaml
@@ -9,7 +9,7 @@
 # or submit itself to any jurisdiction.                                       #
 ###############################################################################
 
-annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime.tck.json'
+annsvc_config: 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime_newPacking.tck.json'
 evt_max: -1
 print_freq: 1
 process: Spruce
diff --git a/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_filters.qmt b/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_filters.qmt
index 6de235738ed343f8b0ce695760e306929b099308..ed67bfd7666c28dd0286f87109866cd0de3c834f 100644
--- a/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_filters.qmt
+++ b/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_filters.qmt
@@ -26,6 +26,7 @@
 -->
 <extension class="GaudiTest.GaudiExeTest" kind="test">
   <argument name="program"><text>davinci</text></argument>
+  <argument name="timeout"><integer>1000</integer></argument>
   <argument name="args"><set>
   <text>run-mc</text>
   <text>--inputfiledb</text>
@@ -36,49 +37,10 @@
   <text>--user_algorithms</text>
   <text>../options/option_davinci_filters:main</text>
   </set></argument>
-  <argument name="validator"><text>
-findReferenceBlock("""LAZY_AND: DaVinci                                         #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
- NONLAZY_OR: FileSummaryRecords                           #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-  LAZY_AND: GenFSR                                        #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RecordStream/FSROutputStreamDstWriter                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
- NONLAZY_OR: UserAnalysis                                 #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
-  LAZY_AND: B0DsK                                         #=110     Sum=55          Eff=|( 50.00000 +- 4.76731 )%|
-   LoKi__HDRFilter/Hlt2TopoLineFilter                     #=110     Sum=70          Eff=|( 63.63636 +- 4.58659 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/HDRFilter_B0DsK                        #=70      Sum=55          Eff=|( 78.57143 +- 4.90433 )%|
-  LAZY_AND: B0Dspi                                        #=110     Sum=1           Eff=|(0.9090909 +- 0.904949)%|
-   LoKi__HDRFilter/Hlt2TopoLineFilter                     #=110     Sum=70          Eff=|( 63.63636 +- 4.58659 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=70      Sum=70          Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/HDRFilter_B0Dspi                       #=70      Sum=1           Eff=|( 1.428571 +- 1.41833 )%|
-""", stdout, result, causes, signature_offset = 0, id = "Stream3")
-countErrorLines({"FATAL":0, "ERROR":0})
-</text></argument>
+  <argument name="reference"><text>../refs/test_davinci_filters.ref</text></argument>
+  <argument name="error_reference"><text>../refs/empty.ref</text></argument>
+  <argument name="validator"><text>
+from DaVinciTests.QMTest.DaVinciExclusions import preprocessor
+validateWithReference(preproc = preprocessor)
+  </text></argument>
 </extension>
diff --git a/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_recVertices.qmt b/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_recVertices.qmt
index a524b301163c4df4dcab932bef2bdccfc5570b01..de2dd07845da92222e7d56fea8af653b338e9113 100644
--- a/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_recVertices.qmt
+++ b/DaVinciTests/tests/qmtest/davinci.qms/test_davinci_recVertices.qmt
@@ -36,32 +36,11 @@
   <text>--user_algorithms</text>
   <text>../options/option_davinci_recVertices:alg_config</text>
   </set></argument>
-  <argument name="validator"><text>
-findReferenceBlock("""
-LAZY_AND: DaVinci                                         #=110     Sum=79          Eff=|( 71.81818 +- 4.28949 )%|
- NONLAZY_OR: FileSummaryRecords                           #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-  LAZY_AND: GenFSR                                        #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RecordStream/FSROutputStreamDstWriter                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
- NONLAZY_OR: UserAnalysis                                 #=110     Sum=79          Eff=|( 71.81818 +- 4.28949 )%|
-  LAZY_AND: UserAlgs                                      #=110     Sum=79          Eff=|( 71.81818 +- 4.28949 )%|
-   LHCb__UnpackRawEvent/LHCb__UnpackRawEvent              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   HltPackedDataDecoder/HltPackedDataDecoder              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCParticle/UnpackMCParticle                      #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackMCVertex/UnpackMCVertex                          #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackTrack/UnpackTracks                               #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackRecVertex/UnpackPVs                              #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackChargedProtos                #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackProtoParticle/UnpackNeutralProtos                #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloElectrons                     #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloPhotons                       #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloMergedPi0s                    #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackCaloHypo/UnpackCaloSplitPhotons                  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   MuonPIDUnpacker/UnpackMuonPIDs                         #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   RichPIDUnpacker/UnpackRichPIDs                         #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   UnpackParticlesAndVertices/UnpackParticlesAndVertices  #=110     Sum=110         Eff=|( 100.0000 +- 0.00000 )%|
-   LoKi__HDRFilter/HDRFilter_B0DsK                        #=110     Sum=79          Eff=|( 71.81818 +- 4.28949 )%|
-   FunTupleBase_Particles/B0DsK_Tuple                     #=79      Sum=79          Eff=|( 100.0000 +- 0.00000 )%|
-""", stdout, result, causes, signature_offset = 0, id = "Stream3")
+  <argument name="reference"><text>../refs/test_davinci_recVertices.ref</text></argument>
+  <argument name="error_reference"><text>../refs/empty.ref</text></argument>
+  <argument name="validator"><text>
+from DaVinciTests.QMTest.DaVinciExclusions import preprocessor
+validateWithReference(preproc = preprocessor)
 countErrorLines({"FATAL":0, "ERROR":0})
-</text></argument>
+  </text></argument>
 </extension>
diff --git a/Phys/DaVinci/options/DaVinciDB-Example.yaml b/Phys/DaVinci/options/DaVinciDB-Example.yaml
index e96f5d7fbb90373fb74b8d8b9aa923ddcd39e644..9c579e767907ca4c12b87a1a47c22a3331ccc18e 100644
--- a/Phys/DaVinci/options/DaVinciDB-Example.yaml
+++ b/Phys/DaVinci/options/DaVinciDB-Example.yaml
@@ -45,7 +45,7 @@ Upgrade_Bd2KstarMuMu_ldst:
 
 Spruce_all_lines_dst:
    filenames:
-   - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtimereco.dst'
+   - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtimereco_newPacking.dst'
    qualifiers:
       data_type: Upgrade
       input_type: DST
@@ -71,6 +71,21 @@ FEST_November_2021_dst:
       Data: '2021-11'
       Comment: "D0->Kpi ntuples with 10 events from Sim10aU1 27163003 MagDown using a very loose version of Hlt2CharmD0ToKmPipLine"
 
+
+FEST_November_2021_dst_newPacking:
+   filenames:
+   - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/NovemberFEST/hlt2_D0_Kpi_10evts_newPacking.dst'
+   qualifiers:
+      data_type: Upgrade
+      input_type: ROOT
+      simulation: true
+      conddb_tag: sim-20201218-vc-md100
+      dddb_tag: dddb-20201211
+   metadata:
+      Author: 'Sevda Esen'
+      Data: '2022-01'
+      Comment: "D0->Kpi ntuples with 10 events from Sim10Up08 27163003 MagDown using a very loose version of Hlt2CharmD0ToKmPipLine. This is an older simulation compared to FEST_November_2021_dst taken from TestFileDB:upgrade-magdown-sim10-up08-27163003-digi using Moore/Hlt/Hlt2Conf/tests/options/hlt2_dzero2kpi.py"
+
 test_read_xgen:
    filenames:
    - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/testDV_read_xgen/Gauss-12143001-100ev-20211117.xgen'
@@ -87,7 +102,7 @@ test_read_xgen:
 
 test_spruce_MCtools:
    filenames:
-   - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_MCtestfile.dst'
+   - 'root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_realtimereco_dstinput.dst'
    qualifiers:
       data_type: Upgrade
       input_type: DST
diff --git a/Phys/DaVinci/python/DaVinci/algorithms.py b/Phys/DaVinci/python/DaVinci/algorithms.py
index 2f981a98d2e335381d409be959e3690c21e95553..ab56ab8392e8e9878b6cb72c2c71c06649326700 100644
--- a/Phys/DaVinci/python/DaVinci/algorithms.py
+++ b/Phys/DaVinci/python/DaVinci/algorithms.py
@@ -14,10 +14,12 @@ from GaudiKernel.ProcessJobOptions import importOptions
 from PyConf.Algorithms import (LoKi__HDRFilter as HDRFilter, LoKi__VoidFilter
                                as VoidFilter)
 from DaVinci.optionChecker import DVImportError, log_click
+from DaVinci.configOptions import set_properties
+from PyConf.application import ComponentConfig
 from PyConf.application import default_raw_event, make_odin
-from PyConf.application import make_data_with_FetchDataFromFile
 from PyConf.Algorithms import FilterDecays
 from DaVinci.algorithms_pyconf import make_dvalgorithm
+from PyConf.components import force_location
 
 
 def setup_algorithms(options):
@@ -128,9 +130,10 @@ def apply_filters_and_unpacking(options, algs_dict, unpack_only_mc):
                 evt_filter = add_filter(title, code)
                 evt_pre_filters.append(evt_filter)
             algs_list += evt_pre_filters
-
         if options.enable_unpack:
-            algs_list += unpack_locations(options, unpack_only_mc)
+            unpackers = unpack_locations(options, unpack_only_mc)
+            algs_list += unpackers
+
         algs_list += algs
 
         alg_filterd_dict[name] = algs_list
@@ -250,30 +253,63 @@ def unpack_locations(options, unpack_only_mc):
 
     process = options.process
     stream = options.stream
-
-    unpack_raw_event = reading.unpack_rawevent(
-        bank_types=['ODIN', 'DstData', 'HltDecReports'],
-        process=process,
-        stream=stream,
-        configurables=False)
-
     reading_algs = []
+    set_properties(options)
 
-    if unpack_only_mc:
-        reading_algs += reading.mc_unpackers(
-            process=process, filtered_mc=False, configurables=False)
+    if process == "Spruce":
+        TES_ROOT = '/Event/Spruce'
     else:
+        TES_ROOT = '/Event/HLT2'
+
+    if unpack_only_mc:
+        if options.simulation:
+            reading_algs += reading.mc_unpackers(
+                process=process, filtered_mc=False, configurables=False)
+        else:
+            log_click(
+                "WARNING",
+                "Requested unpacking MC but simulation is set False. Check your DV options."
+            )
+    elif options.annsvc_config:
+        config = ComponentConfig()
+
+        config.update(reading.set_hltAnn_svc(options.annsvc_config))
+        ann = config['HltANNSvc/HltANNSvc']
+
+        unpack_raw_event = reading.unpack_rawevent(
+            bank_types=['ODIN', 'DstData', 'HltDecReports'],
+            process=process,
+            stream=stream,
+            output_level=5,
+            configurables=False)
+
+        locations = reading.make_locations(ann.PackedObjectLocations, TES_ROOT)
+        reading_algs += [unpack_raw_event]
+
         reading_algs += [
-            unpack_raw_event,
             reading.decoder(
-                process=process, stream=stream, configurables=False)
+                locations=locations,
+                ann=ann,
+                process=process,
+                stream=stream,
+                output_level=5,
+                configurables=False)
         ]
 
+        mc_unpackers = []
         if options.simulation:
-            reading_algs += reading.mc_unpackers(
+            mc_unpackers = reading.mc_unpackers(
                 process=process, configurables=False)
 
-        reading_algs += reading.unpackers(process=process, configurables=False)
+        reading_algs += mc_unpackers
+
+        reading_algs += reading.unpackers(
+            locations=locations,
+            ann=ann,
+            mc=mc_unpackers,
+            process=process,
+            output_level=5,
+            configurables=False)
 
     return reading_algs
 
@@ -327,11 +363,10 @@ def configured_FunTuple(config):
        - List of filters and tupling algorithms.
     """
     from FunTuple import FunTuple_Particles as Funtuple
-    from PyConf.application import make_data_with_FetchDataFromFile
 
     dictAlgs = {}
     for key in config.keys():
-        inputs = make_data_with_FetchDataFromFile(config[key]["location"])
+        inputs = force_location(config[key]["location"])
         dictAlgs[key] = []
 
         i = 0
@@ -351,6 +386,7 @@ def configured_FunTuple(config):
             variables=config[key]["variables"],
             loki_preamble=config[key]["preamble"],
             inputs=inputs)
+
         dictAlgs[key].append(funTuple)
 
     return dictAlgs
@@ -416,7 +452,7 @@ def filter_on(location, decay_descriptor=None, bank_type=None):
     Returns:
         data: TES location of the particles that are loaded from the input samples
     """
-    data = make_data_with_FetchDataFromFile(location, bank_type=bank_type)
+    data = force_location(location)
     if decay_descriptor:
         data = apply_algorithm([data], FilterDecays, Code=decay_descriptor)
     return data
diff --git a/Phys/DaVinci/python/DaVinci/algorithms_pyconf.py b/Phys/DaVinci/python/DaVinci/algorithms_pyconf.py
index cf75de778ab2560bda9fabd01085e637dccade54..20072c51fd37aa22088f072ca7913bf8b3ff9037 100644
--- a/Phys/DaVinci/python/DaVinci/algorithms_pyconf.py
+++ b/Phys/DaVinci/python/DaVinci/algorithms_pyconf.py
@@ -98,12 +98,16 @@
     """
 
     def wrapped(**kwargs):
-        return algorithm(
-            input_transform=_dvalgorithm_inputs,
-            output_transform=_dvalgorithm_outputs,
-            WriteP2PVRelations=False,
-            ModifyLocations=False,
-            **kwargs)
+        input_particles = kwargs.pop("particles")
+        input_pvs = kwargs.pop("pvs", "")
+        return algorithm(
+            Inputs=input_particles,
+            ExtraInputs=input_particles,
+            InputPrimaryVertices=input_pvs,
+            output_transform=_dvalgorithm_outputs,
+            WriteP2PVRelations=False,
+            ModifyLocations=False,
+            **kwargs)
 
     return wrapped
 
diff --git a/Phys/DaVinci/python/DaVinci/common_particles_from_file.py b/Phys/DaVinci/python/DaVinci/common_particles_from_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..35f58502e1c77416e133e209f32343736262d8c5
--- /dev/null
+++ b/Phys/DaVinci/python/DaVinci/common_particles_from_file.py
@@ -0,0 +1,185 @@
+###############################################################################
+# (c) Copyright 2021 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Definitions of "common particles" very similar to those of Runs 1 & 2.
+"""
+
+from PyConf.tonic import configurable
+from PyConf.Algorithms import FunctionalParticleMaker
+from PyConf.Algorithms import LHCb__Phys__ParticleMakers__PhotonMaker as PhotonMaker
+
+from .reco_objects_from_file import make_charged_protoparticles as _make_charged_protoparticles
+from .reco_objects_from_file import make_neutral_protoparticles as _make_neutral_protoparticles
+from .reco_objects_from_file import make_pvs as _make_pvs
+
+from .filters_selectors import default_particle_cuts, default_track_cuts
+from .filters_selectors import get_long_track_selector, get_down_track_selector
+from .filters_selectors import all_protoparticle_filter as standard_protoparticle_filter
+
+from .algorithms_pyconf import ParticleFilterWithPVs, ParticleCombinerWithPVs
+
+#########
+# Helpers
+#########
+
+
+@configurable
+def _make_particles(species,
+                    make_protoparticles=_make_charged_protoparticles,
+                    get_track_selector=get_long_track_selector,
+                    make_protoparticle_filter=standard_protoparticle_filter):
+    """
+    Helper configurable to create `LHCb::Particle`s from `LHCb::ProtoParticle`s.
+
+    Args:
+        species (str): Particle species hypothesis accepted by
+            `FunctionalParticleMaker`, i.e. one of the strings
+            "pion", "kaon", "muon", "electron", "proton".
+    """
+    particles = FunctionalParticleMaker(
+        ParticleID=species,
+        InputProtoParticles=make_protoparticles(),
+        TrackSelector=get_track_selector(),
+        ProtoParticleFilter=make_protoparticle_filter()).Particles
+    return particles
+
+
+@configurable
+def make_photons(make_neutral_protoparticles=_make_neutral_protoparticles,
+                 pvs=_make_pvs,
+                 **kwargs):
+    """
+    Configurable to create photon `LHCb::Particle`s from `LHCb::ProtoParticle`s.
+    """
+    particles = PhotonMaker(
+        InputProtoParticles=make_neutral_protoparticles(),
+        InputPrimaryVertices=pvs(),
+        **kwargs).Particles
+    return particles
+
+
+def _make_std_loose_particles(particles, pvs, name):
+    return ParticleFilterWithPVs(
+        particles, pvs, name=name, Code=default_particle_cuts())
+
+
+#######################
+# Basic particle makers
+#######################
+
+
+def make_long_pions():
+    return _make_particles(species="pion")
+
+
+def make_long_kaons():
+    return _make_particles(species="kaon")
+
+
+def make_long_protons():
+    return _make_particles(species="proton")
+
+
+def make_long_muons():
+    return _make_particles(species="muon")
+
+
+def make_long_electrons_no_brem():
+    return _make_particles(species="electron")
+
+
+def make_down_pions():
+    return _make_particles(
+        species="pion", get_track_selector=get_down_track_selector)
+
+
+def make_down_kaons():
+    return _make_particles(
+        species="kaon", get_track_selector=get_down_track_selector)
+
+
+def make_down_protons():
+    return _make_particles(
+        species="proton", get_track_selector=get_down_track_selector)
+
+
+#################################
+# Particle makers with loose cuts
+#################################
+
+
+@configurable
+def make_std_loose_pions():
+    with get_long_track_selector.bind(
+            Code=default_track_cuts()), standard_protoparticle_filter.bind(
+                Code='PP_HASRICH'):
+        return _make_std_loose_particles(
+            make_long_pions(), _make_pvs(), name='StdLoosePions')
+
+
+@configurable
+def make_std_loose_kaons():
+    with get_long_track_selector.bind(
+            Code=default_track_cuts()), standard_protoparticle_filter.bind(
+                Code='PP_HASRICH'):
+        return _make_std_loose_particles(
+            make_long_kaons(), _make_pvs(), name='StdLooseKaons')
+
+
+@configurable
+def make_std_loose_protons():
+    with get_long_track_selector.bind(
+            Code=default_track_cuts()), standard_protoparticle_filter.bind(
+                Code='PP_HASRICH'):
+        return _make_std_loose_particles(
+            make_long_protons(), _make_pvs(), name='StdLooseProtons')
+
+
+def make_std_loose_muons():
+    #with get_long_track_selector.bind(Code=default_track_cuts()):
+    return _make_std_loose_particles(
+        make_long_muons(), _make_pvs(), name='StdLooseMuons')
+
+
+@configurable
+def make_std_loose_jpsi2mumu():
+    muons = make_std_loose_muons()
+    descriptors = ["J/psi(1S) -> mu+ mu-"]
+    daughters_code = {"mu+": "ALL", "mu-": "ALL"}
+    combination_code = "(ADAMASS('J/psi(1S)') < 100.*MeV) & (ADOCACHI2CUT(30,''))"
+    vertex_code = "(CHI2VX < 25.)"
+
+    return ParticleCombinerWithPVs(
+        name="StdLooseJpsi2MuMu",
+        particles=muons,
+        pvs=_make_pvs(),
+        DecayDescriptors=descriptors,
+        DaughtersCuts=daughters_code,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+@configurable
+def make_std_loose_d2kk():
+    kaons = make_std_loose_kaons()
+    descriptors = ["D0 -> K+ K-"]
+    daughters_code = {"K+": "ALL", "K-": "ALL"}
+    combination_code = "(ADAMASS('D0') < 100.*MeV) & (ADOCACHI2CUT(30,''))"
+    vertex_code = "(CHI2VX < 25.)"
+
+    return ParticleCombinerWithPVs(
+        name="StdLooseD02KK",
+        particles=kaons,
+        pvs=_make_pvs(),
+        DecayDescriptors=descriptors,
+        DaughtersCuts=daughters_code,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
diff --git a/Phys/DaVinci/python/DaVinci/data_from_file.py b/Phys/DaVinci/python/DaVinci/data_from_file.py
index b9786d56e3341afaa483611e9d91a3068390200c..52725c3977f137e499f8c13f012b43c144b83105 100644
--- a/Phys/DaVinci/python/DaVinci/data_from_file.py
+++ b/Phys/DaVinci/python/DaVinci/data_from_file.py
@@ -49,7 +49,7 @@ from Configurables import (UnpackCaloHypo, UnpackProtoParticle,
                            UnpackRecVertex, UnpackTrack, UnpackMCParticle,
                            UnpackMCVertex)
 from Configurables import (
-    MuonPIDUnpacker, RichPIDUnpacker, MCVPHitUnpacker as UnpackMCVPHit,
+    UnpackMuonPIDs, UnpackRichPIDs, MCVPHitUnpacker as UnpackMCVPHit,
     MCUTHitUnpacker as UnpackMCUTHit, MCFTHitUnpacker as UnpackMCFTHit,
     MCRichHitUnpacker as UnpackMCRichHit, MCEcalHitUnpacker as UnpackMCEcalHit,
     MCHcalHitUnpacker as UnpackMCHcalHit, MCMuonHitUnpacker as UnpackMCMuonHit,
@@ -77,16 +77,16 @@ def reco_unpackers():
     unpacked data.
     The names (keys) are the following:
         'PVs',
-        'Tracks', 'MuonTracks',
+        'Tracks',
         'NeutralProtos', 'ChargedProtos',
         'CaloElectrons', 'CaloPhotons', 'CaloMergedPi0s', 'CaloSplitPhotons',
         'MuonPIDs', 'RichPIDs'.
     """
     muonPIDs = reco_unpacker(LocationsPackedReco.PackedMuonPIDs.name,
-                             MuonPIDUnpacker, "UnpackMuonPIDs")
+                             UnpackMuonPIDs, "UnpackMuonPIDs")
     richPIDs = reco_unpacker(
         LocationsPackedReco.PackedRichPIDs.name,
-        RichPIDUnpacker,
+        UnpackRichPIDs,
         "UnpackRichPIDs",
         OutputLevel=ERROR)
     # The OutputLevel above suppresses the following useless warnings (plus more?)
@@ -114,9 +114,6 @@ def reco_unpackers():
         ("Tracks",
          reco_unpacker(LocationsPackedReco.PackedTracks.name, UnpackTrack,
                        "UnpackBestTracks")),
-        ("MuonTracks",
-         reco_unpacker(LocationsPackedReco.PackedMuonTracks.name, UnpackTrack,
-                       "UnpackMuonTracks")),
         ("NeutralProtos",
          reco_unpacker(LocationsPackedReco.PackedNeutralProtos.name,
                        UnpackProtoParticle, "UnpackNeutralProtos")),
diff --git a/Phys/DaVinci/python/DaVinci/locations.py b/Phys/DaVinci/python/DaVinci/locations.py
index 0889b6b80bde2fdd45fecef3c4cd2a9131bbd0a9..0fab66b1dee502924ec9391dd174a3c9686f79b6 100644
--- a/Phys/DaVinci/python/DaVinci/locations.py
+++ b/Phys/DaVinci/python/DaVinci/locations.py
@@ -34,7 +34,6 @@ class LocationsPackedReco(Enum):
     PackedMuonPIDs = "/Event/pRec/Muon/MuonPID"
     PackedRichPIDs = "/Event/pRec/Rich/PIDs"
     PackedTracks = "/Event/pRec/Track/Best"
-    PackedMuonTracks = "/Event/pRec/Track/Muon"
     PackedNeutralProtos = "/Event/pRec/ProtoP/Neutrals"
     PackedChargedProtos = "/Event/pRec/ProtoP/Charged"
 
@@ -118,7 +117,7 @@ class LocationsBrunelMCLinkers(Enum):
     Tracks = "/Event/Link/Rec/Track/Best"
 
 
-def enums_as_dict(enums):
+def enums_as_dict(enums, strip=None):
     """
     Return a {name: value} dict of all enum members.
 
@@ -130,4 +129,10 @@ def enums_as_dict(enums):
         >>> enums_as_dict(MyEnum)
         {'a': 1, 'b': 2}
     """
-    return {e.name: e.value for e in enums}
+
+    def _strip(word):
+        if strip:
+            return word.replace(strip, '')
+        return word
+
+    return {e.name: _strip(e.value) for e in enums}
diff --git a/Phys/DaVinci/python/DaVinci/reco_objects.py b/Phys/DaVinci/python/DaVinci/reco_objects.py
index c89c8e74d124643a99009729dddf235a4f452ccc..d690bb577a90ffc6cc7afedf1b6021458d446fdc 100644
--- a/Phys/DaVinci/python/DaVinci/reco_objects.py
+++ b/Phys/DaVinci/python/DaVinci/reco_objects.py
@@ -18,13 +18,18 @@ packed data on file.
     and the definition of what gets persisted gets formalised.
     2) Code very heavily relies on its Moore equivalent. Thank you, RTA team.
 """
-from GaudiConf import PersistRecoConf
-from PyConf import configurable
-from PyConf.Algorithms import (RecV1ToPVConverter)
-from .data_from_file import reco_unpackers
+from GaudiConf.PersistRecoConf import PersistRecoPacking
 
+from PyConf.location_prefix import prefix, packed_prefix
+from PyConf.components import force_location
+from PyConf.tonic import configurable
+from PyConf.Algorithms import RecV1ToPVConverter
 
-def upfront_reconstruction_from_file():  # renamed from upfront_reconstruction
+from DaVinci.locations import LocationsUnpackedReco, enums_as_dict
+
+
+@configurable
+def upfront_reconstruction(process='Spruce'):
     """Return a list DataHandles that define the upfront reconstruction output.
 
     This differs from `reconstruction` as it should not be used as inputs to
@@ -32,12 +37,45 @@ def upfront_reconstruction_from_file():  # renamed from upfront_reconstruction
     value of this function should be ran before all HLT2 lines.
 
     """
-    return list(reco_unpackers().values())
+    TES_ROOT = '/Event/Spruce'
+    RECO = 'HLT2'
+    if process == 'Hlt2':
+        TES_ROOT = '/Event/HLT2'
+        RECO = ''
+
+    conf = PersistRecoPacking(
+        stream=TES_ROOT, reco_stream=RECO, data_type='Upgrade')
+
+    unpackers = list(conf.unpackers())
+
+    return unpackers
 
 
-def reconstruction_from_file():  # renamed from reconstruction
+@configurable
+def reconstruction(process='Spruce'):
     """Return a {name: DataHandle} dict that define the reconstruction output."""
-    return {k: v.OutputName for k, v in reco_unpackers().items()}
+
+    locations = {}
+
+    TES_ROOT = '/Event/Spruce/HLT2'
+    if process == 'Hlt2':
+        TES_ROOT = '/Event/HLT2'
+
+    packed_loc = enums_as_dict(LocationsUnpackedReco, strip="/Event/")
+
+    for key, value in packed_loc.items():
+        locations[key.replace('Packed', '')] = force_location(
+            prefix(value, TES_ROOT))
+
+    return locations
+
+
+def make_rich_pids():
+    return reconstruction()['RichPIDs']
+
+
+def make_muon_pids():
+    return reconstruction()['MuonPIDs']
 
 
 def make_charged_protoparticles():
@@ -56,63 +94,34 @@ def make_tracks():
     return reconstruction()['Tracks']
 
 
-def make_pvs_using(tracks, pvs):
-    #tracks_v2 = LHCb__Converters__Track__v2__fromV1TrackV2Track(
-    #    InputTracksName=tracks).OutputTracksName
+def make_pvs_v2():
+
+    pvs = make_pvs()
+
     # FIXME: this is a temporary solution until we have persistency
     # for the new PV container.  Note that this converter does not
     # fill the associated track list. This should be fixed as well.
     return RecV1ToPVConverter(InputVertices=pvs).OutputVertices
 
 
-def make_pvs_with(persist_conf, tracks='Tracks', pvs='PVs'):
-    return make_pvs_using(persist_conf.unpackers_by_key()[tracks],
-                          persist_conf.unpackers_by_key()[pvs])
-
+def get_particles(process="Spruce", location=""):
 
-def make_pvs_for(process="Spruce", data_type="Upgrade"):
     if process == 'Spruce':
-        stream_pvs = 'Spruce/HLT2'
+        stream = '/Event/Spruce'
+        stream_reco = 'HLT2'
     else:
-        stream_pvs = 'HLT2'
-    conf = PersistRecoConf.PersistRecoPacking(
-        stream='/Event/' + stream_pvs, data_type=data_type)
-    return make_pvs_with(conf)
+        stream = '/Event/HLT2'
+        stream_reco = ''
 
+    conf = PersistRecoPacking(
+        stream=stream,
+        reco_stream=stream_reco,
+        packed={"Particles": [packed_prefix(location, stream)]},
+        unpacked={"Particles": [location]})
 
-@configurable
-def reconstruction(from_file=True):
-    """Return reconstruction objects.
+    particles = conf.unpackers_by_key()["Particles"]
 
-    Note:
-        It is advised to use this function if more than one object is needed,
-    rather than the accessors below as it makes the configuration slower.
-    """
-    # removed reco since it will not be done in DV
-    reco = reconstruction_from_file()
-    upfront_reconstruction = upfront_reconstruction_from_file()
-
-    charged_protos = reco["ChargedProtos"]
-    neutral_protos = reco["NeutralProtos"]
-    best_tracks = reco["Tracks"]
-    pvs = reco["PVs"]
-    electrons = reco["CaloElectrons"]
-    photons = reco["CaloPhotons"]
-    mergedPi0s = reco["CaloMergedPi0s"]
-    splitPhotons = reco["CaloSplitPhotons"]
-    muon_pids = reco["MuonPIDs"]
-    rich_pids = reco["RichPIDs"]
-
-    return {
-        "ChargedProtos": charged_protos,
-        "NeutralProtos": neutral_protos,
-        "Tracks": best_tracks,
-        "PVs": pvs,
-        "UpfrontReconstruction": upfront_reconstruction,
-        "CaloElectrons": electrons,
-        "CaloPhotons": photons,
-        "CaloMergedPi0s": mergedPi0s,
-        "CaloSplitPhotons": splitPhotons,
-        "MuonPIDs": muon_pids,
-        "RichPIDs": rich_pids,
-    }
+    if len(particles) > 0:
+        return particles[0].OutputName
+    else:
+        raise ValueError("no particles found in location: " + location)
diff --git a/Phys/DaVinci/python/DaVinci/reco_objects_from_file.py b/Phys/DaVinci/python/DaVinci/reco_objects_from_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d3142ffdaf4d13d88def5492d77063bd5bbcbb8
--- /dev/null
+++ b/Phys/DaVinci/python/DaVinci/reco_objects_from_file.py
@@ -0,0 +1,43 @@
+###############################################################################
+# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from .data_from_file import reco_unpackers
+
+
+def upfront_reconstruction():
+    """Return a list DataHandles that define the upfront reconstruction output.
+
+    This differs from `reconstruction` as it should not be used as inputs to
+    other algorithms, but only to define the control flow, i.e. the return
+    value of this function should be ran before all HLT2 lines.
+
+    """
+    return list(reco_unpackers().values())
+
+
+def reconstruction():
+    """Return a {name: DataHandle} dict that define the reconstruction output."""
+    return {k: v.OutputName for k, v in reco_unpackers().items()}
+
+
+def make_charged_protoparticles():
+    return reconstruction()['ChargedProtos']
+
+
+def make_neutral_protoparticles():
+    return reconstruction()['NeutralProtos']
+
+
+def make_pvs():
+    return reconstruction()['PVs']
+
+
+def make_tracks():
+    return reconstruction()['Tracks']
diff --git a/Phys/DaVinci/python/DaVinci/standard_particles.py b/Phys/DaVinci/python/DaVinci/standard_particles.py
index 5af8ab5133d53a0090197c9376f819aeb03395f8..c12c5984551295589bb09c3175142dbb8ff9db27 100644
--- a/Phys/DaVinci/python/DaVinci/standard_particles.py
+++ b/Phys/DaVinci/python/DaVinci/standard_particles.py
@@ -644,47 +644,8 @@ def make_mass_constrained_jpsi2mumu(name='MassConstrJpsi2MuMuMaker',
 # TO BE REMOVED AS SOON AS THIS PYTHON MODULE IS MOVED INTO ANOTHER SHARED REPO OR
 # IT'S REDESIGNED SPECIFICALLY FOR DAVINCI.
 @configurable
-def make_long_pions_from_spruce(stream="/Event/Spruce/HLT2"):
-    from Configurables import MuonPIDUnpacker, RichPIDUnpacker, UnpackProtoParticle
-    from PyConf.application import make_data_with_FetchDataFromFile
-    from PyConf.components import Algorithm, force_location
-    from PyConf.Tools import (ChargedProtoParticleAddRichInfo,
-                              ChargedProtoParticleAddMuonInfo,
-                              ChargedProtoParticleAddCombineDLLs)
-    from DaVinci.locations import LocationsUnpackedReco
-
-    # Define a new reco_unpackers taking Spruce locations as input
-    def reco_unpacker(key, configurable, input_location, **kwargs):
-        """
-        Return a `PyConf.Algorithm` instance that reading from a given input location unpacks a specific reconstructed object, identified by a 'key', to the
-        forced output location `LocationsUnpackedReco[key]`.
-        """
-        alg = Algorithm(
-            configurable,
-            name=f"Unpacker_{key}",
-            InputName=make_data_with_FetchDataFromFile(input_location),
-            outputs={
-                "OutputName": force_location(LocationsUnpackedReco[key].value)
-            },
-            **kwargs)
-        return alg
-
-    richPIDs = reco_unpacker("PackedRichPIDs", RichPIDUnpacker,
-                             f"{stream}/pRec/Rich/PIDs")
-    muonPIDs = reco_unpacker("PackedMuonPIDs", MuonPIDUnpacker,
-                             f"{stream}/pRec/Muon/MuonPID")
-
-    charged_protos = reco_unpacker(
-        "PackedChargedProtos",
-        UnpackProtoParticle,
-        f"{stream}/pRec/ProtoP/Charged",
-        AddInfo=[
-            ChargedProtoParticleAddRichInfo(
-                InputRichPIDLocation=richPIDs.OutputName),
-            ChargedProtoParticleAddMuonInfo(
-                InputMuonPIDLocation=muonPIDs.OutputName),
-            ChargedProtoParticleAddCombineDLLs()
-        ])
+def make_long_pions_from_spruce():
+    charged_protos = _make_charged_protoparticles()
 
     particles = FunctionalParticleMaker(
         InputProtoParticles=charged_protos,
diff --git a/Phys/DaVinci/python/DaVinci/standard_particles_from_file.py b/Phys/DaVinci/python/DaVinci/standard_particles_from_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..625c3a8c02fc67800eceaa376a8cd602e79e79a9
--- /dev/null
+++ b/Phys/DaVinci/python/DaVinci/standard_particles_from_file.py
@@ -0,0 +1,637 @@
+###############################################################################
+# (c) Copyright 2019 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""Maker functions for Particle definitions common across HLT2.
+
+The Run 2 code makes the sensible choice of creating Particle objects first,
+and then filtering these with FilterDesktop instances. Because the
+FunctionalParticleMaker can apply LoKi cut strings directly to Track and
+ProtoParticle objects, we just do the one step.
+"""
+from __future__ import absolute_import, division, print_function
+
+from GaudiKernel.SystemOfUnits import GeV, MeV, mm, picosecond
+
+from PyConf import configurable
+
+from PyConf.Algorithms import (
+    FunctionalParticleMaker, LHCb__Phys__ParticleMakers__PhotonMaker as
+    PhotonMaker, LHCb__Phys__ParticleMakers__MergedPi0Maker as MergedPi0Maker,
+    Proto2ChargedBasic)
+
+from .algorithms_pyconf import (
+    require_all,
+    ParticleFilter,
+    ParticleFilterWithPVs,
+    ParticleCombiner,
+    ParticleCombinerWithPVs,
+    NeutralParticleCombinerWithPVs,
+)
+
+from .filters_selectors import get_all_track_selector, get_long_track_selector, get_down_track_selector
+from .filters_selectors import all_protoparticle_filter as standard_protoparticle_filter
+
+from .reco_objects_from_file import (
+    make_charged_protoparticles as _make_charged_protoparticles, make_pvs as
+    _make_pvs, make_neutral_protoparticles as _make_neutral_protoparticles)
+
+_KAON0_M = 497.611 * MeV  # +/- 0.013, PDG, PR D98, 030001 and 2019 update
+_LAMBDA_M = 1115.683 * MeV  # +/- 0.006, PDG, PR D98, 030001 and 2019 update
+
+
+@configurable
+def _make_particles(species,
+                    make_protoparticles=_make_charged_protoparticles,
+                    get_track_selector=get_long_track_selector,
+                    make_protoparticle_filter=standard_protoparticle_filter):
+    """ creates LHCb::Particles from LHCb::ProtoParticles """
+    particles = FunctionalParticleMaker(
+        InputProtoParticles=make_protoparticles(),
+        ParticleID=species,
+        TrackSelector=get_track_selector(),
+        ProtoParticleFilter=make_protoparticle_filter()).Particles
+    return particles
+
+
+@configurable
+def _make_ChargedBasics(
+        species,
+        make_protoparticles=_make_charged_protoparticles,
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter):
+    """ creates LHCb::v2::ChargedBasics from LHCb::ProtoParticles """
+    particles = Proto2ChargedBasic(
+        InputProtoParticles=make_protoparticles(),
+        ParticleID=species,
+        TrackSelector=get_track_selector(),
+        ProtoParticleFilter=make_protoparticle_filter()).Particles
+    return particles
+
+
+@configurable
+def _make_all_ChargedBasics(species):
+    return _make_ChargedBasics(
+        species=species,
+        get_track_selector=get_all_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+@configurable
+def _make_long_ChargedBasics(species):
+    return _make_ChargedBasics(
+        species=species,
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_long_cb_electrons():
+    return _make_long_ChargedBasics('electron')
+
+
+def make_long_cb_muons():
+    return _make_long_ChargedBasics('muon')
+
+
+def make_long_cb_protons():
+    return _make_long_ChargedBasics('proton')
+
+
+def make_long_cb_kaons():
+    return _make_long_ChargedBasics('kaon')
+
+
+def make_long_cb_pions():
+    return _make_long_ChargedBasics('pion')
+
+
+def make_has_rich_long_cb_kaons():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_long_cb_kaons()
+
+
+def make_has_rich_long_cb_pions():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_long_cb_pions()
+
+
+def make_all_cb_electrons():
+    return _make_all_ChargedBasics('electron')
+
+
+def make_all_cb_muons():
+    return _make_all_ChargedBasics('muon')
+
+
+def make_all_cb_protons():
+    return _make_all_ChargedBasics('proton')
+
+
+def make_all_cb_kaons():
+    return _make_all_ChargedBasics('kaon')
+
+
+def make_all_cb_pions():
+    return _make_all_ChargedBasics('pion')
+
+
+@configurable
+def make_photons(make_neutral_protoparticles=_make_neutral_protoparticles,
+                 pvs=_make_pvs,
+                 **kwargs):
+    """ creates photon LHCb::Particles from LHCb::ProtoParticles (PVs are optional) """
+    particles = PhotonMaker(
+        InputProtoParticles=make_neutral_protoparticles(),
+        InputPrimaryVertices=pvs(),
+        **kwargs).Particles
+    return particles
+
+
+@configurable
+def make_resolved_pi0s(particles=make_photons,
+                       mass_window=30. * MeV,
+                       pvs=_make_pvs,
+                       PtCut=0. * MeV,
+                       **kwargs):
+    comb_code = require_all("ADAMASS('pi0') < {mass_window}").format(
+        mass_window=mass_window)
+    mother_code = require_all("PT > {PtCut}").format(PtCut=PtCut)
+    return NeutralParticleCombinerWithPVs(
+        particles=particles(**kwargs),
+        pvs=pvs(),
+        DecayDescriptors=["pi0 -> gamma gamma"],
+        CombinationCut=comb_code,
+        MotherCut=mother_code)
+
+
+@configurable
+def make_merged_pi0s(mass_window=60. * MeV,
+                     PtCut=2000. * MeV,
+                     make_neutral_protoparticles=_make_neutral_protoparticles,
+                     pvs=_make_pvs,
+                     **kwargs):
+    particles = MergedPi0Maker(
+        InputProtoParticles=make_neutral_protoparticles(),
+        InputPrimaryVertices=pvs(),
+        MassWindow=mass_window,
+        PtCut=PtCut,
+        **kwargs).Particles
+    return particles
+
+
+#Long particles
+def make_long_electrons_no_brem():
+    return _make_particles(
+        species="electron",
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_long_pions():
+    return _make_particles(
+        species="pion",
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_long_kaons():
+    return _make_particles(
+        species="kaon",
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_long_protons():
+    return _make_particles(
+        species="proton",
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_long_muons():
+    return _make_particles(
+        species="muon",
+        get_track_selector=get_long_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+#Down particles
+def make_down_pions():
+    return _make_particles(
+        species="pion",
+        get_track_selector=get_down_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_down_kaons():
+    return _make_particles(
+        species="kaon",
+        get_track_selector=get_down_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+def make_down_protons():
+    return _make_particles(
+        species="proton",
+        get_track_selector=get_down_track_selector,
+        make_protoparticle_filter=standard_protoparticle_filter)
+
+
+@configurable
+def make_phi2kk(am_max=1100. * MeV, adoca_chi2=30, vchi2=25.0):
+    kaons = make_long_kaons()
+    descriptors = ['phi(1020) -> K+ K-']
+    combination_code = require_all("AM < {am_max}",
+                                   "ADOCACHI2CUT({adoca_chi2}, '')").format(
+                                       am_max=am_max, adoca_chi2=adoca_chi2)
+    vertex_code = "(VFASPF(VCHI2) < {vchi2})".format(vchi2=vchi2)
+    return ParticleCombiner(
+        particles=kaons,
+        DecayDescriptors=descriptors,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+# Make V0s
+def _make_long_for_V0(particles, pvs):
+    code = require_all("BPVVALID()", "MIPCHI2DV(PRIMARY)>36")
+    return ParticleFilterWithPVs(particles, pvs, Code=code)
+
+
+def _make_down_for_V0(particles):
+    code = require_all("P>3000*MeV", "PT > 175.*MeV")
+    return ParticleFilter(particles, Code=code)
+
+
+def make_long_pions_for_V0():
+    return _make_long_for_V0(make_long_pions(), _make_pvs())
+
+
+def make_long_protons_for_V0():
+    return _make_long_for_V0(make_long_protons(), _make_pvs())
+
+
+def make_down_pions_for_V0():
+    return _make_down_for_V0(make_down_pions())
+
+
+def make_down_protons_for_V0():
+    return _make_down_for_V0(make_down_protons())
+
+
+@configurable
+def _make_V0LL(particles,
+               descriptors,
+               pname,
+               pvs,
+               am_dmass=50 * MeV,
+               m_dmass=35 * MeV,
+               vchi2pdof_max=30,
+               bpvltime_min=2.0 * picosecond):
+    """Make long-long V0 -> h+ h'- candidates
+    Initial implementation a replication of the old Hlt2SharedParticles
+    """
+    combination_code = require_all("ADAMASS('{pname}') < {am_dmass}").format(
+        pname=pname, am_dmass=am_dmass)
+    vertex_code = require_all("ADMASS('{pname}')<{m_dmass}",
+                              "CHI2VXNDOF<{vchi2pdof_max}",
+                              "BPVLTIME() > {bpvltime_min}").format(
+                                  pname=pname,
+                                  m_dmass=m_dmass,
+                                  vchi2pdof_max=vchi2pdof_max,
+                                  bpvltime_min=bpvltime_min)
+    return ParticleCombinerWithPVs(
+        particles=particles,
+        pvs=pvs,
+        DecayDescriptors=descriptors,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+@configurable
+def _make_V0DD(particles,
+               descriptors,
+               pvs,
+               am_min=_KAON0_M - 80 * MeV,
+               am_max=_KAON0_M + 80 * MeV,
+               m_min=_KAON0_M - 64 * MeV,
+               m_max=_KAON0_M + 64 * MeV,
+               vchi2pdof_max=30,
+               bpvvdz_min=400 * mm):
+    """Make down-down V0 -> h+ h'- candidates
+    Initial implementation a replication of the old Hlt2SharedParticles
+    """
+    combination_code = require_all("in_range({am_min},  AM, {am_max})").format(
+        am_min=am_min, am_max=am_max)
+    vertex_code = require_all("in_range({m_min},  M, {m_max})",
+                              "CHI2VXNDOF<{vchi2pdof_max}",
+                              "BPVVDZ() > {bpvvdz_min}").format(
+                                  m_min=m_min,
+                                  m_max=m_max,
+                                  vchi2pdof_max=vchi2pdof_max,
+                                  bpvvdz_min=bpvvdz_min)
+    return ParticleCombinerWithPVs(
+        particles=particles,
+        pvs=pvs,
+        DecayDescriptors=descriptors,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+def make_KsLL():
+    pions = make_long_pions_for_V0()
+    descriptors = ["KS0 -> pi+ pi-"]
+    return _make_V0LL(
+        particles=[pions],
+        descriptors=descriptors,
+        pname='KS0',
+        pvs=_make_pvs())
+
+
+def make_KsDD():
+    pions = make_down_pions_for_V0()
+    descriptors = ["KS0 -> pi+ pi-"]
+    return _make_V0DD(
+        particles=[pions], descriptors=descriptors, pvs=_make_pvs())
+
+
+def make_LambdaLL():
+    pions = make_long_pions_for_V0()
+    protons = make_long_protons_for_V0()
+    descriptors = ["[Lambda0 -> p+ pi-]cc"]
+    return _make_V0LL(
+        particles=[pions, protons],
+        descriptors=descriptors,
+        pname='Lambda0',
+        pvs=_make_pvs(),
+        am_dmass=50 * MeV,
+        m_dmass=20 * MeV,
+        vchi2pdof_max=30,
+        bpvltime_min=2.0 * picosecond)
+
+
+@configurable
+def make_LambdaDD():
+    pions = make_down_pions_for_V0()
+    protons = make_down_protons_for_V0()
+    descriptors = ["[Lambda0 -> p+ pi-]cc"]
+    return _make_V0DD(
+        particles=[pions, protons],
+        descriptors=descriptors,
+        pvs=_make_pvs(),
+        am_min=_LAMBDA_M - 80 * MeV,
+        am_max=_LAMBDA_M + 80 * MeV,
+        m_min=_LAMBDA_M - 21 * MeV,
+        m_max=_LAMBDA_M + 24 * MeV,
+        vchi2pdof_max=30,
+        bpvvdz_min=400 * mm)
+
+
+# Make pions
+@configurable
+def make_has_rich_long_pions():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_long_pions()
+
+
+@configurable
+def make_has_rich_down_pions():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_down_pions()
+
+
+# Make kaons
+@configurable
+def make_has_rich_long_kaons():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_long_kaons()
+
+
+@configurable
+def make_has_rich_down_kaons():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_down_kaons()
+
+
+# Make protons
+@configurable
+def make_has_rich_long_protons():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_long_protons()
+
+
+@configurable
+def make_has_rich_down_protons():
+    with standard_protoparticle_filter.bind(Code='PP_HASRICH'):
+        return make_down_protons()
+
+
+@configurable
+def make_detached_mumu(probnn_mu=0.2,
+                       pt_mu=0. * GeV,
+                       minipchi2=9.,
+                       trghostprob=0.25,
+                       adocachi2cut=30,
+                       bpvvdchi2=30,
+                       vfaspfchi2ndof=10):
+    #def make_detached_mumu(probnn_mu=-0.2, pt_mu=0.*GeV, minipchi2=0., trghostprob=0.925, adocachi2cut=30, bpvvdchi2=30, vfaspfchi2ndof=10):
+    muons = make_long_muons()
+    descriptors = ['J/psi(1S) -> mu+ mu-', '[J/psi(1S) -> mu+ mu+]cc']
+    daughters_code = {
+        'mu+':
+        '(PROBNNmu > {probnn_mu}) & (PT > {pt_mu}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_mu=probnn_mu,
+            pt_mu=pt_mu,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob),
+        'mu-':
+        '(PROBNNmu > {probnn_mu}) & (PT > {pt_mu}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_mu=probnn_mu,
+            pt_mu=pt_mu,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob)
+    }
+    combination_code = "ADOCACHI2CUT({adocachi2cut}, '')".format(
+        adocachi2cut=adocachi2cut)
+    vertex_code = require_all(
+        "(VFASPF(VCHI2/VDOF) < {vfaspfchi2ndof}) & (BPVVDCHI2() > {bpvvdchi2})"
+    ).format(
+        vfaspfchi2ndof=vfaspfchi2ndof, bpvvdchi2=bpvvdchi2)
+
+    return ParticleCombinerWithPVs(
+        particles=muons,
+        pvs=_make_pvs(),
+        DecayDescriptors=descriptors,
+        DaughtersCuts=daughters_code,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+#Update to ProbNNe once the variables are ready
+@configurable
+def make_detached_ee(probnn_e=2,
+                     pt_e=0.25 * GeV,
+                     minipchi2=9.,
+                     trghostprob=0.25,
+                     adocachi2cut=30,
+                     bpvvdchi2=30,
+                     vfaspfchi2ndof=10):
+    electrons = make_long_electrons_no_brem()
+    descriptors = ['J/psi(1S) -> e+ e-', '[J/psi(1S) -> e+ e+]cc']
+    daughters_code = {
+        'e+':
+        '(PIDe > {probnn_e}) & (PT > {pt_e}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_e=probnn_e,
+            pt_e=pt_e,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob),
+        'e-':
+        '(PIDe > {probnn_e}) & (PT > {pt_e}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_e=probnn_e,
+            pt_e=pt_e,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob)
+    }
+    combination_code = require_all("ADOCACHI2CUT({adocachi2cut}, '')").format(
+        adocachi2cut=adocachi2cut)
+    vertex_code = require_all(
+        "(VFASPF(VCHI2/VDOF) < {vfaspfchi2ndof}) & (BPVVDCHI2() > {bpvvdchi2})"
+    ).format(
+        vfaspfchi2ndof=vfaspfchi2ndof, bpvvdchi2=bpvvdchi2)
+    return ParticleCombinerWithPVs(
+        particles=electrons,
+        pvs=_make_pvs(),
+        DecayDescriptors=descriptors,
+        DaughtersCuts=daughters_code,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+@configurable
+def make_detached_mue(probnn_mu=0.2,
+                      pt_mu=0. * GeV,
+                      probnn_e=2,
+                      pt_e=0.25 * GeV,
+                      minipchi2=9.,
+                      trghostprob=0.25,
+                      adocachi2cut=30,
+                      bpvvdchi2=30,
+                      vfaspfchi2ndof=10):
+    muons = make_long_muons()
+    electrons = make_long_electrons_no_brem()
+    descriptors = ['[J/psi(1S) -> mu+ e-]cc', '[J/psi(1S) -> mu+ e+]cc']
+    daughters_code = {
+        'mu+':
+        '(PROBNNmu > {probnn_mu}) & (PT > {pt_mu}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_mu=probnn_mu,
+            pt_mu=pt_mu,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob),
+        'mu-':
+        '(PROBNNmu > {probnn_mu}) & (PT > {pt_mu}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_mu=probnn_mu,
+            pt_mu=pt_mu,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob),
+        'e+':
+        '(PIDe > {probnn_e}) & (PT > {pt_e}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_e=probnn_e,
+            pt_e=pt_e,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob),
+        'e-':
+        '(PIDe > {probnn_e}) & (PT > {pt_e}) & (MIPCHI2DV(PRIMARY) > {minipchi2}) & (TRGHOSTPROB < {trghostprob})'
+        .format(
+            probnn_e=probnn_e,
+            pt_e=pt_e,
+            minipchi2=minipchi2,
+            trghostprob=trghostprob)
+    }
+    combination_code = require_all("ADOCACHI2CUT({adocachi2cut}, '')").format(
+        adocachi2cut=adocachi2cut)
+    vertex_code = require_all(
+        "(VFASPF(VCHI2/VDOF) < {vfaspfchi2ndof}) & (BPVVDCHI2() > {bpvvdchi2})"
+    ).format(
+        vfaspfchi2ndof=vfaspfchi2ndof, bpvvdchi2=bpvvdchi2)
+    return ParticleCombinerWithPVs(
+        particles=[muons, electrons],
+        pvs=_make_pvs(),
+        DecayDescriptors=descriptors,
+        DaughtersCuts=daughters_code,
+        CombinationCut=combination_code,
+        MotherCut=vertex_code)
+
+
+# Make muons
+@configurable
+def make_ismuon_long_muon():
+    with standard_protoparticle_filter.bind(Code='PP_ISMUON'):
+        return make_long_muons()
+
+
+@configurable
+def make_dimuon_base(name='DiMuonBaseCombiner', maxVCHI2PDOF=25):
+    """Basic dimuon without any requirements but common vertex
+    Please DO NOT add pt requirements here:
+    a dedicated (tighter) dimuon filter is implemented in the dimuon module.
+    """
+
+    # get the long muons
+    muons = make_ismuon_long_muon()
+
+    # require that the muons come from the same vertex
+    mother_code = require_all("VFASPF(VCHI2PDOF) < {vchi2}").format(
+        vchi2=maxVCHI2PDOF)
+
+    return ParticleCombiner(
+        name=name,
+        particles=muons,
+        DecayDescriptors=['J/psi(1S) -> mu+ mu-'],
+        CombinationCut='AALL',
+        MotherCut=mother_code)
+
+
+@configurable
+def make_mass_constrained_jpsi2mumu(name='MassConstrJpsi2MuMuMaker',
+                                    jpsi_maker=make_dimuon_base,
+                                    pid_mu=0,
+                                    pt_mu=0.5 * GeV,
+                                    admass=250. * MeV,
+                                    adoca_chi2=20,
+                                    vchi2=16):
+    """Make the Jpsi, starting from dimuons"""
+
+    # get the dimuons with basic cuts (only vertexing)
+    # note that the make_dimuon_base combiner uses vertexChi2/ndof < 25,
+    # which is looser than the vertexChi2 < 16 required here
+    dimuons = jpsi_maker()
+
+    code = require_all(
+        'ADMASS("J/psi(1S)") < {admass}',
+        'DOCACHI2MAX < {adoca_chi2}',
+        'VFASPF(VCHI2) < {vchi2}',
+        'INTREE(("mu+" == ABSID)  & (PIDmu > {pid_mu}))',
+        'INTREE(("mu+" == ABSID)  & (PT > {pt_mu}))',
+        #'MFIT',  # not really needed
+    ).format(
+        admass=admass,
+        adoca_chi2=adoca_chi2,
+        vchi2=vchi2,
+        pid_mu=pid_mu,
+        pt_mu=pt_mu,
+    )
+
+    return ParticleFilter(dimuons, name=name, Code=code)
diff --git a/Phys/DaVinci/tests/config/test_algorithms.py b/Phys/DaVinci/tests/config/test_algorithms.py
index 394f869bf7eb81c6c6d64796b5900eb96e7b212d..26e7f3cf4b932a304ada04307c71a460b76d0d2c 100644
--- a/Phys/DaVinci/tests/config/test_algorithms.py
+++ b/Phys/DaVinci/tests/config/test_algorithms.py
@@ -213,7 +213,7 @@ def test_unpack_locations():
     test_algs = unpack_locations(options, False)
 
     assert isinstance(test_algs, list)
-    assert any("Unpack" in alg.fullname for alg in test_algs)
+    #assert any("Unpack" in alg.fullname for alg in test_algs)
 
 
 def test_unpack_locations_xgen():
@@ -228,7 +228,7 @@ def test_unpack_locations_xgen():
 
     assert isinstance(test_algs, list)
     assert any("UnpackMCParticle" in alg.fullname for alg in test_algs)
-    assert not any("UnpackTrack" in alg.fullname for alg in test_algs)
+    assert not any("TrackUnpacker" in alg.fullname for alg in test_algs)
 
 
 def test_apply_filters_and_unpack():
@@ -239,12 +239,10 @@ def test_apply_filters_and_unpack():
     """
     options.evt_pre_filters = {"test_filter": "EVT_PREFILTER"}
     options.enable_unpack = True
+    options.simulation = True
     alg_dict = {"test_alg": [VoidConsumer()]}
     test_alg_dict = apply_filters_and_unpacking(options, alg_dict, False)
-    list_of_main_expected_algs = [
-        "LoKi__VoidFilter", "LHCb__UnpackRawEvent", "HltPackedDataDecoder",
-        "UnpackMCParticle", "UnpackTrack"
-    ]
+    list_of_main_expected_algs = ["LoKi__VoidFilter"]
 
     for exp_alg in list_of_main_expected_algs:
         assert any(
diff --git a/Phys/DaVinci/tests/config/test_configuration.py b/Phys/DaVinci/tests/config/test_configuration.py
index f1934851831e3df63648ee5eececb3cae2177a3a..6354eb781e09b3e0dbb5b649b739a716dc2f4fb2 100644
--- a/Phys/DaVinci/tests/config/test_configuration.py
+++ b/Phys/DaVinci/tests/config/test_configuration.py
@@ -53,7 +53,7 @@ def test_set_hltAnnSvc():
         fileDB_file="TestFileDB",
         ctx_args=[
             "--annsvc_config",
-            "root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime.tck.json"
+            "root://eoslhcb.cern.ch//eos/lhcb/wg/dpa/wp3/tests/spruce_all_lines_realtime_newPacking.tck.json"
         ])
     assert "HltANNSvc/HltANNSvc" in config