diff --git a/pvassoc/dv.py b/pvassoc/dv.py
new file mode 100755
index 0000000000000000000000000000000000000000..7e9bde4bcc23040bef59ab88d0ebc6c3e4fcbc5a
--- /dev/null
+++ b/pvassoc/dv.py
@@ -0,0 +1,398 @@
+###############################################################################
+# (c) Copyright 2024 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from PyConf.reading import get_pvs, get_particles  # type: ignore[import]
+import Functors as F # type: ignore[import]
+from FunTuple import FunTuple_Event  # type: ignore[import]
+from DaVinci.algorithms import create_lines_filter  # type: ignore[import]
+import FunTuple.functorcollections as FC  # type: ignore[import]
+from DaVinciMCTools import MCTruthAndBkgCat  # type: ignore[import]
+from Hlt2Conf.algorithms_thor import ParticleFilter  # type: ignore[import]
+from PyConf.Algorithms import VoidFilter  # type: ignore[import]
+import Functors.math as fmath  # type: ignore[import]
+from PyConf.Algorithms import FlattenDecayTree  # type: ignore[import]
+from GaudiKernel import SystemOfUnits  # type: ignore[import]
+from RecoConf.reconstruction_objects import make_tracks  # type: ignore[import]
+
+#Variables for pvs
+ALLPV_KEY       = lambda Vertices: F.MAP(F.OBJECT_KEY) @ F.TES(Vertices)
+ALLPV_CHI2      = lambda Vertices: F.MAP(F.CHI2) @ F.TES(Vertices)
+ALLPV_NDOF      = lambda Vertices: F.MAP(F.NDOF) @ F.TES(Vertices)  # Can infer nVelo tracks from this
+ALLPV_X         = lambda Vertices: F.MAP(F.X_COORDINATE @ F.POSITION) @ F.TES(Vertices)
+ALLPV_Y         = lambda Vertices: F.MAP(F.Y_COORDINATE @ F.POSITION) @ F.TES(Vertices)
+ALLPV_Z         = lambda Vertices: F.MAP(F.Z_COORDINATE @ F.POSITION) @ F.TES(Vertices)
+ALLPV_POSERR    = lambda Vertices, indx_i: F.MAP(fmath.sqrt(F.CALL(indx_i, indx_i)) @ F.POS_COV_MATRIX) @ F.TES(Vertices)
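+# diagonal index convention for POS_COV_MATRIX: 0 -> x, 1 -> y, 2 -> z
+# (used below for the ALLPV_XERR/YERR/ZERR and ALLB_ENDVTX_*ERR branches)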
+
+#Variables for particles
+ALLPART_KEY      = lambda particles: F.MAP(F.VALUE_OR(-1) @ F.OBJECT_KEY) @ F.TES(particles) #to differentiate from particles forming B candidate
+ALLPART_Q        = lambda particles: F.MAP(F.CHARGE) @ F.TES(particles)
+ALLPART_PX       = lambda particles: F.MAP(F.PX) @ F.TES(particles)
+ALLPART_PY       = lambda particles: F.MAP(F.PY) @ F.TES(particles)
+ALLPART_PZ       = lambda particles: F.MAP(F.PZ) @ F.TES(particles)
+ALLPART_MOMERR   = lambda particles, indx_i: F.MAP(F.CALL(indx_i, indx_i) @ F.THREE_MOM_COV_MATRIX) @ F.TES(particles)
+ALLPART_ENERGY   = lambda particles: F.MAP(F.ENERGY) @ F.TES(particles)
+ALLPART_MASS     = lambda particles: F.MAP(F.MASS) @ F.TES(particles)
+ALLPART_ETA      = lambda particles: F.MAP(F.ETA) @ F.TES(particles)
+ALLPART_PHI      = lambda particles: F.MAP(F.PHI) @ F.TES(particles)
+ALLPART_PIDPI    = lambda particles: F.MAP(F.PID_PI) @ F.TES(particles)
+ALLPART_PIDK     = lambda particles: F.MAP(F.PID_K) @ F.TES(particles)
+ALLPART_PIDP     = lambda particles: F.MAP(F.PID_P) @ F.TES(particles)
+ALLPART_PIDE     = lambda particles: F.MAP(F.PID_E) @ F.TES(particles)
+ALLPART_PIDMU    = lambda particles: F.MAP(F.PID_MU) @ F.TES(particles)
+#P -> Proto -> Track
+ALLPART_TRACK_CHI2     = lambda particles: F.MAP(F.CHI2 @ F.TRACK) @ F.TES(particles)
+ALLPART_TRACK_NDOF     = lambda particles: F.MAP(F.VALUE_OR(-1) @ F.NDOF @ F.TRACK) @ F.TES(particles)
+ALLPART_TRACK_TYPE     = lambda particles: F.MAP(F.VALUE_OR(-1) @ F.CAST_TO_INT @ F.TRACKTYPE @ F.TRACK) @ F.TES(particles)
+ALLPART_TRACK_GHOSTPROB= lambda particles: F.MAP(F.GHOSTPROB) @ F.TES(particles)
+#get the BPV information for particles
+ALLPART_BPV_KEY = lambda particles, pvs: F.MAP(F.VALUE_OR(-1) @ F.OBJECT_KEY @ F.BPV(pvs)) @ F.TES(particles)
+ALLPART_BPV_X  = lambda particles, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVX(pvs)) @ F.TES(particles)
+ALLPART_BPV_Y  = lambda particles, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVY(pvs)) @ F.TES(particles)
+ALLPART_BPV_Z  = lambda particles, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVZ(pvs)) @ F.TES(particles)
+ALLPART_BPV_IP = lambda particles, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVIP(pvs)) @ F.TES(particles)
+ALLPART_BPV_IPCHI2 = lambda particles, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVIPCHI2(pvs)) @ F.TES(particles)
+## a particle may have a track but no ClosestToBeam state, in which case these functors throw an error (Should this be an error at all?)
+ALLPART_CLOSESTTOBEAM_X = lambda particles:  F.MAP(F.VALUE_OR(F.NaN) @ F.X_COORDINATE @ F.POSITION @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_CLOSESTTOBEAM_Y = lambda particles:  F.MAP(F.VALUE_OR(F.NaN) @ F.Y_COORDINATE @ F.POSITION @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_CLOSESTTOBEAM_TX = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.TX @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_CLOSESTTOBEAM_TY = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.TY @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_CLOSESTTOBEAM_QOP = lambda particles:F.MAP(F.VALUE_OR(F.NaN) @ F.QOVERP @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_CLOSESTTOBEAM_STATEERR = lambda particles, indx_i: F.MAP(F.VALUE_OR(F.NaN) @ F.CALL(indx_i, indx_i) @ F.TRACK_COVARIANCE @ F.STATE_AT("ClosestToBeam") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_X = lambda particles:   F.MAP(F.VALUE_OR(F.NaN) @ F.X_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_Y = lambda particles:   F.MAP(F.VALUE_OR(F.NaN) @ F.Y_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_Z = lambda particles:   F.MAP(F.VALUE_OR(F.NaN) @ F.Z_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_TX = lambda particles:  F.MAP(F.VALUE_OR(F.NaN) @ F.TX @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_TY = lambda particles:  F.MAP(F.VALUE_OR(F.NaN) @ F.TY @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_QOP = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.QOVERP @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
+ALLPART_FIRSTMEAS_STATEERR = lambda particles, indx_i: F.MAP(F.VALUE_OR(F.NaN) @ F.CALL(indx_i, indx_i) @ F.TRACK_COVARIANCE @ F.STATE_AT("FirstMeasurement") @ F.TRACK) @ F.TES(particles)
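+# diagonal of TRACK_COVARIANCE follows the state parameterisation: 0 -> x, 1 -> y, 2 -> tx, 3 -> ty, 4 -> q/p
+# (used below for the *_XERR, *_YERR, *_TXERR, *_TYERR and *_QOPERR branches)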
+### particles here have no EndUT state, so these functors throw an error (should this be an error at all?)
+#ALLPART_ENDUT_X = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.X_COORDINATE @ F.POSITION @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_Y = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.Y_COORDINATE @ F.POSITION @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_Z = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.Z_COORDINATE @ F.POSITION @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_TX = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.TX @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_TY = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.TY @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_QOP = lambda particles: F.MAP(F.VALUE_OR(F.NaN) @ F.QOVERP @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#ALLPART_ENDUT_STATEERR = lambda particles, indx_i: F.MAP(F.VALUE_OR(F.NaN) @ F.CALL(indx_i, indx_i) @ F.TRACK_COVARIANCE @ F.STATE_AT("EndUT") @ F.TRACK) @ F.TES(particles)
+#P -> MCP
+ALLPART_MC_ISPROMPT  = lambda particles, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.MC_ISPROMPT())) @ F.TES(particles)
+ALLPART_TRUE_PV_X = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VX)) @ F.TES(particles) #for association of PV <-> MCPV
+ALLPART_TRUE_PV_Y = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VY)) @ F.TES(particles)
+ALLPART_TRUE_PV_Z = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VZ)) @ F.TES(particles)
+#P -> LONG LIVED ANCESTOR (Helpful to find which PV has produced B using MCPV <-> PV association)
+## The 1e-7 ns (= 1e-16 s) threshold lies between the lifetimes of the pi0 (~8.5e-17 s, considered prompt) and the tau (~2.9e-13 s, nonprompt).
+LONGLIVED_ANCESTOR = F.MC_FIRST_LONGLIVED_ANCESTOR(MinLifetime=1e-7 * SystemOfUnits.ns)
+ALLPART_LONGLIVED_ANCESTOR_ID  = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID  @ LONGLIVED_ANCESTOR)) @ F.TES(particles)
+ALLPART_LONGLIVED_ANCESTOR_KEY = lambda particles, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.OBJECT_KEY  @ LONGLIVED_ANCESTOR)) @ F.TES(particles)
+#the following we don't need (but keep commented out in case we do)
+#ALLPART_LONGLIVED_ANCESTOR_PVX = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VX @ LONGLIVED_ANCESTOR)) @ F.TES(particles)
+#ALLPART_LONGLIVED_ANCESTOR_PVY = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VY @ LONGLIVED_ANCESTOR)) @ F.TES(particles)
+#ALLPART_LONGLIVED_ANCESTOR_PVZ = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VZ @ LONGLIVED_ANCESTOR)) @ F.TES(particles)
+ALLPART_TRUE_ID  = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID)) @ F.TES(particles)
+ALLPART_TRUE_KEY = lambda particles, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.OBJECT_KEY)) @ F.TES(particles)
+ALLPART_TRUE_PX  = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.PX)) @ F.TES(particles)
+ALLPART_TRUE_PY  = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.PY)) @ F.TES(particles)
+ALLPART_TRUE_PZ  = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.PZ)) @ F.TES(particles)
+ALLPART_TRUE_ENERGY = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.ENERGY)) @ F.TES(particles)
+ALLPART_TRUE_MASS = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MASS)) @ F.TES(particles)
+ALLPART_TRUE_ORIGIN_KEY = lambda particles, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.OBJECT_KEY @ F.MC_PRIMARYVERTEX)) @ F.TES(particles)
+ALLPART_TRUE_ORIGIN_X = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.ORIGIN_VX)) @ F.TES(particles)
+ALLPART_TRUE_ORIGIN_Y = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.ORIGIN_VY)) @ F.TES(particles)
+ALLPART_TRUE_ORIGIN_Z = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.ORIGIN_VZ)) @ F.TES(particles)
+ALLPART_MOTHER_ID  = lambda particles, ngen, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.MC_MOTHER(ngen, F.PARTICLE_ID))) @ F.TES(particles)
+ALLPART_MOTHER_KEY = lambda particles, ngen, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.MC_MOTHER(ngen, F.OBJECT_KEY))) @ F.TES(particles)
+ALLPART_ID        = lambda particles: F.MAP(F.VALUE_OR(0) @ F.PARTICLE_ID) @ F.TES(particles)
+#ALLPART_HISTORY   = lambda particles: F.MAP(F.VALUE_OR(-1) @ F.CAST_TO_INT @ F.TRACKHISTORY @ F.TRACK) @ F.TES(particles)
+#ALLPART_FLAG      = lambda particles: F.MAP(F.VALUE_OR(-1) @ F.CAST_TO_INT @ F.TRACKFLAG @ F.TRACK) @ F.TES(particles)
+
+#Variables for B candidates
+ALLB_ID      = lambda particles: F.MAP(F.PARTICLE_ID) @ F.TES(particles)
+ALLB_KEY     = lambda particles: F.MAP(F.OBJECT_KEY) @ F.TES(particles)
+ALLB_ENDVTX_CHI2 = lambda particles: F.MAP(F.CHI2 @ F.ENDVERTEX) @ F.TES(particles)
+ALLB_ENDVTX_NDOF = lambda particles: F.MAP(F.NDOF @ F.ENDVERTEX) @ F.TES(particles)
+ALLB_ENDVTX_X    = lambda particles: F.MAP(F.END_VX) @ F.TES(particles)
+ALLB_ENDVTX_Y    = lambda particles: F.MAP(F.END_VY) @ F.TES(particles)
+ALLB_ENDVTX_Z    = lambda particles: F.MAP(F.END_VZ) @ F.TES(particles)
+ALLB_ENDVTX_POSERR = lambda particles, indx_i: F.MAP(fmath.sqrt(F.CALL(indx_i, indx_i)) @ F.POS_COV_MATRIX @ F.ENDVERTEX) @ F.TES(particles)
+ALLB_PX        = lambda particles: F.MAP(F.PX) @ F.TES(particles)
+ALLB_PY        = lambda particles: F.MAP(F.PY) @ F.TES(particles)
+ALLB_PZ        = lambda particles: F.MAP(F.PZ) @ F.TES(particles)
+ALLB_MOMERR    = lambda particles, indx_i: F.MAP(F.CALL(indx_i, indx_i) @ F.THREE_MOM_COV_MATRIX) @ F.TES(particles)
+ALLB_ENERGY    = lambda particles: F.MAP(F.ENERGY) @ F.TES(particles)
+ALLB_MASS      = lambda particles: F.MAP(F.MASS) @ F.TES(particles)
+ALLB_ETA       = lambda particles: F.MAP(F.ETA) @ F.TES(particles)
+ALLB_PHI       = lambda particles: F.MAP(F.PHI) @ F.TES(particles)
+#P -> MCP
+ALLB_TRUE_ID   = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID)) @ F.TES(particles)
+ALLB_TRUE_KEY  = lambda particles, mctruth: F.MAP(F.VALUE_OR(-1) @ mctruth(F.OBJECT_KEY)) @ F.TES(particles)
+ALLB_TRUE_PV_X = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VX)) @ F.TES(particles) #for association of PV <-> MCPV
+ALLB_TRUE_PV_Y = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VY)) @ F.TES(particles)
+ALLB_TRUE_PV_Z = lambda particles, mctruth: F.MAP(F.VALUE_OR(F.NaN) @ mctruth(F.MC_PV_VZ)) @ F.TES(particles)
+#the following we don't need (but keep commented out in case we do)
+#ALLB_TRUE_CHILD1_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(1, F.FORWARDARGS)) @ F.TES(particles) #D*-
+#ALLB_TRUE_CHILD11_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(1, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS)) @ F.TES(particles) #D0
+#ALLB_TRUE_CHILD12_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(2, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS)) @ F.TES(particles) #pi
+#ALLB_TRUE_CHILD111_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(1, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS)) @ F.TES(particles) #K
+#ALLB_TRUE_CHILD112_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(2, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS) @ F.CHILD(1, F.FORWARDARGS)) @ F.TES(particles) #pi
+#ALLB_TRUE_CHILD2_ID = lambda particles, mctruth: F.MAP(F.VALUE_OR(0) @ mctruth(F.PARTICLE_ID) @ F.CHILD(2, F.FORWARDARGS)) @ F.TES(particles) #mu
+
+#velo tracks
+ALLVELOTRACKS_ETA = lambda tracks: F.MAP(F.ETA) @ F.TES(tracks)
+ALLVELOTRACKS_PHI = lambda tracks: F.MAP(F.PHI) @ F.TES(tracks)
+ALLVELOTRACKS_X = lambda tracks: F.MAP(F.X_COORDINATE @ F.POSITION) @ F.TES(tracks)
+ALLVELOTRACKS_Y = lambda tracks: F.MAP(F.Y_COORDINATE @ F.POSITION) @ F.TES(tracks)
+ALLVELOTRACKS_Z = lambda tracks: F.MAP(F.Z_COORDINATE @ F.POSITION) @ F.TES(tracks)
+ALLVELOTRACKS_TX = lambda tracks: F.MAP(F.TX) @ F.TES(tracks)
+ALLVELOTRACKS_TY = lambda tracks: F.MAP(F.TY) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_KEY= lambda tracks, pvs: F.MAP(F.VALUE_OR(-1) @ F.OBJECT_KEY @ F.BPV(pvs)) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_X  = lambda tracks, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVX(pvs)) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_Y  = lambda tracks, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVY(pvs)) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_Z  = lambda tracks, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVZ(pvs)) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_IP = lambda tracks, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVIP(pvs)) @ F.TES(tracks)
+ALLVELOTRACKS_BPV_IPCHI2 = lambda tracks, pvs: F.MAP(F.VALUE_OR(F.NaN) @ F.BPVIPCHI2(pvs)) @ F.TES(tracks)
+## throws an error about pointers
+#ALLVELOTRACKS_FIRSTMEAS_X = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.X_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement")) @ F.TES(tracks)
+#ALLVELOTRACKS_FIRSTMEAS_Y = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.Y_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement")) @ F.TES(tracks)
+#ALLVELOTRACKS_FIRSTMEAS_Z = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.Z_COORDINATE @ F.POSITION @ F.STATE_AT("FirstMeasurement")) @ F.TES(tracks)
+#ALLVELOTRACKS_POSERR = lambda tracks, indx_i: F.MAP(F.VALUE_OR(F.NaN) @ F.CALL(indx_i, indx_i) @ F.TRACK_COVARIANCE @ F.STATE_AT("FirstMeasurement")) @ F.TES(tracks)
+#ALLVELOTRACKS_ENDVELO_X = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.X_COORDINATE @ F.POSITION @ F.STATE_AT("EndVelo")) @ F.TES(tracks)
+#ALLVELOTRACKS_ENDVELO_Y = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.Y_COORDINATE @ F.POSITION @ F.STATE_AT("EndVelo")) @ F.TES(tracks)
+#ALLVELOTRACKS_ENDVELO_Z = lambda tracks: F.MAP(F.VALUE_OR(F.NaN) @ F.Z_COORDINATE @ F.POSITION @ F.STATE_AT("EndVelo")) @ F.TES(tracks)
+#ALLVELOTRACKS_ENDVELO_POSERR = lambda tracks, indx_i: F.MAP(F.VALUE_OR(F.NaN) @ F.CALL(indx_i, indx_i) @ F.TRACK_COVARIANCE @ F.STATE_AT("EndVelo")) @ F.TES(tracks)
+
+def get_all_part_vars(evt_vars, particles, pvs, MCTRUTH_parts, index_name, suffix = 'PARTS', is_long = False):
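+  """
+  Add per-object array branches for `particles` to `evt_vars`.
+
+  evt_vars:      FunctorCollection to extend; branches are named ALL{suffix}_*[{index_name}]
+  particles:     particle container to map over
+  pvs:           primary vertices used by the BPV functors
+  MCTRUTH_parts: MCTruthAndBkgCat helper providing the MC-truth association
+  index_name:    index label appearing in the branch names
+  suffix:        branch-name prefix
+  is_long:       if True, also store the ClosestToBeam state information (long container only)
+  """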
+  evt_vars[f'ALL{suffix}_ID[{index_name}]'] = ALLPART_ID(particles)
+  evt_vars[f'ALL{suffix}_KEY[{index_name}]'] = ALLPART_KEY(particles)
+  evt_vars[f'ALL{suffix}_Q[{index_name}]']  = ALLPART_Q(particles)
+  evt_vars[f'ALL{suffix}_PX[{index_name}]'] = ALLPART_PX(particles)
+  evt_vars[f'ALL{suffix}_PY[{index_name}]'] = ALLPART_PY(particles)
+  evt_vars[f'ALL{suffix}_PZ[{index_name}]'] = ALLPART_PZ(particles)
+  evt_vars[f'ALL{suffix}_PXERR[{index_name}]'] = ALLPART_MOMERR(particles, 0)
+  evt_vars[f'ALL{suffix}_PYERR[{index_name}]'] = ALLPART_MOMERR(particles, 1)
+  evt_vars[f'ALL{suffix}_PZERR[{index_name}]'] = ALLPART_MOMERR(particles, 2)
+  evt_vars[f'ALL{suffix}_MASS[{index_name}]'] = ALLPART_MASS(particles)
+  evt_vars[f'ALL{suffix}_ENERGY[{index_name}]'] = ALLPART_ENERGY(particles)
+  evt_vars[f'ALL{suffix}_ETA[{index_name}]'] = ALLPART_ETA(particles)
+  evt_vars[f'ALL{suffix}_PHI[{index_name}]'] = ALLPART_PHI(particles)
+  evt_vars[f'ALL{suffix}_PIDPI[{index_name}]'] = ALLPART_PIDPI(particles)
+  evt_vars[f'ALL{suffix}_PIDK[{index_name}]'] = ALLPART_PIDK(particles)
+  evt_vars[f'ALL{suffix}_PIDP[{index_name}]'] = ALLPART_PIDP(particles)
+  evt_vars[f'ALL{suffix}_PIDE[{index_name}]'] = ALLPART_PIDE(particles)
+  evt_vars[f'ALL{suffix}_PIDMU[{index_name}]'] = ALLPART_PIDMU(particles)
+  #P -> Proto -> Track
+  evt_vars[f'ALL{suffix}_TRACK_CHI2[{index_name}]'] = ALLPART_TRACK_CHI2(particles)
+  evt_vars[f'ALL{suffix}_TRACK_NDOF[{index_name}]'] = ALLPART_TRACK_NDOF(particles)
+  evt_vars[f'ALL{suffix}_TRACK_TYPE[{index_name}]'] = ALLPART_TRACK_TYPE(particles)
+  evt_vars[f'ALL{suffix}_TRACK_GHOSTPROB[{index_name}]'] = ALLPART_TRACK_GHOSTPROB(particles)
+  # BPV information
+  evt_vars[f'ALL{suffix}_BPV_KEY[{index_name}]'] = ALLPART_BPV_KEY(particles, pvs)
+  evt_vars[f'ALL{suffix}_BPV_X[{index_name}]'] = ALLPART_BPV_X(particles, pvs)
+  evt_vars[f'ALL{suffix}_BPV_Y[{index_name}]'] = ALLPART_BPV_Y(particles, pvs)
+  evt_vars[f'ALL{suffix}_BPV_Z[{index_name}]'] = ALLPART_BPV_Z(particles, pvs)
+  evt_vars[f'ALL{suffix}_BPV_IP[{index_name}]'] = ALLPART_BPV_IP(particles, pvs)
+  evt_vars[f'ALL{suffix}_BPV_IPCHI2[{index_name}]'] = ALLPART_BPV_IPCHI2(particles, pvs)
+  # not every track type has a ClosestToBeam state, so these functors can throw an error; they are therefore only filled for long particles
+  if is_long:
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_X[{index_name}]'] = ALLPART_CLOSESTTOBEAM_X(particles)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_Y[{index_name}]'] = ALLPART_CLOSESTTOBEAM_Y(particles)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_TX[{index_name}]'] = ALLPART_CLOSESTTOBEAM_TX(particles)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_TY[{index_name}]'] = ALLPART_CLOSESTTOBEAM_TY(particles)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_QOP[{index_name}]'] = ALLPART_CLOSESTTOBEAM_QOP(particles)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_XERR[{index_name}]'] = ALLPART_CLOSESTTOBEAM_STATEERR(particles, 0)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_YERR[{index_name}]'] = ALLPART_CLOSESTTOBEAM_STATEERR(particles, 1)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_TXERR[{index_name}]'] = ALLPART_CLOSESTTOBEAM_STATEERR(particles, 2)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_TYERR[{index_name}]'] = ALLPART_CLOSESTTOBEAM_STATEERR(particles, 3)
+    evt_vars[f'ALL{suffix}_CLOSESTTOBEAM_QOPERR[{index_name}]'] = ALLPART_CLOSESTTOBEAM_STATEERR(particles, 4)
+
+  # store first measurement
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_X[{index_name}]'] = ALLPART_FIRSTMEAS_X(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_Y[{index_name}]'] = ALLPART_FIRSTMEAS_Y(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_Z[{index_name}]'] = ALLPART_FIRSTMEAS_Z(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_TX[{index_name}]'] = ALLPART_FIRSTMEAS_TX(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_TY[{index_name}]'] = ALLPART_FIRSTMEAS_TY(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_QOP[{index_name}]'] = ALLPART_FIRSTMEAS_QOP(particles)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_XERR[{index_name}]'] = ALLPART_FIRSTMEAS_STATEERR(particles, 0)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_YERR[{index_name}]'] = ALLPART_FIRSTMEAS_STATEERR(particles, 1)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_TXERR[{index_name}]'] = ALLPART_FIRSTMEAS_STATEERR(particles, 2)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_TYERR[{index_name}]'] = ALLPART_FIRSTMEAS_STATEERR(particles, 3)
+  evt_vars[f'ALL{suffix}_FIRSTMEAS_QOPERR[{index_name}]'] = ALLPART_FIRSTMEAS_STATEERR(particles, 4)
+  #P -> MCP
+  evt_vars[f'ALL{suffix}_TRUE_ID[{index_name}]'] = ALLPART_TRUE_ID(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_KEY[{index_name}]'] = ALLPART_TRUE_KEY(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PX[{index_name}]'] = ALLPART_TRUE_PX(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PY[{index_name}]'] = ALLPART_TRUE_PY(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PZ[{index_name}]'] = ALLPART_TRUE_PZ(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_ENERGY[{index_name}]'] = ALLPART_TRUE_ENERGY(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_MASS[{index_name}]'] = ALLPART_TRUE_MASS(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MC_ISPROMPT[{index_name}]'] = ALLPART_MC_ISPROMPT(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PV_X[{index_name}]'] = ALLPART_TRUE_PV_X(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PV_Y[{index_name}]'] = ALLPART_TRUE_PV_Y(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_PV_Z[{index_name}]'] = ALLPART_TRUE_PV_Z(particles, MCTRUTH_parts)
+  #P -> LONG LIVED ANCESTOR
+  evt_vars[f'ALL{suffix}_LONGLIVED_ANCESTOR_ID[{index_name}]']  = ALLPART_LONGLIVED_ANCESTOR_ID(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_LONGLIVED_ANCESTOR_KEY[{index_name}]'] = ALLPART_LONGLIVED_ANCESTOR_KEY(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_ORIGIN_KEY[{index_name}]'] = ALLPART_TRUE_ORIGIN_KEY(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_ORIGIN_X[{index_name}]'] = ALLPART_TRUE_ORIGIN_X(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_ORIGIN_Y[{index_name}]'] = ALLPART_TRUE_ORIGIN_Y(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_TRUE_ORIGIN_Z[{index_name}]'] = ALLPART_TRUE_ORIGIN_Z(particles, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_1_ID[{index_name}]'] = ALLPART_MOTHER_ID(particles, 1, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_1_KEY[{index_name}]'] = ALLPART_MOTHER_KEY(particles, 1, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_2_ID[{index_name}]'] = ALLPART_MOTHER_ID(particles, 2, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_2_KEY[{index_name}]'] = ALLPART_MOTHER_KEY(particles, 2, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_3_ID[{index_name}]'] = ALLPART_MOTHER_ID(particles, 3, MCTRUTH_parts)
+  evt_vars[f'ALL{suffix}_MOTHER_3_KEY[{index_name}]'] = ALLPART_MOTHER_KEY(particles, 3, MCTRUTH_parts)
+  #evt_vars[f'ALL{suffix}_HISTORY[{index_name}]'] = ALLPART_HISTORY(particles)
+  #evt_vars[f'ALL{suffix}_FLAG[{index_name}]'] = ALLPART_FLAG(particles)
+
+def get_user_algs(decay_channel):
+  """
+  decay_channel: decay channel to be used in the lines.
+    Options: DstMu, DstTau_Lep, D0Tau_Had, JpsiK_E, JpsiK_Mu
+  """
+  #line name
+  line_name = f'Hlt2SLB_BTo{decay_channel}'
+  #Run over events that have fired the line
+  my_filter = create_lines_filter(name="Filter", lines=[line_name])
+
+  #get Bs and particles
+  Bs = get_particles(f"/Event/HLT2/{line_name}/Particles")
+  long_particles = get_particles(f"/Event/HLT2/{line_name}/ExtraLongPions/Particles")
+  down_particles = get_particles(f"/Event/HLT2/{line_name}/ExtraDownPions/Particles")
+  up_particles = get_particles(f"/Event/HLT2/{line_name}/ExtraUpPions/Particles")
+
+  #get MCTRUTH for Bs
+  MCTRUTH_Bs = MCTruthAndBkgCat(Bs, name='MCTRUTH_Bs')
+  #get MCTRUTH for particles
+  MCTRUTH_long_parts = MCTruthAndBkgCat(long_particles, name='MCTRUTHLONG_extra')
+  MCTRUTH_down_parts = MCTruthAndBkgCat(down_particles, name='MCTRUTHDOWN_extra')
+  MCTRUTH_up_parts = MCTruthAndBkgCat(up_particles, name='MCTRUTHUP_extra')
+
+  #get true ID of the B candidate (helpful to run only over events that contain a true B candidate)
+  if decay_channel == 'DstMu' or decay_channel == 'DstTau_Lep':
+    #Bd -> D*- mu+ nu; D*- -> D0 pi-; D0 -> K pi
+    #Bd -> D*- tau+ nu; D*- -> D0 pi-; D0 -> K pi and tau -> mu nu nu
+    B_ID = 511
+  elif decay_channel == 'D0Tau_Had' or decay_channel == 'JpsiK_E' or decay_channel == 'JpsiK_Mu':
+    #Bu -> D0 tau+ nu; D0 -> K pi and tau -> 3pi
+    #Bu -> Jpsi K+; Jpsi -> mu mu or e e
+    B_ID = 521
+  else:
+    raise ValueError(f"Invalid decay channel {decay_channel}")
+
+  #run over events that have a true Bcand
+  cut_event = F.MAP_ANY_OF(F.VALUE_OR(0) @ MCTRUTH_Bs(F.ABS @ F.PARTICLE_ID) == B_ID) @ F.TES(Bs)
+  truth_filter_event = VoidFilter(name='EventTruthFilter', Cut=cut_event)
+
+  #filter Bcands
+  cut_B = F.VALUE_OR(0) @ MCTRUTH_Bs(F.ABS @ F.PARTICLE_ID) == B_ID
+  Bcands = ParticleFilter(Input=Bs, Cut=F.FILTER(cut_B), name='signalB') #filter the B candidates
+
+  #flatten the decay tree to get the TES location of all descendants of the B candidates
+  all_descendants = FlattenDecayTree(InputParticles=Bcands, name="FlattenedDecayTree").OutputParticles
+  all_basics = ParticleFilter(all_descendants, F.FILTER(F.ISBASICPARTICLE), name="AllBasics")
+
+  #get velo tracks
+  velo_tracks = make_tracks('VeloTracks')
+
+  #get pvs
+  pvs  = get_pvs()
+
+  #get global event variables
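+  #FC.RecSummary() provides the event-level reconstruction summary variables
+  #(e.g. track and PV multiplicities); the per-object arrays below are appended to it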
+  evt_vars = FC.RecSummary()
+
+  #get pv variables
+  evt_vars['ALLPV_CHI2[pv_indx]']   = ALLPV_CHI2(pvs)
+  evt_vars['ALLPV_NDOF[pv_indx]']   = ALLPV_NDOF(pvs)
+  evt_vars['ALLPV_X[pv_indx]']      = ALLPV_X(pvs)
+  evt_vars['ALLPV_Y[pv_indx]']      = ALLPV_Y(pvs)
+  evt_vars['ALLPV_Z[pv_indx]']      = ALLPV_Z(pvs)
+  evt_vars['ALLPV_XERR[pv_indx]']   = ALLPV_POSERR(pvs, 0)
+  evt_vars['ALLPV_YERR[pv_indx]']   = ALLPV_POSERR(pvs, 1)
+  evt_vars['ALLPV_ZERR[pv_indx]']   = ALLPV_POSERR(pvs, 2)
+
+  #get long/down/up type particle variables
+  get_all_part_vars(evt_vars, long_particles, pvs, MCTRUTH_long_parts, index_name='long_indx', suffix="LONGPART", is_long=True)
+  get_all_part_vars(evt_vars, down_particles, pvs, MCTRUTH_down_parts, index_name='down_indx', suffix="DOWNPART")
+  get_all_part_vars(evt_vars,   up_particles, pvs,   MCTRUTH_up_parts, index_name='up_indx', suffix="UPPART")
+
+  #get Bcand variables
+  evt_vars['ALLB_ID[bcand_indx]'] = ALLB_ID(Bcands)
+  evt_vars['ALLB_KEY[bcand_indx]'] = ALLB_KEY(Bcands)
+  evt_vars['ALLB_ENDVTX_CHI2[bcand_indx]'] = ALLB_ENDVTX_CHI2(Bcands)
+  evt_vars['ALLB_ENDVTX_NDOF[bcand_indx]'] = ALLB_ENDVTX_NDOF(Bcands)
+  evt_vars['ALLB_ENDVTX_X[bcand_indx]'] = ALLB_ENDVTX_X(Bcands)
+  evt_vars['ALLB_ENDVTX_Y[bcand_indx]'] = ALLB_ENDVTX_Y(Bcands)
+  evt_vars['ALLB_ENDVTX_Z[bcand_indx]'] = ALLB_ENDVTX_Z(Bcands)
+  evt_vars['ALLB_ENDVTX_XERR[bcand_indx]'] = ALLB_ENDVTX_POSERR(Bcands, 0)
+  evt_vars['ALLB_ENDVTX_YERR[bcand_indx]'] = ALLB_ENDVTX_POSERR(Bcands, 1)
+  evt_vars['ALLB_ENDVTX_ZERR[bcand_indx]'] = ALLB_ENDVTX_POSERR(Bcands, 2)
+  evt_vars['ALLB_PX[bcand_indx]'] = ALLB_PX(Bcands)
+  evt_vars['ALLB_PY[bcand_indx]'] = ALLB_PY(Bcands)
+  evt_vars['ALLB_PZ[bcand_indx]'] = ALLB_PZ(Bcands)
+  evt_vars['ALLB_PXERR[bcand_indx]'] = ALLB_MOMERR(Bcands, 0)
+  evt_vars['ALLB_PYERR[bcand_indx]'] = ALLB_MOMERR(Bcands, 1)
+  evt_vars['ALLB_PZERR[bcand_indx]'] = ALLB_MOMERR(Bcands, 2)
+  evt_vars['ALLB_ENERGY[bcand_indx]'] = ALLB_ENERGY(Bcands)
+  evt_vars['ALLB_MASS[bcand_indx]'] = ALLB_MASS(Bcands)
+  evt_vars['ALLB_ETA[bcand_indx]'] = ALLB_ETA(Bcands)
+  evt_vars['ALLB_PHI[bcand_indx]'] = ALLB_PHI(Bcands)
+  #P -> MCP
+  evt_vars['ALLB_TRUE_ID[bcand_indx]'] = ALLB_TRUE_ID(Bcands, MCTRUTH_Bs)
+  evt_vars['ALLB_TRUE_KEY[bcand_indx]'] = ALLB_TRUE_KEY(Bcands, MCTRUTH_Bs)
+  evt_vars['ALLB_TRUE_PV_X[bcand_indx]'] = ALLB_TRUE_PV_X(Bcands, MCTRUTH_Bs)
+  evt_vars['ALLB_TRUE_PV_Y[bcand_indx]'] = ALLB_TRUE_PV_Y(Bcands, MCTRUTH_Bs)
+  evt_vars['ALLB_TRUE_PV_Z[bcand_indx]'] = ALLB_TRUE_PV_Z(Bcands, MCTRUTH_Bs)
+  ##evt_vars['ALLB_TRUE_B0[bcand_indx]'] = ALLB_TRUE_ID(Bcands, MCTRUTH_Bs)
+  ##evt_vars['ALLB_TRUE_DST[bcand_indx]'] = ALLB_TRUE_CHILD1_ID(Bcands, MCTRUTH_Bs) #D*-
+  ##evt_vars['ALLB_TRUE_MU[bcand_indx]'] = ALLB_TRUE_CHILD2_ID(Bcands, MCTRUTH_Bs) #mu
+  ##evt_vars['ALLB_TRUE_D0[bcand_indx]'] = ALLB_TRUE_CHILD11_ID(Bcands, MCTRUTH_Bs) #D0
+  ##evt_vars['ALLB_TRUE_PI1[bcand_indx]'] = ALLB_TRUE_CHILD12_ID(Bcands, MCTRUTH_Bs) #pi
+  ##evt_vars['ALLB_TRUE_K[bcand_indx]'] = ALLB_TRUE_CHILD111_ID(Bcands, MCTRUTH_Bs) #K
+  ##evt_vars['ALLB_TRUE_PI2[bcand_indx]'] = ALLB_TRUE_CHILD112_ID(Bcands, MCTRUTH_Bs) #pi
+
+  ##all basic particles of Bcands
+  get_all_part_vars(evt_vars, all_basics, pvs, MCTRUTH_Bs, index_name='basics_indx', suffix="BASICS")
+
+  #get velo tracks variables
+  evt_vars['ALLVELOTRACKS_ETA[velo_trk_indx]'] = ALLVELOTRACKS_ETA(velo_tracks)
+  evt_vars['ALLVELOTRACKS_PHI[velo_trk_indx]'] = ALLVELOTRACKS_PHI(velo_tracks)
+  #FirstState
+  evt_vars['ALLVELOTRACKS_X[velo_trk_indx]'] = ALLVELOTRACKS_X(velo_tracks)
+  evt_vars['ALLVELOTRACKS_Y[velo_trk_indx]'] = ALLVELOTRACKS_Y(velo_tracks)
+  evt_vars['ALLVELOTRACKS_Z[velo_trk_indx]'] = ALLVELOTRACKS_Z(velo_tracks)
+  evt_vars['ALLVELOTRACKS_TX[velo_trk_indx]'] = ALLVELOTRACKS_TX(velo_tracks)
+  evt_vars['ALLVELOTRACKS_TY[velo_trk_indx]'] = ALLVELOTRACKS_TY(velo_tracks)
+  #velo BPV information
+  evt_vars['ALLVELOTRACKS_BPV_KEY[velo_trk_indx]'] = ALLVELOTRACKS_BPV_KEY(velo_tracks, pvs)
+  evt_vars['ALLVELOTRACKS_BPV_X[velo_trk_indx]'] = ALLVELOTRACKS_BPV_X(velo_tracks, pvs)
+  evt_vars['ALLVELOTRACKS_BPV_Y[velo_trk_indx]'] = ALLVELOTRACKS_BPV_Y(velo_tracks, pvs)
+  evt_vars['ALLVELOTRACKS_BPV_Z[velo_trk_indx]'] = ALLVELOTRACKS_BPV_Z(velo_tracks, pvs)
+  evt_vars['ALLVELOTRACKS_BPV_IP[velo_trk_indx]'] = ALLVELOTRACKS_BPV_IP(velo_tracks, pvs)
+  evt_vars['ALLVELOTRACKS_BPV_IPCHI2[velo_trk_indx]'] = ALLVELOTRACKS_BPV_IPCHI2(velo_tracks, pvs)
+  #throws an error about pointers
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_X[velo_trk_indx]'] = ALLVELOTRACKS_FIRSTMEAS_X(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_Y[velo_trk_indx]'] = ALLVELOTRACKS_FIRSTMEAS_Y(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_Z[velo_trk_indx]'] = ALLVELOTRACKS_FIRSTMEAS_Z(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_XERR[velo_trk_indx]'] = ALLVELOTRACKS_POSERR(velo_tracks, 0)
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_YERR[velo_trk_indx]'] = ALLVELOTRACKS_POSERR(velo_tracks, 1)
+  #evt_vars['ALLVELOTRACKS_FIRSTMEAS_ZERR[velo_trk_indx]'] = ALLVELOTRACKS_POSERR(velo_tracks, 2)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_X[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_X(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_Y[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_Y(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_Z[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_Z(velo_tracks)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_XERR[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_POSERR(velo_tracks, 0)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_YERR[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_POSERR(velo_tracks, 1)
+  #evt_vars['ALLVELOTRACKS_ENDVELO_ZERR[velo_trk_indx]'] = ALLVELOTRACKS_ENDVELO_POSERR(velo_tracks, 2)
+
+  # define tupling algorithm
+  tuple_file = FunTuple_Event(name="Tuple", tuple_name="EventInfo", variables=evt_vars)
+
+  #define algorithms
+  user_algorithms = {}
+  user_algorithms['Alg'] = [my_filter, truth_filter_event, tuple_file]
+  return user_algorithms
\ No newline at end of file
diff --git a/pvassoc/hlt2.py b/pvassoc/hlt2.py
new file mode 100755
index 0000000000000000000000000000000000000000..0943a4333e73ad21964bf8b3cfda7619c1f72eea
--- /dev/null
+++ b/pvassoc/hlt2.py
@@ -0,0 +1,250 @@
+###############################################################################
+# (c) Copyright 2024 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Common HLT2 utilities for semileptonic lines
+"""
+from Hlt2Conf.lines.semileptonic.builders.charm_hadron_builder import (
+    make_Hc_to_nbody)  # type: ignore
+from GaudiKernel.SystemOfUnits import MeV, GeV, mm  # type: ignore
+from Hlt2Conf.algorithms_thor import ParticleCombiner  # type: ignore
+import Functors as F  # type: ignore
+from Hlt2Conf.lines.semileptonic.builders.base_builder import (  # type: ignore
+    make_candidate, make_kaons, make_pions, make_kaons_from_b,
+    make_tauons_hadronic_decay, make_muons_from_b)  # type: ignore
+from Hlt2Conf.standard_particles import (  # type: ignore
+    make_long_pions, make_up_pions, make_down_pions, make_resolved_pi0s,
+    make_merged_pi0s, make_photons, make_detached_mumu,
+    make_detached_dielectron_with_brem)
+from Hlt2Conf.algorithms_thor import ParticleContainersMerger  # type: ignore
+from Hlt2Conf.lines.semileptonic.builders import sl_line_prefilter # type: ignore
+from Moore.lines import Hlt2Line  # type: ignore
+
+
+def make_D0():
+    "Descriptor: [D0 -> K- pi+]cc"
+    # get kaons and pions for D0
+    daughter_p_min = 1. * GeV  # 2 * GeV 
+    daughter_pt_min = 50. * MeV  # 250 * MeV (could do 100)
+    daughter_mipchi2_min = 2.  # 4.
+    kaon_pid = (F.PID_K > 0)  # > 3
+    pion_pid = (F.PID_K < 40)  # < 20
+    with make_candidate.bind(p_min=daughter_p_min,
+                             pt_min=daughter_pt_min,
+                             mipchi2_min=daughter_mipchi2_min):
+        particles = [make_kaons(pid=kaon_pid), make_pions(pid=pion_pid)]
+
+    # set descriptor for D0
+    descriptor = '[D0 -> K- pi+]cc'
+
+    # set mass cuts for D0
+    D0_mass = 1864.84  # nominal D0 mass in MeV
+    delta_mass = 120.
+    comb_m_min = (D0_mass - delta_mass) * MeV
+    comb_m_max = (D0_mass + delta_mass) * MeV
+    mother_m_min = None
+    mother_m_max = None
+    mother_pt_min = None
+    comb_pt_min = 500 * MeV # 2000 * MeV
+    comb_pt_any_min = None
+    comb_pt_sum_min = None
+    comb_doca_max = 1.0 * mm # 0.1 * mm
+    comb_docachi2_max = None
+    vchi2pdof_max = 12 # 6
+    bpvdira_min = 0.98 # 0.99
+    bpvfdchi2_min = 10 # 25
+    bpvvdz_min = None
+
+    # get D0 to Kpi
+    D0s = make_Hc_to_nbody(particles,
+                           descriptor,
+                           comb_m_min,
+                           comb_m_max,
+                           mother_m_min,
+                           mother_m_max,
+                           mother_pt_min,
+                           name='D0ToKpi_combiner',
+                           comb_pt_min=comb_pt_min,
+                           comb_pt_any_min=comb_pt_any_min,
+                           comb_pt_sum_min=comb_pt_sum_min,
+                           comb_doca_max=comb_doca_max,
+                           comb_docachi2_max=comb_docachi2_max,
+                           vchi2pdof_max=vchi2pdof_max,
+                           bpvdira_min=bpvdira_min,
+                           bpvfdchi2_min=bpvfdchi2_min,
+                           bpvvdz_min=bpvvdz_min)
+    return D0s
+
+
+def make_Dst():
+    "Descriptor: [D*(2010)+ -> D0 pi+]cc"
+    # get D0 and pions for D*(2010)+
+    daughter_p_min = 1. * GeV  # 2 * GeV
+    daughter_pt_min = 50. * MeV  # 250 * MeV (could do 100)
+    daughter_mipchi2_min = 4.  # 4.
+    pion_pid = (F.PID_K < 40)  # < 20
+    with make_candidate.bind(p_min=daughter_p_min,
+                             pt_min=daughter_pt_min,
+                             mipchi2_min=daughter_mipchi2_min):
+        pions = make_pions(pid=pion_pid)
+    particles = [make_D0(), pions]
+
+    # make descriptor for D*(2010)+
+    descriptor = '[D*(2010)+ -> D0 pi+]cc'
+
+    # no combination cut applied for now (F.ALL accepts all D0-pi combinations)
+    cut_combination = F.ALL
+    Dsts = ParticleCombiner(Inputs=particles,
+                           name='DstToD0pi_combiner',
+                           DecayDescriptor=descriptor,
+                           CombinationCut=cut_combination,
+                           CompositeCut=F.ALL,
+                           ParticleCombiner="ParticleVertexFitter")
+    return Dsts
+
+def make_loose_mu():
+    "Descriptor: [mu+]cc"
+    return make_muons_from_b(
+      p_min=1. * GeV,  # original 2 * GeV, muons-for-b is 6 * GeV, tau-to-mu is 3 * GeV
+      pt_min=None,  # original 250 * MeV, muons-for-b is 1 * GeV, tau-to-mu is None
+      mipchi2_min=2.,  # original 4., muons-for-b is 9., tau-to-mu is 16
+      mip_min=0,  # original 0, muons-for-b is 0
+      pid=(F.PID_MU > -8.0))  # original None, muons-for-b is > 0.0, tau-to-mu is > 0.0
+
+def make_tau_to_3pi():
+    "Descriptor: [tau+ -> pi- pi+ pi+]cc"
+    return make_tauons_hadronic_decay(
+        name='HadronicTauTo3Pi_combiner',
+        comb_m_min=100. * MeV,  # original 400 * MeV
+        comb_m_max=5000. * MeV,  # original 3500 * MeV
+        comb_pt_any_max=500 * MeV,  # original 300 * MeV
+        comb_doca_max=0.6 * mm,  # original 0.15 * mm
+        mipchi2_min=5,  # original 15
+        bpvdira_min=0.98,  # original 0.99
+        vchi2_max=25,  # original 16
+        m_min=100. * MeV,  # original 400 * MeV
+        m_max=5000. * MeV,  # original 3500 * MeV
+        twobody_m_max=2500 * MeV)  # original 1670 * MeV
+
+def make_jpsi_mumu():
+    "Descriptor: [J/psi(1S) -> mu+ mu-]cc"
+    return make_detached_mumu(opposite_sign=False)
+
+def make_jpsi_ee():
+    "Descriptor: [J/psi(1S) -> e+ e-]cc"
+    return make_detached_dielectron_with_brem(opposite_sign=False)
+
+def make_kaons_for_b():
+    "Descriptor: [K+]cc"
+    return make_kaons_from_b(p_min=3000. * MeV,
+                             pt_min=400. * MeV,
+                             mipchi2_min=9.,
+                             pid=(F.PID_K > 5.),
+                             mip_min=0.)
+
+def make_b_to_Dstpmu():
+    "Descriptor: [B~0 -> D*(2010)+ mu-]cc"
+    particles = [make_Dst(), make_loose_mu()]
+
+    # make descriptor for B0
+    descriptor = '[B~0 -> D*(2010)+ mu-]cc'
+
+    # apply some DOCA cuts on D*(2010)+ and muons
+    #cut_combination = F.require_all(F.MAXDOCA < 8.0, F.MAXDOCACHI2 < 20)
+    cut_combination = F.require_all(F.MAXDOCA < 16.0, F.MAXDOCACHI2 < 30)
+    return ParticleCombiner(Inputs=particles,
+                              name='BToDstpmu_combiner',
+                              DecayDescriptor=descriptor,
+                              CombinationCut=cut_combination,
+                              CompositeCut=F.ALL,
+                              ParticleCombiner="ParticleVertexFitter")
+
+def make_b_to_D0tau_hadronic():
+    "Descriptor: [B+ -> D~0 tau+]cc"
+    # get D0 and tauons for B+
+    particles = [make_D0(), make_tau_to_3pi()]
+
+    # make descriptor for B+
+    descriptor = '[B+ -> D~0 tau+]cc'
+
+    # apply some DOCA cuts on D0 and tauons
+    #cut_combination = F.require_all(F.MAXDOCA < 8.0, F.MAXDOCACHI2 < 20)
+    cut_combination = F.require_all(F.MAXDOCA < 16.0, F.MAXDOCACHI2 < 30)
+    return ParticleCombiner(Inputs=particles,
+                              name='BuToD0tau_combiner',
+                              DecayDescriptor=descriptor,
+                              CombinationCut=cut_combination,
+                              CompositeCut=F.ALL,
+                              ParticleCombiner="ParticleVertexFitter")
+
+def make_b_to_jpsik(jpsi_final_state):
+    "Descriptor: [B+ -> J/psi(1S) K+]cc"
+    # get J/psi(1S) and kaons for B+
+    if jpsi_final_state == 'ee':
+        jpsi = make_jpsi_ee()
+    elif jpsi_final_state == 'mumu':
+        jpsi = make_jpsi_mumu()
+    else:
+        raise ValueError(f"Jpsi final state {jpsi_final_state} not supported")
+
+    kaons = make_kaons_for_b()
+    particles = [jpsi, kaons]
+
+    # make descriptor for B+
+    descriptor = '[B+ -> J/psi(1S) K+]cc'
+
+    # apply some DOCA cuts on J/psi(1S) and kaons
+    #cut_combination = F.require_all(F.MAXDOCA < 8.0, F.MAXDOCACHI2 < 20)
+    cut_combination = F.require_all(F.MAXDOCA < 16.0, F.MAXDOCACHI2 < 30)
+    return ParticleCombiner(Inputs=particles,
+                              name='BToJpsik_combiner',
+                              DecayDescriptor=descriptor,
+                              CombinationCut=cut_combination,
+                              CompositeCut=F.ALL,
+                              ParticleCombiner="ParticleVertexFitter")
+
+def make_b(decay_channel):
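+    "Returns the B-candidate builder for the given decay channel"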
+    if decay_channel == 'DstMu' or decay_channel == 'DstTau_Lep':
+        Bcands = make_b_to_Dstpmu()
+    elif decay_channel == 'D0Tau_Had':
+        Bcands = make_b_to_D0tau_hadronic()
+    elif decay_channel == 'JpsiK_E':
+        Bcands = make_b_to_jpsik('ee')
+    elif decay_channel == 'JpsiK_Mu':
+        Bcands = make_b_to_jpsik('mumu')
+    else:
+        raise ValueError(f"Decay channel {decay_channel} not supported")
+
+    return Bcands
+
+def make_my_lines(decay_channel):
+    HLT2LINE = f'Hlt2SLB_BTo{decay_channel}'
+    def hlt2_lines(name=HLT2LINE, prescale=1):
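+        "Builds the Hlt2SLB_BTo{decay_channel} line: SL prefilter + B combiner, with persistreco and the extra particle containers as extra outputs"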
+        Bcands = make_b(decay_channel)
+        long_pions = make_long_pions()
+        up_pions = make_up_pions()
+        down_pions = make_down_pions()
+        resolved_pi0s = make_resolved_pi0s()
+        merged_pi0s = make_merged_pi0s()
+        photons = make_photons()
+        extra_output = [('ExtraLongPions', long_pions),
+                        ('ExtraUpPions', up_pions),
+                        ('ExtraDownPions', down_pions),
+                        ('ExtraResolvedPi0s', resolved_pi0s),
+                        ('ExtraMergedPi0s', merged_pi0s),
+                        ('ExtraPhotons', photons)]
+        return Hlt2Line(name=name,
+                        prescale=prescale,
+                        algs=sl_line_prefilter() + [Bcands],
+                        extra_outputs=extra_output, 
+                        persistreco=True)
+
+    lines = [hlt2_lines()]
+    return lines
\ No newline at end of file
diff --git a/pvassoc/info.yaml b/pvassoc/info.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db7113460a5466ac4e74443acef7bdf296df1676
--- /dev/null
+++ b/pvassoc/info.yaml
@@ -0,0 +1,74 @@
+defaults:
+  wg: SL
+  automatically_configure: no 
+  inform:
+    - abhijit.mathad@cern.ch
+
+{%- set datasets = [
+    ('DstMu',  11574020),
+    ('DstTau_Lep', 11574010),
+    ('D0Tau_Had', 12562000),
+    ('JpsiK_Mu', 12143001),
+    ('JpsiK_E', 12153001)
+] %}
+
+{%- set magtypes = [
+    ('MagDown', 'md'),
+    ('MagUp', 'mu')
+] %}
+
+  
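+# one Hlt1 -> Hlt2 -> DaVinci job chain is generated per (sample, polarity) combination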
+{%- for sample, evtID in datasets %}
+  {%- for polarity, tagpol in magtypes %}
+
+Hlt1{{sample}}_{{polarity}}:
+  application: Moore/v55r9@x86_64_v2-el9-gcc13+detdesc-opt
+  input:
+    bk_query: /MC/Dev/Beam6800GeV-expected-2024-{{polarity}}-Nu7.6-25ns-Pythia8/Sim10c/{{evtID}}/DIGI
+  output: MYHLT1.dst
+  options:
+    entrypoint: pvassoc.run_hlt1:main
+    extra_options:
+      input_raw_format: 0.5
+      conddb_tag: sim-20231017-vc-{{tagpol}}100
+      dddb_tag: dddb-20231017
+      input_type: ROOT 
+      output_type: ROOT
+      simulation: True
+      data_type: "Upgrade"
+      scheduler_legacy_mode: False
+
+Hlt2{{sample}}_{{polarity}}:
+  application: Moore/v55r9@x86_64_v2-el9-gcc13+detdesc-opt
+  input:
+    job_name: Hlt1{{sample}}_{{polarity}}
+  output: MYHLT2.dst
+  options:
+    entrypoint: pvassoc.run_hlt2:main_{{sample}}
+    extra_options:
+      input_raw_format: 0.5
+      conddb_tag: sim-20231017-vc-{{tagpol}}100
+      dddb_tag: dddb-20231017
+      input_type: ROOT 
+      output_type: ROOT
+      simulation: True
+      data_type: "Upgrade"
+      scheduler_legacy_mode: False
+
+DV{{sample}}_{{polarity}}:
+  application: DaVinci/v64r6@x86_64_v2-el9-gcc13+detdesc-opt
+  input:
+    job_name: Hlt2{{sample}}_{{polarity}}
+  output: MYNTUPLE.root
+  options:
+    entrypoint: pvassoc.run_dv:main_{{sample}}
+    extra_options:
+      input_raw_format: 0.5
+      input_type: ROOT
+      simulation: True
+      data_type: "Upgrade"
+      conddb_tag: sim-20231017-vc-{{tagpol}}100
+      dddb_tag: dddb-20231017
+      input_process: "Hlt2"
+  {%- endfor %}
+{%- endfor %}
diff --git a/pvassoc/run_dv.py b/pvassoc/run_dv.py
new file mode 100755
index 0000000000000000000000000000000000000000..7ecfa0a288e6d24ffe0ad49dfc4b58e50d55749f
--- /dev/null
+++ b/pvassoc/run_dv.py
@@ -0,0 +1,37 @@
+###############################################################################
+# (c) Copyright 2024 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+from DaVinci import Options
+from pvassoc.dv import get_user_algs
+from DaVinci import make_config
+
+def main_DstMu(options: Options):
+  """
+  DaVinci tupling entry point for the DstMu channel.
+  The entry points below cover DstTau_Lep, D0Tau_Had, JpsiK_E and JpsiK_Mu.
+  """
+  user_algorithms = get_user_algs(decay_channel='DstMu')
+  return make_config(options, user_algorithms)
+
+def main_DstTau_Lep(options: Options):
+  user_algorithms = get_user_algs(decay_channel='DstTau_Lep')
+  return make_config(options, user_algorithms)
+
+def main_D0Tau_Had(options: Options):
+  user_algorithms = get_user_algs(decay_channel='D0Tau_Had')
+  return make_config(options, user_algorithms)
+
+def main_JpsiK_E(options: Options):
+  user_algorithms = get_user_algs(decay_channel='JpsiK_E')
+  return make_config(options, user_algorithms)
+
+def main_JpsiK_Mu(options: Options):
+  user_algorithms = get_user_algs(decay_channel='JpsiK_Mu')
+  return make_config(options, user_algorithms)
\ No newline at end of file
diff --git a/pvassoc/run_hlt1.py b/pvassoc/run_hlt1.py
new file mode 100755
index 0000000000000000000000000000000000000000..ebe98b7a7163574eb859e18ccc3e9fcb34c046f5
--- /dev/null
+++ b/pvassoc/run_hlt1.py
@@ -0,0 +1,42 @@
+###############################################################################
+# (c) Copyright 2024 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Configures running HLT1 via Moore.
+"""
+
+from Moore import Options  # type: ignore
+from Moore.config import allen_control_flow  # type: ignore
+from RecoConf.hlt1_allen import allen_gaudi_config, get_allen_line_names  # type: ignore
+from PyConf.application import configure_input, configure  # type: ignore
+
+# WITH UT
+def main(options: Options):
+    """
+    Configures algorithm running of HLT1 via Moore to be passed to Analysis Productions.
+    """
+    config = configure_input(options)
+    with allen_gaudi_config.bind(sequence="hlt1_pp_forward_then_matching"):
+        line_names = get_allen_line_names()
+        allen_node = allen_control_flow(options)
+        config.update(configure(options, allen_node))
+    return config
+
+# WITHOUT UT
+#def main_without_ut(options: Options):
+#    """
+#    Configures algorithm running of HLT1 via Moore to be passed to Analysis Productions.
+#    """
+#    config = configure_input(options)
+#    with allen_gaudi_config.bind(sequence="hlt1_pp_matching_no_ut_1000KHz"):
+#        line_names = get_allen_line_names()
+#        allen_node = allen_control_flow(options)
+#        config.update(configure(options, allen_node))
+#    return config
\ No newline at end of file
diff --git a/pvassoc/run_hlt2.py b/pvassoc/run_hlt2.py
new file mode 100755
index 0000000000000000000000000000000000000000..4978389827e21eb24f09d7f40e0f16127d126138
--- /dev/null
+++ b/pvassoc/run_hlt2.py
@@ -0,0 +1,91 @@
+###############################################################################
+# (c) Copyright 2024 CERN for the benefit of the LHCb Collaboration           #
+#                                                                             #
+# This software is distributed under the terms of the GNU General Public      #
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".   #
+#                                                                             #
+# In applying this licence, CERN does not waive the privileges and immunities #
+# granted to it by virtue of its status as an Intergovernmental Organization  #
+# or submit itself to any jurisdiction.                                       #
+###############################################################################
+"""
+Configures running HLT2 via Moore.
+"""
+
+from pvassoc.hlt2 import make_my_lines
+from Moore import Options, run_moore
+from RecoConf.hlt2_global_reco import reconstruction as hlt2_reconstruction, make_light_reco_pr_kf, make_light_reco_pr_kf_without_UT
+from RecoConf.global_tools import stateProvider_with_simplified_geom, trackMasterExtrapolator_with_simplified_geom
+from RecoConf.reconstruction_objects import reconstruction
+
+def make_lines_DstMu():
+  """
+  Builds the HLT2 lines for the DstMu channel.
+  The functions below cover DstTau_Lep, D0Tau_Had, JpsiK_E and JpsiK_Mu.
+  """
+  return make_my_lines(decay_channel='DstMu')
+
+def make_lines_DstTau_Lep():
+  return make_my_lines(decay_channel='DstTau_Lep')
+
+def make_lines_D0Tau_Had():
+  return make_my_lines(decay_channel='D0Tau_Had')
+
+def make_lines_JpsiK_E():
+  return make_my_lines(decay_channel='JpsiK_E')
+
+def make_lines_JpsiK_Mu():
+  return make_my_lines(decay_channel='JpsiK_Mu')
+
+# WITH UT
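+# public tools shared by all entry points: track extrapolator and state provider with simplified geometry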
+public_tools = [trackMasterExtrapolator_with_simplified_geom(), stateProvider_with_simplified_geom()]
+def main_DstMu(options: Options):
+  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf):
+    config = run_moore(options, make_streams=make_lines_DstMu, public_tools=public_tools)
+    return config
+
+def main_DstTau_Lep(options: Options):
+  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf):
+    config = run_moore(options, make_streams=make_lines_DstTau_Lep, public_tools=public_tools)
+    return config
+  
+def main_D0Tau_Had(options: Options):
+  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf):
+    config = run_moore(options, make_streams=make_lines_D0Tau_Had, public_tools=public_tools)
+    return config
+  
+def main_JpsiK_E(options: Options):
+  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf):
+    config = run_moore(options, make_streams=make_lines_JpsiK_E, public_tools=public_tools)
+    return config
+  
+def main_JpsiK_Mu(options: Options):
+  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf):
+    config = run_moore(options, make_streams=make_lines_JpsiK_Mu, public_tools=public_tools)
+    return config
+  
+# Without UT
+#def main_without_ut_DstTau_Lep(options: Options):
+#  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf_without_UT):
+#    config = run_moore(options, make_streams=make_lines_DstTau_Lep, public_tools=public_tools)
+#    return config
+#  
+#def main_without_ut_D0Tau_Had(options: Options):
+#  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf_without_UT):
+#    config = run_moore(options, make_streams=make_lines_D0Tau_Had, public_tools=public_tools)
+#    return config
+#  
+#def main_without_ut_JpsiK_E(options: Options):
+#  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf_without_UT):
+#    config = run_moore(options, make_streams=make_lines_JpsiK_E, public_tools=public_tools)
+#    return config
+#  
+#def main_without_ut_DstMu(options: Options):
+#  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf_without_UT):
+#    config = run_moore(options, make_streams=make_lines_DstMu, public_tools=public_tools)
+#    return config
+#  
+#def main_without_ut_JpsiK_Mu(options: Options):
+#  with reconstruction.bind(from_file=False), hlt2_reconstruction.bind(make_reconstruction=make_light_reco_pr_kf_without_UT):
+#    config = run_moore(options, make_streams=make_lines_JpsiK_Mu, public_tools=public_tools)
+#    return config