diff --git a/PhysicsAnalysis/NtupleDumper/CMakeLists.txt b/PhysicsAnalysis/NtupleDumper/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..944ed9689053915dd526251fabf1c5b1af5dfa4e
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/CMakeLists.txt
@@ -0,0 +1,12 @@
+atlas_subdir(NtupleDumper)
+
+atlas_add_component(
+        NtupleDumper
+        src/NtupleDumperAlg.h
+        src/NtupleDumperAlg.cxx
+        src/component/NtupleDumper_entries.cxx
+        LINK_LIBRARIES AthenaBaseComps StoreGateLib xAODFaserWaveform xAODFaserTrigger ScintIdentifier FaserCaloIdentifier GeneratorObjects FaserActsGeometryLib TrackerSimEvent TrackerSimData TrackerIdentifier TrackerReadoutGeometry TrkTrack GeoPrimitives TrackerRIO_OnTrack TrackerSpacePoint
+)
+
+atlas_install_python_modules(python/*.py)
+atlas_install_scripts(scripts/*.py)
diff --git a/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py b/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bc52c41df03c934465f97658a39aeb342350631
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/python/NtupleDumperConfig.py
@@ -0,0 +1,104 @@
+"""
+    Copyright (C) 2002-2022 CERN for the benefit of the ATLAS collaboration
+"""
+
+from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
+from AthenaConfiguration.ComponentFactory import CompFactory
+from MagFieldServices.MagFieldServicesConfig import MagneticFieldSvcCfg
+
+def NtupleDumperAlgCfg(flags, **kwargs):
+    # Initialize GeoModel
+    from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg
+    acc = FaserGeometryCfg(flags)
+
+    acc.merge(MagneticFieldSvcCfg(flags))
+    # acc.merge(FaserActsTrackingGeometrySvcCfg(flags))
+    # acc.merge(FaserActsAlignmentCondAlgCfg(flags))
+
+    actsExtrapolationTool = CompFactory.FaserActsExtrapolationTool("FaserActsExtrapolationTool")
+    actsExtrapolationTool.MaxSteps = 1000
+    actsExtrapolationTool.TrackingGeometryTool = CompFactory.FaserActsTrackingGeometryTool("TrackingGeometryTool")
+
+    NtupleDumperAlg = CompFactory.NtupleDumperAlg("NtupleDumperAlg",**kwargs)
+    NtupleDumperAlg.ExtrapolationTool = actsExtrapolationTool
+    acc.addEventAlgo(NtupleDumperAlg)
+
+    thistSvc = CompFactory.THistSvc()
+    thistSvc.Output += ["HIST2 DATAFILE='Data-tuple.root' OPT='RECREATE'"]
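+    # note: the "HIST2" stream name here must match the "/HIST2/..." paths used when the
+    # tree and histograms are registered in NtupleDumperAlg::initialize()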
+    acc.addService(thistSvc)
+
+    return acc
+
+if __name__ == "__main__":
+
+    import sys
+    from AthenaCommon.Logging import log, logging
+    from AthenaCommon.Constants import DEBUG, VERBOSE, INFO
+    from AthenaCommon.Configurable import Configurable
+    from CalypsoConfiguration.AllConfigFlags import ConfigFlags
+    from AthenaConfiguration.TestDefaults import defaultTestFiles
+    from CalypsoConfiguration.MainServicesConfig import MainServicesCfg
+    from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
+    # from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
+
+    # Set up logging and new style config
+    log.setLevel(INFO)
+    Configurable.configurableRun3Behavior = True
+
+    # Configure
+    ConfigFlags.Input.Files = [
+        '/eos/experiment/faser/rec/2022/p0008//008119/Faser-Physics-008119-00168-p0008-xAOD.root',
+    ]
+    ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02"             # Always needed; must match FaserVersion
+    ConfigFlags.IOVDb.DatabaseInstance = "OFLP200"               # Use MC conditions for now
+    ConfigFlags.Input.ProjectName = "data21"                     # Needed to bypass autoconfig
+    ConfigFlags.Input.isMC = False                                # Needed to bypass autoconfig
+    ConfigFlags.GeoModel.FaserVersion     = "FASERNU-03"           # FASER geometry
+    ConfigFlags.Common.isOnline = False
+    ConfigFlags.GeoModel.Align.Dynamic = False
+    ConfigFlags.Beam.NumberOfCollisions = 0.
+
+    ConfigFlags.Detector.GeometryFaserSCT = True
+
+    ConfigFlags.lock()
+
+    # Core components
+    acc = MainServicesCfg(ConfigFlags)
+    acc.merge(PoolReadCfg(ConfigFlags))
+
+    # algorithm
+    acc.merge(NtupleDumperAlgCfg(ConfigFlags, UseFlukaWeights=True))
+
+    # quiet down the event loop printout
+    AthenaEventLoopMgr = CompFactory.AthenaEventLoopMgr()
+    AthenaEventLoopMgr.EventPrintoutInterval=500
+    acc.addService(AthenaEventLoopMgr)
+
+    # # Hack to avoid problem with our use of MC databases when isMC = False
+    replicaSvc = acc.getService("DBReplicaSvc")
+    replicaSvc.COOLSQLiteVetoPattern = ""
+    replicaSvc.UseCOOLSQLite = True
+    replicaSvc.UseCOOLFrontier = False
+    replicaSvc.UseGeomSQLite = True
+
+    # Timing
+    #acc.merge(MergeRecoTimingObjCfg(ConfigFlags))
+
+    # Dump config
+    # logging.getLogger('forcomps').setLevel(VERBOSE)
+    # acc.foreach_component("*").OutputLevel = VERBOSE
+    # acc.foreach_component("*ClassID*").OutputLevel = INFO
+    # acc.getCondAlgo("FaserSCT_AlignCondAlg").OutputLevel = VERBOSE
+    # acc.getCondAlgo("FaserSCT_DetectorElementCondAlg").OutputLevel = VERBOSE
+    # acc.getService("StoreGateSvc").Dump = True
+    # acc.getService("ConditionStore").Dump = True
+    # acc.printConfig(withDetails=True)
+    # ConfigFlags.dump()
+
+    # Execute and finish
+    sc = acc.run(maxEvents=-1)
+
+    # Success should be 0
+    sys.exit(not sc.isSuccess())    
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py b/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py
new file mode 100755
index 0000000000000000000000000000000000000000..7d735f1599e455001162fff0e5a33acad16d2e35
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeNtuple.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+
+# Set up (Py)ROOT.
+import ROOT
+import glob
+import sys
+import pandas as pd
+
+
+# Define a Landau convoluted with a gaussian for MIP fitting
+landgaus_conv = ROOT.TF1Convolution("landau","gaus",-10,100,True) # both functions must be close to zero at the edges of the convolution range, otherwise the circular FFT wraps values from one edge to the other
+landgaus_conv.SetNofPointsFFT(10000)
+landgaus = ROOT.TF1("landgaus",landgaus_conv, -10, 100, landgaus_conv.GetNpar())
+landgaus.SetParNames("Landau constant","Landau MPV","Landau width","Gaussian mean","Gaussian width")
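+# note (assumption): TF1Convolution normalises the second function internally, so the
+# convolution exposes the 5 parameters named above; in the MIP fits below the Gaussian
+# mean is fixed to 0 and its width to the noise measured in randomly triggered events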
+
+user_input = str(sys.argv[1]) # set to either 'all_high', 'all_low', or a run number
+
+t = ROOT.TChain("nt")
+nfiles = 0
+all_run_paths = glob.glob("/eos/project/f/faser-commissioning/DeionsNtuples/*")
+
+if user_input=="all_high":
+    runconfig = "High_gain"
+    print("processing high-gain runs")
+    gain = 30.0
+    for run_path in all_run_paths:
+        nfiles += t.Add(run_path+"/Data-tuple-High_gain*.root") # chain all ntuples from all runs that are high gain
+    rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/7930/Data-tuple-High_gain-007930-00000-100.root"); # load file from largest high gain run to get noise histograms
+elif user_input=="all_low":
+    runconfig = "Low_gain"
+    print("processing low-gain runs")
+    gain = 1.0
+    for run_path in all_run_paths:
+        nfiles += t.Add(run_path+"/Data-tuple-Low_gain*.root") # chain all ntuples from all runs that are low gain
+    rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/8137/Data-tuple-Low_gain-008137-00000-100.root"); # load file from largest low gain run to get noise histograms
+else: # assume user_input is a run number
+    # get run configuration from the run table on Brian's website
+    table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website
+    df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first one
+    runconfig=str(df.at[df.loc[df['Run'] == int(user_input)].index[0],'Configuration'].replace(' ','_')) # get the calo gain configuration (High_gain or Low_gain) from the run log
+    print("processing run "+user_input+" ("+runconfig+")")
+    if runconfig=="High_gain":
+        gain = 30.0
+    elif runconfig=="Low_gain":
+        gain = 1.0
+    else:
+        print("run config is neither 'High_gain' nor 'Low_gain', calo histogram ranges may be messed up")
+        gain = 1.0 # assume low gain
+
+    nfiles += t.Add("/eos/project/f/faser-commissioning/DeionsNtuples/"+user_input+"/*.root") # chain all ntuples from this run
+    rootFile = ROOT.TFile("/eos/project/f/faser-commissioning/DeionsNtuples/"+user_input+"/Data-tuple-"+runconfig+"-00"+user_input+"-00000-100.root"); # load the first file from this run to get noise histograms
+
+
+
+
+print("number of files chained together = ",nfiles)
+
+#ROOT.gROOT.SetStyle("ATLAS")
+#ROOT.gStyle.SetOptStat(111110) #take away option box in histograms
+#ROOT.gStyle.SetOptTitle(1)
+#ROOT.gStyle.SetOptFit(1)
+
+# Define histograms here
+hCaloCharge = []
+hCaloPeak = []
+hXYvsEcalo = []
+for chan in range(4):
+    hCaloCharge.append(ROOT.TH1F("hCalo"+str(chan)+"charge", "Charge in calo ch"+str(chan)+";Q (pC);# of events",100,0.2*gain,2.0*gain))
+    hCaloPeak.append(ROOT.TH1F("hCalo"+str(chan)+"peak", "Peak in calo ch"+str(chan)+";peak (mV);# of events",100,1.0*gain,5.0*gain))
+    hXYvsEcalo.append(ROOT.TProfile2D("hXYvsEcalo"+str(chan)+"" , "Calo ch"+str(chan)+" Charge vs Pos;X pos (mm);Y pos (mm)",26, -130.0, 130.0, 26, -130.0, 130.0))
+
+hCaloChargeTotal = ROOT.TH1F("hCaloChargeTotal", "Charge in Calo;Charge (pC);# of events",100,0.2*gain,2.0*gain)
+hCaloEdep = ROOT.TH1F("hCaloEdep", "Edep in Calo;Edep (GeV);# of events",100,0.0,1.8)
+
+hCaloThetaX = ROOT.TH1F("hCaloThetaX", "Track #theta_{x} at Calo face;#theta_{x} (radians);# of tracks",100,-0.1,0.1)
+hCaloThetaY = ROOT.TH1F("hCaloThetaY", "Track #theta_{y} at Calo face;#theta_{y} (radians);# of tracks",100,-0.1,0.1)
+
+hTrackPvsPYdiff = ROOT.TProfile("hTrackPvsPYdiff" , "Track #Deltap_{Y}/p vs p;Track p (MeV);(pY_{upstream} - pY_{downstream}) / p_{total}",100, 1000.0, 200000.0)
+hTrackPvsPXdiff = ROOT.TProfile("hTrackPvsPXdiff" , "Track #Deltap_{X}/p vs p;Track p (MeV);(pX_{upstream} - pX_{downstream}) / p_{total}",100, 1000.0, 200000.0)
+
+#t.Print() # will show you all variables in ttree
+
+i = 0
+for event in t:
+    i += 1
+
+    if i%1000 == 0:
+        print( "Processing event #%i of %i" % (i, t.GetEntries() ) )
+
+    if event.longTracks > 0: # only process events with at least one track that has hits in last 3 tracking stations
+        for j in range(event.longTracks): # loop over all long tracks in the event (long = has hits in last 3 tracking stations)
+            if event.Track_p0[j] != 0.0:
+                hTrackPvsPYdiff.Fill(event.Track_p0[j],(event.Track_py0[j] - event.Track_py1[j])/event.Track_p0[j])
+                hTrackPvsPXdiff.Fill(event.Track_p0[j],(event.Track_px0[j] - event.Track_px1[j])/event.Track_p0[j])
+
+            #print("track charge = %i and nLayers = %i" % (event.Track_charge[j],event.Track_nLayers[j]))
+            #print("track upstream   (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.Track_x0[j],event.Track_y0[j],event.Track_z0[j],event.Track_px0[j],event.Track_py0[j],event.Track_pz0[j]))
+            #print("track downstream (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.Track_x1[j],event.Track_y1[j],event.Track_z1[j],event.Track_px1[j],event.Track_py1[j],event.Track_pz1[j]))
+
+            #print("track at vetoNu (x,y) (thetaX,thetaY) = (%f,%f) (%f,%f)" % (event.Track_X_atVetoNu[j],event.Track_Y_atVetoNu[j],event.Track_ThetaX_atVetoNu[j],event.Track_ThetaY_atVetoNu[j]))
+            #print("track at Calo (x,y) (thetaX,thetaY) = (%f,%f) (%f,%f)" % (event.Track_X_atCalo[j],event.Track_Y_atCalo[j],event.Track_ThetaX_atCalo[j],event.Track_ThetaY_atCalo[j]))
+
+        #print("number of track segments = ",event.TrackSegments)
+        #for j in range(event.TrackSegments):
+            #print("trackseg (x,y,z) (px,py,pz) = (%f,%f,%f) (%f,%f,%f)" % (event.TrackSegment_x[j],event.TrackSegment_y[j],event.TrackSegment_z[j],event.TrackSegment_px[j],event.TrackSegment_py[j],event.TrackSegment_pz[j]))
+            #print("trackseg chi2 = %i and ndof = %i" % (event.TrackSegment_Chi2[j],event.TrackSegment_nDoF[j]))
+
+        #print("number of SpacePoints = ",event.SpacePoints)
+        #for j in range(event.SpacePoints):
+            #print("Spacepoint #",j)
+            #print("SpacePoint (x,y,z) = (%f,%f,%f)" % (event.SpacePoint_x[j],event.SpacePoint_y[j],event.SpacePoint_z[j]))
+
+        hCaloEdep.Fill(event.Calo_total_Edep)
+        hCaloChargeTotal.Fill(event.Calo_total_charge)
+
+        x_calo = event.Track_X_atCalo[0]
+        y_calo = event.Track_Y_atCalo[0]
+
+        hCaloThetaX.Fill(event.Track_ThetaX_atCalo[0])
+        hCaloThetaY.Fill(event.Track_ThetaY_atCalo[0])
+
+        if abs(event.Track_ThetaX_atCalo[0]) > 0.1 or abs(event.Track_ThetaY_atCalo[0]) > 0.1: continue
+
+        for chan,charge in enumerate([event.Calo0_raw_charge,event.Calo1_raw_charge,event.Calo2_raw_charge,event.Calo3_raw_charge]):
+            if charge > 0.2*gain and charge < 2.0*gain:
+                hXYvsEcalo[chan].Fill(x_calo,y_calo,charge)
+
+        if x_calo > -60.0 and x_calo < -20.0 and y_calo > -80.0 and y_calo < -10.0:
+            hCaloCharge[0].Fill(event.Calo0_raw_charge)
+            hCaloPeak[0].Fill(event.Calo0_raw_peak)
+        elif x_calo > 70.0 and x_calo < 100.0 and y_calo > -90.0 and y_calo < -10.0:
+            hCaloCharge[1].Fill(event.Calo1_raw_charge)
+            hCaloPeak[1].Fill(event.Calo1_raw_peak)
+        elif x_calo > -60.0 and x_calo < -20.0 and y_calo > 20.0 and y_calo < 110.0:
+            hCaloCharge[2].Fill(event.Calo2_raw_charge)
+            hCaloPeak[2].Fill(event.Calo2_raw_peak)
+        elif x_calo > 70.0 and x_calo < 100.0 and y_calo > 20.0 and y_calo < 110.0:
+            hCaloCharge[3].Fill(event.Calo3_raw_charge)
+            hCaloPeak[3].Fill(event.Calo3_raw_peak)
+
+#    if i > 10000:
+#        break
+
+# create a list of histograms of random event integrals
+hRandomCharge = []
+for chan in range(15):
+    hRandomCharge.append(rootFile.Get("hRandomCharge"+str(chan)))
+
+# Now make some plots
+filename = "analyze-"+runconfig+"-Ntuples.pdf"
+
+c = ROOT.TCanvas()
+c.Print(filename+'[')
+hCaloEdep.Draw()
+ROOT.gPad.SetLogy()
+c.Print(filename)
+
+c = ROOT.TCanvas()
+hCaloChargeTotal.Draw()
+c.Print(filename)
+
+c = ROOT.TCanvas()
+c.Divide(2,2)
+for chan in range(4):
+    c.cd(1+chan)
+    hXYvsEcalo[chan].GetZaxis().SetRangeUser(hCaloCharge[chan].GetMean() - 0.3*hCaloCharge[chan].GetStdDev(),hCaloCharge[chan].GetMean() + 0.4*hCaloCharge[chan].GetStdDev())
+    hXYvsEcalo[chan].Draw('COLZ')
+c.Print(filename)
+
+leg = []
+c = ROOT.TCanvas()
+c.Divide(2,2)
+for chan in range(4):
+    c.cd(1+chan)
+    hCaloCharge[chan].Fit("landau")
+    landgaus.SetParameters(hCaloCharge[chan].GetFunction("landau").GetParameter(0),hCaloCharge[chan].GetFunction("landau").GetParameter(1),hCaloCharge[chan].GetFunction("landau").GetParameter(2),0.0,hRandomCharge[chan].GetStdDev())
+    landgaus.SetParLimits(0,0.1*hCaloCharge[chan].GetFunction("landau").GetParameter(0),20.0*hCaloCharge[chan].GetFunction("landau").GetParameter(0))
+    landgaus.SetParLimits(1,0.5*hCaloCharge[chan].GetFunction("landau").GetParameter(1),1.2*hCaloCharge[chan].GetFunction("landau").GetParameter(1))
+    landgaus.SetParLimits(2,0.1*hCaloCharge[chan].GetFunction("landau").GetParameter(2),1.2*hCaloCharge[chan].GetFunction("landau").GetParameter(2))
+    landgaus.FixParameter(3,0.0)
+    landgaus.FixParameter(4,hRandomCharge[chan].GetStdDev()) # fix gaussian smearing to the noise seen in randomly triggered events
+    hCaloCharge[chan].Fit("landgaus","+")
+    hCaloCharge[chan].GetFunction("landgaus").SetLineColor(4)
+    hCaloCharge[chan].Draw()
+
+    leg.append( ROOT.TLegend(0.55,0.55,0.89,0.75) )
+    leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landau"),"Landau MPV = "+str(hCaloCharge[chan].GetFunction("landau").GetParameter(1))[:6]+" #pm "+str(hCaloCharge[chan].GetFunction("landau").GetParError(1))[:6],"L")
+    leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landgaus"),"Landgaus MPV = "+str(hCaloCharge[chan].GetFunction("landgaus").GetParameter(1))[:6]+" #pm "+str(hCaloCharge[chan].GetFunction("landgaus").GetParError(1))[:6],"L")
+    leg[chan].AddEntry(hCaloCharge[chan].GetFunction("landgaus"),"Landgaus gaussian width = "+str(hCaloCharge[chan].GetFunction("landgaus").GetParameter(4))[:6],"")
+    leg[chan].SetBorderSize(0)
+    leg[chan].Draw()
+c.Print(filename)
+
+leg = []
+c = ROOT.TCanvas()
+c.Divide(2,2)
+for chan in range(4):
+    c.cd(1+chan)
+    hCaloPeak[chan].Fit("landau")
+    hCaloPeak[chan].Draw()
+
+    leg.append( ROOT.TLegend(0.55,0.55,0.89,0.75) )
+    leg[chan].AddEntry(hCaloPeak[chan].GetFunction("landau"),"Landau MPV = "+str(hCaloPeak[chan].GetFunction("landau").GetParameter(1))[:6]+" #pm "+str(hCaloPeak[chan].GetFunction("landau").GetParError(1))[:6],"L")
+    leg[chan].SetBorderSize(0)
+    leg[chan].Draw()
+c.Print(filename)
+
+c = ROOT.TCanvas()
+c.Divide(1,2)
+c.cd(1)
+hCaloThetaX.Draw()
+c.cd(2)
+hCaloThetaY.Draw()
+c.Print(filename)
+
+c = ROOT.TCanvas()
+c.Divide(1,2)
+c.cd(1)
+hTrackPvsPYdiff.GetYaxis().SetRangeUser(hTrackPvsPYdiff.GetMean(2) - hTrackPvsPYdiff.GetStdDev(2), hTrackPvsPYdiff.GetMean(2) + hTrackPvsPYdiff.GetStdDev(2))
+hTrackPvsPYdiff.Draw()
+c.cd(2)
+hTrackPvsPXdiff.GetYaxis().SetRangeUser(hTrackPvsPXdiff.GetMean(2) - hTrackPvsPXdiff.GetStdDev(2), hTrackPvsPXdiff.GetMean(2) + hTrackPvsPXdiff.GetStdDev(2))
+hTrackPvsPXdiff.Draw()
+c.Print(filename)
+
+c = ROOT.TCanvas()
+c.Divide(4,4)
+for chan in range(15):
+    c.cd(1+chan)
+    hRandomCharge[chan].Draw()
+c.Print(filename)
+
+# Must close file at the end
+c.Print(filename+']')
+
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py
new file mode 100755
index 0000000000000000000000000000000000000000..c8ab9bb76af0be2c537fd201c6ffb2387bbbd319
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+
+"""
+    Copyright (C) 2002-2022 CERN for the benefit of the FASER collaboration
+"""
+
+from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator
+from AthenaConfiguration.ComponentFactory import CompFactory
+from MagFieldServices.MagFieldServicesConfig import MagneticFieldSvcCfg
+
+
+def NtupleDumperAlgCfg(flags, OutName, **kwargs):
+    # Initialize GeoModel
+    from FaserGeoModel.FaserGeoModelConfig import FaserGeometryCfg
+    acc = FaserGeometryCfg(flags)
+
+    acc.merge(MagneticFieldSvcCfg(flags))
+    # acc.merge(FaserActsTrackingGeometrySvcCfg(flags))
+    # acc.merge(FaserActsAlignmentCondAlgCfg(flags))
+
+    actsExtrapolationTool = CompFactory.FaserActsExtrapolationTool("FaserActsExtrapolationTool")
+    actsExtrapolationTool.MaxSteps = 10000
+    actsExtrapolationTool.TrackingGeometryTool = CompFactory.FaserActsTrackingGeometryTool("TrackingGeometryTool")
+
+    NtupleDumperAlg = CompFactory.NtupleDumperAlg("NtupleDumperAlg",**kwargs)
+    NtupleDumperAlg.ExtrapolationTool = actsExtrapolationTool
+    acc.addEventAlgo(NtupleDumperAlg)
+
+    thistSvc = CompFactory.THistSvc()
+    thistSvc.Output += [f"HIST2 DATAFILE='{OutName}' OPT='RECREATE'"]
+    acc.addService(thistSvc)
+
+    return acc
+
+if __name__ == "__main__":
+
+    import glob
+    import sys
+    import ROOT
+
+    runno=int(sys.argv[1])
+    num=int(sys.argv[2])
+    filesPerJob=int(sys.argv[3])
+    run_config=str(sys.argv[4]) 
+
+    ptag="p0008"
+
+    from AthenaCommon.Logging import log, logging
+    from AthenaCommon.Constants import DEBUG, VERBOSE, INFO
+    from AthenaCommon.Configurable import Configurable
+    from CalypsoConfiguration.AllConfigFlags import ConfigFlags
+    from AthenaConfiguration.TestDefaults import defaultTestFiles
+    from CalypsoConfiguration.MainServicesConfig import MainServicesCfg
+    from AthenaPoolCnvSvc.PoolReadConfig import PoolReadCfg
+    # from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg
+    # Set up logging and new style config
+    log.setLevel(INFO)
+    Configurable.configurableRun3Behavior = True
+
+    dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}"
+    files=sorted(glob.glob(f"{dataDir}/Faser-Physics*"))
+    fileListInitial=files[num*filesPerJob:(num+1)*filesPerJob]
+    fileList=[]
+    for fName in fileListInitial:
+        try:
+            fh=ROOT.TFile(fName)
+            fileList.append(fName)
+        except OSError:
+            print("Warning: bad file", fName)
+
+    log.info(f"Analyzing Run {runno} files {num*filesPerJob} to {(num+1)*filesPerJob} (num={num})")
+    log.info(f"Got {len(fileList)} files out of {len(fileListInitial)}")
+
+    outName=f"Data-tuple-{run_config}-{runno:06d}-{num:05d}-{filesPerJob}.root"
+
+    # Configure
+    ConfigFlags.Input.Files = fileList
+    ConfigFlags.IOVDb.GlobalTag = "OFLCOND-FASER-02"             # Always needed; must match FaserVersion
+    ConfigFlags.IOVDb.DatabaseInstance = "OFLP200"               # Use MC conditions for now
+    ConfigFlags.Input.ProjectName = "data21"                     # Needed to bypass autoconfig
+    ConfigFlags.Input.isMC = False                                # Needed to bypass autoconfig
+    ConfigFlags.GeoModel.FaserVersion     = "FASERNU-03"           # FASER geometry
+    ConfigFlags.Common.isOnline = False
+    ConfigFlags.GeoModel.Align.Dynamic = False
+    ConfigFlags.Beam.NumberOfCollisions = 0.
+
+    ConfigFlags.Detector.GeometryFaserSCT = True
+
+    ConfigFlags.lock()
+
+    # Core components
+    acc = MainServicesCfg(ConfigFlags)
+    acc.merge(PoolReadCfg(ConfigFlags))
+
+    # algorithm
+    acc.merge(NtupleDumperAlgCfg(ConfigFlags, outName, UseFlukaWeights=True, CaloConfig=run_config))
+
+    AthenaEventLoopMgr = CompFactory.AthenaEventLoopMgr()
+    AthenaEventLoopMgr.EventPrintoutInterval=1000
+    acc.addService(AthenaEventLoopMgr)
+
+    # # Hack to avoid problem with our use of MC databases when isMC = False
+    replicaSvc = acc.getService("DBReplicaSvc")
+    replicaSvc.COOLSQLiteVetoPattern = ""
+    replicaSvc.UseCOOLSQLite = True
+    replicaSvc.UseCOOLFrontier = False
+    replicaSvc.UseGeomSQLite = True
+
+    # Timing
+    #acc.merge(MergeRecoTimingObjCfg(ConfigFlags))
+
+    # Dump config
+    # logging.getLogger('forcomps').setLevel(VERBOSE)
+    # acc.foreach_component("*").OutputLevel = VERBOSE
+    # acc.foreach_component("*ClassID*").OutputLevel = INFO
+    # acc.getCondAlgo("FaserSCT_AlignCondAlg").OutputLevel = VERBOSE
+    # acc.getCondAlgo("FaserSCT_DetectorElementCondAlg").OutputLevel = VERBOSE
+    # acc.getService("StoreGateSvc").Dump = True
+    # acc.getService("ConditionStore").Dump = True
+    # acc.printConfig(withDetails=True)
+    # ConfigFlags.dump()
+
+    # Execute and finish
+    sc = acc.run(maxEvents=-1)
+
+    # Success should be 0
+    sys.exit(not sc.isSuccess())    
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh
new file mode 100755
index 0000000000000000000000000000000000000000..dc42bc521c678a3bed8536e93b6b68ad25549ca7
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/analyzeRun.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+runno=$1
+num=$2
+filesPerJob=$3
+runconfig=$4
+
+WD=$PWD
+
+cd /afs/cern.ch/user/d/dfellers/faser
+source setup.sh
+cd build
+source x86_64-centos7-gcc11-opt/setup.sh
+cd $WD
+echo "Starting analysis"
+analyzeRun.py $runno $num $filesPerJob $runconfig
+cp Data-tuple*.root /eos/project/f/faser-commissioning/DeionsNtuples/$runno/
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py b/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py
new file mode 100755
index 0000000000000000000000000000000000000000..5f0805b7d876e4573c5cbab096deba0364314726
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/submitAllJobsThatHaveErrorLogs.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+import glob
+import os
+import sys
+import pandas as pd
+
+print("Re-submitting ntuple-dumper condor jobs that produced non-empty error logs")
+
+table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website
+df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first table
+
+# make a list of all runs and batch job numbers that failed and thus have non-empty error logs
+run_list = []
+allErrorLogs_list = glob.glob('/afs/cern.ch/user/d/dfellers/faser/ntuple-dumper/run/logs/*/*err')
+for error_log in allErrorLogs_list:
+    if os.path.getsize(error_log) != 0:
+        print('Error Log is not empty: ', error_log)
+        run_num = int(error_log.split('/')[-2].split('-')[-1])
+        batch_num = int(error_log.split('.')[-2])
+        run_list.append([run_num,batch_num])
+
+print("list to be re-submitted:", run_list)
+
+ptag="p0008"
+filesPerJob=100
+
+for i,run_info in enumerate(run_list):
+    runno = run_info[0]
+    batch_number = run_info[1]
+
+    runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get the calo gain configuration (High_gain or Low_gain) from the run log
+
+    print("%i of %i failed jobs processed. Currently re-submitting run %i batch %i (%s)"%(i,len(run_list),runno,batch_number,runconfig))
+
+    batchFile=f"batch/Run-{runno:06d}-{batch_number}.sub"
+    fh=open(batchFile,"w")
+    pwd=os.getenv("PWD")
+    fh.write(f"""
+    executable              = {pwd}/analyzeRun.sh
+    arguments               = {runno} {batch_number} {filesPerJob} {runconfig}
+    output                  = {pwd}/logs/Run-{runno:06d}/batch.{batch_number}.out
+    error                   = {pwd}/logs/Run-{runno:06d}/batch.{batch_number}.err
+    log                     = {pwd}/logs/Run-{runno:06d}/batch.log
+    requirements            = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7")
+    getenv                  = False
+    transfer_output_files   = ""
+    +JobFlavour             = "workday"
+    queue 1
+    """)
+    fh.close()
+    os.system(f"echo condor_submit {batchFile}")
+    os.system(f"condor_submit {batchFile}")
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py b/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py
new file mode 100755
index 0000000000000000000000000000000000000000..92f49d3392476a245892a5a322c8cfd5c352fde3
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/submitAllStableRuns.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+import glob
+import os
+import sys
+import pandas as pd
+
+print("NtupleDumping all stable-beam runs found at http://aagaard.web.cern.ch/aagaard/FASERruns.html")
+
+table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website
+df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first table
+df.columns = [c.replace(' ', '_') for c in df.columns] # rename the columns such that names with spaces are replaced with '_' (needed to access 'Stable_Beam' column)
+df.drop(df[df.Stable_Beam != 'Yes'].index, inplace=True) # drop all runs that are not stable beam runs
+df.drop(df[(df.Configuration != 'Low gain') & (df.Configuration != 'High gain')].index, inplace=True) # drop all runs that are not 'Low gain' or 'High gain'
+
+run_list = df['Run'].tolist()
+
+ptag="p0008"
+filesPerJob=100
+
+for i,runno in enumerate(run_list):
+    runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get the calo gain configuration (High_gain or Low_gain) from the run log
+
+    print("%i of %i runs processed. Currently processing run %i (%s)"%(i,len(run_list),runno,runconfig))
+
+    os.system(f"mkdir -p logs/Run-{runno:06d}")
+    os.system(f"mkdir -p batch")
+    os.system(f"mkdir -p /eos/project/f/faser-commissioning/DeionsNtuples/{runno}")
+
+    dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}"
+    files=glob.glob(f"{dataDir}/Faser-Physics*")
+    numFiles=len(files)
+    numJobs=numFiles//filesPerJob+(numFiles%filesPerJob!=0)
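+    # ceiling division: e.g. 250 files at 100 files/job -> 3 jobs (the boolean term adds 1 when there is a remainder)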
+    batchFile=f"batch/Run-{runno:06d}.sub"
+    fh=open(batchFile,"w")
+    pwd=os.getenv("PWD")
+    fh.write(f"""
+    executable              = {pwd}/analyzeRun.sh
+    arguments               = {runno} $(ProcId) {filesPerJob} {runconfig}
+    output                  = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).out
+    error                   = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).err
+    log                     = {pwd}/logs/Run-{runno:06d}/batch.log
+    requirements            = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7")
+    getenv                  = False
+    transfer_output_files   = ""
+    +JobFlavour             = "workday"
+    queue {numJobs}
+    """)
+    fh.close()
+    os.system(f"echo condor_submit {batchFile}")
+    os.system(f"condor_submit {batchFile}")
+
diff --git a/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py b/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py
new file mode 100755
index 0000000000000000000000000000000000000000..b408759504ab5a1afc1df332154250cfcd7b4325
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/scripts/submitRun.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+import glob
+import os
+import sys
+import pandas as pd
+
+table_runs = pd.read_html('http://aagaard.web.cern.ch/aagaard/FASERruns.html') # load in run tables from website
+df = table_runs[0] # table_runs is a list of all tables on the website, we only want the first one
+
+ptag="p0008"
+filesPerJob=100
+
+runno=int(sys.argv[1])
+runconfig=str(df.at[df.loc[df['Run'] == runno].index[0],'Configuration'].replace(' ','_')) # get the calo gain configuration (High_gain or Low_gain) from the run log
+
+os.system(f"mkdir -p logs/Run-{runno:06d}")
+os.system(f"mkdir -p batch")
+os.system(f"mkdir -p /eos/project/f/faser-commissioning/DeionsNtuples/{runno}")
+
+dataDir=f"/eos/experiment/faser/rec/2022/{ptag}/{runno:06d}"
+files=glob.glob(f"{dataDir}/Faser-Physics*")
+numFiles=len(files)
+numJobs=numFiles//filesPerJob+(numFiles%filesPerJob!=0)
+batchFile=f"batch/Run-{runno:06d}.sub"
+fh=open(batchFile,"w")
+pwd=os.getenv("PWD")
+fh.write(f"""
+executable              = {pwd}/analyzeRun.sh
+arguments               = {runno} $(ProcId) {filesPerJob} {runconfig}
+output                  = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).out
+error                   = {pwd}/logs/Run-{runno:06d}/batch.$(ProcId).err
+log                     = {pwd}/logs/Run-{runno:06d}/batch.log
+requirements            = (Arch == "X86_64" && OpSysAndVer =?= "CentOS7")
+getenv                  = False
+transfer_output_files   = ""
++JobFlavour             = "workday"
+queue {numJobs}
+""")
+fh.close()
+os.system(f"echo condor_submit {batchFile}")
+
diff --git a/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..d1a07f428c73dfb7d6c7f1bf69ab20b73f12a196
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.cxx
@@ -0,0 +1,890 @@
+#include "NtupleDumperAlg.h"
+#include "TrkTrack/Track.h"
+#include "TrackerRIO_OnTrack/FaserSCT_ClusterOnTrack.h"
+#include "TrackerIdentifier/FaserSCT_ID.h"
+#include "ScintIdentifier/VetoNuID.h"
+#include "ScintIdentifier/VetoID.h"
+#include "ScintIdentifier/TriggerID.h"
+#include "ScintIdentifier/PreshowerID.h"
+#include "FaserCaloIdentifier/EcalID.h"
+#include "TrackerPrepRawData/FaserSCT_Cluster.h"
+#include "TrackerSpacePoint/FaserSCT_SpacePoint.h"
+#include "Identifier/Identifier.h"
+#include "TrackerReadoutGeometry/SCT_DetectorManager.h"
+#include "TrackerReadoutGeometry/SiDetectorElement.h"
+#include "TrackerPrepRawData/FaserSCT_Cluster.h"
+#include "xAODTruth/TruthParticle.h"
+#include <cmath>
+#include <TH1F.h>
+
+
+NtupleDumperAlg::NtupleDumperAlg(const std::string &name, 
+                                    ISvcLocator *pSvcLocator)
+    : AthReentrantAlgorithm(name, pSvcLocator), 
+      AthHistogramming(name),
+      m_histSvc("THistSvc/THistSvc", name) {}
+
+
+void NtupleDumperAlg::addBranch(const std::string &name,
+				float* var) {
+  m_tree->Branch(name.c_str(),var,(name+"/F").c_str());
+}
+void NtupleDumperAlg::addBranch(const std::string &name,
+				unsigned int* var) {
+  m_tree->Branch(name.c_str(),var,(name+"/i").c_str()); // "/i" is ROOT's leaf type code for unsigned int
+}
+
+void NtupleDumperAlg::addWaveBranches(const std::string &name,
+				      int nchannels,
+				      int first) {
+  for(int ch=0;ch<nchannels;ch++) {
+    std::string base=name+std::to_string(ch)+"_";
+    addBranch(base+"time",&m_wave_localtime[first]);
+    addBranch(base+"peak",&m_wave_peak[first]);
+    addBranch(base+"width",&m_wave_width[first]);
+    addBranch(base+"charge",&m_wave_charge[first]);
+    addBranch(base+"raw_peak",&m_wave_raw_peak[first]);
+    addBranch(base+"raw_charge",&m_wave_raw_charge[first]);
+    addBranch(base+"baseline",&m_wave_baseline_mean[first]);
+    addBranch(base+"baseline_rms",&m_wave_baseline_rms[first]);
+    addBranch(base+"status",&m_wave_status[first]);
+    first++;
+  }
+}
+
+void NtupleDumperAlg::FillWaveBranches(const xAOD::WaveformHitContainer &wave) const {
+  for (auto hit : wave) {
+    if ((hit->hit_status()&2)==0) { // don't store secondary hits as they would overwrite the primary hit
+      int ch=hit->channel();
+      m_wave_localtime[ch]=hit->localtime()+m_clock_phase;
+      m_wave_peak[ch]=hit->peak();
+      m_wave_width[ch]=hit->width();
+      m_wave_charge[ch]=hit->integral()/50;
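+      // note (assumption): the divide-by-50 is the 50 Ohm input termination, converting the waveform integral (mV*ns) to charge in pC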
+
+      m_wave_raw_peak[ch]=hit->raw_peak();
+      m_wave_raw_charge[ch]=hit->raw_integral()/50;
+      m_wave_baseline_mean[ch]=hit->baseline_mean();
+      m_wave_baseline_rms[ch]=hit->baseline_rms();
+      m_wave_status[ch]=hit->hit_status();  
+    }
+  }
+}
+
+StatusCode NtupleDumperAlg::initialize() 
+{
+  ATH_CHECK(m_truthEventContainer.initialize());
+  ATH_CHECK(m_truthParticleContainer.initialize());
+  ATH_CHECK(m_trackCollection.initialize());
+  ATH_CHECK(m_trackSegmentCollection.initialize());
+  ATH_CHECK(m_vetoNuContainer.initialize());
+  ATH_CHECK(m_vetoContainer.initialize());
+  ATH_CHECK(m_triggerContainer.initialize());
+  ATH_CHECK(m_preshowerContainer.initialize());
+  ATH_CHECK(m_ecalContainer.initialize());
+  ATH_CHECK(m_clusterContainer.initialize());
+  ATH_CHECK(m_simDataCollection.initialize());
+  ATH_CHECK(m_FaserTriggerData.initialize());
+  ATH_CHECK(m_ClockWaveformContainer.initialize());
+
+  ATH_CHECK(detStore()->retrieve(m_sctHelper,       "FaserSCT_ID"));
+  ATH_CHECK(detStore()->retrieve(m_vetoNuHelper,    "VetoNuID"));
+  ATH_CHECK(detStore()->retrieve(m_vetoHelper,      "VetoID"));
+  ATH_CHECK(detStore()->retrieve(m_triggerHelper,   "TriggerID"));
+  ATH_CHECK(detStore()->retrieve(m_preshowerHelper, "PreshowerID"));
+  ATH_CHECK(detStore()->retrieve(m_ecalHelper,      "EcalID"));
+
+  ATH_CHECK(detStore()->retrieve(m_detMgr, "SCT"));
+  ATH_CHECK(m_extrapolationTool.retrieve());
+  ATH_CHECK(m_trackingGeometryTool.retrieve());
+
+  ATH_CHECK(m_spacePointContainerKey.initialize());
+
+  if (m_useFlukaWeights)
+  {
+    m_baseEventCrossSection = (m_flukaCrossSection * kfemtoBarnsPerMilliBarn)/m_flukaCollisions;
+  }
+  else if (m_useGenieWeights)
+  {
+    m_baseEventCrossSection = 1.0/m_genieLuminosity;
+  }
+  else
+  {
+    m_baseEventCrossSection = 1.0;
+  }
+
+  m_tree = new TTree("nt", "NtupleDumper tree");
+  m_tree->Branch("run", &m_run_number, "run/I");
+  m_tree->Branch("eventID", &m_event_number, "eventID/I");
+  m_tree->Branch("eventTime", &m_event_time, "eventTime/I");
+  m_tree->Branch("BCID", &m_bcid, "BCID/I");
+
+  m_tree->Branch("TBP", &m_tbp, "TBP/I");
+  m_tree->Branch("TAP", &m_tap, "TAP/I");
+  m_tree->Branch("inputBits", &m_inputBits, "inputBits/I");
+  m_tree->Branch("inputBitsNext", &m_inputBitsNext, "inputBitsNext/I");
+
+  addWaveBranches("VetoNu",2,4);
+  addWaveBranches("VetoSt1",2,6);
+  addWaveBranches("VetoSt2",1,14);
+  addWaveBranches("Timing",4,8);
+  addWaveBranches("Preshower",2,12);
+  addWaveBranches("Calo",4,0);
+  addBranch("Calo_total_charge", &m_calo_total);
+  addBranch("Calo_total_raw_charge", &m_calo_rawtotal);
+
+  addBranch("Calo0_Edep", &m_Calo0_Edep);
+  addBranch("Calo1_Edep", &m_Calo1_Edep);
+  addBranch("Calo2_Edep", &m_Calo2_Edep);
+  addBranch("Calo3_Edep", &m_Calo3_Edep);
+  addBranch("Calo_total_Edep", &m_Calo_Total_Edep);
+  addBranch("Preshower12_Edep", &m_Preshower12_Edep);
+  addBranch("Preshower13_Edep", &m_Preshower13_Edep);
+
+  addBranch("nClusters0",&m_station0Clusters);
+  addBranch("nClusters1",&m_station1Clusters);
+  addBranch("nClusters2",&m_station2Clusters);
+  addBranch("nClusters3",&m_station3Clusters);
+
+  addBranch("SpacePoints",&m_nspacepoints);
+  m_tree->Branch("SpacePoint_x", &m_spacepointX);
+  m_tree->Branch("SpacePoint_y", &m_spacepointY);
+  m_tree->Branch("SpacePoint_z", &m_spacepointZ);
+
+  addBranch("TrackSegments",&m_ntracksegs);
+  m_tree->Branch("TrackSegment_Chi2", &m_trackseg_Chi2);
+  m_tree->Branch("TrackSegment_nDoF", &m_trackseg_DoF);
+  m_tree->Branch("TrackSegment_x", &m_trackseg_x);
+  m_tree->Branch("TrackSegment_y", &m_trackseg_y);
+  m_tree->Branch("TrackSegment_z", &m_trackseg_z);
+  m_tree->Branch("TrackSegment_px", &m_trackseg_px);
+  m_tree->Branch("TrackSegment_py", &m_trackseg_py);
+  m_tree->Branch("TrackSegment_pz", &m_trackseg_pz);
+
+  m_tree->Branch("longTracks", &m_longTracks, "longTracks/I");
+  m_tree->Branch("Track_Chi2", &m_Chi2);
+  m_tree->Branch("Track_nDoF", &m_DoF);
+  m_tree->Branch("Track_x0", &m_xup);
+  m_tree->Branch("Track_y0", &m_yup);
+  m_tree->Branch("Track_z0", &m_zup);
+  m_tree->Branch("Track_px0", &m_pxup);
+  m_tree->Branch("Track_py0", &m_pyup);
+  m_tree->Branch("Track_pz0", &m_pzup);
+  m_tree->Branch("Track_p0", &m_pup);
+  m_tree->Branch("Track_x1", &m_xdown);
+  m_tree->Branch("Track_y1", &m_ydown);
+  m_tree->Branch("Track_z1", &m_zdown);
+  m_tree->Branch("Track_px1", &m_pxdown);
+  m_tree->Branch("Track_py1", &m_pydown);
+  m_tree->Branch("Track_pz1", &m_pzdown);
+  m_tree->Branch("Track_p1", &m_pdown);
+  m_tree->Branch("Track_charge", &m_charge);
+  m_tree->Branch("Track_nLayers", &m_nLayers);
+
+  m_tree->Branch("Track_InStation0",&m_nHit0);
+  m_tree->Branch("Track_InStation1",&m_nHit1);
+  m_tree->Branch("Track_InStation2",&m_nHit2);
+  m_tree->Branch("Track_InStation3",&m_nHit3);
+
+  m_tree->Branch("Track_X_atVetoNu", &m_xVetoNu);
+  m_tree->Branch("Track_Y_atVetoNu", &m_yVetoNu);
+  m_tree->Branch("Track_ThetaX_atVetoNu", &m_thetaxVetoNu);
+  m_tree->Branch("Track_ThetaY_atVetoNu", &m_thetayVetoNu);
+
+  m_tree->Branch("Track_X_atVetoStation1", &m_xVetoStation1);
+  m_tree->Branch("Track_Y_atVetoStation1", &m_yVetoStation1);
+  m_tree->Branch("Track_ThetaX_atVetoStation1", &m_thetaxVetoStation1);
+  m_tree->Branch("Track_ThetaY_atVetoStation1", &m_thetayVetoStation1);
+
+  m_tree->Branch("Track_X_atVetoStation2", &m_xVetoStation2);
+  m_tree->Branch("Track_Y_atVetoStation2", &m_yVetoStation2);
+  m_tree->Branch("Track_ThetaX_atVetoStation2", &m_thetaxVetoStation2);
+  m_tree->Branch("Track_ThetaY_atVetoStation2", &m_thetayVetoStation2);
+
+  m_tree->Branch("Track_X_atTrig", &m_xTrig);
+  m_tree->Branch("Track_Y_atTrig", &m_yTrig);
+  m_tree->Branch("Track_ThetaX_atTrig", &m_thetaxTrig);
+  m_tree->Branch("Track_ThetaY_atTrig", &m_thetayTrig);
+
+  m_tree->Branch("Track_X_atPreshower1", &m_xPreshower1);
+  m_tree->Branch("Track_Y_atPreshower1", &m_yPreshower1);
+  m_tree->Branch("Track_ThetaX_atPreshower1", &m_thetaxPreshower1);
+  m_tree->Branch("Track_ThetaY_atPreshower1", &m_thetayPreshower1);
+
+  m_tree->Branch("Track_X_atPreshower2", &m_xPreshower2);
+  m_tree->Branch("Track_Y_atPreshower2", &m_yPreshower2);
+  m_tree->Branch("Track_ThetaX_atPreshower2", &m_thetaxPreshower2);
+  m_tree->Branch("Track_ThetaY_atPreshower2", &m_thetayPreshower2);
+
+  m_tree->Branch("Track_X_atCalo", &m_xCalo);
+  m_tree->Branch("Track_Y_atCalo", &m_yCalo);
+  m_tree->Branch("Track_ThetaX_atCalo", &m_thetaxCalo);
+  m_tree->Branch("Track_ThetaY_atCalo", &m_thetayCalo);
+
+  m_tree->Branch("pTruthLepton", &m_truthLeptonMomentum, "pTruthLepton/D");
+  m_tree->Branch("truthBarcode", &m_truthBarcode, "truthBarcode/I");
+  m_tree->Branch("truthPdg", &m_truthPdg, "truthPdg/I");
+  m_tree->Branch("CrossSection", &m_crossSection, "crossSection/D");
+
+  ATH_CHECK(histSvc()->regTree("/HIST2/tree", m_tree));
+
+  // Register histograms
+  m_HistRandomCharge[0] = new TH1F("hRandomCharge0", "Calo ch0 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[1] = new TH1F("hRandomCharge1", "Calo ch1 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[2] = new TH1F("hRandomCharge2", "Calo ch2 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[3] = new TH1F("hRandomCharge3", "Calo ch3 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[4] = new TH1F("hRandomCharge4", "VetoNu ch4 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[5] = new TH1F("hRandomCharge5", "VetoNu ch5 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[6] = new TH1F("hRandomCharge6", "Veto ch6 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[7] = new TH1F("hRandomCharge7", "Veto ch7 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[8] = new TH1F("hRandomCharge8", "Trig ch8 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[9] = new TH1F("hRandomCharge9", "Trig ch9 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[10] = new TH1F("hRandomCharge10", "Trig ch10 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[11] = new TH1F("hRandomCharge11", "Trig ch11 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[12] = new TH1F("hRandomCharge12", "Preshower ch12 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[13] = new TH1F("hRandomCharge13", "Preshower ch13 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+  m_HistRandomCharge[14] = new TH1F("hRandomCharge14", "Veto ch14 Charge from Random Events;charge (pC);Events/bin", 100, -1.0, 1.0);
+
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge0", m_HistRandomCharge[0]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge1", m_HistRandomCharge[1]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge2", m_HistRandomCharge[2]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge3", m_HistRandomCharge[3]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge4", m_HistRandomCharge[4]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge5", m_HistRandomCharge[5]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge6", m_HistRandomCharge[6]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge7", m_HistRandomCharge[7]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge8", m_HistRandomCharge[8]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge9", m_HistRandomCharge[9]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge10", m_HistRandomCharge[10]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge11", m_HistRandomCharge[11]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge12", m_HistRandomCharge[12]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge13", m_HistRandomCharge[13]));
+  ATH_CHECK(histSvc()->regHist("/HIST2/RandomCharge14", m_HistRandomCharge[14]));
+
+  m_MIP_sim_Edep_calo = 0.0585; // MIP deposits 0.0585 GeV of energy in calo
+  m_MIP_sim_Edep_preshower = 0.004894; // MIP deposits 0.004894 GeV of energy in a preshower layer
+
+  if (m_doBlinding) {
+    ATH_MSG_INFO("Blinding will be enforced for real data.");
+  } else {
+    ATH_MSG_INFO("Blinding will NOT be enforced for real data.");
+  }
+
+  return StatusCode::SUCCESS;
+}
+
+
+StatusCode NtupleDumperAlg::execute(const EventContext &ctx) const 
+{
+  clearTree();
+
+  // check if real data or simulation data
+  bool realData = true;
+  SG::ReadHandle<xAOD::TruthEventContainer> truthEventContainer { m_truthEventContainer, ctx };
+  if (truthEventContainer.isValid() && truthEventContainer->size() > 0)
+  {
+    realData = false;
+  }
+
+  // if real data, store charge in histograms from random events and only fill ntuple from coincidence events
+  if (realData) { //no trigger simulation yet
+    SG::ReadHandle<xAOD::FaserTriggerData> triggerData(m_FaserTriggerData, ctx);
+    m_tap=triggerData->tap();
+    if (m_tap==16) { // random trigger, store charge of scintillators in histograms
+      // Read in Waveform containers
+      SG::ReadHandle<xAOD::WaveformHitContainer> vetoNuContainer { m_vetoNuContainer, ctx };
+      ATH_CHECK(vetoNuContainer.isValid());
+
+      SG::ReadHandle<xAOD::WaveformHitContainer> vetoContainer { m_vetoContainer, ctx };
+      ATH_CHECK(vetoContainer.isValid());
+
+      SG::ReadHandle<xAOD::WaveformHitContainer> triggerContainer { m_triggerContainer, ctx };
+      ATH_CHECK(triggerContainer.isValid());
+
+      SG::ReadHandle<xAOD::WaveformHitContainer> preshowerContainer { m_preshowerContainer, ctx };
+      ATH_CHECK(preshowerContainer.isValid());
+
+      SG::ReadHandle<xAOD::WaveformHitContainer> ecalContainer { m_ecalContainer, ctx };
+      ATH_CHECK(ecalContainer.isValid());
+
+      if (vetoNuContainer.isValid()) {
+        for (auto hit : *vetoNuContainer) {
+          int ch=hit->channel();
+          m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0);
+        }
+      }
+      if (vetoContainer.isValid()) {
+        for (auto hit : *vetoContainer) {
+          int ch=hit->channel();
+          m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0);
+        }
+      }
+      if (triggerContainer.isValid()) {
+        for (auto hit : *triggerContainer) {
+          int ch=hit->channel();
+          m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0);
+        }
+      }
+      if (preshowerContainer.isValid()) {
+        for (auto hit : *preshowerContainer) {
+          int ch=hit->channel();
+          m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0);
+        }
+      }
+      if (ecalContainer.isValid()) {
+        for (auto hit : *ecalContainer) {
+          int ch=hit->channel();
+          m_HistRandomCharge[ch]->Fill(hit->raw_integral()/50.0);
+        }
+      }
+
+      return StatusCode::SUCCESS; // finished with this event
+
+    } else if ( ((m_tap&8)==0) && (((m_tap&4)==0)||((m_tap&2)==0)) && (((m_tap&4)==0)||((m_tap&1)==0)) && (((m_tap&2)==0)||((m_tap&1)==0)) ) { // skip events that did not fire a coincidence trigger: 1=calo, 2=vetonu|veto1|preshower, 4=TimingLayer, 8=(VetoNu|Veto2)&Preshower
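+      // this skips any event where neither trigger bit 8 nor at least two of bits 1, 2 and 4 fired together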
+      return StatusCode::SUCCESS;
+    }
+    m_tbp=triggerData->tbp();
+    m_tap=triggerData->tap();
+    m_inputBits=triggerData->inputBits();
+    m_inputBitsNext=triggerData->inputBitsNextClk();
+  }
+
+  m_run_number = ctx.eventID().run_number();
+  m_event_number = ctx.eventID().event_number();
+  m_event_time = ctx.eventID().time_stamp();
+  m_bcid = ctx.eventID().bunch_crossing_id();
+
+  if (!realData) { // if simulation find MC cross section and primary lepton
+    // Work out effective cross section for MC
+    if (m_useFlukaWeights)
+    {
+        double flukaWeight = truthEventContainer->at(0)->weights()[0];
+        ATH_MSG_ALWAYS("Found fluka weight = " << flukaWeight);
+        m_crossSection = m_baseEventCrossSection * flukaWeight;
+    }
+    else if (m_useGenieWeights)
+    {
+        m_crossSection = m_baseEventCrossSection;
+    }
+    else
+    {
+      //ATH_MSG_WARNING("Monte carlo event with no weighting scheme specified.  Setting crossSection (weight) to " << m_baseEventCrossSection << " fb.");
+        m_crossSection = m_baseEventCrossSection;
+    }
+
+    // Find the primary lepton (if any)
+    SG::ReadHandle<xAOD::TruthParticleContainer> truthParticleContainer { m_truthParticleContainer, ctx };
+    if (truthParticleContainer.isValid() && truthParticleContainer->size() > 0)
+    {
+      for (auto particle : *truthParticleContainer)
+      {
+        if ( particle->absPdgId() == 11 || particle->absPdgId() == 13 || particle->absPdgId() == 15 )
+        {
+          if (particle->status() == 1 && (particle->nParents() == 0 || particle->nParents() == 2) )
+          {
+            m_truthLeptonMomentum = particle->p4().P();
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  if (realData) { // correct waveform time with clock phase
+    SG::ReadHandle<xAOD::WaveformClock> clockHandle(m_ClockWaveformContainer, ctx);
+    ATH_CHECK(clockHandle.isValid());
+
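+    // note (assumption): the phase is in radians relative to the 40 MHz LHC clock (25 ns period),
+    // so phase/pi * 12.5 converts it to ns; phases below about -2 rad are wrapped to the positive
+    // side to avoid the discontinuity at +/-pi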
+    if (clockHandle->phase() < -2.0) { // wrap the clock phase so -pi maps to +pi
+      m_clock_phase = ((clockHandle->phase() + 3.14159) / 3.14159) * 12.5;
+    } else {
+      m_clock_phase = (clockHandle->phase() / 3.14159) * 12.5;
+    }
+  }
+
+  SG::ReadHandle<xAOD::WaveformHitContainer> vetoNuContainer { m_vetoNuContainer, ctx };
+  ATH_CHECK(vetoNuContainer.isValid());
+
+  SG::ReadHandle<xAOD::WaveformHitContainer> vetoContainer { m_vetoContainer, ctx };
+  ATH_CHECK(vetoContainer.isValid());
+
+  SG::ReadHandle<xAOD::WaveformHitContainer> triggerContainer { m_triggerContainer, ctx };
+  ATH_CHECK(triggerContainer.isValid());
+
+  SG::ReadHandle<xAOD::WaveformHitContainer> preshowerContainer { m_preshowerContainer, ctx };
+  ATH_CHECK(preshowerContainer.isValid());
+
+  SG::ReadHandle<xAOD::WaveformHitContainer> ecalContainer { m_ecalContainer, ctx };
+  ATH_CHECK(ecalContainer.isValid());
+
+  FillWaveBranches(*vetoNuContainer);
+  FillWaveBranches(*vetoContainer);
+  FillWaveBranches(*triggerContainer);
+  FillWaveBranches(*preshowerContainer);
+  FillWaveBranches(*ecalContainer);
+  
+  m_calo_total=m_wave_charge[0]+m_wave_charge[1]+m_wave_charge[2]+m_wave_charge[3];
+  m_calo_rawtotal=m_wave_raw_charge[0]+m_wave_raw_charge[1]+m_wave_raw_charge[2]+m_wave_raw_charge[3];
+
+  // do calibration of calo channels from pC to GeV deposited
+  if (m_CaloConfig == "High_gain") {
+    m_Calo0_Edep = (m_wave_charge[0] / 23.709) * m_MIP_sim_Edep_calo;
+    m_Calo1_Edep = (m_wave_charge[1] / 24.333) * m_MIP_sim_Edep_calo;
+    m_Calo2_Edep = (m_wave_charge[2] / 24.409) * m_MIP_sim_Edep_calo;
+    m_Calo3_Edep = (m_wave_charge[3] / 25.555) * m_MIP_sim_Edep_calo;
+  } else if (m_CaloConfig == "Low_gain") { // assume low gain calo 
+    m_Calo0_Edep = (m_wave_charge[0] / 0.7909) * m_MIP_sim_Edep_calo;
+    m_Calo1_Edep = (m_wave_charge[1] / 0.8197) * m_MIP_sim_Edep_calo;
+    m_Calo2_Edep = (m_wave_charge[2] / 0.8256) * m_MIP_sim_Edep_calo;
+    m_Calo3_Edep = (m_wave_charge[3] / 0.8821) * m_MIP_sim_Edep_calo;
+  } else {
+   ATH_MSG_WARNING("Run config is neither High_gain nor Low_gain, it is " << m_CaloConfig << ", calo calibration will be zero"); 
+  }
+  m_Calo_Total_Edep = m_Calo0_Edep + m_Calo1_Edep + m_Calo2_Edep + m_Calo3_Edep;
+
+  // do calibration of preshower channels from pC to GeV deposited
+  m_Preshower12_Edep = (m_wave_charge[12] / 5.0) * m_MIP_sim_Edep_preshower; // 5 pC per MIP is rough measurement
+  m_Preshower13_Edep = (m_wave_charge[13] / 5.0) * m_MIP_sim_Edep_preshower;
+
+  if (realData && m_doBlinding) { // enforce blinding such that events with large calo signals are skipped and not in the output root file
+    if ((m_Calo_Total_Edep/0.155) > 10.0) { // only save events with a shower less than a 10 GeV e- (assume 10 GeV electron deposits 15.5% of their energy in calo)
+      return StatusCode::SUCCESS;
+    }
+  }
+
+  SG::ReadHandle<Tracker::FaserSCT_ClusterContainer> clusterContainer { m_clusterContainer, ctx };
+  ATH_CHECK(clusterContainer.isValid());
+
+  FaserActsGeometryContext faserGeometryContext = m_trackingGeometryTool->getNominalGeometryContext();
+  auto gctx = faserGeometryContext.context();
+
+  for (auto collection : *clusterContainer)
+  {
+    Identifier id = collection->identify();
+    int station = m_sctHelper->station(id);
+    int clusters = (int) collection->size();
+    switch (station)
+    {
+      case 0:
+        m_station0Clusters += clusters;
+        // following lines commented out depict how to access cluster position
+        //for (auto cluster : *collection) {
+        //  if (cluster == nullptr) continue;
+        //  auto pos = cluster->globalPosition();
+        //  m_station0ClusterX.push_back(pos.x());
+        //}
+        break;
+      case 1:
+        m_station1Clusters += clusters;
+        break;
+      case 2:
+        m_station2Clusters += clusters;
+        break;
+      case 3:
+        m_station3Clusters += clusters;
+        break;
+      default:
+        ATH_MSG_FATAL("Unknown tracker station number " << station);
+        break;
+    }
+  }
+
+  SG::ReadHandle<FaserSCT_SpacePointContainer> spacePointContainer {m_spacePointContainerKey, ctx};
+  ATH_CHECK(spacePointContainer.isValid());
+  for (const FaserSCT_SpacePointCollection* spacePointCollection : *spacePointContainer) {
+    m_nspacepoints += spacePointCollection->size();
+    for (const Tracker::FaserSCT_SpacePoint *spacePoint: *spacePointCollection) {
+      auto pos = spacePoint->globalPosition();
+      m_spacepointX.push_back(pos.x());
+      m_spacepointY.push_back(pos.y());
+      m_spacepointZ.push_back(pos.z());
+    }
+  }
+
+  SG::ReadHandle<TrackCollection> trackSegmentCollection {m_trackSegmentCollection, ctx};
+  ATH_CHECK(trackSegmentCollection.isValid());
+  for (const Trk::Track* trackSeg : *trackSegmentCollection) {
+    if (trackSeg == nullptr) continue;
+    m_ntracksegs += 1;
+    m_trackseg_Chi2.push_back(trackSeg->fitQuality()->chiSquared());
+    m_trackseg_DoF.push_back(trackSeg->fitQuality()->numberDoF());
+    auto SegParameters = trackSeg->trackParameters()->front();
+    const Amg::Vector3D SegPosition = SegParameters->position();
+    const Amg::Vector3D SegMomentum = SegParameters->momentum();
+    m_trackseg_x.push_back(SegPosition.x());
+    m_trackseg_y.push_back(SegPosition.y());
+    m_trackseg_z.push_back(SegPosition.z());
+    m_trackseg_px.push_back(SegMomentum.x());
+    m_trackseg_py.push_back(SegMomentum.y());
+    m_trackseg_pz.push_back(SegMomentum.z());
+  }
+
+  SG::ReadHandle<TrackCollection> trackCollection {m_trackCollection, ctx};
+  ATH_CHECK(trackCollection.isValid());
+  const Trk::TrackParameters* candidateParameters {nullptr};
+  const Trk::TrackParameters* candidateDownParameters {nullptr};
+  for (const Trk::Track* track : *trackCollection)
+  {
+    if (track == nullptr) continue;
+    std::set<std::pair<int, int>> layerMap;
+    std::set<int> stationMap;
+
+    // Check for hit in the three downstream stations
+    for (auto measurement : *(track->measurementsOnTrack())) {
+        const Tracker::FaserSCT_ClusterOnTrack* cluster = dynamic_cast<const Tracker::FaserSCT_ClusterOnTrack*>(measurement);
+        if (cluster != nullptr) {
+            Identifier id = cluster->identify();
+            int station = m_sctHelper->station(id);
+            int layer = m_sctHelper->layer(id);
+            stationMap.emplace(station);
+            layerMap.emplace(station, layer);
+        }
+    }
+    if (stationMap.count(1) == 0 || stationMap.count(2) == 0 || stationMap.count(3) == 0) continue;
+
+    int nLayers = std::count_if(layerMap.begin(), layerMap.end(), [](std::pair<int,int> p){return p.first != 0;});
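+    // nLayers counts the distinct (station, layer) pairs on the track outside the IFT (station 0)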
+    const Trk::TrackParameters* upstreamParameters = track->trackParameters()->front();
+    const Trk::TrackParameters* downstreamParameters = track->trackParameters()->back();
+
+    if (candidateParameters == nullptr || upstreamParameters->momentum().mag() > candidateParameters->momentum().mag())
+    {
+        candidateParameters = upstreamParameters;
+        candidateDownParameters = downstreamParameters;
+    }
+
+    if ((candidateParameters == nullptr) || (candidateDownParameters == nullptr)) continue;
+
+    m_nLayers.push_back(nLayers);
+
+    m_Chi2.push_back(track->fitQuality()->chiSquared());
+    m_DoF.push_back(track->fitQuality()->numberDoF());
+
+    m_nHit0.push_back(stationMap.count(0));
+    m_nHit1.push_back(stationMap.count(1));
+    m_nHit2.push_back(stationMap.count(2));
+    m_nHit3.push_back(stationMap.count(3));
+
+    m_charge.push_back( (int) candidateParameters->charge() );
+
+    m_xup.push_back(candidateParameters->position().x());
+    m_yup.push_back(candidateParameters->position().y());
+    m_zup.push_back(candidateParameters->position().z());
+    m_pxup.push_back(candidateParameters->momentum().x());
+    m_pyup.push_back(candidateParameters->momentum().y());
+    m_pzup.push_back(candidateParameters->momentum().z());
+    m_pup.push_back(candidateParameters->momentum().mag());
+
+    m_xdown.push_back(candidateDownParameters->position().x());
+    m_ydown.push_back(candidateDownParameters->position().y());
+    m_zdown.push_back(candidateDownParameters->position().z());
+    m_pxdown.push_back(candidateDownParameters->momentum().x());
+    m_pydown.push_back(candidateDownParameters->momentum().y());
+    m_pzdown.push_back(candidateDownParameters->momentum().z());
+    m_pdown.push_back(candidateDownParameters->momentum().mag());
+
+    // fill the extrapolation vectors with sentinel values (-10000) that are overwritten only if the track extrapolation succeeds
+    m_xVetoNu.push_back(-10000);
+    m_yVetoNu.push_back(-10000);
+    m_thetaxVetoNu.push_back(-10000);
+    m_thetayVetoNu.push_back(-10000);
+    m_xVetoStation1.push_back(-10000);
+    m_yVetoStation1.push_back(-10000);
+    m_thetaxVetoStation1.push_back(-10000);
+    m_thetayVetoStation1.push_back(-10000);
+    m_xVetoStation2.push_back(-10000);
+    m_yVetoStation2.push_back(-10000);
+    m_thetaxVetoStation2.push_back(-10000);
+    m_thetayVetoStation2.push_back(-10000);
+    m_xTrig.push_back(-10000);
+    m_yTrig.push_back(-10000);
+    m_thetaxTrig.push_back(-10000);
+    m_thetayTrig.push_back(-10000);
+    m_xPreshower1.push_back(-10000);
+    m_yPreshower1.push_back(-10000);
+    m_thetaxPreshower1.push_back(-10000);
+    m_thetayPreshower1.push_back(-10000);
+    m_xPreshower2.push_back(-10000);
+    m_yPreshower2.push_back(-10000);
+    m_thetaxPreshower2.push_back(-10000);
+    m_thetayPreshower2.push_back(-10000);
+    m_xCalo.push_back(-10000);
+    m_yCalo.push_back(-10000);
+    m_thetaxCalo.push_back(-10000);
+    m_thetayCalo.push_back(-10000);
+
+    // extrapolate track from IFT
+    if (stationMap.count(0) > 0) { // the extrapolation crashes if the track does not start in the IFT, since the starting point is then too far from the upstream surfaces
+      Amg::Vector3D position = candidateParameters->position();
+      Amg::Vector3D momentum = candidateParameters->momentum();
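+      // Express the candidate as Acts bound parameters on a plane normal to z at its starting position;
+      // note the local-frame convention (loc0, loc1) = (-y, x) used throughout this algorithm.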
+      Acts::BoundVector params = Acts::BoundVector::Zero();
+      params[Acts::eBoundLoc0] = -position.y();
+      params[Acts::eBoundLoc1] = position.x();
+      params[Acts::eBoundPhi] = momentum.phi();
+      params[Acts::eBoundTheta] = momentum.theta();
+      params[Acts::eBoundQOverP] = candidateParameters->charge() / momentum.mag();
+      params[Acts::eBoundTime] = 0;
+      auto startSurface = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, position.z()), Acts::Vector3(0, 0, 1));
+      Acts::BoundTrackParameters startParameters(std::move(startSurface), params, candidateParameters->charge());
+
+      auto targetSurface_VetoNu = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -3112.0), Acts::Vector3(0, 0, 1)); // z = -3112 mm is where the VetoNu planes touch
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_VetoNu = m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_VetoNu, Acts::backward);
+      if (targetParameters_VetoNu != nullptr) {
+        auto targetPosition_VetoNu = targetParameters_VetoNu->position(gctx);
+        auto targetMomentum_VetoNu = targetParameters_VetoNu->momentum();
+        m_xVetoNu[m_longTracks] = targetPosition_VetoNu.x();
+        m_yVetoNu[m_longTracks] = targetPosition_VetoNu.y();
+        m_thetaxVetoNu[m_longTracks] = atan(targetMomentum_VetoNu[0]/targetMomentum_VetoNu[2]);
+        m_thetayVetoNu[m_longTracks] = atan(targetMomentum_VetoNu[1]/targetMomentum_VetoNu[2]);
+      } else {
+        ATH_MSG_INFO("vetoNu null targetParameters");
+      }
+
+      auto targetSurface_Veto1 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -1769.65), Acts::Vector3(0, 0, 1)); // z = -1769.65 mm is the center of the operational layer in Veto station 1
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Veto1 = m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Veto1, Acts::forward);
+      if (targetParameters_Veto1 != nullptr) {
+        auto targetPosition_Veto1 = targetParameters_Veto1->position(gctx);
+        auto targetMomentum_Veto1 = targetParameters_Veto1->momentum();
+        m_xVetoStation1[m_longTracks] = targetPosition_Veto1.x();
+        m_yVetoStation1[m_longTracks] = targetPosition_Veto1.y();
+        m_thetaxVetoStation1[m_longTracks] = atan(targetMomentum_Veto1[0]/targetMomentum_Veto1[2]);
+        m_thetayVetoStation1[m_longTracks] = atan(targetMomentum_Veto1[1]/targetMomentum_Veto1[2]);
+      } else {
+        ATH_MSG_INFO("veto1 null targetParameters");
+      }
+
+      auto targetSurface_Veto2 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, -1609.65), Acts::Vector3(0, 0, 1)); // z = -1609.65 mm is where the planes in Veto station 2 touch
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Veto2 = m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Veto2, Acts::forward);
+      if (targetParameters_Veto2 != nullptr) {
+        auto targetPosition_Veto2 = targetParameters_Veto2->position(gctx);
+        auto targetMomentum_Veto2 = targetParameters_Veto2->momentum();
+        m_xVetoStation2[m_longTracks] = targetPosition_Veto2.x();
+        m_yVetoStation2[m_longTracks] = targetPosition_Veto2.y();
+        m_thetaxVetoStation2[m_longTracks] = atan(targetMomentum_Veto2[0]/targetMomentum_Veto2[2]);
+        m_thetayVetoStation2[m_longTracks] = atan(targetMomentum_Veto2[1]/targetMomentum_Veto2[2]);
+      } else {
+        ATH_MSG_INFO("veto2 null targetParameters");
+      }
+
+      auto targetSurface_Trig = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 0.0), Acts::Vector3(0, 0, 1)); // z = 0 mm is where the Trig planes overlap
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Trig = m_extrapolationTool->propagate(ctx, startParameters, *targetSurface_Trig, Acts::forward); // must extrapolate forward to the Trig plane when the track starts in the IFT
+      if (targetParameters_Trig != nullptr) {
+        auto targetPosition_Trig = targetParameters_Trig->position(gctx);
+        auto targetMomentum_Trig = targetParameters_Trig->momentum();
+        m_xTrig[m_longTracks] = targetPosition_Trig.x();
+        m_yTrig[m_longTracks] = targetPosition_Trig.y();
+        m_thetaxTrig[m_longTracks] = atan(targetMomentum_Trig[0]/targetMomentum_Trig[2]);
+        m_thetayTrig[m_longTracks] = atan(targetMomentum_Trig[1]/targetMomentum_Trig[2]);
+      } else {
+        ATH_MSG_INFO("Trig null targetParameters");
+      }
+
+    }
+
+    // extrapolate track from tracking station 3
+    if (stationMap.count(3) > 0) { // the extrapolation crashes if the track does not end in Station 3, since the downstream parameters are then too far from the target surfaces
+      Amg::Vector3D positionDown = candidateDownParameters->position();
+      Amg::Vector3D momentumDown = candidateDownParameters->momentum();
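+      // Same bound-parameter construction as above, but seeded from the downstream track parameters
+      // so the forward extrapolation to the preshower and calorimeter starts from the downstream end of the track.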
+      Acts::BoundVector paramsDown = Acts::BoundVector::Zero();
+      paramsDown[Acts::eBoundLoc0] = -positionDown.y();
+      paramsDown[Acts::eBoundLoc1] = positionDown.x();
+      paramsDown[Acts::eBoundPhi] = momentumDown.phi();
+      paramsDown[Acts::eBoundTheta] = momentumDown.theta();
+      paramsDown[Acts::eBoundQOverP] = candidateDownParameters->charge() / momentumDown.mag();
+      paramsDown[Acts::eBoundTime] = 0;
+      auto startSurfaceDown = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, positionDown.z()), Acts::Vector3(0, 0, 1));
+      Acts::BoundTrackParameters startParametersDown(std::move(startSurfaceDown), paramsDown, candidateDownParameters->charge());
+
+      auto targetSurface_Preshower1 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2582.68), Acts::Vector3(0, 0, 1)); // z = 2582.68 mm is the center of the upstream preshower layer
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Preshower1 = m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Preshower1, Acts::forward);
+      if (targetParameters_Preshower1 != nullptr) {
+        auto targetPosition_Preshower1 = targetParameters_Preshower1->position(gctx);
+        auto targetMomentum_Preshower1 = targetParameters_Preshower1->momentum();
+        m_xPreshower1[m_longTracks] = targetPosition_Preshower1.x();
+        m_yPreshower1[m_longTracks] = targetPosition_Preshower1.y();
+        m_thetaxPreshower1[m_longTracks] = atan(targetMomentum_Preshower1[0]/targetMomentum_Preshower1[2]);
+        m_thetayPreshower1[m_longTracks] = atan(targetMomentum_Preshower1[1]/targetMomentum_Preshower1[2]);
+      } else {
+        ATH_MSG_INFO("Preshower1 null targetParameters");
+      }
+
+      auto targetSurface_Preshower2 = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2657.68), Acts::Vector3(0, 0, 1)); // z = 2657.68 mm is the center of the downstream preshower layer
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Preshower2 = m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Preshower2, Acts::forward);
+      if (targetParameters_Preshower2 != nullptr) {
+        auto targetPosition_Preshower2 = targetParameters_Preshower2->position(gctx);
+        auto targetMomentum_Preshower2 = targetParameters_Preshower2->momentum();
+        m_xPreshower2[m_longTracks] = targetPosition_Preshower2.x();
+        m_yPreshower2[m_longTracks] = targetPosition_Preshower2.y();
+        m_thetaxPreshower2[m_longTracks] = atan(targetMomentum_Preshower2[0]/targetMomentum_Preshower2[2]);
+        m_thetayPreshower2[m_longTracks] = atan(targetMomentum_Preshower2[1]/targetMomentum_Preshower2[2]);
+      } else {
+        ATH_MSG_INFO("Preshower2 null targetParameters");
+      }
+
+      auto targetSurface_Calo = Acts::Surface::makeShared<Acts::PlaneSurface>(Acts::Vector3(0, 0, 2760.0), Acts::Vector3(0, 0, 1)); // z = 2760 mm is the estimated position of the calorimeter face
+      std::unique_ptr<const Acts::BoundTrackParameters> targetParameters_Calo = m_extrapolationTool->propagate(ctx, startParametersDown, *targetSurface_Calo, Acts::forward);
+      if (targetParameters_Calo != nullptr) {
+        auto targetPosition_Calo = targetParameters_Calo->position(gctx);
+        auto targetMomentum_Calo = targetParameters_Calo->momentum();
+        m_xCalo[m_longTracks] = targetPosition_Calo.x();
+        m_yCalo[m_longTracks] = targetPosition_Calo.y();
+        m_thetaxCalo[m_longTracks] = atan(targetMomentum_Calo[0]/targetMomentum_Calo[2]);
+        m_thetayCalo[m_longTracks] = atan(targetMomentum_Calo[1]/targetMomentum_Calo[2]);
+      } else {
+        ATH_MSG_INFO("Calo null targetParameters");
+      }
+    }
+
+    m_longTracks++;
+  }
+
+  /*
+  // Here we apply the signal selection
+  // Very simple/unrealistic to start
+  if (m_vetoUpstream == 0 || m_vetoDownstream == 0 ||
+        m_triggerTotal == 0 ||
+        m_preshower0 == 0 || m_preshower1 == 0 ||
+        // m_ecalTotal == 0 ||
+        candidateParameters == nullptr)
+      return StatusCode::SUCCESS;
+  */
+  m_tree->Fill();
+
+  return StatusCode::SUCCESS;
+}
+
+
+StatusCode NtupleDumperAlg::finalize()
+{
+  return StatusCode::SUCCESS;
+}
+
+bool NtupleDumperAlg::waveformHitOK(const xAOD::WaveformHit* hit) const
+{
+    if (hit->status_bit(xAOD::WaveformStatus::THRESHOLD_FAILED) || hit->status_bit(xAOD::WaveformStatus::SECONDARY)) return false;
+    return true;
+}
+
+void
+NtupleDumperAlg::clearTree() const
+{
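+  // Reset all scalar branches and clear all vector branches before the next event is filled.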
+  m_run_number = 0;
+  m_event_number = 0;
+  m_event_time = 0;
+  m_bcid = 0;
+
+  m_tbp=0;
+  m_tap=0;
+  m_inputBits=0;
+  m_inputBitsNext=0;
+
+  for(int ii=0;ii<15;ii++) {
+      m_wave_localtime[ii]=0;
+      m_wave_peak[ii]=0;
+      m_wave_width[ii]=0;
+      m_wave_charge[ii]=0;
+
+      m_wave_raw_peak[ii]=0;
+      m_wave_raw_charge[ii]=0;
+      m_wave_baseline_mean[ii]=0;
+      m_wave_baseline_rms[ii]=0;
+      m_wave_status[ii]=0;
+  }
+
+  m_calo_total=0;
+  m_calo_rawtotal=0;
+
+  m_Calo0_Edep=0;
+  m_Calo1_Edep=0;
+  m_Calo2_Edep=0;
+  m_Calo3_Edep=0;
+  m_Calo_Total_Edep=0;
+  m_Preshower12_Edep=0;
+  m_Preshower13_Edep=0;
+
+  m_clock_phase=0;
+
+  m_station0Clusters = 0;
+  m_station1Clusters = 0;
+  m_station2Clusters = 0;
+  m_station3Clusters = 0;
+  m_crossSection = 0;
+
+  m_nspacepoints = 0;
+  m_spacepointX.clear();
+  m_spacepointY.clear();
+  m_spacepointZ.clear();
+
+  m_ntracksegs = 0;
+  m_trackseg_Chi2.clear();
+  m_trackseg_DoF.clear();
+  m_trackseg_x.clear();
+  m_trackseg_y.clear();
+  m_trackseg_z.clear();
+  m_trackseg_px.clear();
+  m_trackseg_py.clear();
+  m_trackseg_pz.clear();
+
+  m_xup.clear();
+  m_yup.clear();
+  m_zup.clear();
+  m_pxup.clear();
+  m_pyup.clear();
+  m_pzup.clear();
+  m_pup.clear();
+
+  m_xdown.clear();
+  m_ydown.clear();
+  m_zdown.clear();
+  m_pxdown.clear();
+  m_pydown.clear();
+  m_pzdown.clear();
+  m_pdown.clear();
+
+  m_Chi2.clear();
+  m_DoF.clear();
+  m_charge.clear();
+  m_nLayers.clear();
+  m_longTracks = 0;
+ 
+  m_nHit0.clear();
+  m_nHit1.clear();
+  m_nHit2.clear();
+  m_nHit3.clear();
+
+  m_xVetoNu.clear();
+  m_yVetoNu.clear();
+  m_thetaxVetoNu.clear();
+  m_thetayVetoNu.clear();
+
+  m_xVetoStation1.clear();
+  m_yVetoStation1.clear();
+  m_thetaxVetoStation1.clear();
+  m_thetayVetoStation1.clear();
+
+  m_xVetoStation2.clear();
+  m_yVetoStation2.clear();
+  m_thetaxVetoStation2.clear();
+  m_thetayVetoStation2.clear();
+
+  m_xTrig.clear();
+  m_yTrig.clear();
+  m_thetaxTrig.clear();
+  m_thetayTrig.clear();
+
+  m_xPreshower1.clear();
+  m_yPreshower1.clear();
+  m_thetaxPreshower1.clear();
+  m_thetayPreshower1.clear();
+
+  m_xPreshower2.clear();
+  m_yPreshower2.clear();
+  m_thetaxPreshower2.clear();
+  m_thetayPreshower2.clear();
+
+  m_xCalo.clear();
+  m_yCalo.clear();
+  m_thetaxCalo.clear();
+  m_thetayCalo.clear();
+
+  m_truthLeptonMomentum = 0;
+  m_truthBarcode = 0;
+  m_truthPdg = 0;
+}
diff --git a/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b26caadeb7290c34a199ef0e941f9203b600bba
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/src/NtupleDumperAlg.h
@@ -0,0 +1,217 @@
+#ifndef NTUPLEDUMPER_NTUPLEDUMPERALG_H
+#define NTUPLEDUMPER_NTUPLEDUMPERALG_H
+
+#include "AthenaBaseComps/AthReentrantAlgorithm.h"
+#include "AthenaBaseComps/AthHistogramming.h"
+#include "TrkTrack/TrackCollection.h"
+#include "xAODFaserTrigger/FaserTriggerData.h"
+#include "xAODFaserWaveform/WaveformHitContainer.h"
+#include "xAODFaserWaveform/WaveformHit.h"
+#include "xAODFaserWaveform/WaveformClock.h"
+#include "xAODTruth/TruthEventContainer.h"
+#include "xAODTruth/TruthParticleContainer.h"
+#include "TrackerPrepRawData/FaserSCT_ClusterContainer.h"
+#include "TrackerSpacePoint/FaserSCT_SpacePointContainer.h"
+#include "TrackerSimData/TrackerSimDataCollection.h"
+#include "FaserActsGeometryInterfaces/IFaserActsExtrapolationTool.h"
+#include "FaserActsGeometryInterfaces/IFaserActsTrackingGeometryTool.h"
+
+#include <vector>
+
+class TTree;
+class TH1;
+class FaserSCT_ID;
+class VetoNuID;
+class VetoID;
+class TriggerID;
+class PreshowerID;
+class EcalID;
+namespace  TrackerDD
+{
+    class SCT_DetectorManager;
+}
+
+class NtupleDumperAlg : public AthReentrantAlgorithm, AthHistogramming {
+public:
+  NtupleDumperAlg(const std::string &name, ISvcLocator *pSvcLocator);
+  virtual ~NtupleDumperAlg() = default;
+  virtual StatusCode initialize() override;
+  virtual StatusCode execute(const EventContext &ctx) const override;
+  virtual StatusCode finalize() override;
+  const ServiceHandle <ITHistSvc> &histSvc() const;
+
+private:
+
+  bool waveformHitOK(const xAOD::WaveformHit* hit) const;
+  void clearTree() const;
+  void addBranch(const std::string &name,float* var);
+  void addBranch(const std::string &name,unsigned int* var);
+  void addWaveBranches(const std::string &name, int nchannels, int first);
+  void FillWaveBranches(const xAOD::WaveformHitContainer &wave) const;
+
+  ServiceHandle <ITHistSvc> m_histSvc;
+
+  SG::ReadHandleKey<xAOD::TruthEventContainer> m_truthEventContainer { this, "EventContainer", "TruthEvents", "Truth event container name." };
+  SG::ReadHandleKey<xAOD::TruthParticleContainer> m_truthParticleContainer { this, "ParticleContainer", "TruthParticles", "Truth particle container name." };
+  SG::ReadHandleKey<TrackerSimDataCollection> m_simDataCollection {this, "TrackerSimDataCollection", "SCT_SDO_Map"};
+
+  SG::ReadHandleKey<TrackCollection> m_trackCollection { this, "TrackCollection", "CKFTrackCollection", "Input track collection name" };
+  SG::ReadHandleKey<TrackCollection> m_trackSegmentCollection {this, "TrackSegmentCollection", "SegmentFit", "Input track segment collection name"};
+  SG::ReadHandleKey<xAOD::WaveformHitContainer> m_vetoNuContainer { this, "VetoNuContainer", "VetoNuWaveformHits", "VetoNu hit container name" };
+  SG::ReadHandleKey<xAOD::WaveformHitContainer> m_vetoContainer { this, "VetoContainer", "VetoWaveformHits", "Veto hit container name" };
+  SG::ReadHandleKey<xAOD::WaveformHitContainer> m_triggerContainer { this, "TriggerContainer", "TriggerWaveformHits", "Trigger hit container name" };
+  SG::ReadHandleKey<xAOD::WaveformHitContainer> m_preshowerContainer { this, "PreshowerContainer", "PreshowerWaveformHits", "Preshower hit container name" };
+  SG::ReadHandleKey<xAOD::WaveformHitContainer> m_ecalContainer { this, "EcalContainer", "CaloWaveformHits", "Ecal hit container name" };
+  SG::ReadHandleKey<Tracker::FaserSCT_ClusterContainer> m_clusterContainer { this, "ClusterContainer", "SCT_ClusterContainer", "Tracker cluster container name" };
+  SG::ReadHandleKey<FaserSCT_SpacePointContainer> m_spacePointContainerKey { this, "SpacePoints", "SCT_SpacePointContainer", "space point container"};
+
+  SG::ReadHandleKey<xAOD::FaserTriggerData> m_FaserTriggerData     { this, "FaserTriggerDataKey", "FaserTriggerData", "ReadHandleKey for xAOD::FaserTriggerData"};
+  SG::ReadHandleKey<xAOD::WaveformClock> m_ClockWaveformContainer     { this, "WaveformClockKey", "WaveformClock", "ReadHandleKey for ClockWaveforms Container"};
+  ToolHandle<IFaserActsExtrapolationTool> m_extrapolationTool { this, "ExtrapolationTool", "FaserActsExtrapolationTool" };  
+  ToolHandle<IFaserActsTrackingGeometryTool> m_trackingGeometryTool {this, "TrackingGeometryTool", "FaserActsTrackingGeometryTool"};
+
+  const TrackerDD::SCT_DetectorManager* m_detMgr {nullptr};
+
+  const FaserSCT_ID* m_sctHelper {nullptr};
+  const VetoNuID*    m_vetoNuHelper {nullptr};
+  const VetoID*      m_vetoHelper {nullptr};
+  const TriggerID*   m_triggerHelper {nullptr};
+  const PreshowerID* m_preshowerHelper {nullptr};
+  const EcalID*      m_ecalHelper {nullptr};
+
+  StringProperty  m_CaloConfig        { this, "CaloConfig", "Low_gain", "Configuration found at http://aagaard.web.cern.ch/aagaard/FASERruns.html (spaces replaced with '_')" };
+  BooleanProperty m_doBlinding        { this, "DoBlinding", true, "Blinding will not output events with Calo signal > 10 GeV e-" };
+  BooleanProperty m_useFlukaWeights   { this, "UseFlukaWeights", false, "Flag to weight events according to value stored in HepMC::GenEvent" };
+  BooleanProperty m_useGenieWeights   { this, "UseGenieWeights", false, "Flag to weight events according to Genie luminosity" };
+  IntegerProperty m_flukaCollisions   { this, "FlukaCollisions", 137130000, "Number of proton-proton collisions in FLUKA sample." };
+  DoubleProperty  m_flukaCrossSection { this, "FlukaCrossSection", 80.0, "Fluka p-p inelastic cross-section in millibarns." };
+  DoubleProperty  m_genieLuminosity   { this, "GenieLuminosity", 150.0, "Genie luminosity in inverse fb." };
+
+  double m_baseEventCrossSection {1.0};
+  const double kfemtoBarnsPerMilliBarn {1.0e12};
+
+  mutable TTree* m_tree {nullptr};
+
+  mutable TH1* m_HistRandomCharge[15];
+
+  mutable unsigned int m_run_number;
+  mutable unsigned int m_event_number;
+  mutable unsigned int m_event_time;
+  mutable unsigned int m_bcid;
+
+  mutable unsigned int m_tbp;
+  mutable unsigned int m_tap;
+  mutable unsigned int m_inputBits;
+  mutable unsigned int m_inputBitsNext;
+
+  mutable float m_wave_localtime[15];
+  mutable float m_wave_peak[15];
+  mutable float m_wave_width[15];
+  mutable float m_wave_charge[15];
+
+  mutable float m_wave_raw_peak[15];
+  mutable float m_wave_raw_charge[15];
+  mutable float m_wave_baseline_mean[15];
+  mutable float m_wave_baseline_rms[15];
+  mutable unsigned int m_wave_status[15];
+  
+  mutable float m_calo_total;
+  mutable float m_calo_rawtotal;
+
+  mutable float m_Calo0_Edep;
+  mutable float m_Calo1_Edep;
+  mutable float m_Calo2_Edep;
+  mutable float m_Calo3_Edep;
+  mutable float m_Calo_Total_Edep;
+  mutable float m_Preshower12_Edep;
+  mutable float m_Preshower13_Edep;
+
+  mutable float m_MIP_sim_Edep_calo;
+  mutable float m_MIP_sim_Edep_preshower;
+
+  mutable float m_clock_phase;
+
+  mutable unsigned int m_station0Clusters;
+  mutable unsigned int m_station1Clusters;
+  mutable unsigned int m_station2Clusters;
+  mutable unsigned int m_station3Clusters;
+
+  mutable unsigned int m_nspacepoints;
+  mutable std::vector<double> m_spacepointX;
+  mutable std::vector<double> m_spacepointY;
+  mutable std::vector<double> m_spacepointZ;
+
+  mutable unsigned int m_ntracksegs;
+  mutable std::vector<double> m_trackseg_Chi2;
+  mutable std::vector<double> m_trackseg_DoF;
+  mutable std::vector<double> m_trackseg_x;
+  mutable std::vector<double> m_trackseg_y;
+  mutable std::vector<double> m_trackseg_z;
+  mutable std::vector<double> m_trackseg_px;
+  mutable std::vector<double> m_trackseg_py;
+  mutable std::vector<double> m_trackseg_pz;
+
+  mutable int    m_longTracks;
+  mutable std::vector<double> m_Chi2;
+  mutable std::vector<double> m_DoF;
+  mutable std::vector<double> m_xup;
+  mutable std::vector<double> m_yup;
+  mutable std::vector<double> m_zup;
+  mutable std::vector<double> m_pxup;
+  mutable std::vector<double> m_pyup;
+  mutable std::vector<double> m_pzup;
+  mutable std::vector<double> m_pup;
+  mutable std::vector<double> m_xdown;
+  mutable std::vector<double> m_ydown;
+  mutable std::vector<double> m_zdown;
+  mutable std::vector<double> m_pxdown;
+  mutable std::vector<double> m_pydown;
+  mutable std::vector<double> m_pzdown;
+  mutable std::vector<double> m_pdown;
+  mutable std::vector<int> m_charge;
+  mutable std::vector<unsigned int> m_nLayers;
+  mutable std::vector<unsigned int> m_nHit0;
+  mutable std::vector<unsigned int> m_nHit1;
+  mutable std::vector<unsigned int> m_nHit2;
+  mutable std::vector<unsigned int> m_nHit3;
+  mutable std::vector<double> m_xVetoNu;
+  mutable std::vector<double> m_yVetoNu;
+  mutable std::vector<double> m_thetaxVetoNu;
+  mutable std::vector<double> m_thetayVetoNu;
+  mutable std::vector<double> m_xVetoStation1;
+  mutable std::vector<double> m_yVetoStation1;
+  mutable std::vector<double> m_thetaxVetoStation1;
+  mutable std::vector<double> m_thetayVetoStation1;
+  mutable std::vector<double> m_xVetoStation2;
+  mutable std::vector<double> m_yVetoStation2;
+  mutable std::vector<double> m_thetaxVetoStation2;
+  mutable std::vector<double> m_thetayVetoStation2;
+  mutable std::vector<double> m_xTrig;
+  mutable std::vector<double> m_yTrig;
+  mutable std::vector<double> m_thetaxTrig;
+  mutable std::vector<double> m_thetayTrig;
+  mutable std::vector<double> m_xPreshower1;
+  mutable std::vector<double> m_yPreshower1;
+  mutable std::vector<double> m_thetaxPreshower1;
+  mutable std::vector<double> m_thetayPreshower1;
+  mutable std::vector<double> m_xPreshower2;
+  mutable std::vector<double> m_yPreshower2;
+  mutable std::vector<double> m_thetaxPreshower2;
+  mutable std::vector<double> m_thetayPreshower2;
+  mutable std::vector<double> m_xCalo;
+  mutable std::vector<double> m_yCalo;
+  mutable std::vector<double> m_thetaxCalo;
+  mutable std::vector<double> m_thetayCalo;
+
+  mutable double m_truthLeptonMomentum;
+  mutable int    m_truthBarcode;
+  mutable int    m_truthPdg;
+  mutable double m_crossSection;
+
+};
+
+inline const ServiceHandle <ITHistSvc> &NtupleDumperAlg::histSvc() const {
+  return m_histSvc;
+}
+
+#endif  // NTUPLEDUMPER_NTUPLEDUMPERALG_H
diff --git a/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx b/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..15dc244e170829e01af0d848e3cac55abbc83619
--- /dev/null
+++ b/PhysicsAnalysis/NtupleDumper/src/component/NtupleDumper_entries.cxx
@@ -0,0 +1,3 @@
+#include "../NtupleDumperAlg.h"
+
+DECLARE_COMPONENT(NtupleDumperAlg)
diff --git a/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx b/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx
index b6cbc9da2508bd734634af3702165b262885ceff..6dc7c62a91fb18c44e701175173d3df82b4c02b0 100644
--- a/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx
+++ b/Tracker/TrackerEventCnv/TrackerEventTPCnv/src/FaserSCT_SpacePointCnv_p0.cxx
@@ -15,7 +15,7 @@ StatusCode FaserSCT_SpacePointCnv_p0::initialize(MsgStream& log ) {
 //  ISvcLocator* svcLocator = Gaudi::svcLocator();
 
   // Get the messaging service, print where you are
-  log << MSG::INFO << "FaserSCT_SpacePointCnv::initialize()" << endmsg;
+  log << MSG::DEBUG << "FaserSCT_SpacePointCnv::initialize()" << endmsg;
   if(m_sctClusContName.initialize()!=StatusCode::SUCCESS)
     log << MSG::WARNING<< "FaserSCT_SpacePointCnv failed to initialize the sct cluster container" << endmsg;
 
diff --git a/faser-common b/faser-common
index 69a90ec95da88a00097fb809bede6c2bae8c02d6..89ce6a07128eb2ebc367b6b68f29c9c88220e3e6 160000
--- a/faser-common
+++ b/faser-common
@@ -1 +1 @@
-Subproject commit 69a90ec95da88a00097fb809bede6c2bae8c02d6
+Subproject commit 89ce6a07128eb2ebc367b6b68f29c9c88220e3e6